Commands used at work

-》MySQL SQL statements:

--Insert a row, or update the counter column if the key already exists

insert into offer_stat_20170913(StatDate, AppId,Subip,CountryId,CarrierId,SdkVersion,ActivityCount) values(?,?,?,?,?,?,?) ON DUPLICATE KEY UPDATE ActivityCount=ActivityCount+?;

insert into offer_stat_20170913(StatDate, AppId,Subip,CountryId,CarrierId,SdkVersion,ShowCount) values(?,?,?,?,?,?,?) ON DUPLICATE KEY UPDATE ShowCount=ShowCount+?;

insert into offer_stat(StatDate, AppId,Subip,Country,Carrier,SdkVersion,ShowCount) (SELECT create_time,aid,sp,country,carrier,sv,net FROM `offer_show` WHERE oid=1024) ON DUPLICATE KEY UPDATE ShowCount=ShowCount+10;
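ON DUPLICATE KEY UPDATE only takes effect when the insert would violate a PRIMARY KEY or UNIQUE constraint, so the stat table needs a unique key over its dimension columns. A hedged sketch (the index name and column list are assumptions inferred from the statements above):

ALTER TABLE offer_stat_20170913 ADD UNIQUE KEY uk_stat_dims (StatDate, AppId, Subip, CountryId, CarrierId, SdkVersion);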

--Update one table from a join with another table

update offer_stat_201712 a inner join offer_stat_income_20171201 b on a.StatDate=b.StatDate AND a.AppId=b.AppId AND a.Subip=b.Subip AND a.Country=b.Country AND a.Carrier=b.Carrier AND a.SdkVersion=b.SdkVersion AND a.PublisherId=b.PublisherId set a.Fee=b.Fee,a.FeeCount=b.FeeCount,a.ConcersionFee=b.ConcersionFee;

--Delete rows that are duplicated on a column, keeping one row per value

insert into tmp select id,click_id FROM tb_third_postback201712 where id<>(select min(id) from tb_third_postback201712 d where tb_third_postback201712.click_id =d.click_id ) AND click_id in (select click_id from tb_third_postback201712 b GROUP BY click_id HAVING COUNT(click_id)>1);

 DELETE from tb_third_postback201712 where id in (select id from tmp);
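The two statements above assume a helper table named tmp already exists; a minimal sketch of creating it (the column types are assumptions):

CREATE TABLE tmp (id BIGINT, click_id VARCHAR(64));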

--Update a single column value

update tablename set name='newname' where name='oldname';

 --View totals for the channel report

SELECT SUM(ClickCount) cc,SUM(ShowCount) sc,SUM(DlCount) dc,SUM(UserCount) uc,SUM(ActivityCount) ac,SUM(Fee) f,SUM(FeeCount) fc FROM `offer_stat` WHERE StatDate='2017-10-10';

--Delete records for a given date

delete from offer_channel_stat_201710 where StatDate='2017-10-24';

--Query data for a time range (by month)

SELECT * FROM mob_share.offer_stat WHERE DATE_FORMAT(StatDate,'%Y-%m')='2017-10'
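Because DATE_FORMAT on the column prevents an index on StatDate from being used, an index-friendlier variant of the same month filter is (a sketch, assuming StatDate is indexed):

SELECT * FROM mob_share.offer_stat WHERE StatDate >= '2017-10-01' AND StatDate < '2017-11-01';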

 --View totals for the delivery report

SELECT SUM(ClickCount) cc,SUM(ShowCount) sc,SUM(DlCount) dc,SUM(SendCount) sc,SUM(SendFee) sf,SUM(SendFeeCount) sfc FROM `offer_sent_stat` WHERE StatDate='2017-09-30';

--View the current user's (your own) privileges:

show grants;

-- List all users/hosts and their credentials in MySQL (the password column exists up to 5.6; 5.7+ uses authentication_string)

select host,user,password,authentication_string from mysql.user;

--View another MySQL user's privileges:

show grants for [email protected];

--Create a user

CREATE USER readuser IDENTIFIED BY 'password';

--Grant remote connection privileges

mysql> grant all privileges on *.* to 'root'@'192.168.1.*' identified by 'password' with grant option;

mysql> grant all privileges on *.* to 'root'@'192.168.0.106' identified by 'password' with grant option;

mysql> grant all privileges on *.* to 'cloudera-scm'@'codis3' identified by 'password' with grant option;

mysql>grant select on *.* to readuser@'%' identified by 'password' with grant option; 

mysql> flush privileges; //reload the grant tables; no MySQL restart needed

--Check the value of binlog_format

mysql> show variables like 'binlog_format';

+---------------+-----------+
| Variable_name | Value     |
+---------------+-----------+
| binlog_format | STATEMENT |
+---------------+-----------+
1 row in set (0.00 sec)

--Set binlog_format

mysql> SET SESSION binlog_format = 'MIXED';

mysql> SET GLOBAL binlog_format = 'MIXED';

mysql> show variables like 'binlog_format';

+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| binlog_format | MIXED |
+---------------+-------+

--The LATEST DETECTED DEADLOCK section of the output shows the two SQL statements that caused the most recent deadlock

mysql> show engine innodb status;

--View the InnoDB global and session transaction isolation levels:

mysql>select @@global.tx_isolation,@@tx_isolation;

mysql> set global transaction isolation level REPEATABLE READ; //global

mysql> set session transaction isolation level read committed; //current session

--Restart MySQL

/etc/rc.d/init.d/mysqld restart

--Find records with duplicate values in a column

select * from usernew_tmp group by imsi having count(imsi) >1 

--Cast varchar to a numeric type and filter by formatted date

SELECT sum(cast(price as DECIMAL(15,10))) prices FROM `tb_third_postback_notgethbaseappconfig` where DATE_FORMAT(create_time,'%Y-%m-%d')='2017-12-07';

-- Swap the values of two columns

update offer_sent_stat_201801 as a, offer_sent_stat_201801 as b set a.AdvertId=b.PublisherId, a.PublisherId=b.AdvertId where a.StatDate='2018-01-31' AND b.StatDate='2018-01-31' AND a.StatDate=b.StatDate AND a.Carrier=b.Carrier AND a.Country=b.Country AND a.AppId=b.AppId AND a.SdkVersion=b.SdkVersion AND a.Subip=b.Subip AND a.OfferId=b.OfferId AND a.AdvertId=b.AdvertId AND a.PublisherId=b.PublisherId

-》Hive statements

--Initialize the Hive metastore database: in the scripts directory under the Hive install directory, run schematool -initSchema -dbType mysql:

--Load local data into a Hive table

load data local inpath '/2tb/log/log_`date +%Y-%m-%d`.txt' into table logdetailhadoop.log_show_tmp;

--Load HDFS data into a Hive table

load data inpath '/flume/events/20171018/FlumeData.1508256000588.tmp' into table logdetailhadoop.log_show_tmp;

--Load data into a specific partition

LOAD DATA LOCAL INPATH '/2tb/coll_log_detail/collect_2017-11-22.txt' OVERWRITE INTO TABLE logdetailhadoop.coll_log_detail PARTITION(time = '2017-11-22');

--Verify the data was loaded

select options,id,oid,aid,ip,country,carrier,sv,sp from log_show_tmp;

--Export Hive table data files to an HDFS directory

dfs -mv /user/hive/warehouse/logdetailhadoop.db/log_show_tmp/FlumeData.1508256000588.tmp /flume/events/20171018/

--Rename a Hive table column and change its type

hive> ALTER TABLE logdetailhadoop.log_sdc_tmp CHANGE createTime date String;

--Change a Hive table column's type (name unchanged)

hive> ALTER TABLE logdetailhadoop.log_sdc_tmp CHANGE createTime createTime String;

--Enable Hive dynamic partitioning by setting two parameters in the Hive session

hive> set hive.exec.dynamic.partition=true; 

hive> set hive.exec.dynamic.partition.mode=nonstrict; 

--Truncate a Hive table's data (external tables cannot be truncated)

hive>truncate table log_show; 

--Insert data into a static Hive partition

insert into table logdetailhadoop.log_show partition (time='2017-10-11',appId='502') select id,aid,oid,offerName,type,ip,country,carrier,imei,model,version,ua,sv,net,sub1,sub2,sub3,createTime,msg,offers,videos,sp,imsi from logdetailhadoop.log_sdc_tmp where options='show';

--Insert data into Hive dynamic partitions

insert into table logdetailhadoop.log_show partition (time,appid) select id,aid,oid,offerName,type,ip,country,carrier,imei,model,version,ua,sv,net,sub1,sub2,sub3,createTime,msg,offers,videos,sp,imsi,createTime,aid from logdetailhadoop.log_sdc_tmp lst where options='show';

--Start the Hive metastore service

bin/hive --service metastore &

--Or run it in the background with nohup

nohup bin/hive --service metastore &

--Start HiveServer2 in the background

nohup bin/hive --service hiveserver2 &

--Remote connection with beeline

beeline> !connect jdbc:hive2://<host-ip>:10001/

Connecting to jdbc:hive2://<host-ip>:10001/

Enter username for jdbc:hive2://<host-ip>:10001/: root

Enter password for jdbc:hive2://<host-ip>:10001/: password

--Ways to connect to HiveServer2 remotely

!connect jdbc:hive2://<host-ip>:10001/logdetailhadoop;hive.server2.transport.mode=http;hive.server2.thrift.http.path=cliservice

!connect jdbc:hive2://<host-ip>:10001 org.apache.hive.jdbc.HiveDriver

!connect jdbc:hive2://<host-ip>:10001/logdetailhadoop;auth=noSasl

--Check whether the service is running

ps aux | grep HiveServer2

--Change a column type on a partitioned table

ALTER TABLE logdetailhadoop.log_offerlist CHANGE COLUMN aid aid STRING CASCADE;

ALTER TABLE logdetailhadoop.log_offerlist PARTITION(time='2017-10-17') CHANGE COLUMN aid aid STRING;

--Informational message printed when a Hive job starts

Number of reduce tasks not specified. Estimated from input data size: 8
In order to change the average load for a reducer (in bytes):
  set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
  set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
  set mapreduce.job.reduces=<number>
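For example, to pin a constant reducer count for the next query (the value 4 is purely illustrative):

hive> set mapreduce.job.reduces=4;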

--Show detailed table metadata

desc formatted logdetailhadoop.log_thirdpostback_tmp;

--Rename a table

ALTER TABLE logdetailhadoop.log_offerlist RENAME TO logdetailhadoop.log_offerlist_old;

--Drop partitions

ALTER TABLE log_income DROP IF EXISTS PARTITION (time='2017-12-06');

ALTER TABLE log_usernew DROP IF EXISTS PARTITION (time='__HIVE_DEFAULT_PARTITION__');

ALTER TABLE log_offerlist REPLACE COLUMNS (aid STRING)

ALTER TABLE coll_log_detail DROP IF EXISTS PARTITION (time='`date +%22%25Y-%25m-%25d%22`');

--Add a column to a partitioned table

hive(logdetailhadoop)>alter table log_click add columns(publisherid string);

Error: ERROR exec.DDLTask: org.apache.hadoop.hive.ql.metadata.HiveException: Unable to alter table.
Caused by: MetaException(message:org.datanucleus.exceptions.NucleusDataStoreException: Clear request failed : DELETE FROM `TABLE_PARAMS` WHERE `TBL_ID`=?)
Caused by: org.datanucleus.exceptions.NucleusDataStoreException: Clear request failed : DELETE FROM `TABLE_PARAMS` WHERE `TBL_ID`=?
Caused by: java.sql.SQLException: Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = STATEMENT and at least one table uses a storage engine limited to row-based logging. InnoDB is limited to row-logging when transaction isolation level is READ COMMITTED or READ UNCOMMITTED.

Fix: connect to MySQL and run the following commands

mysql>  SET SESSION binlog_format = 'MIXED';

mysql> SET GLOBAL binlog_format = 'MIXED';

Then re-run: alter table log_click add columns(publisherid string);

To make sure data inserted afterwards is not NULL in the newly added column, do the following:

Step 1: In the Hive metastore, find the new column-descriptor ID assigned after the column was added (CD_ID; all columns of a table map to one CD_ID value), e.g.: SELECT * FROM SDS WHERE LOCATION LIKE '%table_name%'

Step 2: In the SDS table you can see both the newly assigned CD_ID and the old CD_ID still referenced by the existing partitions; update the old CD_ID to the new one, e.g.: UPDATE SDS SET CD_ID=NEW_CD_ID (the new value found) WHERE CD_ID=OLD_CD_ID (the old value). A verification query follows the example below.

UPDATE SDS SET CD_ID=61 WHERE CD_ID=38;
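To double-check which storage descriptors now reference the new column set, a hedged query against the metastore (column names per the standard metastore schema; the table-name filter is illustrative):

SELECT SD_ID, CD_ID, LOCATION FROM SDS WHERE LOCATION LIKE '%log_click%';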

--Change (rename) a specific column

ALTER TABLE log_show CHANGE publisherid advertid string;

--Start Hive with logs printed to the console, to make error messages easier to see.

hive -hiveconf hive.root.logger=DEBUG,console

--Copy a table's structure

CREATE TABLE show_tmp LIKE log_show;

--Manually add a partition

ALTER TABLE coll_log_detail ADD PARTITION (time='`date +"%Y-%m-%d"`');

bin/beeline -u "jdbc:hive2://192.168.1.113:10000/logdetailhadoop" -n root -e "ALTER TABLE coll_log_detail ADD PARTITION (time='`date +"%Y-%m-%d"`')"

bin/beeline -u "jdbc:hive2://主机ip:10001/logdetailhadoop" -n root -e "ALTER TABLE coll_log_detail ADD PARTITION (time='`date +"%Y-%m-%d"`')"

-》HBase

--Start the HBase shell client

bin/hbase shell

--Start all regionservers in the cluster

bin/hbase-daemons.sh start regionserver

--Start a single regionserver

bin/hbase-daemon.sh start regionserver

--In the HBase shell: create a namespace, create tables in a namespace, drop a namespace, alter a namespace

create_namespace 'logdetailhbase'

#create my_table in my_ns namespace 

create 'logdetailhbase:log_offerlist201801','appconfig','userinfo','otherinfo'

--Tables for storing collected common information

create 'logdetailhbase:coll_common','commonInfo'

create 'logdetailhbase:coll_mutable','mutableInfo'

create 'logdetailhbase:coll_result','resultInfo'

hbase(main):013:0>create 'logdetailhbase:usernew_remove', 'appid_imsi' 

hbase(main):013:0>create 'logdetailhbase:useractivity_remove','userinfo'

hbase(main):013:0>create 'logdetailhbase:usernew_remove','userinfo'

#drop namespace 

drop_namespace 'logdetaihbase' 


#alter namespace 

alter_namespace 'logdetaihbase', {METHOD => 'set', 'PROPERTY_NAME' => 'PROPERTY_VALUE'} 

--Create a table

create 'logdetailhbase:log_offerlist','click_id','appconfig' 

-- Add the userinfo column family to table usernew_remove

hbase(main):013:0> disable 'logdetailhbase:usernew_remove'

hbase(main):013:0> alter 'logdetailhbase:usernew_remove', {NAME => 'userinfo'}

hbase(main):013:0> enable 'logdetailhbase:usernew_remove'

hbase(main):014:0> put 'logdetailhbase:usernew_remove','2062354364082381841','userinfo:StatDates','2017-12-12 16:21:22'

--If this errors, the HBase version may be too old

disable the table, then alter it, then enable it again

--Insert data

put 'logdetailhbase:log_offerlist201712','b4cd70e9e925452baf133c3c2cq60439','appconfig:create_time','2017-11-11 00:00:00'

--Get data

Get all data for a row key

hbase(main):001:0>get 'logdetaihbase:log_offerlist','b4cd70e9e925452baf133c3c2cq60439'

Get all data for a row key and one column family

hbase(main):002:0>get 'logdetaihbase:log_offerlist','b4cd70e9e925452baf133c3c2cq60439','appconfig'

Get all data for a row key and a single column in a column family

hbase(main):002:0>get 'logdetaihbase:log_offerlist','b4cd70e9e925452baf133c3c2cq60439','appconfig:app_id'

Get versions of the data by timestamp

hbase(main):010:0>get 'logdetaihbase:log_offerlist','b4cd70e9e925452baf133c3c2cq60439',{COLUMN=>'appconfig:app_id',TIMESTAMP=>1321586238965}

Scan all data

scan 'logdetaihbase:log_offerlist'

--Query a row from client code (Scala)

    // assumes an org.apache.hadoop.hbase.client.HTable/Table instance named `table` for the target HBase table
    val g = new Get("id001".getBytes)

    val result = table.get(g)

    val value = Bytes.toString(result.getValue("basic".getBytes, "name".getBytes))

--Counting rows

1. The most direct way is to run the count command in the hbase shell.

hbase> count 't1'

hbase> count 't1', INTERVAL => 100000

hbase> count 't1', CACHE => 1000

hbase> count 't1', INTERVAL => 10, CACHE => 1000

2. Use the MapReduce RowCounter job

$HBASE_HOME/bin/hbase  org.apache.hadoop.hbase.mapreduce.RowCounter 'tablename'

3.hive over hbase

If a Hive table mapped onto the HBase table has already been created, you can count the HBase table's rows with a SQL statement run in Hive (see the count example after the DDL below).

The DDL for the hive-over-hbase table is:

/* Create a Hive-managed table backed by HBase */

CREATE TABLE hive_hbase_1(key INT,value STRING) 

STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' 

WITH SERDEPROPERTIES ("hbase.columns.mapping"=":key,cf:val") 

TBLPROPERTIES("hbase.table.name"="t_hive","hbase.table.default.storage.type"="binary"); 

/* Map Hive onto an existing HBase table */

CREATE EXTERNAL TABLE hive_hbase_1(key INT,value STRING) 

STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' 

WITH SERDEPROPERTIES ("hbase.columns.mapping"=":key,cf:val") 

TBLPROPERTIES("hbase.table.name"="t_hive","hbase.table.default.storage.type"="binary"); 

--Hue's HBase app requires the Thrift server to be running

bin/hbase-daemon.sh start thrift

--Stop the Thrift server

bin/hbase-daemon.sh stop thrift

--If port 9090 is taken, start the Thrift server on specified ports

bin/hbase-daemon.sh start thrift --infoport 9095 -p 9099

--Basic HBase shell operations

  --help: help

  --list tables: list

--Region management

1) Move a region

# 语法:move 'encodeRegionName', 'ServerName'

# encodeRegionName is the encoded suffix at the end of the region name; ServerName comes from the Region Servers list on the master status page

# Example

# e.g. the regionName is logdetailhbase:useractivity_remove,2018012722864044009931,1517654320664.78df088b578209afe5332452e8f60098.

# and the ServerName is 192-168-0-106.static.gorillaservers.com,60020,1517641361811

hbase(main)>move '78df088b578209afe5332452e8f60098', '192-168-0-106.static.gorillaservers.com,60020,1517641361811'

2) Enable/disable the balancer

# Syntax: balance_switch true|false

hbase(main)> balance_switch true

3) Manually split a region

# Syntax: split 'regionName', 'splitKey'

4) Manually trigger a major compaction

# Syntax:

#Compact all regions in a table:

#hbase> major_compact 't1'

#Compact an entire region:

#hbase> major_compact 'r1'

#Compact a single column family within a region:

#hbase> major_compact 'r1', 'c1'

#Compact a single column family within a table:

#hbase> major_compact 't1', 'c1'

-》Spark + Hive

    --The functions for registering UDFs, whether used from the DataFrame DSL or from SQL, have been moved into the udf object on SQLContext.

sqlCtx.udf.register("strLen", (s: String) => s.length())
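A hedged usage sketch of the UDF registered above (the people table and name column are illustrative):

sqlCtx.sql("SELECT name, strLen(name) FROM people").show()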

--Export Spark SQL query results in different formats

scala> sqlContext.sql("select * from predict ").write.format("json").save("predictj") 

scala> sqlContext.sql("select * from predict ").write.format("parquet").save("predictp") 

scala> sqlContext.sql("select * from predict ").write.format("orc").save("predicto") 

-》Kafka commands

--Start the Kafka service

bin/kafka-server-start.sh -daemon config/server.properties

/home/dd/soft/cdh5.3.6/kafka_2.10-0.8.2.1/bin/kafka-server-start.sh -daemon /home/dd/soft/cdh5.3.6/kafka_2.10-0.8.2.1/config/server.properties

--Stop the Kafka service

bin/kafka-server-stop.sh /home/dd/soft/cdh5.3.6/kafka_2.10-0.8.2.1

--Create topics

bin/kafka-topics.sh --create --zookeeper codis1:2181,codis2:2181,codis3:2181 --replication-factor 1 --partitions 3 --topic flumeTopic

bin/kafka-topics.sh --create --zookeeper codis1:2181,codis2:2181,codis3:2181 --replication-factor 1 --partitions 1 --topic thirdPostBackClickId  //revenue-postback click-id data

bin/kafka-topics.sh --create --zookeeper <host-ip>:2181,<host-ip>:2181,<host-ip>:2181 --replication-factor 1 --partitions 3 --topic flumeTopic

bin/kafka-topics.sh --create --zookeeper 192.168.0.106:2181,192.168.0.108:2181,192.168.0.107:2181 --replication-factor 1 --partitions 3 --topic flumeTopic

bin/kafka-topics.sh --create --zookeeper 192.168.0.106:2181,192.168.0.108:2181,192.168.0.107:2181 --replication-factor 1 --partitions 1 --topic thirdPostBackClickId

--Show detailed topic information

bin/kafka-topics.sh --zookeeper codis1:2181,codis2:2181,codis3:2181 --describe

bin/kafka-topics.sh --zookeeper 192.168.0.106:2181,192.168.0.108:2181,192.168.0.107:2181 --describe

--Add partitions

bin/kafka-topics.sh --zookeeper codis1:2181,codis2:2181,codis3:2181 --alter --partitions 6 --topic flumeTopic

bin/kafka-topics.sh --zookeeper 192.168.0.106:2181,192.168.0.108:2181,192.168.0.107:2181 --alter --partitions 6 --topic flumeTopic

bin/kafka-topics.sh --zookeeper codis1:2181,codis2:2181,codis3:2181 --alter --partitions 3 --topic thirdPostBackClickId

--List topics

bin/kafka-topics.sh --list --zookeeper codis1:2181

bin/kafka-topics.sh --list --zookeeper codis1:2181,codis2:2181,codis3:2181

bin/kafka-topics.sh --list --zookeeper codis1:2182,codis2:2182,codis3:2182

bin/kafka-topics.sh --list --zookeeper <host-ip>:2181,<host-ip>:2181,<host-ip>:2181

bin/kafka-topics.sh --list --zookeeper 192.168.0.106:2181,192.168.0.108:2181,192.168.0.107:2181

--Start a console producer

bin/kafka-console-producer.sh --broker-list codis1:9092,codis2:9092,codis3:9092 --topic sparkTopic

bin/kafka-console-producer.sh --broker-list codis1:9092,codis2:9092,codis3:9092 --topic flumeTopic

bin/kafka-console-producer.sh --broker-list codis1:9092,codis2:9092,codis3:9092 --topic thirdPostBackClickId

bin/kafka-console-producer.sh --broker-list <host-ip>:9092,<host-ip>:9092,<host-ip>:9092 --topic flumeTopic

bin/kafka-console-producer.sh --broker-list 192.168.0.106:9092,192.168.0.108:9092,192.168.0.107:9092 --topic callback_Info

--Start a console consumer

bin/kafka-console-consumer.sh --zookeeper codis1:2181,codis2:2181,codis3:2181 --topic flumeTopic

bin/kafka-console-consumer.sh --zookeeper codis1:2181,codis2:2181,codis3:2181 --topic sparkTopic

bin/kafka-console-consumer.sh --zookeeper codis1:2181,codis2:2181,codis3:2181 --topic callback_Info 

bin/kafka-console-consumer.sh --zookeeper codis1:2181,codis2:2181,codis3:2181 --topic thirdPostBackClickId

bin/kafka-console-consumer.sh --zookeeper <host-ip>:2181,<host-ip>:2181,<host-ip>:2181 --topic flumeTopic

bin/kafka-console-consumer.sh --zookeeper 192.168.0.106:2181,192.168.0.108:2181,192.168.0.107:2181 --topic flumeTopic

bin/kafka-console-consumer.sh --zookeeper 192.168.0.106:2181,192.168.0.108:2181,192.168.0.107:2181 --topic thirdPostBackClickId

bin/kafka-console-consumer.sh --zookeeper 192.168.0.106:2181,192.168.0.108:2181,192.168.0.107:2181 --topic callback_Info

--Increase topic replication (partition reassignment)

bin/kafka-reassign-partitions.sh --zookeeper codis1:2181,codis2:2181,codis3:2181 --reassignment-json-file addReplicas.json --execute

--Check per-partition consumer offset information for a topic

bin/kafka-run-class.sh kafka.tools.ConsumerOffsetChecker --group flumeTopicChannelTwo  --topic flumeTopic  --zookeeper 192.168.0.106:2181,192.168.0.107:2181,192.168.0.108:2181/kafka

bin/kafka-run-class.sh kafka.tools.ConsumerOffsetChecker --group flumeTopicChannelTwo  --topic flumeTopic  --zookeeper 192.168.1.113:2181,192.168.1.122:2181,192.168.1.126:2181/kafka

-》Flume commands

--Start Flume agents

nohup bin/flume-ng agent -c conf/ -n logser -f conf/agent/kafka_sparkstreaming_statement_test.conf -Dflume.root.logger=INFO,console >/flumeStartInfo/194 2>&1 &

nohup bin/flume-ng agent -c conf/ -n a1 -f conf/agent/flume_kafka_cluster.conf -Dflume.root.logger=INFO,console

nohup bin/flume-ng agent -c conf/ -n a1 -f conf/agent/flume_kafka_cluster_27.conf >/flumeStartInfo/194 2>&1 &

bin/flume-ng agent -c conf/ -n agent_sparkTopic -f conf/agent/sparkTopic_cluster_test.conf -Dflume.root.logger=INFO,console

nohup bin/flume-ng agent -c conf/ -n kafka_tmp -f conf/agent/flume_kafka_tmp.conf -Dflume.root.logger=INFO,console >/flumeStartInfo/kafka_tmp 2>&1 &

nohup bin/flume-ng agent -c conf/ -n hdfssink -f conf/agent/flume_hdfs_sink.conf -Dflume.root.logger=INFO,console >/flumeStartInfo/hdfssink 2>&1 &

--Flume exec source configuration (a fuller agent sketch follows these lines)

a1.sources.s1.type = exec

a1.sources.s1.command = tail -1000f /2tb/log/log_`date +%Y-%m-%d`.txt

a1.sources.s1.shell = /bin/sh -c
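A minimal end-to-end agent sketch built around this exec source; the channel/sink names, Kafka topic, and broker list are illustrative assumptions, and the Kafka sink class assumes a Flume build that ships it:

a1.sources = s1
a1.channels = c1
a1.sinks = k1

a1.sources.s1.type = exec
a1.sources.s1.command = tail -1000f /2tb/log/log_`date +%Y-%m-%d`.txt
a1.sources.s1.shell = /bin/sh -c
a1.sources.s1.channels = c1

a1.channels.c1.type = memory
a1.channels.c1.capacity = 10000

a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
a1.sinks.k1.topic = flumeTopic
a1.sinks.k1.brokerList = codis1:9092,codis2:9092,codis3:9092
a1.sinks.k1.channel = c1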

--Persist the offset of the file Flume has read up to

a1.sources.r1.command = tail -n +$(tail -n1 /home/hadoop/flume_read/m) -F /2tb/offer_statement/log_2017-11-16_income.txt | awk 'ARGIND==1{i=$0;next}{i++;if($0~/^tail/){i=0};print $0;print i >> "/home/hadoop/flume_read/m";fflush("")}' /home/hadoop/flume_read/m -

--Persist the file-read offset, and make the tail process exit when the flume agent is shut down.

a1.sources.r1.command = tail -n +$(tail -n1 /2tb/log/postback_offset) -F /2tb/log/ThirdPostbackClickId_`date +%Y-%m-%d`.txt | awk 'ARGIND==1{i=$0;next}{i++;if($0~/^tail/){i=0};print $0;print i >> "/2tb/log/postback_offset";fflush("")}' /2tb/log/postback_offset - --pid $(ps -ef|grep java|grep thirdpostback_clickid_topic.conf|awk '{print $2}')

--Exec source commands for the flume agents that collect revenue data

tail -n +$(tail -n1 /2tb/log/postback_offset) -F /2tb/log/ThirdPostbackClickId_`date +%Y-%m-%d`.txt --pid $(ps -ef|grep java|grep thirdpostback_clickid_topic.conf|awk '{print $2}') | awk 'ARGIND==1{i=$0;next}{i++;if($0~/^tail/){i=0};print $0;print i >> "/2tb/log/postback_offset";fflush("")}' /2tb/log/postback_offset -

tail -n +$(tail -n1 /2tb/log/log_offset) -F /2tb/log/log_`date +%Y-%m-%d`.txt --pid $(ps -ef|grep java|grep report_statistics_to_flumeTopic_and_hdfs.conf|awk '{print $2}') | awk 'ARGIND==1{i=$0;next}{i++;if($0~/^tail/){i=0};print $0;print i >> "/2tb/log/log_offset";fflush("")}' /2tb/log/log_offset -

--Start Flume remotely over SSH

ssh root@<host-ip> "nohup $FLUME_HOME/bin/flume-ng agent -c $FLUME_HOME/conf/ -n a1 -f $FLUME_HOME/conf/agent/flume_kafka_cluster.conf >/flumeStartInfo/194 2>&1 &"

--Start Flume remotely (on a machine whose SSH port is 50)

ssh [email protected] -p 50 "nohup $FLUME_HOME/bin/flume-ng agent -c $FLUME_HOME/conf/ -n a1 -f $FLUME_HOME/conf/agent/flume_kafka_cluster.conf >/flumeStartInfo/194 2>&1 &"

--Copy the agent config to the machines that collect the logs

scp -P 50 thirdpostback_clickid_topic.conf [email protected]:/home/dd/soft/cdh5.3.6/apache-flume-1.5.0-cdh5.3.6-bin/conf/agent/

scp -P 50 [email protected]:/home/dd/soft/cdh5.3.6/apache-flume-1.5.0-cdh5.3.6-bin/conf/agent/

scp thirdpostback_clickid_topic.conf [email protected]:/home/dd/soft/cdh5.3.6/apache-flume-1.5.0-cdh5.3.6-bin/conf/agent/

scp thirdpostback_clickid_topic.conf [email protected]:/home/dd/soft/cdh5.3.6/apache-flume-1.5.0-cdh5.3.6-bin/conf/agent/

scp thirdpostback_clickid_topic.conf [email protected]:/home/dd/soft/cdh5.3.6/apache-flume-1.5.0-cdh5.3.6-bin/conf/agent/

scp thirdpostback_clickid_topic.conf [email protected]:/home/dd/soft/cdh5.3.6/apache-flume-1.5.0-cdh5.3.6-bin/conf/agent/

scp thirdpostback_clickid_topic.conf [email protected]:/home/dd/soft/cdh5.3.6/apache-flume-1.5.0-cdh5.3.6-bin/conf/agent/

scp thirdpostback_clickid_topic.conf [email protected]:/home/dd/soft/cdh5.3.6/apache-flume-1.5.0-cdh5.3.6-bin/conf/agent/

scp thirdpostback_clickid_topic.conf [email protected]:/home/dd/soft/cdh5.3.6/apache-flume-1.5.0-cdh5.3.6-bin/conf/agent/

--Get the PID of the Flume application

ps -ef | grep java | grep flume | awk '{print $2}'

--Kill the Flume application process

ps -ef | grep java | grep flume |awk '{print "kill -9 " $2}'|sh

--Precisely find (and kill) the tail process of a specific agent

ps -ef | grep tail | grep report_statistics_to_flumeTopic_and_hdfs.conf |awk '{print "kill -9 " $2}'|sh

--Check whether a shell script is running (tailLogShell is the script name)

ps -ef | grep tailLogShell

--Run the following on the machines that hold the log files to restart Flume; flumeRestart.sh script:

#!/bin/sh

ps -ef | grep java | grep flume |awk '{print "kill -9 " $2}'|sh

FLUME_HOME=/home/dd/soft/cdh5.3.6/apache-flume-1.5.0-cdh5.3.6-bin

nohup $FLUME_HOME/bin/flume-ng agent -c $FLUME_HOME/conf/ -n a1 -f $FLUME_HOME/conf/agent/flume_kafka_cluster.conf -Dflume.root.logger=INFO,console >/flumeStartInfo/kafkasink 2>&1 &

nohup $FLUME_HOME/bin/flume-ng agent -c $FLUME_HOME/conf/ -n hdfssink -f $FLUME_HOME/conf/agent/flume_hdfs_sink.conf -Dflume.root.logger=INFO,console >/flumeStartInfo/hdfssink 2>&1 &

FLUME_HOME=/home/dd/soft/cdh5.3.6/apache-flume-1.5.0-cdh5.3.6-bin

nohup $FLUME_HOME/bin/flume-ng agent -c $FLUME_HOME/conf/ -n hdfssink -f $FLUME_HOME/conf/agent/flume_hdfs_sink.conf -Dflume.root.logger=INFO,console >/flumeStartInfo/hdfssink 2>&1 &

FLUME_HOME=/home/dd/soft/cdh5.3.6/apache-flume-1.5.0-cdh5.3.6-bin

nohup $FLUME_HOME/bin/flume-ng agent -c $FLUME_HOME/conf/ -n click_id -f $FLUME_HOME/conf/agent/thirdpostback_clickid_topic.conf -Dflume.root.logger=INFO,console >/flumeStartInfo/click_id 2>&1 &

--Monitor with Ganglia

bin/flume-ng agent -c conf/ -n kafka_tmp -f conf/agent/flume_kafka_tmp.conf -Dflume.monitoring.type=ganglia -Dflume.monitoring.hosts=192.168.1.126:8655 -Dflume.root.logger=INFO,console

-》Submit jobs from the command line with spark-submit

--Start all services from the master node (including the slave nodes; passwordless SSH required)

sbin/start-all.sh 

--Start only the master node

sbin/start-master.sh 

--Start slave nodes separately

  ->Start all slave nodes

sbin/start-slaves.sh spark://192.168.1.113:7077 

  ->Start a single slave node

sbin/start-slave.sh spark://192.168.1.113:7077 

--Submit a Spark job to the test cluster

bin/spark-submit \

--class com.yunmei.ssp.spark.channelstatement.KafkaToMysqlOfferStatement00 \

--master spark://codis1:6066 \

--supervise \ 

--driver-cores 1 \

--deploy-mode cluster \

--executor-cores 1 \

--total-executor-cores 5 \

/sparkprojectjar/statementmodel/kafkastatement07.jar

--Submit a Spark job to the production cluster

bin/spark-submit \

--class com.yunmei.ssp.spark.channelstatement.KafkaToMysqlOfferStatement \

--master spark://<host-ip>:6066 \

--supervise \

--driver-cores 1 \

--deploy-mode cluster \

--executor-cores 1 \

--total-executor-cores 5 \

/sparkprojectjar/statementmodel/logs-analyzer08.jar

-》Spark JobServer

--Start and stop the service

$JOBSERVER_HOME/bin/server_start starts the service; it listens on port 8090 by default, which can be changed in datacloud.conf before starting.

$JOBSERVER_HOME/bin/server_stop stops the service; note that resident contexts stop with it, so after restarting the jobserver the resident contexts must be recreated.

sbin/start-slave.sh spark://192.168.0.106:7077 

-》Linux commands

--Top 10 processes by resource usage: ps -aux | sort -k4nr | head -10  (column 4 is %MEM; use -k3nr to sort by %CPU instead)

USER  PID  %CPU  %MEM    VSZ    RSS  TTY  STAT  START  TIME  COMMAND

root 14804  8.6  3.6  5912172 593656  ?    Sl    16:54  0:51  /home/dd/soft/cdh5.3.6/jdk1.7.0_79/bin/java -cp /home/dd/soft/cdh5.3.6/spark-1.6.1-bin-2.5.0-cdh5.3.6/conf/:/home/dd/soft/cdh5.3.6/spark-1.6.1-bin-2.5.0-cdh5.3.6/lib/spark-assembly-1.6.1-hadoop2.5.0-cdh5.3.6.jar:/home/dd/soft/cdh5.3.6/spark-1.6.1-bin-2.5.0-cdh5.3.6/lib/datanucleus-api-jdo-3.2.6.jar:/home/dd/soft/cdh5.3.6/spark-1.6.1-bin-2.5.0-cdh5.3.6/lib/datanucleus-core-3.2.10.jar:/home/dd/soft/cdh5.3.6/spark-1.6.1-bin-2.5.0-cdh5.3.6/lib/datanucleus-rdbms-3.2.9.jar:/home/dd/soft/cdh5.3.6/hadoop-2.5.0-cdh5.3.6/etc/hadoop/ -Xms1024M -Xmx1024M -Dspark.jars=file:/sparkprojectjar/statementmodel/logs-analyzer.jar -Dspark.cores.max=40 -Dspark.app.name=com.yunmei.ssp.spark.channelstatement.KafkaStreamOfferStatement -Dspark.driver.supervise=false -Dspark.master=spark://codis1:7077 -Dspark.executor.cores=30 -Dspark.submit.deployMode=cluster -Dspark.executor.memory=1G -Dspark.driver.cores=3 -XX:MaxPermSize=256m org.apache.spark.deploy.worker.DriverWrapper spark://[email protected]:7078 /home/dd/soft/cdh5.3.6/spark-1.6.1-bin-2.5.0-cdh5.3.6/data/spark_data/spark_work/driver-20170923165403-0008/logs-analyzer.jar com.yunmei.ssp.spark.channelstatement.KafkaStreamOfferStatement

--Note: PID 14804 is the Spark job process

--Check port usage

netstat -nat | grep :22

--Because the SSH port on 192.168.0.101 was changed to 50, scp needs the port specified with -P 50

scp -r -P 50 apache-flume-1.5.0-cdh5.3.6-bin [email protected]:/home/dd/soft/cdh5.3.6/

--Linux shell commands

--Install cron (crontab)

yum install -y vixie-cron

--Check whether crond is enabled at boot

chkconfig --list crond

--Enable crond at boot

chkconfig crond on

--Create a cron job

crontab -e

##sync time

10 00 * * * /home/dd/soft/cdh5.3.6/apache-flume-1.5.0-cdh5.3.6-bin/flumeRestart.sh 

--Restart crond

service crond restart

/bin/systemctl restart crond.service  #restart the service

/bin/systemctl reload  crond.service  #reload the configuration

/bin/systemctl status  crond.service  #check the crond service status

--List cron jobs

crontab -l

--Change the Linux system time zone (localtime)

sudo rm -f /etc/localtime

sudo ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime

--Set the Linux system timezone name

echo 'Asia/Shanghai' >/etc/timezone

--Starting from the root directory, find all .log text files and print the lines containing "ERROR"

find / -type f -name "*.log" | xargs grep "ERROR"

--Example: starting from the current directory, find all .in text files and print the lines containing "thermcontact"

find . -name "*.in" | xargs grep "thermcontact"

find log_2017-11-14.txt | xargs grep "ThirdPostback" >> log_thidpostback_2017-11-14.txt

find /2tb/coll_log_detail/collect_2017-12-28.txt | xargs grep "121cc3dc22a04d569390c4admplc0061" >> /2tb/coll_log_detail/0061.txt

wc -l /2tb/coll_log_detail/0061.txt

find /2tb/coll_log_detail/collect_2017-12-28.txt | xargs grep "2fea54c518ce48ec94ca4a458ijs0524" >> /2tb/coll_log_detail/0315.txt

wc -l /2tb/coll_log_detail/0315.txt

find log_show_local2017-09-10.txt | xargs grep "createTime=2017-10-06" >> log_show.txt

find ./log_*.txt | xargs grep "show" >> log_show_local2017-09-10.txt

--Delete lines containing a string, in place

sed -i "/createTime=2017-10-06/d" log_show_local2017-09-10.txt

--Check the CPU model

cat /proc/cpuinfo | grep name | sort | uniq 

--Count physical CPUs (sockets)

cat /proc/cpuinfo | grep "physical id" | sort | uniq | wc -l

--Count CPU cores

cat /proc/cpuinfo | grep "core id" | sort | uniq | wc -l

--Count CPU threads (logical processors)

cat /proc/cpuinfo | grep "processor" | sort | uniq | wc -l

--Show full CPU information

# lscpu

--Check memory

# free -m

# free -g

--Check disks

# df -hT

--Check the disk layout

[root@codis2 ~]# lsblk

NAME                        MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT

sda                            8:0    0 465.8G  0 disk

├─sda1                        8:1    0  500M  0 part /boot

└─sda2                        8:2    0 465.3G  0 part

├─vg_codis2-lv_root (dm-0) 253:0    0    50G  0 lvm  /

├─vg_codis2-lv_swap (dm-1) 253:1    0  7.9G  0 lvm  [SWAP]

└─vg_codis2-lv_home (dm-2) 253:2    0 407.4G  0 lvm  /home

--Free the system cache memory

--Before clearing, run sync to flush all unwritten system buffers to disk, including modified i-nodes, delayed block I/O, and read-write mapped files; otherwise unsaved data may be lost while the caches are released.

Next, write the desired value into /proc/sys/vm/drop_caches. For example, to release all caches, run the commands below (a combined one-liner follows the value list):

#echo 1 > /proc/sys/vm/drop_caches

#echo 2 > /proc/sys/vm/drop_caches

#echo 3 > /proc/sys/vm/drop_caches

0 – release nothing

1 – release the page cache

2 – release dentries and inodes

3 – release all caches
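Putting the two steps together (a hedged one-liner; run as root):

sync && echo 3 > /proc/sys/vm/drop_caches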

--View application process details

ps -aux | grep tomcat

--View all port usage on Linux

netstat -apn

--Check whether a port is in use

netstat -apn | grep 50010

--Check whether a service is listening on a port

netstat -nl | grep 9090

--Check whether a port can be connected to

telnet 192.168.0.106 8020

--Create a symlink to hive-site.xml in the Spark conf directory, to integrate Spark with Hive

ln -s /home/dd/soft/cdh5.3.6/hive-0.13.1-cdh5.3.6/conf/hive-site.xml

--View Java processes with full arguments

jps -m

24080 Jps -m

31428 Application -n a1 -f /home/dd/soft/cdh5.3.6/apache-flume-1.5.0-cdh5.3.6-bin/conf/agent/flume_kafka_cluster.conf

23862 Application -n hdfssink -f conf/agent/flume_hdfs_sink.conf

23465 Bootstrap start

--Empty a file's contents

]# > log.txt

--Set the system time

date -s 21:28:53

--Write the current date and time to the hardware clock (BIOS) so it is kept after a reboot

    hwclock -w

--Check whether NTP is installed

[root@codis3 logs]# rpm -q ntp

ntp-4.2.6p5-10.el6.centos.2.x86_64

[root@codis3 logs]#rpm -qa |grep ntp

--Install NTP

[root@localhost kevin]# yum -y install ntp

[root@localhost kevin]# systemctl enable ntpd

[root@localhost kevin]# systemctl start ntpd

--Sync time from a server

[root@codis3 logs]# ntpdate -u 192.168.1.113

28 Nov 10:57:54 ntpdate[11824]: step time server 192.168.1.113 offset 68.125139 sec

--scp commands

scp user_new_detail180104.jar [email protected]:/sparkprojectjar/statementmodel/

    scp user_new_detail180104.jar [email protected]:/sparkprojectjar/statementmodel/

scp user_new_detail180104.jar [email protected]:/sparkprojectjar/statementmodel/

scp user_new_detail180104.jar [email protected]:/sparkprojectjar/statementmodel/

--Delete files remotely

ssh [email protected] "rm -f /sparkprojectjar/hivedetaildata/hivedetail_offline_income20171207.jar"

ssh [email protected] "rm -f /sparkprojectjar/hivedetaildata/hivedetail_offline_income20171207.jar"

ssh [email protected] "rm -f /sparkprojectjar/hivedetaildata/hivedetail_offline_income20171207.jar"

ssh [email protected] "rm -f /sparkprojectjar/hivedetaildata/hivedetail_offline_income20171207.jar"

--List installed packages

rpm -qa | grep java

--Uninstall a package

rpm -e --nodeps java-1.5.0-gcj-1.5.0.0-29.1.el6.x86_64

--Several quick ways to empty a file

:> filename

> filename

cat /dev/null > filename

The three methods above empty the file and leave its size at 0.

The two methods below write a newline into the file, leaving it with size 1.

echo "" > filename

echo > filename

--Append the first 300 lines of one file to another file

cat ThirdPostbackClickId_2017-12-07.txt | head -300 >> tmp_2017-12-18.txt


--Steps to resolve "Too many open files"

--Find which PIDs have the most open files

lsof -n|awk '{print$2}'|sort|uniq -c |sort -nr|more

open files  PID

103040 30749

80195 30532

--View details for that PID

ps -aef|grep 30749

--So the limit should be raised to 4096; to make the change permanent, follow these 2 steps:

1. Edit /etc/security/limits.conf

Open it with vi /etc/security/limits.conf and append the following at the end (you can choose your own values):

* soft nofile 4096

* hard nofile 4096

2. Edit /etc/profile

Open it with vi /etc/profile and append the following at the end:

ulimit -n 4096

Then log in again for the change to take effect.

--View a file starting from a given line

more +10 log.log

--Use top to monitor all threads of a single process:

top -p 23377 -H

--Check the process's heap memory usage

jmap -heap PID

Run this command to see where a package was installed

rpm -ql  ganglia-gmetad-3.7.2-2.el6.x86_64

--Find the Java thread consuming the most CPU

  ps -mp pid -o THREAD,tid,time  or  ps -Lfp pid

   --Identify I/O bottlenecks

  mpstat -P ALL 1 1000

      Pay attention to the %iowait column, which is the time the CPU spends waiting on I/O. A persistently high value usually points to an I/O bottleneck, so this metric gives a fairly direct view of whether current I/O is the bottleneck.

--Troubleshooting production issues, part 1: high CPU usage. When an application uses a lot of CPU, unless it really is compute-intensive, the usual cause is an infinite loop. (A combined example follows the three steps below.)

--First, list the threads:

ps -mp pid -o THREAD,tid,time

--Next, convert the target thread ID to hexadecimal:

printf "%x\n" tid

--Finally, print that thread's stack trace:

jstack pid |grep tid -A 30
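A hedged end-to-end sketch of the three steps, using PID 14804 from the earlier ps output; the tid 14881 is made up for illustration:

ps -mp 14804 -o THREAD,tid,time    # pick the tid with the largest TIME

printf "%x\n" 14881                # prints 3a21

jstack 14804 | grep 3a21 -A 30     # stack trace of that thread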

--Basic HDFS commands

--Count the lines in a file

bin/hdfs dfs -cat /flume/events/20171018/FlumeData.1508256000588.tmp | wc -l

--Copy HDFS files into a Hive table directory

dfs -cp /flume/events/20171018/* /user/hive/warehouse/logdetailhadoop.db/log_sdc_tmp/

dfs -cp /flume/events/20171018/FlumeData.1508256000588.tmp /user/hive/warehouse/logdetailhadoop.db/log_sdc_tmp/

--Merge multiple HDFS files into a single local file:

bin/hdfs dfs -getmerge /tem/spark/distinctDate /tem/spark/distinctDate/distinct.txt

--Alternatively:

bin/hdfs dfs -cat /hdfs/output/part-r-* > /local/file.txt

--File sizes under a directory (in bytes)

hadoop fs -du /flume/events/20171018/

--File sizes under a directory (human-readable)

hadoop fs -du -h /flume/events/20171018/

--Total space used by a directory (human-readable)

hadoop fs -du -s -h /flume/events/20171018/

--Total space used by a directory (in bytes)

hadoop fs -du -s /flume/events/20171018/

-》Basic Redis operations

--Start the client

[root@hostname src]# ./redis-cli

--Connect to a Redis cluster node

[yiibai@ubuntu:~]$ ./redis-cli  -h <host-ip> -p 7000 -c

--Authenticate with a password

<host-ip>:7000> auth password

--Get a random key

<host-ip>:7000> RANDOMKEY

"ba7886ebc8cd478280e9a1deo2ud0990"

--Connect to a local Redis server

[root@codis1 src]# ./redis-cli -h 192.168.1.113 -p 6379

192.168.1.113:6379> auth 113test

OK

[root@hostname src]# ./redis-cli -h 192.168.0.101 -p 6379

192.168.0.101:6379> auth password

OK

--List keys

192.168.0.101:6379> keys *

1) "api.app.config.2050"

2) "api.o.offers.UY~2"

3) "api.app.config.2051"

4) "api.app.config.130"

--Connect to Redis cluster node servers

[root@codis1 src]# ./redis-cli -h 192.168.1.113 -p 7000 -c

192.168.1.113:7000> auth 123456

OK

[root@codis2 redis-3.2.9]# src/redis-cli -h 192.168.1.122 -p 7000 -c

192.168.1.122:7000> auth 123456

-》ZooKeeper

--Start the ZooKeeper client

bin/zkCli.sh  -server  192.168.0.106:2181,192.168.0.106:2181,192.168.0.106:2181

--Offsets are recorded in ZooKeeper at the path

/consumers/[groupId]/offsets/[topic]/[partitionId]

For example, view the offset of partition 0 of a topic:

[zk: localhost:42182(CONNECTED) 22] get /consumers/flumeTopic_spark_streaming_flumeTopicChannelTwo/offsets/flumeTopic/0

[zk: localhost:42182(CONNECTED) 22] get /consumers/flumeTopicChannelTwo/offsets/flumeTopic/0

[zk: localhost:42182(CONNECTED) 22] get /consumers/clickId/offsets/thirdPostBackClickId/0

--Set the offset of partition 0 of the flumeTopic topic to 100

[zk: localhost:42182(CONNECTED) 22] set /consumers/flumeTopic_spark_streaming_flumeTopicChannelTwo/offsets/flumeTopic/0 100

-》Hue

--Start Hue

nohup build/env/bin/supervisor &

--View the Hue processes

[root@hostname hue-3.7.0-cdh5.3.6]# ps -ef | grep hue

root      1909 26008  0 18:07 pts/1    00:00:00 /home/dd/soft/cdh5.3.6/hue-3.7.0-cdh5.3.6/build/env/bin/python2.6 build/env/bin/supervisor

hue      1912  1909  1 18:07 pts/1    00:00:06 /home/dd/soft/cdh5.3.6/hue-3.7.0-cdh5.3.6/build/env/bin/python2.6 /home/dd/soft/cdh5.3.6/hue-3.7.0-cdh5.3.6/build/env/bin/hue runcherrypyserver  

--Stop Hue

[root@hostname hue-3.7.0-cdh5.3.6]# kill -9 1909

[root@hostname hue-3.7.0-cdh5.3.6]# kill -9 1912

[1]+  Killed                  nohup build/env/bin/supervisor
