Hadoop Configuration Files and Commands

Hadoop Shell Commands

Format the NameNode

hadoop namenode -format

Start Hadoop

$HADOOP_HOME/sbin/start-all.sh
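
To confirm the daemons actually came up, a quick check (assuming the JDK's jps tool is on the PATH) is:

jps
# the master should list NameNode, SecondaryNameNode and ResourceManager;
# each slave should list DataNode and NodeManager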

Enter safe mode

hdfs dfsadmin -safemode enter

Leave safe mode

hdfs dfsadmin -safemode leave
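
Check the current safe mode status:

hdfs dfsadmin -safemode get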

Create a directory in HDFS

hadoop fs -mkdir /"filename"

Create an empty file

hadoop fs -touchz /'filename'

List all files in the root directory

hadoop fs -ls /

Recursively list all files in subdirectories

hadoop fs -ls -R /

Copy file "filename1" and name the copy "filename2"

hadoop fs -cp /"filename1" /"filename2"

Move file "filename1" and rename it to "filename2"

hadoop fs -mv /"filename1" /"filename2"

Remove a file

hadoop fs -rm /"filename"

Empty the trash

hadoop fs -expunge

Show file size

hadoop fs -du -s /test1/data.txt    # add -h for human-readable sizes

View file contents

hadoop fs -cat /'filename'
hadoop fs -text /'filename'      # print in text format
hadoop fs -tail /'filename'      # print the last 1 KB of the file

Upload a local file to HDFS

hadoop fs -put "local file path" "HDFS destination path"

Download a file from HDFS to the local filesystem

hadoop fs -get "HDFS file path" "local destination path"
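
For example (the paths below are purely illustrative), uploading a local log file and fetching it back might look like:

hadoop fs -put /home/hadoop/access.log /test1/
hadoop fs -get /test1/access.log /tmp/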

Change the owner of a file to "name"

hadoop fs -chown [-R] name /test1/data.txt

Stop Hadoop

$HADOOP_HOME/sbin/stop-all.sh

Hadoop Configuration

Add the Hadoop environment to the system /etc/profile

export HADOOP_HOME=/usr/hadoop/hadoop-2.7.3
export CLASSPATH=$HADOOP_HOME:$HADOOP_HOME/lib
export PATH=$PATH:$HADOOP_HOME/bin

Apply the changes: source /etc/profile
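
After sourcing, the hadoop launcher should be on the PATH; a quick sanity check:

hadoop version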

Add the Java environment to hadoop-env.sh

export JAVA_HOME=/usr/java/jdk1.8.0_171

core-site.xml configuration

<property>
  <name>fs.default.name</name>
  <value>hdfs://master:9000</value>
</property>
<property>
  <name>hadoop.tmp.dir</name>
  <value>/usr/hadoop/hadoop-2.7.3/hdfs/tmp</value>
</property>
<property>
  <name>io.file.buffer.size</name>
  <value>131072</value>
</property>
<property>
  <name>fs.checkpoint.period</name>
  <value>60</value>
</property>
<property>
  <name>fs.checkpoint.size</name>
  <value>67108864</value>
</property>

yarn-site.xml configuration

<property>
  <name>yarn.resourcemanager.address</name>
  <value>master:18040</value>
</property>
<property>
  <name>yarn.resourcemanager.scheduler.address</name>
  <value>master:18030</value>
</property>
<property>
  <name>yarn.resourcemanager.webapp.address</name>
  <value>master:18088</value>
</property>
<property>
  <name>yarn.resourcemanager.resource-tracker.address</name>
  <value>master:18025</value>
</property>
<property>
  <name>yarn.resourcemanager.admin.address</name>
  <value>master:18141</value>
</property>
<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle</value>
</property>
<property>
  <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
  <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
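
With the ports above, the ResourceManager web UI should be reachable at http://master:18088 once YARN is started, and registered NodeManagers can be listed from the command line:

yarn node -list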

slaves file (one worker hostname per line)

slave1
slave2

masters file

master

hdfs-site.xml configuration

<property>
  <name>dfs.replication</name>
  <value>2</value>
</property>
<property>
  <name>dfs.namenode.name.dir</name>
  <value>file:/usr/hadoop/hadoop-2.7.3/hdfs/name</value>
  <final>true</final>
</property>
<property>
  <name>dfs.datanode.data.dir</name>
  <value>file:/usr/hadoop/hadoop-2.7.3/hdfs/data</value>
  <final>true</final>
</property>
<property>
  <name>dfs.namenode.secondary.http-address</name>
  <value>master:9001</value>
</property>
<property>
  <name>dfs.webhdfs.enabled</name>
  <value>true</value>
</property>
<property>
  <name>dfs.permissions</name>
  <value>false</value>
</property>
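
After the DataNodes have registered, the replication factor and the number of live nodes can be verified with:

hdfs dfsadmin -report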

mapred-site.xml configuration

<property>
  <name>mapreduce.framework.name</name>
  <value>yarn</value>
</property>
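
A simple end-to-end test of MapReduce on YARN is to run the bundled example job (the jar path assumes the Hadoop 2.7.3 layout used throughout this note):

hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.3.jar pi 2 10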
