Installing HDFS 3.1.4 Locally on macOS Ventura

1. In a terminal, run the first command below and just press Enter at every prompt, then append the generated public key to ~/.ssh/authorized_keys:

ssh-keygen -t rsa

cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
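
Before moving on, it is worth confirming that passwordless SSH to the local machine actually works (a quick check assuming the default key location; the first connection may ask you to accept the host key):

chmod 600 ~/.ssh/authorized_keys   # sshd ignores the file if it is group/world writable
ssh localhost exit && echo "passwordless SSH OK"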

2. Open System Settings → Sharing → Remote Login and turn it on.

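If you prefer to check this from the terminal instead of the UI, macOS exposes the same switch via systemsetup (requires sudo):

sudo systemsetup -getremotelogin
# expected output: Remote Login: On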

3. Download the release package from the official site and extract it:

Download page: Apache Hadoop (https://hadoop.apache.org/releases.html)

cd ~
tar -zxvf hadoop-3.1.4.tar.gz
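
A quick sanity check after extracting (assuming the tarball was unpacked directly into the home directory as above):

ls ~/hadoop-3.1.4
# expect bin/, etc/, sbin/ and share/ among the entries;
# bin/hadoop version will also report 3.1.4 once JAVA_HOME is set in step 8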

4. Edit core-site.xml

cd ~/hadoop-3.1.4/etc/hadoop
vim core-site.xml

<configuration>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:/Users/winhye/hadoop/tmp</value>
    </property>
    <!-- default filesystem URI -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://hadoop:9000</value>
    </property>
    <!-- read/write buffer size -->
    <property>
        <name>io.file.buffer.size</name>
        <value>4096</value>
    </property>
    <!-- trash retention in minutes (10080 = 7 days) -->
    <property>
        <name>fs.trash.interval</name>
        <value>10080</value>
    </property>
</configuration>
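
The hostname hadoop used in fs.defaultFS is resolved through the /etc/hosts entry added in step 9. Hadoop normally creates hadoop.tmp.dir on demand, but creating it up front avoids permission surprises; a minimal sketch using the path configured above:

mkdir -p /Users/winhye/hadoop/tmp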

5. Edit hdfs-site.xml

cd ~/hadoop-3.1.4/etc/hadoop
vim hdfs-site.xml

<configuration>
    <!-- NameNode and SecondaryNameNode web UI addresses -->
    <property>
        <name>dfs.namenode.http-address</name>
        <value>0.0.0.0:9870</value>
    </property>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>0.0.0.0:9868</value>
    </property>

    <!-- metadata and block storage locations -->
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/Users/winhye/hadoop_bigdata/data/hadoop/namenode</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/Users/winhye/hadoop_bigdata/data/hadoop/datanode</value>
    </property>

    <!-- edit log and SecondaryNameNode checkpoint directories -->
    <property>
        <name>dfs.namenode.edits.dir</name>
        <value>file:/Users/winhye/hadoop_bigdata/data/hadoop/edits</value>
    </property>
    <property>
        <name>dfs.namenode.checkpoint.dir</name>
        <value>file:/Users/winhye/hadoop_bigdata/data/hadoop/snn/checkpoint</value>
    </property>
    <property>
        <name>dfs.namenode.checkpoint.edits.dir</name>
        <value>file:/Users/winhye/hadoop_bigdata/data/hadoop/snn/edits</value>
    </property>

    <!-- temporary directory -->
    <property>
        <name>dfs.tmp.dir</name>
        <value>file:/Users/winhye/hadoop_bigdata/data/hadoop/tmp</value>
    </property>

    <!-- single-node setup: keep one replica -->
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>

    <!-- permission checking -->
    <property>
        <name>dfs.permissions.enabled</name>
        <value>true</value>
    </property>

    <!-- block size: 128 MB -->
    <property>
        <name>dfs.blocksize</name>
        <value>134217728</value>
    </property>
</configuration>
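
The NameNode/DataNode/edit-log directories referenced above can likewise be created ahead of time (paths copied from the configuration; adjust them if yours differ):

mkdir -p /Users/winhye/hadoop_bigdata/data/hadoop/{namenode,datanode,edits,tmp} \
         /Users/winhye/hadoop_bigdata/data/hadoop/snn/{checkpoint,edits}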

6. Edit yarn-site.xml

cd ~/hadoop-3.1.4/etc/hadoop
vim yarn-site.xml

<configuration>
    <!-- ResourceManager -->
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>0.0.0.0</value>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>0.0.0.0:8088</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>

    <!-- log aggregation -->
    <property>
        <name>yarn.log-aggregation-enable</name>
        <value>true</value>
    </property>
    <property>
        <name>yarn.log-aggregation.retain-seconds</name>
        <value>604800</value>
    </property>

    <!-- memory settings -->
    <property>
        <name>yarn.nodemanager.resource.memory-mb</name>
        <value>2048</value>
    </property>
    <property>
        <name>yarn.scheduler.minimum-allocation-mb</name>
        <value>512</value>
    </property>
    <property>
        <name>yarn.nodemanager.vmem-pmem-ratio</name>
        <value>2.1</value>
    </property>
</configuration>

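The 2048 MB NodeManager memory and 512 MB minimum allocation are conservative single-machine values; as a rough cross-check (purely illustrative, not part of the original steps) you can compare them against the Mac's physical memory:

sysctl -n hw.memsize    # total physical memory in bytes; divide by 1024^2 for MB
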
7. Edit mapred-site.xml

cd ~/hadoop-3.1.4/etc/hadoop
vim mapred-site.xml

<configuration>
    <!-- JobHistory server RPC address -->
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>0.0.0.0:10020</value>
    </property>
    <!-- JobHistory server web UI -->
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>0.0.0.0:19888</value>
    </property>
</configuration>
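
Before starting anything, it can help to confirm that the JobHistory ports configured above (10020 and 19888) are not already taken; a quick check using macOS's bundled lsof:

lsof -nP -iTCP:10020 -iTCP:19888 -sTCP:LISTEN
# no output means both ports are free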

8. Configure environment variables

vim ~/.bash_profile

export JAVA_8_HOME=/Library/Java/JavaVirtualMachines/jdk1.8.0_202.jdk/Contents/Home
export JAVA_HOME=$JAVA_8_HOME
export PATH=$JAVA_HOME/bin:$PATH
export HADOOP_HOME=/Users/winhye/hadoop-3.1.4
export PATH=$HADOOP_HOME/sbin:$HADOOP_HOME/bin:$PATH
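
On Ventura the default shell is zsh, so if these variables do not take effect in new terminals, put the same exports in ~/.zshrc as well. To verify the setup in the current session (the java_home lookup is an optional check of the JDK path):

source ~/.bash_profile
/usr/libexec/java_home -v 1.8   # prints the installed JDK 8 home path
hadoop version                  # should report Hadoop 3.1.4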

9. Configure /etc/hosts

sudo vim /etc/hosts

127.0.0.1  hadoop
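
A quick check that the new name resolves to the loopback address:

ping -c 1 hadoop
# expect a reply from 127.0.0.1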

10. Format and start

# Format the NameNode first (one-time operation):
cd ~/hadoop-3.1.4
# Format command:
bin/hdfs namenode -format   # or the older form: bin/hadoop namenode -format

# Start HDFS:
sbin/start-dfs.sh
# Start YARN:
sbin/start-yarn.sh
# Start the JobHistory server:
sbin/mr-jobhistory-daemon.sh start historyserver
# Note: the command above is deprecated; start the JobHistory server with this instead:
bin/mapred --daemon start historyserver
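
Once everything is up, the running Java processes and the web UIs give a quick health check; a sketch of what to look for (ports follow the configuration above):

jps
# expect NameNode, DataNode, SecondaryNameNode, ResourceManager, NodeManager, JobHistoryServer

# web UIs:
#   http://localhost:9870    NameNode
#   http://localhost:8088    ResourceManager
#   http://localhost:19888   JobHistoryServer

# smoke test: write and list a file in HDFS
hdfs dfs -mkdir -p /tmp/smoke
hdfs dfs -put $HADOOP_HOME/etc/hadoop/core-site.xml /tmp/smoke/
hdfs dfs -ls /tmp/smoke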
