网站关键词优化案例,城市分站cms,网站建设总流程图,佛山seo教程相关
hyper-v安装ubuntu-20-server hyper-v建立快照 hyper-v快速创建虚拟机-导入导出虚拟机
准备
虚拟机设置
采用hyper-v方式安装ubuntu-20虚拟机和koolshare
hostnameiph01192.168.66.20h02192.168.66.21h03192.168.66.22
静态IP
所有机器都需要按需设置
sudo vim /e…相关
hyper-v安装ubuntu-20-server hyper-v建立快照 hyper-v快速创建虚拟机-导入导出虚拟机
准备
虚拟机设置
采用hyper-v方式安装ubuntu-20虚拟机和koolshare
hostnameiph01192.168.66.20h02192.168.66.21h03192.168.66.22
静态IP
所有机器都需要按需设置
sudo vim /etc/netplan/00-installer-config.yaml
sudo netplan apply

00-installer-config.yaml 中，addresses 的 192.168.66.20 是机器的IP地址，gateway4 是koolshare的地址

network:
  version: 2
  ethernets:
    eth0:
      dhcp4: no
      addresses: [192.168.66.20/24]
      optional: true
      gateway4: 192.168.66.1
      nameservers:
        addresses: [223.5.5.5,223.6.6.6]

更改hostname
hostnamectl set-hostname h01
hostnamectl set-hostname h02
hostnamectl set-hostname h03

配置hosts
每台机器都要操作
sudo vim /etc/hosts
# 注意注释掉或更好名称
127.0.0.1 h01/h02/h03
# 添加一下内容
192.168.66.20 h01
192.168.66.21 h02
192.168.66.22 h03

新增mybigdata用户
每台机器都要操作
# 添加mybigdata用户(用户名)，密码是必须设置的，其它可选
sudo adduser mybigdata
# 用户加入sudo组，-a(append) -G，不要将用户从其它组移除
sudo usermod -aG sudo mybigdata
# 补充:删除用户和文件
# sudo deluser --remove-home mybigdata

ssh登录
ssh -p 22 mybigdata@192.168.66.20
ssh -p 22 mybigdata@192.168.66.21
ssh -p 22 mybigdata@192.168.66.22

免密登录
每台机器都需要设置
# 秘钥生成
ssh-keygen -t rsa
# 秘钥拷贝
ssh-copy-id h01
ssh-copy-id h02
ssh-copy-id h03
# 测试
ssh h01
ssh h02
ssh h03

rsync分发
在h01执行
cd /home/mybigdata
# 创建xsync 分发脚本
vim xsync
# xsync增加可执行权限
chmod +x xsync
# 运行示例
# xsync test.txt

xsync 分发脚本
#!/bin/bash
# xsync: distribute the given files/directories to every cluster node (h01 h02 h03)
# Usage: xsync file1 [file2 ...]
# Requires passwordless ssh to each host; preserves the source's absolute path.
pcount=$#
if [ "$pcount" -lt 1 ]
then
  echo "Not Enough Arguement!"
  exit 1
fi
for host in h01 h02 h03
do
  echo "==== $host ===="
  for file in "$@"
  do
    if [ -e "$file" ]
    then
      # absolute directory of the file; -P resolves symlinks
      pdir=$(cd -P "$(dirname "$file")"; pwd)
      echo "pdir=$pdir"
      fname=$(basename "$file")
      echo "fname=$fname"
      # make sure the same path exists on the remote host before syncing
      ssh "$host" "mkdir -p $pdir"
      rsync -av "$pdir/$fname" "$USER@$host:$pdir"
    else
      echo "$file does not exists!"
    fi
  done
done

jdk
在h01执行
cd /home/mybigdata
# windows上传linux h01
scp -P 22 -r D:\00garbage\jdk-8u321-linux-x64.tar.gz mybigdata@192.168.66.20:/home/mybigdata/
# 解压
tar -zxvf jdk-8u321-linux-x64.tar.gz
# 配置环境变量
vim .bashrc
# 刷新环境变量
source .bashrc
# 测试
java -version
javac -help

.bashrc
#JAVA_HOME
export JAVA_HOME=/home/mybigdata/jdk1.8.0_321
export JRE_HOME=/home/mybigdata/jdk1.8.0_321/jre
export CLASSPATH=.:$CLASSPATH:$JAVA_HOME/lib:$JRE_HOME/lib
export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin

安装hadoop
在h01操作
cd /home/mybigdata
# windows上传linux h01
scp -P 22 -r D:\00garbage\hadoop-3.1.3.tar.gz mybigdata@192.168.66.20:/home/mybigdata/
# 解压
tar -zxvf hadoop-3.1.3.tar.gz
# 配置环境变量
vim .bashrc
# 刷新环境变量
source .bashrc
# 测试
scp -P 22 -r D:\00garbage\hadoop-mapreduce-examples-3.1.3.jar mybigdata@192.168.66.20:/home/mybigdata/
mkdir input
vim input/word.txt
hadoop jar ./hadoop-mapreduce-examples-3.1.3.jar wordcount input/ ./output
cd output
vim part-r-00000

.bashrc
#HADOOP_HOME
export HADOOP_HOME=/home/mybigdata/hadoop-3.1.3
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin

配置hadoop集群
组件地址介绍hdfs namenodehdfs://h01:9000hdfs secondary namenodeh02:50090hdfs datanode所有结点都有mapred/yarn JobHistoryServeru01yarn resourcemanagerh01yarn nodemanager所有结点都有
配置文件地址介绍core-site.xml/fs.defaultFShdfs://h01:9000namenode 地址client与namenode通信地址hdfs-site.xml/dfs.namenode.secondary.http-addressh02:50090secondary namenode 地址yarn-site.xml/yarn.resourcemanager.hostnameh01yarn resourcemanager 地址yarn-site.xml/yarn.log.server.urlhttp://h01:19888/jobhistory/logsyarn日志服务端地址mapred-site.xml/mapreduce.jobhistory.addressh01:10020mapreduce jobhistory 地址mapred-site.xml/mapreduce.jobhistory.webapp.addressh01:19888mapreduce jobhistory web端地址
现在h01执行再分发
cd /home/mybigdata/hadoop-3.1.3/etc/hadoop/
vim hadoop-env.sh
vim workers
# hdfs namenode
vim core-site.xml
# hdfs secondary namenode
vim hdfs-site.xml
# mapred
vim mapred-site.xml
# yarn resourcemanager
vim yarn-site.xml

hadoop-env.sh
添加
export JAVA_HOME=/home/mybigdata/jdk1.8.0_321

workers
删除localhost 添加
h01
h02
h03

core-site.xml
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <!-- namenode 地址，client与namenode通信地址 -->
    <value>hdfs://h01:9000</value>
  </property>
  <!-- Hadoop的临时目录，默认/tmp/hadoop-${user.name} -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/home/mybigdata/hadoop-3.1.3/tmp</value>
  </property>
</configuration>

hdfs-site.xml
<configuration>
  <!-- 副本数量 -->
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <!-- secondary namenode 地址 -->
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>h02:50090</value>
  </property>
</configuration>

mapred-site.xml
<configuration>
  <!-- 指定MapReduce运行时的调度框架，这里指定在Yarn上，默认在local -->
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <!-- mapreduce jobhistory 地址 -->
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>h01:10020</value>
  </property>
  <!-- mapreduce jobhistory web端地址 -->
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>h01:19888</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.env</name>
    <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
  </property>
  <property>
    <name>mapreduce.map.env</name>
    <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
  </property>
  <property>
    <name>mapreduce.reduce.env</name>
    <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
  </property>
</configuration>

yarn-site.xml
<configuration>
  <!-- ResourceManager地址 -->
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>h01</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
  </property>
  <!-- yarn日志服务端地址，mapred-site.xml已配置 -->
  <property>
    <name>yarn.log.server.url</name>
    <value>http://h01:19888/jobhistory/logs</value>
  </property>
  <property>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>604800</value>
  </property>
  <property>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
    <description>Whether virtual memory limits will be enforced for containers</description>
  </property>
  <property>
    <name>yarn.nodemanager.vmem-pmem-ratio</name>
    <value>4</value>
    <description>Ratio between virtual memory to physical memory when setting memory limits for containers</description>
  </property>
</configuration>

分发
在h01执行
# 环境变量 .bashrc
./xsync .bashrc
# jdk
./xsync jdk1.8.0_321
# hadoop
./xsync hadoop-3.1.3

# 在每个虚拟机上执行，激活环境变量
source .bashrc

脚本
在h01执行
# 启动 停止脚本hse.sh start/stop
vim hse
# 修改权限
chmod +x hse

# 每个虚拟机执行，jps软连接
ln -s -f /home/mybigdata/jdk1.8.0_321/bin/jps /usr/bin/jps

# hjps
vim hjps
# 修改权限
chmod +x hjps

hse.sh
#!/bin/bash
# hse: start or stop the whole hadoop cluster from h01.
# Usage: hse start|stop
# All daemons are launched via ssh on h01 (namenode/resourcemanager host).
if [ $# -lt 1 ]
then
  echo "请输入start/stop"
  exit 1
fi
case "$1" in
start)
  echo "启动hadoop集群"
  echo "---启动hdfs---"
  ssh h01 /home/mybigdata/hadoop-3.1.3/sbin/start-dfs.sh
  echo "---启动yarn---"
  ssh h01 /home/mybigdata/hadoop-3.1.3/sbin/start-yarn.sh
  echo "---启动historyserver---"
  ssh h01 /home/mybigdata/hadoop-3.1.3/bin/mapred --daemon start historyserver
;;
stop)
  # stop in reverse order of startup
  echo "关闭hadoop集群"
  echo "---关闭historyserver---"
  ssh h01 /home/mybigdata/hadoop-3.1.3/bin/mapred --daemon stop historyserver
  echo "---关闭yarn---"
  # 若yarn在h02机器上则 ssh h02
  ssh h01 /home/mybigdata/hadoop-3.1.3/sbin/stop-yarn.sh
  echo "---关闭hdfs---"
  ssh h01 /home/mybigdata/hadoop-3.1.3/sbin/stop-dfs.sh
;;
*)
  echo "请输入start/stop"
;;
esac

hjps
#!/bin/bash
# hjps: run `jps` on every cluster node to list its Java daemons.
for host in h01 h02 h03
do
  echo "==== $host ===="
  ssh "$host" jps
done

windows配置
hosts 位置 C:\windows\system32\drivers\etc\
192.168.66.20 h01
192.168.66.21 h02
192.168.66.22 h03测试
在h01执行
cd /home/mybigdata
# namenode 格式化
hdfs namenode -format
# 启动
./hse start
# 检查jps
./hjps

# 执行
hadoop dfs -mkdir /wordin
vim word.txt
hadoop dfs -moveFromLocal ./word.txt /wordin
hadoop jar ./hadoop-mapreduce-examples-3.1.3.jar wordcount /wordin /wordout

# hdfs web地址
http://h01:9870
# yarn web地址
http://h01:8088

./hjps 执行结果