[Network setup]
vim /etc/hosts
192.168.100.52 hadoop1
192.168.99.34 hadoop2
192.168.103.135 hadoop3
Then run on each corresponding machine:
hostname hadoop1
hostname hadoop2
hostname hadoop3
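Note that hostname only sets the name for the current boot. To make it persistent, also write it into the OS network config. A minimal sketch, assuming a RHEL/CentOS-style layout (the file location varies by distribution):
# on hadoop1; repeat analogously on hadoop2 and hadoop3
hostname hadoop1
sed -i 's/^HOSTNAME=.*/HOSTNAME=hadoop1/' /etc/sysconfig/network  # persists across reboots on RHEL/CentOS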
[Passwordless SSH between machines]
hadoop1# ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa
hadoop1# scp ~/.ssh/id_dsa.pub hadoop2:/root/
hadoop1# scp ~/.ssh/id_dsa.pub hadoop3:/root/
hadoop2# cat id_dsa.pub >> ~/.ssh/authorized_keys
hadoop3# cat id_dsa.pub >> ~/.ssh/authorized_keys
Verify: you can now log in from hadoop1 to hadoop2 and hadoop3 without a password.
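A quick way to check both hosts at once (a sketch): each command should print the remote hostname with no password prompt.
for h in hadoop2 hadoop3; do
    ssh $h hostname  # should print hadoop2 / hadoop3 without prompting
done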
[Installing Hadoop]
Make sure ssh, rsync, and a JDK are installed on every machine,
and that the following is set:
export JAVA_HOME=/opt/soft/jdk
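A quick sanity check across all three machines (a sketch, assuming the JDK is at /opt/soft/jdk everywhere, as set above):
for h in hadoop1 hadoop2 hadoop3; do
    echo "== $h =="
    ssh $h 'which rsync && /opt/soft/jdk/bin/java -version'  # every node should report both
done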
Hive has been tested extensively against Hadoop 0.20.x, so we go with 0.20:
cd /opt/soft/
wget http://mirror.bjtu.edu.cn/apache/hadoop/core/hadoop-0.20.2/hadoop-0.20.2.tar.gz
tar -zxvf hadoop-0.20.2.tar.gz
cd hadoop-0.20.2/
vim ~/.bashrc
export HADOOP_HOME=/opt/soft/hadoop-0.20.2
(Repeat the steps above on the other two machines.)
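The Hadoop start scripts read JAVA_HOME from conf/hadoop-env.sh rather than from your shell profile, so it is worth setting it there as well (a sketch, using the JDK path from above):
# in /opt/soft/hadoop-0.20.2/conf/hadoop-env.sh
export JAVA_HOME=/opt/soft/jdk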
[Configuring Hadoop]
vim conf/core-site.xml
Edit it to contain:
<configuration>
<property>
<!-- default filesystem URI used by the dfs command modules -->
<name>fs.default.name</name>
<value>hdfs://hadoop1:9000</value>
</property>
</configuration>
vim conf/hdfs-site.xml
Edit it to contain:
<configuration>
<property>
<!-- directory where the NameNode stores the filesystem namespace -->
<name>dfs.name.dir</name>
<value>/opt/hadoop/data/dfs.name.dir</value>
</property>
<property>
<!-- directory where DataNodes store file data blocks -->
<name>dfs.data.dir</name>
<value>/opt/hadoop/data/dfs.data.dir</value>
</property>
<property>
<!-- whether to enforce permissions on DFS files (usually false for testing) -->
<name>dfs.permissions</name>
<value>false</value>
</property>
</configuration>
vim conf/mapred-site.xml
Edit it to contain:
<configuration>
<property>
<!-- node that runs the JobTracker (usually the same as the NameNode) -->
<name>mapred.job.tracker</name>
<value>hadoop1:9001</value>
</property>
<property>
<!-- map/reduce system directory (a path on HDFS) -->
<name>mapred.system.dir</name>
<value>/opt/hadoop/system/mapred.system.dir</value>
</property>
<property>
<!-- map/reduce scratch directory (multiple comma-separated paths can be used to spread disk IO) -->
<name>mapred.local.dir</name>
<value>/opt/hadoop/data/mapred.local.dir</value>
</property>
</configuration>
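To avoid permission surprises at startup, you can pre-create the parent of the local paths above on every node (a sketch; Hadoop creates most of these directories itself, and mapred.system.dir lives on HDFS so it needs no local directory):
for h in hadoop1 hadoop2 hadoop3; do
    ssh $h 'mkdir -p /opt/hadoop/data'  # parent of dfs.name.dir / dfs.data.dir / mapred.local.dir
done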
vim conf/masters
hadoop1
vim conf/slaves
hadoop2
hadoop3
scp conf/* hadoop2:/opt/soft/hadoop-0.20.2/conf/
scp conf/* hadoop3:/opt/soft/hadoop-0.20.2/conf/
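Equivalently, since rsync is already installed everywhere, a small loop keeps all three conf/ directories in sync after future edits (a sketch):
for h in hadoop2 hadoop3; do
    rsync -av conf/ $h:/opt/soft/hadoop-0.20.2/conf/
done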
[Initialization]
cd $HADOOP_HOME/bin
./hadoop namenode -format
Start the cluster:
./start-all.sh
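To confirm the daemons came up, run jps on each node (a sketch, using the JDK path assumed above). Expect NameNode, SecondaryNameNode, and JobTracker on hadoop1, and DataNode plus TaskTracker on hadoop2/hadoop3:
for h in hadoop1 hadoop2 hadoop3; do
    echo "== $h =="
    ssh $h /opt/soft/jdk/bin/jps  # lists the Java daemons running on that node
done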
[Verification]
$HADOOP_HOME/bin/hadoop dfs -ls /
Open http://192.168.100.52:50030 (JobTracker web UI)
and http://192.168.100.52:50070 (NameNode web UI).
[Setting up Hive]
Download and install on hadoop1 only (the other machines do not need Hive):
cd /opt/soft/hadoop-0.20.2
wget http://mirror.bjtu.edu.cn/apache/hive/hive-0.7.0/hive-0.7.0.tar.gz
tar zxvf hive-0.7.0.tar.gz
cd hive-0.7.0
vim ~/.bashrc
export HIVE_HOME=/opt/soft/hadoop-0.20.2/hive-0.7.0
$HIVE_HOME/bin/hive
>create table tt(id int,name string) row format delimited fields terminated by ',' collection items terminated by "\n" stored as textfile;
>select * from tt;
>drop table tt;
That completes the quick smoke test.
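Optionally, to see data actually flow through a table end to end, a minimal sketch (the sample file /tmp/tt.csv and its contents are made up for illustration):
echo -e "1,alice\n2,bob" > /tmp/tt.csv
$HIVE_HOME/bin/hive -e "create table tt(id int, name string) row format delimited fields terminated by ',' stored as textfile;
load data local inpath '/tmp/tt.csv' into table tt;
select * from tt;
drop table tt;"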
[Configuring Hive]
Prepare MySQL on hadoop1 (user: hadoop, password: hadoop):
>create database hive;
>GRANT ALL ON hive.* TO 'hadoop'@'%' IDENTIFIED BY 'hadoop';
>FLUSH PRIVILEGES;
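A quick check that the grant works, as a sketch (passing the password inline with -p is fine for testing only):
mysql -h hadoop1 -u hadoop -phadoop -e 'show databases;'  # should list the hive database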
vim $HIVE_HOME/conf/hive-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>hive.metastore.local</name>
<value>true</value>
</property><property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://hadoop1:3306/hive?createDatabaseIfNotExist=true</value>
</property><property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property><property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>hadoop</value>
</property><property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>hadoop</value>
</property>
</configuration>
[Startup]
After copying mysql-connector-java-5.1.10.jar into hive/lib:
$HIVE_HOME/bin/hive
>create table tt(id int,name string) row format delimited fields terminated by ',' collection items terminated by "\n" stored as textfile;
If you get an error like this:
FAILED: Error in metadata: javax.jdo.JDOException: Couldnt obtain a new sequence (unique id) : Binary logging not possible. Message: Transaction level 'READ-COMMITTED' in InnoDB is not safe for binlog mode 'STATEMENT'
exit Hive, log into MySQL as root, and run:
>set global binlog_format='MIXED';
This is a MySQL quirk: with binlog_format 'STATEMENT', InnoDB refuses writes at the READ-COMMITTED isolation level that the metastore connection uses.
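Note that set global only lasts until mysqld restarts; to make the fix permanent, add it to the server config (a sketch, assuming the config file is /etc/my.cnf):
# in /etc/my.cnf, under the [mysqld] section
binlog_format = mixed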
Installation complete.