
How to Compile Oozie-4.1.0 with hadoop-2.7.1

Published: 2021-12-10 09:29:06 | Author: iii | Category: Cloud Computing

This article walks through building Oozie-4.1.0 against hadoop-2.7.1, configuring it with MySQL as the metadata store, and running the bundled map-reduce and distcp example workflows.

1. Environment

maven-3.3.0

hadoop-2.7.1

2. Build

[root@hftclclw0001 opt]# pwd
/opt

[root@hftclclw0001 opt]# wget http://apache.mirrors.pair.com/oozie/4.1.0/oozie-4.1.0.tar.gz
[root@hftclclw0001 opt]# tar -zxvf  oozie-4.1.0.tar.gz
[root@hftclclw0001 opt]# cd oozie-4.1.0

# Defaults in pom.xml:
# sqoop.version=1.4.3
# hive.version=0.13.1     => build fails if changed
# hbase.version=0.94.2    => build fails if changed
# pig.version=0.12.1
# hadoop.version=2.3.0    => latest bundled hadooplib is 2.3.0, but it also works with 2.7.1
# tomcat.version=6.0.43
[root@hftclclw0001 oozie-4.1.0]# ./bin/mkdistro.sh -DskipTests -Phadoop-2 -Dsqoop.version=1.4.6
...
...
...
[INFO] BUILD SUCCESS
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 07:25 min
[INFO] Finished at: 2016-06-19T12:46:07+00:00
[INFO] Final Memory: 128M/1178M
[INFO] ------------------------------------------------------------------------

Oozie distro created, DATE[2016.06.19-12:38:39GMT] VC-REV[unavailable], available at [/opt/oozie-4.1.0/distro/target]
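If the build succeeds, the distro tarball can be found in the directory reported above (a quick sanity check; other files in that directory are omitted here):

[root@hftclclw0001 oozie-4.1.0]# ls distro/target/
...
oozie-4.1.0-distro.tar.gz
...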

3. Configuration

[root@hftclclw0001 opt]# pwd
/opt

[root@hftclclw0001 opt]# mkdir Oozie
[root@hftclclw0001 opt]# cd Oozie

[root@hftclclw0001 Oozie]# pwd
/opt/Oozie

[root@hftclclw0001 Oozie]# cp ../oozie-4.1.0/distro/target/oozie-4.1.0-distro.tar.gz ./
[root@hftclclw0001 Oozie]# tar -zxvf oozie-4.1.0-distro.tar.gz
[root@hftclclw0001 Oozie]# cd oozie-4.1.0
[root@hftclclw0001 oozie-4.1.0]# pwd
/opt/Oozie/oozie-4.1.0

[root@hftclclw0001 oozie-4.1.0]# mkdir libext
[root@hftclclw0001 oozie-4.1.0]# cp /opt/oozie-4.1.0/hadooplibs/hadoop-2/target/hadooplibs/hadooplib-2.3.0.oozie-4.1.0/* ./libext
[root@hftclclw0001 oozie-4.1.0]# cd libext

[root@hftclclw0001 libext]# curl -O http://archive.cloudera.com/gplextras/misc/ext-2.2.zip

Download the MySQL JDBC driver into libext, since MySQL is used as the metadata database (the default is Derby):
[root@hftclclw0001 libext]# ll
total 26452
...
-rw------- 1 root root  848401 Jun 19 13:41 mysql-connector-java-5.1.25-bin.jar
...
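If the connector jar is not already at hand, one way to fetch it is from Maven Central (an illustrative command; the exact version and URL are assumptions, and any 5.1.x connector jar should do):

# illustrative only: fetch the MySQL connector from Maven Central
[root@hftclclw0001 libext]# curl -O https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.25/mysql-connector-java-5.1.25.jar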

[root@hftclclw0001 libext]# cd ..
[root@hftclclw0001 oozie-4.1.0]# pwd
/opt/Oozie/oozie-4.1.0

[root@hftclclw0001 oozie-4.1.0]# ./bin/oozie-setup.sh prepare-war

[root@hftclclw0001 oozie-4.1.0]# ./bin/oozie-setup.sh sharelib create -fs hdfs://localhost:9000


Create the Oozie database:
[root@hftclclw0001 oozie-4.1.0]# mysql -uroot -p


mysql>CREATE DATABASE OOZIEDB;
mysql>GRANT ALL PRIVILEGES ON OOZIEDB.* TO oozie IDENTIFIED BY "oozie";
mysql>FLUSH PRIVILEGES;


Configure the JDBC settings in conf/oozie-site.xml:
oozie.service.JPAService.jdbc.driver
oozie.service.JPAService.jdbc.url
oozie.service.JPAService.jdbc.username
oozie.service.JPAService.jdbc.password
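
A minimal sketch of these four properties for the MySQL setup above (database OOZIEDB, user/password oozie/oozie; the localhost:3306 address is an assumption for a local MySQL instance):

<!-- sketch: JDBC settings in conf/oozie-site.xml for the MySQL metadata store -->
<property>
    <name>oozie.service.JPAService.jdbc.driver</name>
    <value>com.mysql.jdbc.Driver</value>
</property>
<property>
    <name>oozie.service.JPAService.jdbc.url</name>
    <value>jdbc:mysql://localhost:3306/OOZIEDB</value>
</property>
<property>
    <name>oozie.service.JPAService.jdbc.username</name>
    <value>oozie</value>
</property>
<property>
    <name>oozie.service.JPAService.jdbc.password</name>
    <value>oozie</value>
</property>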

[root@hftclclw0001 oozie-4.1.0]# ./bin/ooziedb.sh create db -run


Configure etc/hadoop/core-site.xml on the Hadoop side with a proxyuser entry for Oozie:
<property>
   <name>hadoop.proxyuser.$USER.hosts</name>
   <value>*</value>
</property>
<property>
   <name>hadoop.proxyuser.$USER.groups</name>
   <value>*</value>
</property>

Replace $USER with the user the Oozie service runs as, e.g. oozie or root.
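
For example, since this walkthrough runs the Oozie service as root, the entries would be (a sketch; substitute your own service user):

<property>
   <name>hadoop.proxyuser.root.hosts</name>
   <value>*</value>
</property>
<property>
   <name>hadoop.proxyuser.root.groups</name>
   <value>*</value>
</property>

After editing core-site.xml, the Hadoop daemons generally need a restart (or a proxyuser refresh) for the change to take effect.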

[root@hftclclw0001 oozie-4.1.0]# ./bin/oozied.sh start
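
Once started, the server can be checked with the Oozie CLI status command (the URL http://localhost:11000/oozie is an assumption matching a default local install); a healthy server reports NORMAL:

[root@hftclclw0001 oozie-4.1.0]# ./bin/oozie admin -oozie http://localhost:11000/oozie -status
System mode: NORMAL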

4. Examples

job.properties

nameNode=hdfs://nameservice1
#nameNode=hdfs://nameservice1 ==> HA
#nameNode=hdfs://${namenode}:8020 ==> single namenode

jobTracker=dapdevhmn001.qa.webex.com:8032
#jobTracker=rm1,rm2 ==> HA
#jobTracker(yarn.resourcemanager.address)=10.224.243.124:8032
queueName=default
examplesRoot=examples
#oozie.use.system.libpath=true

oozie.wf.application.path=${nameNode}/user/${user.name}/${examplesRoot}/apps/map-reduce
outputDir=map-reduce

workflow.xml

<workflow-app xmlns="uri:oozie:workflow:0.2" name="map-reduce-wf">
    <start to="mr-node"/>
    <action name="mr-node">
        <map-reduce>
            <job-tracker>${jobTracker}</job-tracker>
            <name-node>${nameNode}</name-node>
            <prepare>
                <delete path="${nameNode}/user/${wf:user()}/${examplesRoot}/output-data/${outputDir}"/>
            </prepare>
            <configuration>
                <property>
                    <name>mapred.job.queue.name</name>
                    <value>${queueName}</value>
                </property>
                <property>
                    <name>mapred.mapper.class</name>
                    <value>org.apache.oozie.example.SampleMapper</value>
                </property>
                <property>
                    <name>mapred.reducer.class</name>
                    <value>org.apache.oozie.example.SampleReducer</value>
                </property>
                <property>
                    <name>mapred.map.tasks</name>
                    <value>1</value>
                </property>
                <property>
                    <name>mapred.input.dir</name>
                    <value>/user/${wf:user()}/${examplesRoot}/input-data/text</value>
                </property>
                <property>
                    <name>mapred.output.dir</name>
                    <value>/user/${wf:user()}/${examplesRoot}/output-data/${outputDir}</value>
                </property>
            </configuration>
        </map-reduce>
        <ok to="end"/>
        <error to="fail"/>
    </action>
    <kill name="fail">
        <message>Map/Reduce failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>
    <end name="end"/>
</workflow-app>

lib/oozie-examples-4.1.0.jar
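
The job.properties, workflow.xml, and this jar typically come from oozie-examples.tar.gz, which ships at the root of the built distro (a sketch of unpacking it, assuming the layout above):

[root@hftclclw0001 oozie-4.1.0]# tar -zxvf oozie-examples.tar.gz
[root@hftclclw0001 oozie-4.1.0]# ls examples/apps/map-reduce
job.properties  lib  workflow.xml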

hadoop fs -mkdir -p /user/root/examples/apps/map-reduce

hadoop fs -put ./job.properties /user/root/examples/apps/map-reduce

hadoop fs -put ./workflow.xml /user/root/examples/apps/map-reduce

hadoop fs -put ./lib/oozie-examples-4.1.0.jar /user/root/examples/apps/map-reduce
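
The map-reduce workflow above also reads its input from /user/${wf:user()}/examples/input-data/text, so that data has to be on HDFS too (a sketch, assuming the input-data directory from the unpacked oozie-examples.tar.gz):

hadoop fs -put input-data /user/root/examples/input-data  # run from the unpacked examples/ directory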

job.properties is needed not only on HDFS but also as a local copy: the -config option of the oozie CLI points to the local file.
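
The commands below also assume OOZIE_URL points at the Oozie server, for example (host and port are assumptions matching a default local install):

export OOZIE_URL=http://localhost:11000/oozie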

oozie job -oozie ${OOZIE_URL} -config job.properties -run

oozie job -oozie ${OOZIE_URL} -info ${oozie_id}

#oozie job -oozie ${OOZIE_URL} -info 0000001-170206083712434-oozie-oozi-W

oozie job -oozie ${OOZIE_URL} -log ${oozie_id}

#oozie job -oozie ${OOZIE_URL} -log 0000001-170206083712434-oozie-oozi-W

5. distcp

job.properties

nameNode=hdfs://${sourceNameNode}:8020
destNameNode=hdfs://${destNameNode}:8020
jobTracker=${RM}:8032
#yarn.resourcemanager.address=${RM}:8032
queueName=default
examplesRoot=examples
oozie.use.system.libpath=true

oozie.wf.application.path=${nameNode}/user/${user.name}/${examplesRoot}/apps/distcp_2
outputDir=distcp

workflow.xml

<workflow-app xmlns="uri:oozie:workflow:0.3" name="distcp-wf">
    <start to="distcp-node"/>
    <action name="distcp-node">
        <distcp xmlns="uri:oozie:distcp-action:0.1">
            <job-tracker>${jobTracker}</job-tracker>
            <name-node>${nameNode}</name-node>
            <prepare>
                <delete path="${nameNode}/user/${wf:user()}/${examplesRoot}/output-data/${outputDir}"/>
            </prepare>
            <configuration>
                <property>
                    <name>mapred.job.queue.name</name>
                    <value>${queueName}</value>
                </property>
            </configuration>
            <arg>${nameNode}/user/${wf:user()}/${examplesRoot}/input-data/text/data.txt</arg>
            <arg>${destNameNode}/tmp/data.txt</arg>
        </distcp>
        <ok to="end"/>
        <error to="fail"/>
    </action>
    <kill name="fail">
        <message>DistCP failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>
    <end name="end"/>
</workflow-app>
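
Running the distcp example then follows the same upload-and-run pattern as the map-reduce one (a sketch; the HDFS path matches oozie.wf.application.path above):

hadoop fs -mkdir -p /user/root/examples/apps/distcp_2
hadoop fs -put ./job.properties ./workflow.xml /user/root/examples/apps/distcp_2

oozie job -oozie ${OOZIE_URL} -config job.properties -run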

This concludes the walkthrough of building Oozie-4.1.0 against hadoop-2.7.1, configuring it, and running the example workflows.
