Spark cluster

On the slave node (slave2), root@slave2:/opt/spark-2.1.0-bin-hadoop2.6/conf# vim spark-env.sh:

export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64
export SCALA_HOME=/opt/scala-2.11.8
export SPARK_MASTER_IP=namenode
export HADOOP_HOME=/opt/hadoop-2.6.5
export SPARK_WORKER_CORES=3
export SPARK_WORKER_MEMORY=12g
export HADOOP_CONF_DIR=/opt/hadoop-2.6.5/etc/hadoop
export HADOOP_CONF_LIB_NATIVE_DIR=/opt/hadoop-2.6.5/lib/native
export HADOOP_MAPRED_HOME=/opt/hadoop-2.6.5
export HADOOP_COMMON_HOME=/opt/hadoop-2.6.5
export HADOOP_HDFS_HOME=/opt/hadoop-2.6.5
export YARN_HOME=/opt/hadoop-2.6.5
export HADOOP_INSTALL=/opt/hadoop-2.6.5
export YARN_CONF_DIR=/opt/hadoop-2.6.5/etc/hadoop
export SPARK_HOME=/opt/spark-2.1.0-bin-hadoop2.6
export SPARK_CLASSPATH=/opt/hadoop-2.6.5/etc/hadoop:/opt/hadoop-2.6.5/share/hadoop/common/lib/*:/opt/hadoop-2.6.5/share/hadoop/common/*:/opt/hadoop-2.6.5/share/hadoop/hdfs:/opt/hadoop-2.6.5/share/hadoop/hdfs/lib/*:/opt/hadoop-2.6.5/share/hadoop/hdfs/*:/opt/hadoop-2.6.5/share/hadoop/yarn/lib/*:/opt/hadoop-2.6.5/share/hadoop/yarn/*:/opt/hadoop-2.6.5/share/hadoop/mapreduce/lib/*:/opt/hadoop-2.6.5/share/hadoop/mapreduce/*:/opt/hadoop-2.6.5/contrib/capacity-scheduler/*.jar
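
The same spark-env.sh needs to be present on every node in the cluster. A minimal sketch for pushing it out from slave2, assuming the slave1/slave3/slave4 hostnames from the slaves file below and passwordless root SSH between nodes:

# copy spark-env.sh to the other workers (hostnames assumed from conf/slaves below)
for host in slave1 slave3 slave4; do
  scp /opt/spark-2.1.0-bin-hadoop2.6/conf/spark-env.sh root@${host}:/opt/spark-2.1.0-bin-hadoop2.6/conf/
done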

yarn-site.xml on slave2:

<?xml version="1.0"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
<configuration>

  <!-- Site specific YARN configuration properties -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <property>
    <name>yarn.resourcemanager.address</name>
    <value>master:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>master:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>master:8035</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address</name>
    <value>master:8033</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>0.0.0.0:8088</value>
  </property>
  <property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>7168</value>
  </property>
  <property>
    <name>yarn.scheduler.minimum-allocation-mb</name>
    <value>1024</value>
  </property>
  <!--
  <property>
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>4096</value>
  </property>
  -->
  <property>
    <name>yarn.nodemanager.pmem-check-enabled</name>
    <value>false</value>
  </property>
  <property>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
  </property>
</configuration>
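
After this yarn-site.xml is in place on the NodeManagers and YARN has been restarted, one quick sanity check (a sketch) is to confirm the nodes registered with the ResourceManager and that the 7168 MB per node took effect:

# list NodeManagers registered with the ResourceManager
/opt/hadoop-2.6.5/bin/yarn node -list
# per-node capacity (7168 MB here) is also visible in the RM web UI at http://master:8088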

/opt/spark-2.1.0-bin-hadoop2.6/conf# vim spark-defaults.conf:

# Example:
# spark.master                     spark://master:7077
# spark.eventLog.enabled           true
# spark.eventLog.dir               hdfs://namenode:8021/directory
# spark.serializer                 org.apache.spark.serializer.KryoSerializer
spark.driver.memory              12g
spark.scheduler.mode             FAIR
# spark.executor.extraJavaOptions  -XX:+PrintGCDetails -Dkey=value -Dnumbers="one two three"
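
With spark-defaults.conf in place, a simple smoke test is to submit the bundled SparkPi example to the standalone master (a sketch: the master URL follows SPARK_MASTER_IP above, and the exact examples jar name may differ in other builds):

/opt/spark-2.1.0-bin-hadoop2.6/bin/spark-submit \
  --master spark://namenode:7077 \
  --class org.apache.spark.examples.SparkPi \
  /opt/spark-2.1.0-bin-hadoop2.6/examples/jars/spark-examples_2.11-2.1.0.jar 100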

/opt/spark-2.1.0-bin-hadoop2.6/conf# vim slaves

slave1
slave2
slave3
slave4
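
Once the conf directory (spark-env.sh, spark-defaults.conf, slaves) is synchronized to every node, the standalone cluster can be brought up from the node acting as master (namenode here); a minimal sketch:

# start the master plus every worker listed in conf/slaves
/opt/spark-2.1.0-bin-hadoop2.6/sbin/start-all.sh
# the master web UI (http://namenode:8080) should then show 4 workers,
# each offering 3 cores and 12g per SPARK_WORKER_CORES / SPARK_WORKER_MEMORY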