[base]
; image registry source
library = docker.shemic.com/
path = {base}
network = hadoop
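; Note (assumption): {base}, {path}, {name} and {i} look like placeholders this
; tool substitutes per container. Under the settings above, an image value such
; as dev/java would expand to docker.shemic.com/dev/java, and every container
; joins the user-defined "hadoop" Docker network.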

[java]
image = dev/java
command = share_java
alias = java
restart = false

[sbt]
image = dev/java/sbt
alias = sbt

[mysql]
image = db/mysql
port = 3309:3306
volumes = {path}conf/mysql:/etc/mysql,/mysql10/{name}/data:/var/lib/mysql
environment = MYSQL_ROOT_PASSWORD=123456
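; Sketch (assumption): if these keys map onto plain docker flags, this section
; is roughly equivalent to:
;   docker run -d --network hadoop -p 3309:3306 \
;     -v {path}conf/mysql:/etc/mysql \
;     -v /mysql10/{name}/data:/var/lib/mysql \
;     -e MYSQL_ROOT_PASSWORD=123456 \
;     docker.shemic.com/db/mysql
; A host-side client could then connect with:
;   mysql -h 127.0.0.1 -P 3309 -u root -p123456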

[hive]
image = java/hive
volumes = {path}conf/java/hive:/usr/local/hive/conf
alias = hive

[zookeeper]
image = java/zookeeper
num = 3
volumes = {path}conf/java/zookeeper:/usr/local/zookeeper/conf
command = zookeeper {i}
alias = zkCli.sh -server data-zookeeper:2181->zkcli
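; Note (assumption): num = 3 presumably starts a three-node ensemble, with {i}
; replaced by the instance index in "command = zookeeper {i}". The "->" in the
; alias seems to bind a short name to the full client command, so that:
;   zkcli
; expands to:
;   zkCli.sh -server data-zookeeper:2181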

[spark]
image = java/spark
volumes = {path}conf/java/spark:/usr/local/spark/conf
command = spark_log share
alias = spark-shell,pyspark,spark-submit --class org.apache.spark.examples.SparkPi --driver-memory 512M --executor-memory 512M --master yarn --deploy-mode client /usr/local/spark/examples/jars/spark-examples_2.11-2.1.1.jar->spark-pi,spark-submit --driver-memory 512M --executor-memory 512M --master yarn --deploy-mode client /share/src/spark/target/scala-2.11/*.jar --class ->spark-submit
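; Usage sketch (assumption): the comma-separated alias list defines three short
; commands. "spark-pi" would run the bundled SparkPi example on YARN in client
; mode with 512M of driver and executor memory:
;   spark-pi
; while "spark-submit" ends with a dangling --class, suggesting the caller
; appends the main class of the jar built under /share/src/spark.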

[hadoop]
image = java/hadoop
port = 50070:50070,8088:8088
volumes = {path}conf/java/hadoop:/usr/local/hadoop/etc/hadoop
command = hadoop share
alias = hadoop
slave = 2
hook.start = hadoop
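; Note (assumption): slave = 2 presumably adds two worker containers beside the
; master, and hook.start runs the "hadoop" hook once the container is up. With
; the mappings above, the standard Hadoop 2.x web UIs are reachable on the host:
;   http://localhost:50070   (HDFS NameNode UI)
;   http://localhost:8088    (YARN ResourceManager UI)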

[hbase]
image = java/hbase
port = 60010:60010
volumes = {path}conf/java/hbase:/usr/local/hbase/conf
command = hbase
slave = 2
alias = hbase
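; Note: 60010 is the HBase Master web UI port in releases before HBase 1.0
; (later releases moved it to 16010, which the disabled [#master] section below
; maps as well). Assumption: the two slaves act as RegionServers.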

[zeppelin]
image = java/zeppelin
port = 9999:6060
volumes = {path}conf/java/zeppelin:/usr/local/zeppelin/conf
command = zeppelin
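; Note (assumption): Zeppelin's server port appears to be set to 6060 in the
; mounted conf (the stock default is 8080), so the notebook UI would be at:
;   http://localhost:9999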

[#master]
image = data/hadoop
num = 1
port = 50070:50070,8088:8088,9000:9000,50090:50090,6066:6066,7077:7077,8080:8080,8081:8081,16010:16010
#volumes = {path}conf/data/hadoop:/usr/local/hadoop/etc/hadoop,{path}conf/data/spark:/usr/local/spark/conf,{path}conf/data/zookeeper:/usr/local/zookeeper/conf,{path}conf/data/hbase:/usr/local/hbase/conf,{path}conf/data/flume:/usr/local/flume/conf,{path}logs/nginx/web-nginx/logs:/root/flume/output,{path}conf/data/kafka:/usr/local/kafka/conf,{path}src/java/lib:/opt/jdk
#command = zookeeper-0 hadoop hbase
#command = zookeeper-0 hadoop flume kafka
command = hadoop
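; Note (assumption): the leading # in [#master] appears to disable this
; all-in-one section; the #volumes and #command lines preserve alternative
; setups (zookeeper + hbase, or a flume + kafka pipeline) in commented form,
; leaving "command = hadoop" as the line that would take effect if re-enabled.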