; data.conf
[base]
; image registry source
library = docker.shemic.com/
path = {base}
network = hadoop
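; Note: {base}, {path}, {name} and {i} appear to be placeholders expanded
; by the orchestration tool at launch time: {base} the project root,
; {path} the path value above, {name} the container name, and {i} the
; instance index when num > 1. This reading is inferred from how the keys
; are used below, not from the tool's documentation.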
[java]
image = dev/java
command = share_java
alias = java
restart = false
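; alias presumably exposes a host-side shell alias (`java` here) that runs
; the named command inside the container; restart = false would map to
; Docker's --restart=no policy. Both readings are assumptions from the
; key names.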
[sbt]
image = dev/java/sbt
alias = sbt
[mysql]
image = db/mysql
port = 3309:3306
volumes = {path}conf/mysql:/etc/mysql,/mysql10/{name}/data:/var/lib/mysql
environment = MYSQL_ROOT_PASSWORD=123456
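; port and volumes use Docker's host:container order, so MySQL listens on
; host port 3309. Assuming containers are named data-<section> (the
; zookeeper alias below targets data-zookeeper), this section would expand
; to roughly:
;   docker run -d --network hadoop --name data-mysql \
;     -p 3309:3306 \
;     -v <path>conf/mysql:/etc/mysql \
;     -v /mysql10/data-mysql/data:/var/lib/mysql \
;     -e MYSQL_ROOT_PASSWORD=123456 \
;     docker.shemic.com/db/mysql
; (a sketch of the presumed expansion, not actual tool output)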
[hive]
image = java/hive
volumes = {path}conf/java/hive:/usr/local/hive/conf
alias = hive
[zookeeper]
image = java/zookeeper
num = 3
volumes = {path}conf/java/zookeeper:/usr/local/zookeeper/conf
command = zookeeper {i}
alias = zkCli.sh -server data-zookeeper:2181->zkcli
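; num = 3 presumably starts a three-node ZooKeeper ensemble, with {i}
; passed to each container as its instance id. The `command->name` form of
; alias seems to bind the full command on the left to the short host alias
; on the right, so `zkcli` opens the quorum client against
; data-zookeeper:2181.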
[spark]
image = java/spark
volumes = {path}conf/java/spark:/usr/local/spark/conf
command = spark_log share
alias = spark-shell,pyspark,spark-submit --class org.apache.spark.examples.SparkPi --driver-memory 512M --executor-memory 512M --master yarn --deploy-mode client /usr/local/spark/examples/jars/spark-examples_2.11-2.1.1.jar->spark-pi,spark-submit --driver-memory 512M --executor-memory 512M --master yarn --deploy-mode client /share/src/spark/target/scala-2.11/*.jar --class ->spark-submit
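; The comma-separated alias list mixes both forms: bare entries
; (spark-shell, pyspark) keep their own names, while the two long
; spark-submit invocations are shortened via ->. Running `spark-pi` on the
; host would then submit the bundled SparkPi example to YARN in client
; mode with 512M of driver and executor memory.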
[hadoop]
image = java/hadoop
port = 50070:50070,8088:8088
volumes = {path}conf/java/hadoop:/usr/local/hadoop/etc/hadoop
command = hadoop share
alias = hadoop
slave = 2
hook.start = hadoop
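; slave = 2 presumably adds two worker containers alongside the master,
; and hook.start looks like a script to run once the containers are up
; (both inferred from the key names). Ports 50070 and 8088 are the Hadoop
; 2.x NameNode and YARN ResourceManager web UIs, published unchanged.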
[hbase]
image = java/hbase
port = 60010:60010
volumes = {path}conf/java/hbase:/usr/local/hbase/conf
command = hbase
slave = 2
alias = hbase
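; 60010 is the HBase master web UI port used by pre-1.0 releases (newer
; versions moved it to 16010, which [#master] below publishes instead).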
[zeppelin]
image = java/zeppelin
port = 9999:6060
volumes = {path}conf/java/zeppelin:/usr/local/zeppelin/conf
command = zeppelin
[#master]
image = data/hadoop
num = 1
port = 50070:50070,8088:8088,9000:9000,50090:50090,6066:6066,7077:7077,8080:8080,8081:8081,16010:16010
#volumes = {path}conf/data/hadoop:/usr/local/hadoop/etc/hadoop,{path}conf/data/spark:/usr/local/spark/conf,{path}conf/data/zookeeper:/usr/local/zookeeper/conf,{path}conf/data/hbase:/usr/local/hbase/conf,{path}conf/data/flume:/usr/local/flume/conf,{path}logs/nginx/web-nginx/logs:/root/flume/output,{path}conf/data/kafka:/usr/local/kafka/conf,{path}src/java/lib:/opt/jdk
#command = zookeeper-0 hadoop hbase
#command = zookeeper-0 hadoop flume kafka
command = hadoop
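; The # prefix appears to disable entries: [#master] as a whole section,
; and the #volumes/#command lines above as alternative all-in-one stacks
; (zookeeper + hadoop + hbase, or + flume + kafka) kept for reference.
; The port list covers HDFS (9000, 50070, 50090), Spark standalone (6066,
; 7077, 8080, 8081), YARN (8088) and the HBase master UI (16010).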