rabin committed 7 years ago
commit
d1f1594b41
100 changed files with 2591 additions and 0 deletions
  1. LICENSE (+14 -0)
  2. README.md (+81 -0)
  3. conf/core.conf (+30 -0)
  4. conf/db/influxdb/influxdb.conf (+126 -0)
  5. conf/db/mysql/my.conf (+152 -0)
  6. conf/dever/daocloud.conf (+69 -0)
  7. conf/dever/data.conf (+71 -0)
  8. conf/dever/dev.conf (+62 -0)
  9. conf/dever/docker.conf (+22 -0)
  10. conf/dever/elk.conf (+37 -0)
  11. conf/dever/iot.conf (+27 -0)
  12. conf/dever/server.conf (+18 -0)
  13. conf/dever/share.conf (+23 -0)
  14. conf/dever/tool.conf (+26 -0)
  15. conf/dever/web.conf (+33 -0)
  16. conf/docker/db/influxdb/Dockerfile (+3 -0)
  17. conf/docker/db/influxdb/my/Dockerfile (+15 -0)
  18. conf/docker/db/influxdb/my/influxdb.sh (+19 -0)
  19. conf/docker/db/memcached/Dockerfile (+13 -0)
  20. conf/docker/db/memcached/entrypoint.sh (+42 -0)
  21. conf/docker/db/mongodb/Dockerfile (+13 -0)
  22. conf/docker/db/mysql/Dockerfile (+27 -0)
  23. conf/docker/db/mysql/entrypoint.sh (+91 -0)
  24. conf/docker/db/pipelinedb/Dockerfile (+17 -0)
  25. conf/docker/db/pipelinedb/postgresql.sh (+66 -0)
  26. conf/docker/db/postgresql/Dockerfile (+3 -0)
  27. conf/docker/db/postgresql/pgadmin/Dockerfile (+8 -0)
  28. conf/docker/db/postgresql/pgadmin/postgresql.sh (+66 -0)
  29. conf/docker/db/postgresql/postgresql.sh (+66 -0)
  30. conf/docker/db/prometheus/Dockerfile (+17 -0)
  31. conf/docker/db/prometheus/postgresql.sh (+66 -0)
  32. conf/docker/db/redis/Dockerfile (+19 -0)
  33. conf/docker/db/redis/entrypoint.sh (+17 -0)
  34. conf/docker/db/redis/redis-live.conf (+23 -0)
  35. conf/docker/dev/erlang/Dockerfile (+8 -0)
  36. conf/docker/dev/golang/Dockerfile (+8 -0)
  37. conf/docker/dev/java/Dockerfile (+54 -0)
  38. conf/docker/dev/java/java.sh (+11 -0)
  39. conf/docker/dev/java/maven/Dockerfile (+12 -0)
  40. conf/docker/dev/java/sbt/Dockerfile (+16 -0)
  41. conf/docker/dev/java/sbt/repositories (+4 -0)
  42. conf/docker/dev/java/sbt/sbt.sh (+7 -0)
  43. conf/docker/dev/nodejs/Dockerfile (+14 -0)
  44. conf/docker/dev/nodejs/nodejs.sh (+23 -0)
  45. conf/docker/dev/nodejs/reload.sh (+10 -0)
  46. conf/docker/dev/php/Dockerfile (+24 -0)
  47. conf/docker/dev/php/base/Dockerfile (+13 -0)
  48. conf/docker/dev/php/base/php.sh (+19 -0)
  49. conf/docker/dev/php/mosquitto/Dockerfile (+15 -0)
  50. conf/docker/dev/php/swoole/Dockerfile (+15 -0)
  51. conf/docker/dev/python/Dockerfile (+9 -0)
  52. conf/docker/dev/python/flask/Dockerfile (+13 -0)
  53. conf/docker/dev/python/flask/entrypoint.sh (+27 -0)
  54. conf/docker/dev/python/v3/Dockerfile (+9 -0)
  55. conf/docker/dev/scala/Dockerfile (+21 -0)
  56. conf/docker/elastic/elasticsearch/Dockerfile (+31 -0)
  57. conf/docker/elastic/elasticsearch/elasticsearch.sh (+17 -0)
  58. conf/docker/elastic/filebeat/Dockerfile (+19 -0)
  59. conf/docker/elastic/filebeat/filebeat.sh (+13 -0)
  60. conf/docker/elastic/kibana/Dockerfile (+24 -0)
  61. conf/docker/elastic/kibana/kibana.sh (+17 -0)
  62. conf/docker/elastic/logstash/Dockerfile (+19 -0)
  63. conf/docker/elastic/logstash/logstash.sh (+17 -0)
  64. conf/docker/ha/haproxy/Dockerfile (+14 -0)
  65. conf/docker/ha/haproxy/nginx.sh (+13 -0)
  66. conf/docker/ha/keepalived/Dockerfile (+14 -0)
  67. conf/docker/ha/keepalived/nginx.sh (+13 -0)
  68. conf/docker/iot/demeter/Dockerfile (+22 -0)
  69. conf/docker/iot/demeter/demeter.sh (+17 -0)
  70. conf/docker/iot/emqtt/Dockerfile (+16 -0)
  71. conf/docker/iot/emqtt/emqtt.sh (+18 -0)
  72. conf/docker/java/base/Dockerfile (+23 -0)
  73. conf/docker/java/base/init.sh (+8 -0)
  74. conf/docker/java/base/ssh_config (+9 -0)
  75. conf/docker/java/combine/entrypoint.sh (+49 -0)
  76. conf/docker/java/flume/Dockerfile (+25 -0)
  77. conf/docker/java/flume/flume.sh (+24 -0)
  78. conf/docker/java/hadoop/Dockerfile (+29 -0)
  79. conf/docker/java/hadoop/hadoop.sh (+35 -0)
  80. conf/docker/java/hbase/Dockerfile (+27 -0)
  81. conf/docker/java/hbase/hbase.sh (+25 -0)
  82. conf/docker/java/hive/Dockerfile (+23 -0)
  83. conf/docker/java/hive/hive.sh (+18 -0)
  84. conf/docker/java/hive/plugin/mysql-connector-java-5.1.42.jar (BIN)
  85. conf/docker/java/spark/Dockerfile (+27 -0)
  86. conf/docker/java/spark/spark.sh (+45 -0)
  87. conf/docker/java/thrift/Dockerfile (+16 -0)
  88. conf/docker/java/zeppelin/Dockerfile (+22 -0)
  89. conf/docker/java/zeppelin/me/Dockerfile (+20 -0)
  90. conf/docker/java/zeppelin/me/entrypoint.sh (+6 -0)
  91. conf/docker/java/zeppelin/zeppelin.sh (+12 -0)
  92. conf/docker/java/zookeeper/Dockerfile (+19 -0)
  93. conf/docker/java/zookeeper/zookeeper.sh (+19 -0)
  94. conf/docker/mq/kafka/Dockerfile (+20 -0)
  95. conf/docker/mq/kafka/kafka.sh (+12 -0)
  96. conf/docker/mq/rabbitmq/Dockerfile (+20 -0)
  97. conf/docker/mq/rabbitmq/rabbitmq.sh (+12 -0)
  98. conf/docker/mq/rocketmq/Dockerfile (+20 -0)
  99. conf/docker/mq/rocketmq/rocketmq.sh (+12 -0)
  100. conf/docker/os/alpine/Dockerfile (+16 -0)

+ 14 - 0
LICENSE

@@ -0,0 +1,14 @@
+Apache License
+Copyright 2016-2017 Dever(dever.cc)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 81 - 0
README.md

@@ -0,0 +1,81 @@
+# dever-manage (dm): a Docker orchestration tool
+<pre>
+git clone http://git.shemic.com:3000/dever/docker.git dever
+cd dever
+chmod +x dever.py
+./dever.py init
+</pre>
+
+There is no need to check whether Docker is installed; the tool installs Docker automatically.
+
+<pre>
+Start everything:         dever run web
+Start a single service:   dever run web mysql
+Remove everything:        dever rm web
+Remove a single service:  dever rm web php
+</pre>
+
+Once these commands finish, open your server's IP in a browser. The nginx configuration can be edited under dever/nginx.
+
+<pre>
+Directory layout:
+1. conf: configuration files
+2. data: generated data, usable for backups
+3. logs: log files
+4. src:  source code of compiled programs; the docker.shemic.com/* images can be used directly as build containers for this code
+5. web:  web source code (PHP, JavaScript, ...); nginx can serve it directly over HTTP, reachable at the host machine's IP
+
+Configuration files may be changed freely; to do so, edit conf/dever/*.conf directly.
+</pre>
+
+<pre>
+dever command-line arguments:
+
+-h                show help
+-m or --method    method name
+-c or --conf      configuration file name
+-n or --name      index name within the configuration file
+-p or --param     execution parameters, generally interpreted per action
+
+Methods (without the conf argument):
+1. init:    initialize and update the dever code
+2. up:      update the dever code
+3. show:    list the currently running Docker containers
+4. showi:   list the current Docker images
+5. rm:      remove Docker containers that crashed or never started
+6. rmi:     remove stale Docker images
+7. package: list the available Docker images (2017-07-25)
+
+Methods (with the conf argument):
+1. run:    run a container
+2. stop:   stop a container
+3. create: create a container
+4. call:   run a container once, for executing one-off commands
+5. up:     run a container and update its Docker image
+6. rm:     remove a running Docker container
+7. save:   save or back up a running Docker container
+8. load:   restore a saved or backed-up Docker container and run it again
+9. show:   show the currently running Docker containers (2017-07-25)
+
+When -m matches no built-in method, dever falls back to the configuration file conf/dever/*.conf, where * is the value of -m.
+
+Examples:
+1. dever -m run -c web -n php: keep a php container running, based on the php section of conf/dever/web.conf
+2. dever -m run -c tool -n apidoc -p input=demo^out=output: run an apidoc container based on the apidoc section of conf/dever/tool.conf. That section sets the run parameter, so the apidoc container acts as a one-shot tool and does not keep running. input=demo replaces {$input} with demo, and out=output replaces {$out} with output; the colon ":" introduces the default value.
+Container configuration lives in conf/dever/*.conf
+
+Parameters can also be passed positionally, without names:
+1. dever run web php
+2. dever call tool apidoc input=demo^out=output
+
+Update 2017-07-25:
+After dever run, basic shortcut commands are generated for that container; for example, after the dever run web php above:
+
+1. web-php:         open an sh shell inside the container
+2. web-php logs:    view the container's startup logs
+3. web-php inspect: view the container's basic information
+4. web-php stop:    stop the container
+5. web-php rm:      stop and remove the container, equivalent to dever rm web php
+6. web-php show:    show the container's status
+</pre>
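
To make the {$name:default} placeholder behaviour concrete, here is a minimal sh sketch of the substitution as the README describes it (the sed-based logic is an illustration, not dever's actual implementation; the input/out names come from conf/dever/tool.conf):
<pre>
#!/bin/sh
# Hypothetical re-implementation of dever's {$name:default} expansion.
template='-i /root/input/{$input:demo}/ -o /root/{$out:output}'
input=${1:-demo}     # value passed as input=..., falling back to the default "demo"
out=${2:-output}     # value passed as out=..., falling back to the default "output"
printf '%s\n' "$template" \
    | sed "s|{\$input:[^}]*}|$input|; s|{\$out:[^}]*}|$out|"
# -> -i /root/input/demo/ -o /root/output
</pre>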

+ 30 - 0
conf/core.conf

@@ -0,0 +1,30 @@
+[base]
+dockerhub			= shemic
+dockerme			= docker.shemic.com
+
+[source]
+hub					= shemic
+shemic				= docker.shemic.com
+
+
+[package]
+php					= dev/php
+php-mosquitto		= dev/php/mosquitto
+mysql				= db/mysql
+memcached			= db/memcached
+nginx				= web/nginx
+java				= dev/java
+python				= dev/python
+scala				= dev/scala
+nodejs				= dev/nodejs
+golang				= dev/golang
+apidoc				= tool/apidoc
+elasticsearch		= elastic/elasticsearch
+kibana				= elastic/kibana
+logstash			= elastic/logstash
+filebeat			= elastic/filebeat
+emqtt				= iot/emqtt
+influxdb			= db/influxdb
+postgresql			= db/postgresql
+demeter				= iot/demeter
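
Presumably the [package] map above is combined with a conf file's library prefix to form the full image reference; a hedged illustration:
<pre>
# Assumed resolution of a short package name to an image reference:
library=docker.shemic.com/   # [base] library from a conf/dever/*.conf file
package=dev/php              # [package] php = dev/php above
echo "${library}${package}"  # -> docker.shemic.com/dev/php
</pre>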

+ 126 - 0
conf/db/influxdb/influxdb.conf

@@ -0,0 +1,126 @@
+reporting-disabled = false
+bind-address = "0.0.0.0:8088"
+
+[meta]
+  dir = "/var/lib/influxdb/meta"
+  retention-autocreate = true
+  logging-enabled = true
+
+[data]
+  dir = "/var/lib/influxdb/data"
+  index-version = "inmem"
+  wal-dir = "/var/lib/influxdb/wal"
+  wal-fsync-delay = "0s"
+  query-log-enabled = true
+  cache-max-memory-size = 1073741824
+  cache-snapshot-memory-size = 26214400
+  cache-snapshot-write-cold-duration = "10m0s"
+  compact-full-write-cold-duration = "4h0m0s"
+  max-series-per-database = 1000000
+  max-values-per-tag = 100000
+  max-concurrent-compactions = 0
+  trace-logging-enabled = false
+
+[coordinator]
+  write-timeout = "10s"
+  max-concurrent-queries = 0
+  query-timeout = "0s"
+  log-queries-after = "0s"
+  max-select-point = 0
+  max-select-series = 0
+  max-select-buckets = 0
+
+[retention]
+  enabled = true
+  check-interval = "30m0s"
+
+[shard-precreation]
+  enabled = true
+  check-interval = "10m0s"
+  advance-period = "30m0s"
+
+[monitor]
+  store-enabled = true
+  store-database = "_internal"
+  store-interval = "10s"
+
+[subscriber]
+  enabled = true
+  http-timeout = "30s"
+  insecure-skip-verify = false
+  ca-certs = ""
+  write-concurrency = 40
+  write-buffer-size = 1000
+
+[http]
+  enabled = true
+  bind-address = "0.0.0.0:8086"
+  auth-enabled = false
+  log-enabled = true
+  write-tracing = false
+  pprof-enabled = true
+  https-enabled = false
+  https-certificate = "/etc/ssl/influxdb.pem"
+  https-private-key = ""
+  max-row-limit = 0
+  max-connection-limit = 0
+  shared-secret = ""
+  realm = "InfluxDB"
+  unix-socket-enabled = false
+  bind-socket = "/var/run/influxdb.sock"
+
+[[graphite]]
+  enabled = false
+  bind-address = "0.0.0.0:2003"
+  database = "graphite"
+  retention-policy = ""
+  protocol = "tcp"
+  batch-size = 5000
+  batch-pending = 10
+  batch-timeout = "1s"
+  consistency-level = "one"
+  separator = "."
+  udp-read-buffer = 0
+
+[[collectd]]
+  enabled = false
+  bind-address = "0.0.0.0:25826"
+  database = "collectd"
+  retention-policy = ""
+  batch-size = 5000
+  batch-pending = 10
+  batch-timeout = "10s"
+  read-buffer = 0
+  typesdb = "/usr/share/collectd/types.db"
+  security-level = "none"
+  auth-file = "/etc/collectd/auth_file"
+
+[[opentsdb]]
+  enabled = false
+  bind-address = "0.0.0.0:4242"
+  database = "opentsdb"
+  retention-policy = ""
+  consistency-level = "one"
+  tls-enabled = false
+  certificate = "/etc/ssl/influxdb.pem"
+  batch-size = 1000
+  batch-pending = 5
+  batch-timeout = "1s"
+  log-point-errors = true
+
+[[udp]]
+  enabled = false
+  bind-address = "0.0.0.0:8089"
+  database = "udp"
+  retention-policy = ""
+  batch-size = 5000
+  batch-pending = 10
+  read-buffer = 0
+  batch-timeout = "1s"
+  precision = ""
+
+[continuous_queries]
+  log-enabled = true
+  enabled = true
+  run-interval = "1s"
+

+ 152 - 0
conf/db/mysql/my.conf

@@ -0,0 +1,152 @@
+# Example MariaDB config file for medium systems.                             
+#                                                                             
+# This is for a system with little memory (32M - 64M) where MariaDB plays     
+# an important part, or systems up to 128M where MariaDB is used together with
+# other programs (such as a web server)                                    
+#                                                                          
+# MariaDB programs look for option files in a set of                       
+# locations which depend on the deployment platform.                       
+# You can copy this option file to one of those                            
+# locations. For information about these locations, do:                    
+# 'my_print_defaults --help' and see what is printed under                 
+# Default options are read from the following files in the given order:    
+# More information at: http://dev.mysql.com/doc/mysql/en/option-files.html 
+#                                                                          
+# In this file, you can use all long options that a program supports.      
+# If you want to know which options a program supports, run the program    
+# with the "--help" option.                                                
+                                                                           
+# The following options will be passed to all MariaDB clients              
+[client]                                                                   
+#password       = 123456                                            
+port            = 3306                                                     
+socket          = /run/mysqld/mysqld.sock                                  
+                                                                           
+# Here follows entries for some specific programs                          
+                                                                           
+# The MariaDB server                                                       
+[mysqld] 
+skip-name-resolve
+skip-host-cache
+user=root                 
+port            = 3306                                                     
+socket          = /run/mysqld/mysqld.sock                                  
+skip-external-locking                                                      
+key_buffer_size = 16M                                                      
+max_allowed_packet = 1M                                                    
+table_open_cache = 64                                                      
+sort_buffer_size = 512K                                                    
+net_buffer_length = 8K                                                     
+read_buffer_size = 256K                                                    
+read_rnd_buffer_size = 512K                                                
+myisam_sort_buffer_size = 8M                                               
+                                                                           
+# Point the following paths to different dedicated disks                   
+#tmpdir         = /tmp/                                                    
+                                                                           
+# Don't listen on a TCP/IP port at all. This can be a security enhancement,
+# if all processes that need to connect to mysqld run on the same host.    
+# All interaction with mysqld must be made via Unix sockets or named pipes.
+# Note that using this option without enabling named pipes on Windows      
+# (via the "enable-named-pipe" option) will render mysqld useless!         
+#                                                                          
+#skip-networking                                                           
+                                                                           
+# Replication Master Server (default)                                      
+# binary logging is required for replication                               
+#log-bin=mysql-bin                                                          
+                                                                           
+# binary logging format - mixed recommended                                
+#binlog_format=mixed                                                        
+                                                                           
+# required unique id between 1 and 2^32 - 1                                
+# defaults to 1 if master-host is not set                                  
+# but will not function as a master if omitted                             
+server-id       = 1                                                        
+                                                                           
+# Replication Slave (comment out master section to use this)               
+#                                                                          
+# To configure this host as a replication slave, you can choose between
+# two methods :                                                               
+#                                                                          
+# 1) Use the CHANGE MASTER TO command (fully described in our manual) -    
+#    the syntax is:                                                        
+#                                                                          
+#    CHANGE MASTER TO MASTER_HOST=<host>, MASTER_PORT=<port>,              
+#    MASTER_USER=<user>, MASTER_PASSWORD=<password> ;                      
+#                                                                          
+#    where you replace <host>, <user>, <password> by quoted strings and    
+#    <port> by the master's port number (3306 by default).                 
+#                                                                          
+#    Example:                                                              
+#                                                                          
+#    CHANGE MASTER TO MASTER_HOST='125.564.12.1', MASTER_PORT=3306,        
+#    MASTER_USER='joe', MASTER_PASSWORD='secret';                          
+#                                                                          
+# OR                                                                       
+#                                                                          
+# 2) Set the variables below. However, in case you choose this method, then
+#    start replication for the first time (even unsuccessfully, for example
+#    if you mistyped the password in master-password and the slave fails to
+#    connect), the slave will create a master.info file, and any later     
+#    change in this file to the variables' values below will be ignored and
+#    overridden by the content of the master.info file, unless you shutdown
+#    the slave server, delete master.info and restart the slave server.
+#    For that reason, you may want to leave the lines below untouched      
+#    (commented) and instead use CHANGE MASTER TO (see above)              
+#                                                                          
+# required unique id between 2 and 2^32 - 1                                
+# (and different from the master)                                          
+# defaults to 2 if master-host is set                                      
+# but will not function as a slave if omitted                              
+#server-id       = 2                                                       
+#                                                                          
+# The replication master for this slave - required                         
+#master-host     =   <hostname>                                            
+#                                                                          
+# The username the slave will use for authentication when connecting       
+# to the master - required                                                 
+#master-user     =   <username>                                            
+#                                                                          
+# The password the slave will authenticate with when connecting to         
+# the master - required                                                    
+#master-password =   <password>                                            
+#                                                                          
+# The port the master is listening on.                                     
+# optional - defaults to 3306                                              
+#master-port     =  <port>                                                 
+#                                                                          
+# binary logging - not required for slaves, but recommended                
+#log-bin=mysql-bin                                                         
+                                                                           
+# Uncomment the following if you are using InnoDB tables                   
+#innodb_data_home_dir = /var/lib/mysql                                     
+#innodb_data_file_path = ibdata1:10M:autoextend                            
+#innodb_log_group_home_dir = /var/lib/mysql                                
+# You can set .._buffer_pool_size up to 50 - 80 %                          
+# of RAM but beware of setting memory usage too high                       
+#innodb_buffer_pool_size = 16M                                             
+#innodb_additional_mem_pool_size = 2M                                      
+# Set .._log_file_size to 25 % of buffer pool size                         
+#innodb_log_file_size = 5M                                                 
+#innodb_log_buffer_size = 8M                                              
+#innodb_flush_log_at_trx_commit = 1                                        
+#innodb_lock_wait_timeout = 50                                             
+                                                                           
+[mysqldump]                                                                
+quick                                                                      
+max_allowed_packet = 16M                                                   
+                                                                           
+[mysql]                                                                    
+no-auto-rehash                                                             
+# Remove the next comment character if you are not familiar with SQL       
+#safe-updates                                                              
+                                                                           
+[myisamchk]                                                                
+key_buffer_size = 20M                                                      
+sort_buffer_size = 20M                                                     
+read_buffer = 2M                                                           
+write_buffer = 2M                                                          
+                                                                           
+[mysqlhotcopy]                                                             
+interactive-timeout

+ 69 - 0
conf/dever/daocloud.conf

@@ -0,0 +1,69 @@
+[base]
+;image registry prefix; may be left empty
+library = daocloud.io/
+path = {base}
+
+[mysql]
+num = 1
+image = library/mysql:5.7.8-rc
+port = 3399:3306
+volumes = {path}conf/mysql/config:/etc/mysql/conf.d,{path}mysql/{name}/data:/var/lib/mysql
+environment = MYSQL_ROOT_PASSWORD=123456
+
+[php]
+num = 1
+image = library/php:5.6.25-fpm-alpine
+link = mysql{num}:mysql{num}
+volumes_from = mysql{num}
+volumes = {path}web:/var/www/html
+
+[nginx]
+num = 1
+image = library/nginx:stable-alpine
+port = 80:80
+link = php{num}:php{num}
+volumes_from = php{num}
+volumes = {path}web:/www,{path}nginx/config:/etc/nginx/conf.d,{path}nginx/{name}/logs:/var/log/nginx
+
+[gogs]
+num = 1
+image = daocloud/gogs:latest
+port = 10022:22,3000:3000
+link = mysql{num}:mysql{num}
+volumes_from = mysql{num}
+volumes = {path}gogs:/data
+
+;private registry
+[registry]
+num = 1
+image = library/registry:latest
+port = 5001:5000
+volumes = {path}registry/auth:/auth,{path}registry/certs:/certs,{path}registry/data:/var/lib/registry
+environment = REGISTRY_AUTH=htpasswd,"REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm",REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd
+entrypoint = htpasswd
+command = -Bbn rabin ilyxdd > {path}registry/auth/htpasswd
+;REGISTRY_HTTP_TLS_CERTIFICATE=/certs/registry.crt,REGISTRY_HTTP_TLS_KEY=/certs/registry.key
+
+[#memcached]
+num = 1
+image = library/memcached:alpine
+
+[#redis]
+num = 1
+image = library/redis:alpine
+
+[#python]
+num = 1
+image = library/python:alpine
+
+[#golang]
+num = 1
+image = library/golang:alpine
+
+[#alpine]
+num = 1
+image = library/alpine:latest
+
+[#ubuntu]
+num = 1
+image = library/ubuntu:latest

+ 71 - 0
conf/dever/data.conf

@@ -0,0 +1,71 @@
+[base]
+;image registry prefix
+library = docker.shemic.com/
+path = {base}
+network = hadoop
+
+[java]
+image = dev/java
+command = share_java
+alias = java
+restart = false
+
+[sbt]
+image = dev/java/sbt
+alias = sbt
+
+[mysql]
+image = db/mysql
+port = 3309:3306
+volumes = {path}conf/mysql:/etc/mysql,/mysql10/{name}/data:/var/lib/mysql
+environment = MYSQL_ROOT_PASSWORD=123456
+
+[hive]
+image = java/hive
+volumes = {path}conf/java/hive:/usr/local/hive/conf
+alias = hive
+
+[zookeeper]
+image = java/zookeeper
+num = 3
+volumes = {path}conf/java/zookeeper:/usr/local/zookeeper/conf
+command = zookeeper {i}
+alias = zkCli.sh -server data-zookeeper:2181->zkcli
+
+[spark]
+image = java/spark
+volumes = {path}conf/java/spark:/usr/local/spark/conf
+command = spark_log share
+alias = spark-shell,pyspark,spark-submit --class org.apache.spark.examples.SparkPi --driver-memory 512M --executor-memory 512M --master yarn --deploy-mode client /usr/local/spark/examples/jars/spark-examples_2.11-2.1.1.jar->spark-pi,spark-submit --driver-memory 512M --executor-memory 512M --master yarn --deploy-mode client /share/src/spark/target/scala-2.11/*.jar --class ->spark-submit
+
+[hadoop]
+image = java/hadoop
+port = 50070:50070,8088:8088
+volumes = {path}conf/java/hadoop:/usr/local/hadoop/etc/hadoop
+command = hadoop share
+alias = hadoop
+slave = 2
+hook.start = hadoop
+
+[hbase]
+image = java/hbase
+port = 60010:60010
+volumes = {path}conf/java/hbase:/usr/local/hbase/conf
+command = hbase
+slave = 2
+alias = hbase
+
+[zeppelin]
+image = java/zeppelin
+port = 9999:6060
+volumes = {path}conf/java/zeppelin:/usr/local/zeppelin/conf
+command = zeppelin
+
+[#master]
+image = data/hadoop
+num = 1
+port = 50070:50070,8088:8088,9000:9000,50090:50090,6066:6066,7077:7077,8080:8080,8081:8081,16010:16010
+#volumes = {path}conf/data/hadoop:/usr/local/hadoop/etc/hadoop,{path}conf/data/spark:/usr/local/spark/conf,{path}conf/data/zookeeper:/usr/local/zookeeper/conf,{path}conf/data/hbase:/usr/local/hbase/conf,{path}conf/data/flume:/usr/local/flume/conf,{path}logs/nginx/web-nginx/logs:/root/flume/output,{path}conf/data/kafka:/usr/local/kafka/conf,{path}src/java/lib:/opt/jdk
+#command = zookeeper-0 hadoop hbase
+#command = zookeeper-0 hadoop flume kafka
+command = hadoop
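
The alias syntax in this file appears to be "command->shortcut": a plausible reading is that dever registers a host-side shortcut that runs the left-hand command inside the container. A hedged sketch (the container name data-zookeeper1 is an assumption):
<pre>
# Assumed effect of: alias = zkCli.sh -server data-zookeeper:2181->zkcli
# Running "zkcli" on the host would amount to something like:
docker exec -it data-zookeeper1 zkCli.sh -server data-zookeeper:2181
</pre>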

+ 62 - 0
conf/dever/dev.conf

@@ -0,0 +1,62 @@
+[base]
+;image registry prefix
+library = docker.shemic.com/
+path = {base}
+
+
+[python]
+image = dev/python
+volumes = {path}src/python:/src
+
+[flask]
+image = dev/python/flask
+#daemon = false
+port = 5000:5000
+volumes = {path}src/python:/src
+
+[python3]
+#image = shemic/python3
+image = dev/python3
+volumes = {path}src/python3:/src
+
+[nodejs]
+image = dev/nodejs
+port = 8080:4000,8090:8080
+volumes = {path}src/nodejs:/src
+#command = nodejs reload
+
+[golang]
+image = dev/golang
+volumes = {path}src/golang:/src
+alias = go
+
+[java]
+image = dev/java
+volumes = {path}src/java:/src
+command = java
+alias = java
+
+[maven]
+image = dev/java/maven
+volumes = {path}src/maven:/src
+alias = mvn
+rely = data java
+
+[scala]
+image = dev/scala
+volumes = {path}src/scala:/src
+
+[thrift]
+image = java/thrift
+port = 8081:8080
+alias = thrift,thrift -r --gen php:server->thrift-php,php -S localhost:8080->start-php
+
+[alpine]
+image = os/alpine
+
+[alpine3.4]
+image = os/alpine/v3.4
+
+[ubuntu]
+image = os/ubuntu
+command = bash

+ 22 - 0
conf/dever/docker.conf

@@ -0,0 +1,22 @@
+[base]
+;official registry (empty prefix)
+library = 
+path = {base}
+
+[mongodb]
+num = 1
+image = mongo
+volumes = {path}data/elastic/{name}:/data/db
+
+[elastic]
+num = 1
+port = 21000:21000
+image = docker.elastic.co/elasticsearch/elasticsearch:5.4.1
+volumes = {path}conf/elastic/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml,{path}data/elastic/{name}:/data/elasticsearch
+environment = http.host=0.0.0.0,transport.host=127.0.0.1,ES_JAVA_OPTS=-Xms4096m -Xmx4096m
+
+[kibana]
+num = 1
+port = 5601:5601
+image =  docker.elastic.co/kibana/kibana:5.4.1
+environment = ELASTICSEARCH_URL=http://127.0.0.1:9200,ELASTICSEARCH_USERNAME=elastic,ELASTICSEARCH_PASSWORD=changeme

+ 37 - 0
conf/dever/elk.conf

@@ -0,0 +1,37 @@
+[base]
+;库来源
+library = docker.shemic.com/
+path = {base}
+network = elk
+;version number; currently unused
+version = elk5.5.1
+
+[java]
+image = dev/java
+command = share_java
+alias = java
+restart = false
+
+[es]
+image = elastic/elasticsearch
+port = 9200:9200
+volumes = {path}conf/elastic/elasticsearch:/usr/local/elasticsearch/config
+command = elasticsearch
+hook.start = elasticsearch
+
+[#logstash]
+;disabled for now; to enable it, update the filebeat configuration accordingly
+image = elastic/logstash
+volumes = {path}conf/elastic/logstash:/usr/local/logstash/config
+command = logstash
+
+[filebeat]
+image = elastic/filebeat
+volumes = {path}conf/elastic/filebeat:/usr/local/filebeat/config,{path}logs/nginx/web-nginx/logs:/root/filebeat
+command = filebeat
+
+[kibana]
+image = elastic/kibana
+port = 5602:5601
+volumes = {path}conf/elastic/kibana:/usr/local/kibana/config
+command = kibana
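
Read together, these sections suggest the pipeline nginx logs -> filebeat -> elasticsearch (:9200) -> kibana (:5602): [filebeat] mounts {path}logs/nginx/web-nginx/logs into its container, and [#logstash] is disabled. Assuming the README's run semantics, the whole stack would come up with:
<pre>
dever run elk        # start java, es, filebeat and kibana from elk.conf
# then browse http://&lt;host-ip&gt;:5602 for Kibana
</pre>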

+ 27 - 0
conf/dever/iot.conf

@@ -0,0 +1,27 @@
+[base]
+;image registry prefix
+library = docker.shemic.com/
+path = {base}
+network = iot
+
+[emq]
+image = iot/emqtt
+port = 1883:1883,7083:8083,8883:8883,8084:8084,18083:18083
+
+[influx]
+image = db/influxdb
+volumes = {path}conf/db/influxdb/influxdb.conf:/etc/influxdb/influxdb.conf
+environment = INFLUXDB_ADMIN_ENABLED=true
+port = 8083:8083,8086:8086
+expose = 8090,8099
+
+[pgsql]
+image = db/postgresql
+environment = POSTGRES_PASSWORD=123456
+port = 5432:5432
+alias = psql -U postgres->psql
+
+[demeter]
+image = iot/demeter
+port = 8099:8087
+command = demeter

+ 18 - 0
conf/dever/server.conf

@@ -0,0 +1,18 @@
+[base]
+;image registry prefix
+library = docker.shemic.com/
+path = {base}
+
+[memcached]
+num = 1
+image = db/memcached
+environment = MEMCACHED_PORT=11211-11212
+
+[mongodb]
+num = 1
+image = db/mongodb
+
+[redis]
+num = 1
+image = db/redis
+port = 8888:8888,6379:6379

+ 23 - 0
conf/dever/share.conf

@@ -0,0 +1,23 @@
+[base]
+;image registry prefix
+library = docker.shemic.com/
+path = {base}
+rsync = 1,2
+
+[java]
+image = dev/java
+command = share_java exit
+alias = java
+restart = false
+
+[hadoop]
+image = java/hadoop
+volumes = {path}conf/java/hadoop:/usr/local/hadoop/etc/hadoop
+command = share_hadoop exit
+restart = false
+
+[spark]
+image = java/spark
+volumes = {path}conf/java/spark:/usr/local/spark/conf
+command = share_spark exit
+restart = false

+ 26 - 0
conf/dever/tool.conf

@@ -0,0 +1,26 @@
+[base]
+;image registry prefix
+library = docker.shemic.com/
+path = {base}
+network = hadoop
+
+[zeppelin]
+num = 1
+image = data/zeppelin/me
+port = 10000:8080
+volumes = {path}conf/zeppelin:/usr/local/zeppelin/conf
+
+[jupyter]
+num = 1
+image = tool/jupyter
+port = 10001:8888
+volumes = {path}conf/jupyter:/root/.jupyter
+
+[apidoc]
+num = 1
+image = tool/apidoc
+volumes = {path}src/apidoc/input:/root/input,{path}web/apidoc:/root/output,{path}src/apidoc/config:/root/config
+call = apidoc
+#run with: dever call tool apidoc input=demo&out=output; the apidoc command is then available on the host
+param = -i /root/input/{$input:demo}/ -o /root/{$out:output} -c /root/config
+alias = apidoc

+ 33 - 0
conf/dever/web.conf

@@ -0,0 +1,33 @@
+[base]
+;image registry prefix
+library = docker.shemic.com/
+path = {base}
+network = iot
+
+[mysql]
+num = 1
+image = db/mysql
+port = 3309:3306
+volumes = {path}conf/db/mysql:/etc/mysql,/mysql/{name}/data:/var/lib/mysql
+environment = MYSQL_ROOT_PASSWORD=123456
+
+[php]
+num = 1
+#image = dev/php
+image = dev/php/mosquitto
+port = 8082:8080
+#server-memcached:server-memcached
+link = [mysql]{num}:[mysql]{num}
+volumes_from = [mysql]{num}
+volumes = {path}web:/www,{path}conf/php:/etc/php5,/etc/hosts:/etc/hosts.main
+#host = test:127.0.0.1,test1:127.0.0.1
+# aliases that expose container commands to the host, so e.g. php -i runs the container's php directly
+alias = php,composer
+
+[nginx]
+num = 1
+image = web/nginx
+port = 80:80,443:443
+link = [php]{num}:[php]{num}
+volumes_from = [php]{num}
+volumes = {path}web:/www,{path}conf/nginx:/etc/nginx,{path}logs/nginx/{name}/logs:/var/log/nginx
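
As a hedged illustration of what a section like [nginx] plausibly expands to (the exact flag mapping and the web-nginx1/web-php1 names are assumptions; the README only documents the web-php shortcut form, and {path} is left unexpanded):
<pre>
# Rough docker run equivalent of the [nginx] section above, with num = 1:
docker run -d --name web-nginx1 \
    -p 80:80 -p 443:443 \
    --link web-php1:web-php1 --volumes-from web-php1 \
    -v {path}web:/www -v {path}conf/nginx:/etc/nginx \
    -v {path}logs/nginx/web-nginx1/logs:/var/log/nginx \
    docker.shemic.com/web/nginx
</pre>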

+ 3 - 0
conf/docker/db/influxdb/Dockerfile

@@ -0,0 +1,3 @@
+FROM influxdb:alpine
+
+MAINTAINER Rabin "https://github.com/shemic"

+ 15 - 0
conf/docker/db/influxdb/my/Dockerfile

@@ -0,0 +1,15 @@
+FROM docker.shemic.com/os/alpine:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+ENV GOPATH=/usr/local/go
+RUN apk add --no-cache --update git go openssl-dev && \
+	go get github.com/influxdata/influxdb && \
+	cd $GOPATH/src/github.com/influxdata/ && \
+	go get ./... && \
+	go install ./... && \
+	apk del git go
+
+COPY influxdb.sh /entrypoint/influxdb.sh
+
+CMD ["influxdb"]

+ 19 - 0
conf/docker/db/influxdb/my/influxdb.sh

@@ -0,0 +1,19 @@
+#!/usr/bin/env sh
+set -e
+PHP="php-fpm"
+start_php()
+{
+	# exec would replace the main process; signal handling would break and end_php could never run
+	#exec php-fpm
+	process_start $PHP
+}
+
+stop_php()
+{
+	process_stop $PHP
+}
+
+monit_php()
+{
+	process_monit $PHP
+}

+ 13 - 0
conf/docker/db/memcached/Dockerfile

@@ -0,0 +1,13 @@
+FROM docker.shemic.com/os/alpine:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+USER root
+RUN apk update
+RUN apk add bash
+RUN apk add memcached
+
+COPY entrypoint.sh /entrypoint.sh
+ENTRYPOINT ["/entrypoint.sh"]
+
+CMD ["memcached"]

+ 42 - 0
conf/docker/db/memcached/entrypoint.sh

@@ -0,0 +1,42 @@
+#!/bin/bash
+set -e
+
+if [ "${1:0:1}" = '-' ]; then
+	set -- memcached "$@"
+fi
+
+dever_memcache()
+{
+	e=${1}" -u root -d -p "${2}" "${3}
+	eval $e
+	echo $e
+	echo 'Memcached init process complete; ready for start up.'
+}
+
+dever_start()
+{
+	if [ -n "$MEMCACHED_PORT" ] ; then
+		ifs="-"
+		if [[ $MEMCACHED_PORT =~ $ifs ]] ; then
+			port=(${MEMCACHED_PORT//-/ })
+			for i in ${port[@]} ;
+			do
+				dever_memcache $1 $i $3
+			done
+		else
+			dever_memcache $1 $MEMCACHED_PORT $3
+
+		fi
+	else
+		dever_memcache $1 $2 $3
+	fi
+}
+
+if [ "$1" = 'memcached' ]; then
+	d=11211
+	c=$MEMCACHED_COMMAND
+	m="memcached"
+	dever_start $m $d $c
+fi
+
+exec sh
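
The entrypoint above starts one memcached daemon per port when MEMCACHED_PORT contains a "-" range, or a single instance otherwise. An example invocation matching the environment line in conf/dever/server.conf:
<pre>
# Two daemons, on ports 11211 and 11212:
docker run -d -e MEMCACHED_PORT=11211-11212 docker.shemic.com/db/memcached
</pre>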

+ 13 - 0
conf/docker/db/mongodb/Dockerfile

@@ -0,0 +1,13 @@
+FROM docker.shemic.com/os/alpine:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+RUN echo 'http://dl-3.alpinelinux.org/alpine/edge/testing'>>/etc/apk/repositories
+RUN apk update
+RUN wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-3.4.5.tgz
+
+RUN mkdir /data
+
+VOLUME ["/data"]
+
+ENTRYPOINT ["sh"]

+ 27 - 0
conf/docker/db/mysql/Dockerfile

@@ -0,0 +1,27 @@
+FROM docker.shemic.com/os/alpine/v3.4:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+USER root
+RUN apk add --no-cache --update bash mysql mysql-client && \
+	mkdir /run/mysqld && chmod 777 /run/mysqld && \
+	rm -rf /var/lib/mysql && mkdir -p /var/lib/mysql && \
+	chmod 664 /etc/mysql/my.cnf
+
+
+RUN sed -Ei 's/^(bind-address|log|binlog)/#&/' /etc/mysql/my.cnf \
+	#&& sed -Ei 's/^(#skip-networking)/skip-networking/' /etc/mysql/my.cnf \
+	&& echo 'user=root' | awk '{ print } $1 == "[mysqld]" && c == 0 { c = 1; system("cat") }' /etc/mysql/my.cnf > /tmp/my.cnf \
+	&& mv /tmp/my.cnf /etc/mysql/my.cnf \
+	&& echo 'skip-host-cache' | awk '{ print } $1 == "[mysqld]" && c == 0 { c = 1; system("cat") }' /etc/mysql/my.cnf > /tmp/my.cnf \
+	&& mv /tmp/my.cnf /etc/mysql/my.cnf \
+	&& echo 'skip-name-resolve' | awk '{ print } $1 == "[mysqld]" && c == 0 { c = 1; system("cat") }' /etc/mysql/my.cnf > /tmp/my.cnf \
+	&& mv /tmp/my.cnf /etc/mysql/my.cnf
+
+VOLUME ["/var/lib/mysql", "/etc/mysql"]
+
+COPY entrypoint.sh /entrypoint.sh
+ENTRYPOINT ["/entrypoint.sh"]
+
+EXPOSE 3306
+CMD ["mysqld"]

+ 91 - 0
conf/docker/db/mysql/entrypoint.sh

@@ -0,0 +1,91 @@
+#!/bin/bash
+set -e
+
+# if command starts with an option, prepend mysqld
+if [ "${1:0:1}" = '-' ]; then
+	set -- mysqld "$@"
+fi
+
+if [ "$1" = 'mysqld' ]; then
+	# Get config
+	DATADIR="$("$@" --verbose --help 2>/dev/null | awk '$1 == "datadir" { print $2; exit }')"
+
+	if [ ! -d "$DATADIR/mysql" ]; then
+		if [ -z "$MYSQL_ROOT_PASSWORD" -a -z "$MYSQL_ALLOW_EMPTY_PASSWORD" ]; then
+			echo >&2 'error: database is uninitialized and MYSQL_ROOT_PASSWORD not set'
+			echo >&2 '  Did you forget to add -e MYSQL_ROOT_PASSWORD=... ?'
+			exit 1
+		fi
+
+		mkdir -p "$DATADIR"
+		#chown -R mysql:mysql "$DATADIR"
+
+		echo 'Running mysql_install_db'
+		mysql_install_db --user=root --datadir="$DATADIR" --rpm
+		echo 'Finished mysql_install_db'
+
+		mysqld --user=root --datadir="$DATADIR" --skip-networking &
+		pid="$!"
+
+		mysql=( mysql --protocol=socket -uroot )
+
+		for i in {30..0}; do
+			if echo 'SELECT 1' | "${mysql[@]}" &> /dev/null; then
+				break
+			fi
+			echo 'MySQL init process in progress...'
+			sleep 1
+		done
+		if [ "$i" = 0 ]; then
+			echo >&2 'MySQL init process failed.'
+			exit 1
+		fi
+
+		# sed is for https://bugs.mysql.com/bug.php?id=20545
+		mysql_tzinfo_to_sql /usr/share/zoneinfo | sed 's/Local time zone must be set--see zic manual page/FCTY/' | "${mysql[@]}" mysql
+
+		"${mysql[@]}" <<-EOSQL
+			-- What's done in this file shouldn't be replicated
+			--  or products like mysql-fabric won't work
+			SET @@SESSION.SQL_LOG_BIN=0;
+			DELETE FROM mysql.user ;
+			CREATE USER 'root'@'%' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}' ;
+			GRANT ALL ON *.* TO 'root'@'%' WITH GRANT OPTION ;
+			DROP DATABASE IF EXISTS test ;
+			FLUSH PRIVILEGES ;
+		EOSQL
+
+		if [ ! -z "$MYSQL_ROOT_PASSWORD" ]; then
+			mysql+=( -p"${MYSQL_ROOT_PASSWORD}" )
+		fi
+
+		if [ "$MYSQL_DATABASE" ]; then
+			echo "CREATE DATABASE IF NOT EXISTS \`$MYSQL_DATABASE\` ;" | "${mysql[@]}"
+			mysql+=( "$MYSQL_DATABASE" )
+		fi
+
+		if [ "$MYSQL_USER" -a "$MYSQL_PASSWORD" ]; then
+			echo "CREATE USER '"$MYSQL_USER"'@'%' IDENTIFIED BY '"$MYSQL_PASSWORD"' ;" | "${mysql[@]}"
+
+			if [ "$MYSQL_DATABASE" ]; then
+				echo "GRANT ALL ON \`"$MYSQL_DATABASE"\`.* TO '"$MYSQL_USER"'@'%' ;" | "${mysql[@]}"
+			fi
+
+			echo 'FLUSH PRIVILEGES ;' | "${mysql[@]}"
+		fi
+
+		if ! kill -s TERM "$pid" || ! wait "$pid"; then
+			echo >&2 'MySQL init process failed.'
+			exit 1
+		fi
+
+		echo
+		echo 'MySQL init process done. Ready for start up.'
+		echo
+	fi
+
+	#chown -R mysql:mysql "$DATADIR"
+fi
+
+mysqld --user=root --datadir="$DATADIR"
+#exec "$@"
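
On an empty data directory, the script requires MYSQL_ROOT_PASSWORD (or MYSQL_ALLOW_EMPTY_PASSWORD), runs mysql_install_db, and provisions the root account before starting mysqld for real. An example first run, mirroring the environment and volumes lines of conf/dever/web.conf (/mysql/mysql1/data stands in for the expanded /mysql/{name}/data):
<pre>
docker run -d -e MYSQL_ROOT_PASSWORD=123456 \
    -v /mysql/mysql1/data:/var/lib/mysql \
    docker.shemic.com/db/mysql
</pre>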

+ 17 - 0
conf/docker/db/pipelinedb/Dockerfile

@@ -0,0 +1,17 @@
+FROM docker.shemic.com/dev/python:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+ENV PGADMIN_HOME=/usr/local/hadoop 
+ENV PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
+
+RUN apk add --no-cache --update postgresql curl && \
+	curl -O https://ftp.postgresql.org/pub/pgadmin/pgadmin4/v1.6/pip/pgadmin4-1.6-py2.py3-none-any.whl && \
+	pip install pgadmin4-1.6-py2.py3-none-any.whl
+
+RUN mkdir /data
+
+VOLUME ["/data"]
+
+ENTRYPOINT ["sh"]

+ 66 - 0
conf/docker/db/pipelinedb/postgresql.sh

@@ -0,0 +1,66 @@
+#!/bin/bash
+set -e
+
+dever_yarn()
+{
+	$HADOOP_HOME/sbin/start-yarn.sh
+}
+
+dever_dfs()
+{
+	$HADOOP_HOME/sbin/start-dfs.sh
+}
+
+dever_start()
+{
+	dever_dfs
+	echo -e "\n"
+	dever_yarn
+	echo -e "\n"
+}
+
+dever_wordcount()
+{
+	mkdir input
+	echo "Hello Docker" >input/file2.txt
+	echo "Hello Hadoop" >input/file1.txt
+
+	# create input directory on HDFS
+	hadoop fs -mkdir -p input
+
+	# put input files to HDFS
+	hdfs dfs -put ./input/* input
+
+	# run wordcount 
+	hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/sources/hadoop-mapreduce-examples-2.7.2-sources.jar org.apache.hadoop.examples.WordCount input output
+
+	# print the input files
+	echo -e "\ninput file1.txt:"
+	hdfs dfs -cat input/file1.txt
+
+	echo -e "\ninput file2.txt:"
+	hdfs dfs -cat input/file2.txt
+
+	# print the output of wordcount
+	echo -e "\nwordcount output:"
+	hdfs dfs -cat output/part-r-00000
+}
+
+dever_ssh()
+{
+	/usr/sbin/sshd
+}
+
+if [ "$1" = 'start' ]; then
+	dever_start
+fi
+
+if [ "$1" = 'wordcount' ]; then
+	dever_wordcount
+fi
+
+if [ "$1" = 'ssh' ]; then
+	dever_ssh
+fi
+
+exec sh

+ 3 - 0
conf/docker/db/postgresql/Dockerfile

@@ -0,0 +1,3 @@
+FROM postgres:9.6.3-alpine
+
+MAINTAINER Rabin "https://github.com/shemic"

+ 8 - 0
conf/docker/db/postgresql/pgadmin/Dockerfile

@@ -0,0 +1,8 @@
+FROM docker.shemic.com/db/postgresql:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+RUN apk add --no-cache --update python py-pip python-dev pkgconf gcc gmp-dev postgresql-dev curl && \
+	curl -O https://ftp.postgresql.org/pub/pgadmin/pgadmin4/v1.6/pip/pgadmin4-1.6-py2.py3-none-any.whl && \
+	pip install pgadmin4-1.6-py2.py3-none-any.whl && \
+	rm pgadmin4-1.6-py2.py3-none-any.whl

+ 66 - 0
conf/docker/db/postgresql/pgadmin/postgresql.sh

@@ -0,0 +1,66 @@
+#!/bin/bash
+set -e
+
+dever_yarn()
+{
+	$HADOOP_HOME/sbin/start-yarn.sh
+}
+
+dever_dfs()
+{
+	$HADOOP_HOME/sbin/start-dfs.sh
+}
+
+dever_start()
+{
+	dever_dfs
+	echo -e "\n"
+	dever_yarn
+	echo -e "\n"
+}
+
+dever_wordcount()
+{
+	mkdir input
+	echo "Hello Docker" >input/file2.txt
+	echo "Hello Hadoop" >input/file1.txt
+
+	# create input directory on HDFS
+	hadoop fs -mkdir -p input
+
+	# put input files to HDFS
+	hdfs dfs -put ./input/* input
+
+	# run wordcount 
+	hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/sources/hadoop-mapreduce-examples-2.7.2-sources.jar org.apache.hadoop.examples.WordCount input output
+
+	# print the input files
+	echo -e "\ninput file1.txt:"
+	hdfs dfs -cat input/file1.txt
+
+	echo -e "\ninput file2.txt:"
+	hdfs dfs -cat input/file2.txt
+
+	# print the output of wordcount
+	echo -e "\nwordcount output:"
+	hdfs dfs -cat output/part-r-00000
+}
+
+dever_ssh()
+{
+	/usr/sbin/sshd
+}
+
+if [ "$1" = 'start' ]; then
+	dever_start
+fi
+
+if [ "$1" = 'wordcount' ]; then
+	dever_wordcount
+fi
+
+if [ "$1" = 'ssh' ]; then
+	dever_ssh
+fi
+
+exec sh

+ 66 - 0
conf/docker/db/postgresql/postgresql.sh

@@ -0,0 +1,66 @@
+#!/bin/bash
+set -e
+
+dever_yarn()
+{
+	$HADOOP_HOME/sbin/start-yarn.sh
+}
+
+dever_dfs()
+{
+	$HADOOP_HOME/sbin/start-dfs.sh
+}
+
+dever_start()
+{
+	dever_dfs
+	echo -e "\n"
+	dever_yarn
+	echo -e "\n"
+}
+
+dever_wordcount()
+{
+	mkdir input
+	echo "Hello Docker" >input/file2.txt
+	echo "Hello Hadoop" >input/file1.txt
+
+	# create input directory on HDFS
+	hadoop fs -mkdir -p input
+
+	# put input files to HDFS
+	hdfs dfs -put ./input/* input
+
+	# run wordcount 
+	hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/sources/hadoop-mapreduce-examples-2.7.2-sources.jar org.apache.hadoop.examples.WordCount input output
+
+	# print the input files
+	echo -e "\ninput file1.txt:"
+	hdfs dfs -cat input/file1.txt
+
+	echo -e "\ninput file2.txt:"
+	hdfs dfs -cat input/file2.txt
+
+	# print the output of wordcount
+	echo -e "\nwordcount output:"
+	hdfs dfs -cat output/part-r-00000
+}
+
+dever_ssh()
+{
+	/usr/sbin/sshd
+}
+
+if [ "$1" = 'start' ]; then
+	dever_start
+fi
+
+if [ "$1" = 'wordcount' ]; then
+	dever_wordcount
+fi
+
+if [ "$1" = 'ssh' ]; then
+	dever_ssh
+fi
+
+exec sh

+ 17 - 0
conf/docker/db/prometheus/Dockerfile

@@ -0,0 +1,17 @@
+FROM docker.shemic.com/dev/python:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+ENV PGADMIN_HOME=/usr/local/hadoop 
+ENV PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
+
+RUN apk add --no-cache --update postgresql curl && \
+	curl -O https://ftp.postgresql.org/pub/pgadmin/pgadmin4/v1.6/pip/pgadmin4-1.6-py2.py3-none-any.whl && \
+	pip install pgadmin4-1.6-py2.py3-none-any.whl
+
+RUN mkdir /data
+
+VOLUME ["/data"]
+
+ENTRYPOINT ["sh"]

+ 66 - 0
conf/docker/db/prometheus/postgresql.sh

@@ -0,0 +1,66 @@
+#!/bin/bash
+set -e
+
+dever_yarn()
+{
+	$HADOOP_HOME/sbin/start-yarn.sh
+}
+
+dever_dfs()
+{
+	$HADOOP_HOME/sbin/start-dfs.sh
+}
+
+dever_start()
+{
+	dever_dfs
+	echo -e "\n"
+	dever_yarn
+	echo -e "\n"
+}
+
+dever_wordcount()
+{
+	mkdir input
+	echo "Hello Docker" >input/file2.txt
+	echo "Hello Hadoop" >input/file1.txt
+
+	# create input directory on HDFS
+	hadoop fs -mkdir -p input
+
+	# put input files to HDFS
+	hdfs dfs -put ./input/* input
+
+	# run wordcount 
+	hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/sources/hadoop-mapreduce-examples-2.7.2-sources.jar org.apache.hadoop.examples.WordCount input output
+
+	# print the input files
+	echo -e "\ninput file1.txt:"
+	hdfs dfs -cat input/file1.txt
+
+	echo -e "\ninput file2.txt:"
+	hdfs dfs -cat input/file2.txt
+
+	# print the output of wordcount
+	echo -e "\nwordcount output:"
+	hdfs dfs -cat output/part-r-00000
+}
+
+dever_ssh()
+{
+	/usr/sbin/sshd
+}
+
+if [ "$1" = 'start' ]; then
+	dever_start
+fi
+
+if [ "$1" = 'wordcount' ]; then
+	dever_wordcount
+fi
+
+if [ "$1" = 'ssh' ]; then
+	dever_ssh
+fi
+
+exec sh

+ 19 - 0
conf/docker/db/redis/Dockerfile

@@ -0,0 +1,19 @@
+FROM docker.shemic.com/dev/python:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+RUN apk add --update redis git && \
+	echo 'vm.overcommit_memory = 1' >> /etc/sysctl.conf && \
+	echo "echo never > /sys/kernel/mm/transparent_hugepage/enabled" >> /etc/rc.local && \
+	pip install tornado && \
+	pip install redis && \
+	pip install python-dateutil && \
+	cd /usr/local && \
+	git clone https://github.com/kumarnitin/RedisLive.git redislive && \
+	apk del git
+
+COPY redis-live.conf /usr/local/redislive/src/redis-live.conf
+COPY entrypoint.sh /entrypoint.sh
+ENTRYPOINT ["/entrypoint.sh"]
+
+CMD ["redis"]

+ 17 - 0
conf/docker/db/redis/entrypoint.sh

@@ -0,0 +1,17 @@
+#!/bin/bash
+set -e
+
+dever_start()
+{
+	redis-server &
+	cd /usr/local/redislive/src/
+	./redis-monitor.py --duration=120 > /dev/null &
+	./redis-live.py > /dev/null &	
+}
+
+if [ "$1" = 'redis' ]; then
+	d=6379
+	dever_start $d
+fi
+
+exec sh
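
The entrypoint starts redis-server plus the RedisLive monitor and web UI; conf/dever/server.conf maps 8888 alongside 6379, which matches RedisLive's default dashboard port. Assuming the README's run semantics:
<pre>
dever run server redis
# redis on &lt;host-ip&gt;:6379, RedisLive dashboard on http://&lt;host-ip&gt;:8888
</pre>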

+ 23 - 0
conf/docker/db/redis/redis-live.conf

@@ -0,0 +1,23 @@
+{
+	"RedisServers":
+	[ 
+		{
+  			"server": "localhost",
+  			"port" : 6379
+		}
+		
+	],
+
+	"DataStoreType" : "redis",
+
+	"RedisStatsServer":
+	{
+		"server" : "localhost",
+		"port" : 6379
+	},
+	
+	"SqliteStatsStore" :
+	{
+		"path":  "to your sql lite file"
+	}
+}

+ 8 - 0
conf/docker/dev/erlang/Dockerfile

@@ -0,0 +1,8 @@
+FROM docker.shemic.com/os/alpine:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+RUN apk add --no-cache --update erlang
+RUN mkdir /src
+
+VOLUME ["/src"]

+ 8 - 0
conf/docker/dev/golang/Dockerfile

@@ -0,0 +1,8 @@
+FROM docker.shemic.com/os/alpine:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+RUN apk add --no-cache --update go g++
+RUN mkdir /src
+
+VOLUME ["/src"]

+ 54 - 0
conf/docker/dev/java/Dockerfile

@@ -0,0 +1,54 @@
+FROM docker.shemic.com/os/alpine/glibc:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+RUN apk add --no-cache --update curl && \
+    JAVA_VERSION_MAJOR=8 && \
+    JAVA_VERSION_MINOR=131 && \
+    JAVA_VERSION_BUILD=11 && \
+    JAVA_PACKAGE=jdk && \
+    mkdir /opt && \
+    mkdir /src && \
+    curl -jkSLH "Cookie: oraclelicense=accept-securebackup-cookie" http://download.oracle.com/otn-pub/java/jdk/${JAVA_VERSION_MAJOR}u${JAVA_VERSION_MINOR}-b${JAVA_VERSION_BUILD}/d54c1d3a095b4ff2b6607d096fa80163/${JAVA_PACKAGE}-${JAVA_VERSION_MAJOR}u${JAVA_VERSION_MINOR}-linux-x64.tar.gz \
+    | tar -xzf - -C /usr/local && \
+    ln -s /usr/local/jdk1.${JAVA_VERSION_MAJOR}.0_${JAVA_VERSION_MINOR} /opt/jdk && \
+    apk del curl && \
+    rm -rf /opt/jdk/*src.zip \
+           /opt/jdk/lib/missioncontrol \
+           /opt/jdk/lib/visualvm \
+           /opt/jdk/lib/*javafx* \
+           /opt/jdk/jre/plugin \
+           /opt/jdk/jre/bin/javaws \
+           /opt/jdk/jre/bin/jjs \
+           /opt/jdk/jre/bin/orbd \
+           /opt/jdk/jre/bin/pack200 \
+           /opt/jdk/jre/bin/policytool \
+           /opt/jdk/jre/bin/rmid \
+           /opt/jdk/jre/bin/rmiregistry \
+           /opt/jdk/jre/bin/servertool \
+           /opt/jdk/jre/bin/tnameserv \
+           /opt/jdk/jre/bin/unpack200 \
+           /opt/jdk/jre/lib/javaws.jar \
+           /opt/jdk/jre/lib/deploy* \
+           /opt/jdk/jre/lib/desktop \
+           /opt/jdk/jre/lib/*javafx* \
+           /opt/jdk/jre/lib/*jfx* \
+           /opt/jdk/jre/lib/amd64/libdecora_sse.so \
+           /opt/jdk/jre/lib/amd64/libprism_*.so \
+           /opt/jdk/jre/lib/amd64/libfxplugins.so \
+           /opt/jdk/jre/lib/amd64/libglass.so \
+           /opt/jdk/jre/lib/amd64/libgstreamer-lite.so \
+           /opt/jdk/jre/lib/amd64/libjavafx*.so \
+           /opt/jdk/jre/lib/amd64/libjfx*.so \
+           /opt/jdk/jre/lib/ext/jfxrt.jar \
+           /opt/jdk/jre/lib/ext/nashorn.jar \
+           /opt/jdk/jre/lib/oblique-fonts \
+           /opt/jdk/jre/lib/plugin.jar \
+           /tmp/* /var/cache/apk/*
+
+ENV JAVA_HOME=/opt/jdk
+ENV PATH=${PATH}:${JAVA_HOME}/bin:${JAVA_HOME}/sbin
+
+COPY java.sh /entrypoint/java.sh
+
+VOLUME ["/src"]

+ 11 - 0
conf/docker/dev/java/java.sh

@@ -0,0 +1,11 @@
+#!/usr/bin/env sh
+set -e
+
+start_share_java()
+{
+    share $JAVA_HOME jdk
+}
+start_java()
+{
+    start_share_java
+}

+ 12 - 0
conf/docker/dev/java/maven/Dockerfile

@@ -0,0 +1,12 @@
+FROM docker.shemic.com/java/base:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+ENV MAVEN_HOME=/usr/local/mvn \
+	MAVEN_VERSION=3.5.0
+ENV PATH=$PATH:$MAVEN_HOME/bin
+
+RUN curl -O ${MIRRORS}apache/maven/maven-3/$MAVEN_VERSION/binaries/apache-maven-$MAVEN_VERSION-bin.tar.gz && \
+	tar -zxvf apache-maven-$MAVEN_VERSION-bin.tar.gz && \
+	rm apache-maven-$MAVEN_VERSION-bin.tar.gz && \
+	mv apache-maven-$MAVEN_VERSION $MAVEN_HOME

+ 16 - 0
conf/docker/dev/java/sbt/Dockerfile

@@ -0,0 +1,16 @@
+FROM docker.shemic.com/java/base:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+ENV SBT_HOME=/usr/local/sbt \
+	SBT_VERSION=0.13.15
+ENV PATH=$PATH:$SBT_HOME/bin
+
+COPY repositories /root/repositories
+
+RUN curl -O -L https://github.com/sbt/sbt/releases/download/v${SBT_VERSION}/sbt-${SBT_VERSION}.tgz && \
+	tar -zxvf sbt-${SBT_VERSION}.tgz && \
+	rm sbt-${SBT_VERSION}.tgz && \
+	mv sbt ${SBT_HOME}
+
+VOLUME ["/usr/local/sbt/conf"]

+ 4 - 0
conf/docker/dev/java/sbt/repositories

@@ -0,0 +1,4 @@
+[repositories]
+  local
+  aliyun: http://maven.aliyun.com/nexus/content/groups/public/
+  central: http://repo1.maven.org/maven2/

+ 7 - 0
conf/docker/dev/java/sbt/sbt.sh

@@ -0,0 +1,7 @@
+#!/usr/bin/env sh
+set -e
+
+start_sbt()
+{
+    sbt sbt-version &
+}

+ 14 - 0
conf/docker/dev/nodejs/Dockerfile

@@ -0,0 +1,14 @@
+FROM docker.shemic.com/os/alpine:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+RUN mkdir /src
+VOLUME ["/src"]
+WORKDIR /src
+
+RUN apk add --no-cache --update nodejs nodejs-npm
+
+EXPOSE 8080
+
+COPY nodejs.sh /entrypoint/nodejs.sh
+COPY reload.sh /entrypoint/reload.sh

+ 23 - 0
conf/docker/dev/nodejs/nodejs.sh

@@ -0,0 +1,23 @@
+#!/usr/bin/env sh
+set -e
+
+start_nodejs()
+{
+	npm install
+	npm install -g pm2
+	pm2 start main.js --watch
+	echo 'Nodejs web init process complete; ready for start up.'
+	if [ "$1" = "reload" ]; then
+		/entrypoint/reload.sh reload &
+	fi
+}
+
+stop_nodejs()
+{
+	pm2 kill
+}
+
+monit_nodejs()
+{
+	process_monit pm2
+}

+ 10 - 0
conf/docker/dev/nodejs/reload.sh

@@ -0,0 +1,10 @@
+#!/usr/bin/env sh
+set -e
+
+if [ "$1" = "reload" ]; then
+	while true
+	do
+		pm2 reload all
+		sleep 3
+	done
+fi

+ 24 - 0
conf/docker/dev/php/Dockerfile

@@ -0,0 +1,24 @@
+FROM docker.shemic.com/dev/php/base:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+RUN apk add --no-cache --update php5-dev m4 autoconf gcc g++ make openssl-dev curl && \
+	cd /tmp && \
+	curl -O http://www.shemic.com/file/php/php-mongo-1.6.14.tgz && \
+	tar -xzvf php-mongo-1.6.14.tgz && \
+	cd mongo-1.6.14 && \
+	phpize && \
+	./configure --with-php-config=/usr/bin/php-config && \
+	make && \
+	make install && \
+	cd .. && \
+	curl -O http://www.shemic.com/file/php/php-redis-3.1.2.tgz && \
+	tar -xzvf php-redis-3.1.2.tgz && \
+	cd redis-3.1.2 && \
+	phpize && \
+	./configure --with-php-config=/usr/bin/php-config && \
+	make && \
+	make install && \
+	cd .. && \
+	apk del php5-dev m4 autoconf gcc g++ make openssl-dev curl git && \
+	rm -rf /tmp/* /var/cache/apk/*

+ 13 - 0
conf/docker/dev/php/base/Dockerfile

@@ -0,0 +1,13 @@
+FROM docker.shemic.com/os/alpine/v3.4:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+RUN apk add --no-cache --update curl php5-fpm php5-mysql php5-pgsql php5-pdo_mysql php5-pdo_pgsql \
+	php5-gd php5-curl php5-mcrypt php5-json php5-zlib php5-xml php5-iconv php5-zip php5-phar \
+	php5-memcache php5-openssl php5-dom && \
+	curl -sS https://getcomposer.org/installer | php && \
+	mv composer.phar /usr/bin/composer && \
+	apk del curl
+
+COPY php.sh /entrypoint/php.sh
+
+VOLUME ["/www", "/etc/php5"]
+
+EXPOSE 9000
+
+CMD ["php"]

+ 19 - 0
conf/docker/dev/php/base/php.sh

@@ -0,0 +1,19 @@
+#!/usr/bin/env sh
+set -e
+PHP="php-fpm"
+start_php()
+{
+	# exec would replace the main process; signal handling would break and end_php could never run
+	#exec php-fpm
+	process_start $PHP
+}
+
+stop_php()
+{
+	process_stop $PHP
+}
+
+monit_php()
+{
+	process_monit $PHP
+}

+ 15 - 0
conf/docker/dev/php/mosquitto/Dockerfile

@@ -0,0 +1,15 @@
+FROM docker.shemic.com/dev/php:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+RUN apk add --no-cache --update mosquitto-dev php5-dev autoconf gcc g++ make openssl-dev curl git && \
+	cd /tmp && \
+	git clone https://github.com/mgdm/Mosquitto-PHP && \
+	cd Mosquitto-PHP && \
+	phpize && \
+	./configure --with-php-config=/usr/bin/php-config && \
+	make && \
+	make install && \
+	cd .. && rm -rf Mosquitto-PHP && \
+	apk del php5-dev m4 autoconf gcc g++ make openssl-dev curl git && \
+	rm -rf /tmp/* /var/cache/apk/*

+ 15 - 0
conf/docker/dev/php/swoole/Dockerfile

@@ -0,0 +1,15 @@
+FROM docker.shemic.com/dev/php:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+RUN apk add --no-cache --update libaio-dev php5-dev autoconf gcc g++ make openssl-dev curl git && \
+	cd /tmp && \
+	git clone https://github.com/swoole/swoole-src.git && \
+	cd swoole-src && \
+	phpize && \
+	./configure --with-php-config=/usr/bin/php-config && \
+	make && \
+	make install && \
+	cd .. && rm -rf swoole-src && \
+	apk del php5-dev m4 autoconf gcc g++ make openssl-dev curl git && \
+	rm -rf /tmp/* /var/cache/apk/*

+ 9 - 0
conf/docker/dev/python/Dockerfile

@@ -0,0 +1,9 @@
+FROM docker.shemic.com/os/alpine:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+RUN apk update && apk add --no-cache bash python py-pip && \
+	pip install --upgrade pip && \
+	mkdir /src
+
+VOLUME ["/src"]

+ 13 - 0
conf/docker/dev/python/flask/Dockerfile

@@ -0,0 +1,13 @@
+FROM docker.shemic.com/dev/python:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+RUN pip install flask && \
+	pip install flask-sqlalchemy
+
+EXPOSE 5000
+
+COPY entrypoint.sh /entrypoint.sh
+ENTRYPOINT ["/entrypoint.sh"]
+
+CMD ["python-web"]

+ 27 - 0
conf/docker/dev/python/flask/entrypoint.sh

@@ -0,0 +1,27 @@
+#!/bin/bash
+set -e
+
+dever_python()
+{
+	e=${1}" "${2}"/main.py"
+	eval $e
+	echo $e
+	echo 'Python web init process complete; ready for start up.'
+}
+
+dever_start()
+{
+	if [ -n "$PYTHON_PATH" ] ; then
+		dever_python $1 $PYTHON_PATH
+	else
+		dever_python $1 $2
+	fi
+}
+
+if [ "$1" = 'python-web' ]; then
+	p="/src"
+	m="python"
+	dever_start $m $p
+fi
+
+exec sh
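
dever_start runs main.py from $PYTHON_PATH when that variable is set, otherwise from /src. An example overriding the source directory (the /app path is arbitrary):
<pre>
docker run -d -p 5000:5000 -e PYTHON_PATH=/app \
    -v "$(pwd)":/app docker.shemic.com/dev/python/flask
</pre>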

+ 9 - 0
conf/docker/dev/python/v3/Dockerfile

@@ -0,0 +1,9 @@
+FROM docker.shemic.com/os/alpine:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+RUN apk update && apk add --no-cache bash python3 py-pip && \
+	pip install --upgrade pip && \
+	mkdir /src
+
+VOLUME ["/src"]

+ 21 - 0
conf/docker/dev/scala/Dockerfile

@@ -0,0 +1,21 @@
+FROM docker.shemic.com/dev/java:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+ARG SCALA_VERSION
+ARG SCALA_HOME
+
+ENV SCALA_VERSION=${SCALA_VERSION:-2.12.2}
+ENV SCALA_HOME=${SCALA_HOME:-/opt/scala}
+ENV PATH=${PATH}:${JAVA_HOME}/bin:${JAVA_HOME}/sbin:${SCALA_HOME}/bin
+
+RUN java -version 2>&1 | grep version | sed -e 's/^openjdk version /JAVA_VERSION=/' > $JAVA_HOME/release
+
+RUN apk add --no-cache --update curl && \ 
+    curl -O https://downloads.lightbend.com/scala/$SCALA_VERSION/scala-$SCALA_VERSION.tgz && \
+    tar -xf scala-$SCALA_VERSION.tgz && \
+    rm scala-$SCALA_VERSION.tgz && \
+    mv scala-$SCALA_VERSION $SCALA_HOME && \
+    apk del curl
+
+ENTRYPOINT ["sh"]
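
The ARG/ENV pairing above (ENV SCALA_VERSION=${SCALA_VERSION:-2.12.2}) lets a build argument override the default while keeping the value available at run time. An example build, using the file's path in this commit as the context:
<pre>
# Defaults to Scala 2.12.2 when --build-arg is omitted:
docker build --build-arg SCALA_VERSION=2.12.2 \
    -t docker.shemic.com/dev/scala conf/docker/dev/scala
</pre>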

+ 31 - 0
conf/docker/elastic/elasticsearch/Dockerfile

@@ -0,0 +1,31 @@
+FROM docker.shemic.com/os/alpine/glibc:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+# set environment variable
+ENV ES_HOME=/usr/local/elasticsearch
+ENV ELASTICSEARCH_VERSION=5.5.1
+ENV PATH=$PATH:$ES_HOME/bin
+ENV JAVA_HOME=/share/lib/jdk
+ENV PATH=${PATH}:${JAVA_HOME}/bin:${JAVA_HOME}/sbin
+
+# install elasticsearch
+RUN apk add --no-cache --update bash curl procps ncurses openjdk8 && \
+	curl -O https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-${ELASTICSEARCH_VERSION}.tar.gz && \
+    tar -xzvf elasticsearch-${ELASTICSEARCH_VERSION}.tar.gz && \
+    mv elasticsearch-${ELASTICSEARCH_VERSION} $ES_HOME && \
+    rm elasticsearch-${ELASTICSEARCH_VERSION}.tar.gz && \
+    adduser es -D && \
+    mkdir -p /elasticsearch/data && \
+    mkdir -p /elasticsearch/logs && \
+    chown -R es:es $ES_HOME /elasticsearch /tmp && \
+    chmod -R 777 /tmp /elasticsearch && \
+    cd $ES_HOME && \
+    echo y | elasticsearch-plugin install x-pack && \
+    apk del curl openjdk8
+
+USER es
+
+COPY elasticsearch.sh /entrypoint/elasticsearch.sh
+
+VOLUME ["/usr/local/elasticsearch/config"]

+ 17 - 0
conf/docker/elastic/elasticsearch/elasticsearch.sh

@@ -0,0 +1,17 @@
+#!/usr/bin/env sh
+set -e
+
+start_elasticsearch()
+{
+	elasticsearch -d
+}
+
+stop_elasticsearch()
+{
+	true
+}
+
+monit_elasticsearch()
+{
+	process_monit elasticsearch
+}

+ 19 - 0
conf/docker/elastic/filebeat/Dockerfile

@@ -0,0 +1,19 @@
+FROM docker.shemic.com/java/base:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+# set environment variable
+ENV FILEBEAT_HOME=/usr/local/filebeat
+ENV FILEBEAT_VERSION=5.5.1
+ENV PATH=$PATH:$FILEBEAT_HOME
+
+# install filebeat
+RUN curl -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-${FILEBEAT_VERSION}-linux-x86_64.tar.gz && \
+    tar -xzvf filebeat-${FILEBEAT_VERSION}-linux-x86_64.tar.gz && \
+    mv filebeat-${FILEBEAT_VERSION}-linux-x86_64 $FILEBEAT_HOME && \
+    rm filebeat-${FILEBEAT_VERSION}-linux-x86_64.tar.gz && \
+    mkdir -p /root/filebeat
+
+COPY filebeat.sh /entrypoint/filebeat.sh
+
+VOLUME ["/usr/local/filebeat/config", "/root/filebeat"]

+ 13 - 0
conf/docker/elastic/filebeat/filebeat.sh

@@ -0,0 +1,13 @@
+#!/usr/bin/env sh
+set -e
+
+start_filebeat()
+{
+	cp -R $FILEBEAT_HOME/config/filebeat.yml $FILEBEAT_HOME/filebeat.yml
+	$FILEBEAT_HOME/filebeat -c $FILEBEAT_HOME/filebeat.yml &
+}
+
+monit_filebeat()
+{
+	process_monit filebeat
+}

+ 24 - 0
conf/docker/elastic/kibana/Dockerfile

@@ -0,0 +1,24 @@
+FROM docker.shemic.com/java/base:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+# set environment variable
+ENV KIBANA_HOME=/usr/local/kibana
+ENV KIBANA_VERSION=5.5.1
+ENV PATH=$PATH:$KIBANA_HOME/bin
+
+# install kibana
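+# (the bundled node binary is linked against glibc and fails on musl/Alpine, so the launch scripts are patched to use the apk-installed nodejs)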
+RUN apk add --no-cache --update libstdc++ openjdk8 nodejs && \
+	curl -O https://artifacts.elastic.co/downloads/kibana/kibana-${KIBANA_VERSION}-linux-x86_64.tar.gz && \
+    tar -xzvf kibana-${KIBANA_VERSION}-linux-x86_64.tar.gz && \
+    mv kibana-${KIBANA_VERSION}-linux-x86_64 $KIBANA_HOME && \
+    rm kibana-${KIBANA_VERSION}-linux-x86_64.tar.gz && \
+    cd $KIBANA_HOME && \
+    sed -i 's/NODE="${DIR}\/node\/bin\/node"/NODE="node"/g' $KIBANA_HOME/bin/kibana && \
+    sed -i 's/NODE="${DIR}\/node\/bin\/node"/NODE="node"/g' $KIBANA_HOME/bin/kibana-plugin && \
+    kibana-plugin install x-pack && \
+    apk del openjdk8
+
+COPY kibana.sh /entrypoint/kibana.sh
+
+VOLUME ["/usr/local/kibana/config"]

+ 17 - 0
conf/docker/elastic/kibana/kibana.sh

@@ -0,0 +1,17 @@
+#!/usr/bin/env sh
+set -e
+
+start_kibana()
+{
+	kibana >/dev/null &
+}
+
+stop_kibana()
+{
+	true
+}
+
+monit_kibana()
+{
+	process_monit kibana
+}

+ 19 - 0
conf/docker/elastic/logstash/Dockerfile

@@ -0,0 +1,19 @@
+FROM docker.shemic.com/java/base:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+# set environment variable
+ENV LOGSTASH_HOME=/usr/local/logstash
+ENV LOGSTASH_VERSION=5.5.1
+ENV PATH=$PATH:$LOGSTASH_HOME/bin
+
+# install logstash
+RUN curl -O https://artifacts.elastic.co/downloads/logstash/logstash-${LOGSTASH_VERSION}.tar.gz && \
+    tar -xzvf logstash-${LOGSTASH_VERSION}.tar.gz && \
+    mv logstash-${LOGSTASH_VERSION} $LOGSTASH_HOME && \
+    rm logstash-${LOGSTASH_VERSION}.tar.gz && \
+    mkdir -p /root/logstash
+
+COPY logstash.sh /entrypoint/logstash.sh
+
+VOLUME ["/usr/local/logstash/config", "/root/logstash"]

+ 17 - 0
conf/docker/elastic/logstash/logstash.sh

@@ -0,0 +1,17 @@
+#!/usr/bin/env sh
+set -e
+
+start_logstash()
+{
+	logstash -f $LOGSTASH_HOME/config/logstash.conf &
+}
+
+stop_logstash()
+{
+	true
+}
+
+monit_logstash()
+{
+	process_monit logstash
+}

+ 14 - 0
conf/docker/ha/haproxy/Dockerfile

@@ -0,0 +1,14 @@
+FROM docker.shemic.com/os/alpine/v3.4:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+RUN apk add --no-cache --update nginx
+
+COPY nginx.sh /entrypoint/nginx.sh
+
+VOLUME ["/www", "/etc/nginx", "/var/log/nginx"]
+
+EXPOSE 80
+EXPOSE 443
+
+CMD ["nginx"]

+ 13 - 0
conf/docker/ha/haproxy/nginx.sh

@@ -0,0 +1,13 @@
+#!/usr/bin/env sh
+set -e
+
+start_nginx()
+{
+	#exec nginx
+	nginx &
+}
+
+stop_nginx()
+{
+    nginx -s stop
+}

+ 14 - 0
conf/docker/ha/keepalived/Dockerfile

@@ -0,0 +1,14 @@
+FROM docker.shemic.com/os/alpine/v3.4:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+RUN apk add --no-cache --update nginx
+
+COPY nginx.sh /entrypoint/nginx.sh
+
+VOLUME ["/www", "/etc/nginx", "/var/log/nginx"]
+
+EXPOSE 80
+EXPOSE 443
+
+CMD ["nginx"]

+ 13 - 0
conf/docker/ha/keepalived/nginx.sh

@@ -0,0 +1,13 @@
+#!/usr/bin/env sh
+set -e
+
+start_nginx()
+{
+	#exec nginx
+	nginx &
+}
+
+stop_nginx()
+{
+    nginx -s stop
+}

+ 22 - 0
conf/docker/iot/demeter/Dockerfile

@@ -0,0 +1,22 @@
+FROM docker.shemic.com/dev/python:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+ENV DEMETER_HOME=/usr/local/demeter
+ENV PATH=$PATH:$DEMETER_HOME
+
+RUN apk add --no-cache --update py-gevent py-psycopg2 git && \
+	pip install tornado && \
+	pip install pytest-runner && \
+	pip install paho-mqtt && \
+	pip install influxdb && \
+	pip install short_url && \
+	#pip install hashlib && \
+	git clone http://git.shemic.com:3000/atom/demeter.git $DEMETER_HOME && \
+	chmod -R 777 $DEMETER_HOME/* && \
+	rm -rf $DEMETER_HOME/runtime/postgresql/* && \
+	apk del git
+
+EXPOSE 8087 8088
+
+COPY demeter.sh /entrypoint/demeter.sh

+ 17 - 0
conf/docker/iot/demeter/demeter.sh

@@ -0,0 +1,17 @@
+#!/usr/bin/env sh
+set -e
+start_demeter()
+{
+    install.py
+    process_start admin.py
+}
+
+stop_demeter()
+{
+	process_stop admin.py
+}
+
+monit_demeter()
+{
+    process_monit admin.py
+}

+ 16 - 0
conf/docker/iot/emqtt/Dockerfile

@@ -0,0 +1,16 @@
+FROM docker.shemic.com/os/alpine:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+RUN mkdir /src
+VOLUME ["/src"]
+WORKDIR /src
+
+RUN apk add --no-cache --update erlang git make perl && \
+	git clone https://github.com/emqtt/emq-relx.git && \
+	cd emq-relx && \
+	make
+
+EXPOSE 1883 8083 8883 8084 18083
+
+COPY emqtt.sh /entrypoint/emqtt.sh

+ 18 - 0
conf/docker/iot/emqtt/emqtt.sh

@@ -0,0 +1,18 @@
+#!/usr/bin/env sh
+set -e
+
+start_emqtt()
+{
+	# assumption: "make" leaves the emq-relx release under /src/emq-relx/_rel/emqttd
+	/src/emq-relx/_rel/emqttd/bin/emqttd start
+	echo 'emqtt init process complete; ready for start up.'
+}
+
+stop_emqtt()
+{
+	/src/emq-relx/_rel/emqttd/bin/emqttd stop
+}
+
+monit_emqtt()
+{
+	process_monit emqttd
+}

+ 23 - 0
conf/docker/java/base/Dockerfile

@@ -0,0 +1,23 @@
+FROM docker.shemic.com/os/alpine/glibc:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+WORKDIR /root
+
+# set environment variable
+ENV JAVA_HOME=/share/lib/jdk
+ENV PATH=${PATH}:${JAVA_HOME}/bin:${JAVA_HOME}/sbin
+
+COPY ssh_config /root/.ssh/config
+
+# install ssh... [del bash-completion]
+RUN apk add --no-cache --update openssh rsync procps bash ncurses curl && \
+	ssh-keygen -t rsa -f /root/.ssh/id_rsa -P '' && \
+    cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys && \
+	ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key -P '' && \
+	ssh-keygen -t dsa -f /etc/ssh/ssh_host_dsa_key -P '' && \
+	ssh-keygen -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key -P '' && \
+	ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -P '' && \
+	chmod -R 600 /root/.ssh/*
+
+COPY init.sh /entrypoint/init.sh

+ 8 - 0
conf/docker/java/base/init.sh

@@ -0,0 +1,8 @@
+#!/usr/bin/env sh
+set -e
+start_sshd()
+{
+	/usr/sbin/sshd
+}
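+# "check" and "hosts" are expected to come from the shared process.sh hook (assumption based on the entrypoint layout)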
+check jdk lib
+hosts

+ 9 - 0
conf/docker/java/base/ssh_config

@@ -0,0 +1,9 @@
+Host localhost
+  StrictHostKeyChecking no
+
+Host 0.0.0.0
+  StrictHostKeyChecking no
+
+Host *
+  StrictHostKeyChecking no
+  UserKnownHostsFile=/dev/null

+ 49 - 0
conf/docker/java/combine/entrypoint.sh

@@ -0,0 +1,49 @@
+#!/bin/bash
+set -e
+
+init()
+{
+    /usr/sbin/sshd
+}
+
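+# source every hook script in /entrypoint/ so its start_*/stop_* functions become available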
+loadStart()
+{
+    dir=/entrypoint/
+    loop=$(ls -l $dir |awk '{print $9}')
+    for i in $loop
+    do
+        source $dir$i
+    done
+}
+
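+# start "name[-arg]" resolves to "start_name arg", e.g. "hadoop-share" runs "start_hadoop share"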
+start()
+{
+	loadStart
+    if [[ $1 =~ "-" ]]; then
+        OLD_IFS="$IFS"
+        IFS="-"
+        arr=($1)
+        IFS="$OLD_IFS"
+        start="start_${arr[0]}"
+        echo $start
+        eval $start ${arr[1]}
+    else
+        start="start_$1"
+        echo $start
+        eval $start
+    fi
+}
+
+init
+
+if [ "$1" != "sh" ]; then
+    for args in $@
+    do
+        start $args
+        echo -e "\n"
+    done
+fi
+jps
+netstat -apn
+
+exec sh

+ 25 - 0
conf/docker/java/flume/Dockerfile

@@ -0,0 +1,25 @@
+FROM docker.shemic.com/java/base:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+# set environment variable
+ENV HADOOP_HOME=/share/lib/hadoop
+ENV HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
+ENV YARN_CONF_DIR=$HADOOP_CONF_DIR
+ENV PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
+
+ENV FLUME_HOME=/usr/local/flume
+ENV FLUME_VERSION=1.7.0
+ENV PATH=$PATH:$FLUME_HOME/bin
+
+# install
+RUN curl -O ${MIRRORS}apache/flume/$FLUME_VERSION/apache-flume-$FLUME_VERSION-bin.tar.gz && \
+	tar -xzvf apache-flume-$FLUME_VERSION-bin.tar.gz && \
+	mv apache-flume-$FLUME_VERSION-bin $FLUME_HOME && \
+	rm apache-flume-$FLUME_VERSION-bin.tar.gz && \
+	mkdir -p /root/flume/input && \
+	mkdir -p /root/flume/output
+
+COPY flume.sh /entrypoint/flume.sh
+
+VOLUME ["/usr/local/flume/conf", "/root/flume/input", "/root/flume/output"]

+ 24 - 0
conf/docker/java/flume/flume.sh

@@ -0,0 +1,24 @@
+#!/bin/bash
+set -e
+
+start_flume()
+{
+	check hadoop
+	hadoop_mkdir flume
+	#flume-ng agent -n agent1 -c conf -f /usr/local/flume/conf/flume.conf -Dflume.root.logger=DEBUG,console
+	if [ "$1" != '' ]; then
+		flume-ng agent -n $1 -c conf -f /usr/local/flume/conf/flume.conf 
+	else
+		flume-ng agent -n agent -c conf -f /usr/local/flume/conf/flume.conf 
+	fi
+}
+
+stop_flume()
+{
+	true
+}
+
+monit_flume()
+{
+	process_monit flume-ng
+}

+ 29 - 0
conf/docker/java/hadoop/Dockerfile

@@ -0,0 +1,29 @@
+FROM docker.shemic.com/java/base:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+# set environment variable
+ENV HADOOP_HOME=/usr/local/hadoop
+ENV HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
+ENV HADOOP_VERSION=2.7.3
+ENV HADOOP_LOG_DIR=/root/hadoop/logs
+ENV YARN_CONF_DIR=$HADOOP_CONF_DIR
+ENV YARN_LOG_DIR=/root/yarn/logs
+ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HADOOP_HOME/lib/native
+ENV PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
+
+# install hadoop
+RUN mkdir -p /root/hdfs/namenode && \ 
+    mkdir -p /root/hdfs/datanode && \
+    mkdir -p /root/hadoop/tmp && \
+    mkdir -p /root/hadoop/logs && \
+    mkdir -p /root/yarn/logs && \
+    curl -O ${MIRRORS}apache/hadoop/common/hadoop-$HADOOP_VERSION/hadoop-$HADOOP_VERSION.tar.gz && \
+    tar -xzvf hadoop-$HADOOP_VERSION.tar.gz && \
+    rm -rf hadoop-$HADOOP_VERSION/share/doc hadoop-$HADOOP_VERSION/share/hadoop/common/jdiff && \
+    mv hadoop-$HADOOP_VERSION $HADOOP_HOME && \
+    rm hadoop-$HADOOP_VERSION.tar.gz
+
+COPY hadoop.sh /entrypoint/hadoop.sh
+
+VOLUME ["/usr/local/hadoop/etc/hadoop"]

+ 35 - 0
conf/docker/java/hadoop/hadoop.sh

@@ -0,0 +1,35 @@
+#!/usr/bin/env sh
+set -e
+
+start_share_hadoop()
+{
+    share $HADOOP_HOME hadoop
+}
+
+stop_share_hadoop()
+{
+    true
+}
+
+start_hadoop()
+{
+	if [ "$1" == "share" ]; then
+        start_share_hadoop
+    fi
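+    # dev-style bootstrap: wipe any previous HDFS state and re-format the namenode on every start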
+    rm -rf /root/hdfs/*
+    rm -rf /root/hadoop/tmp/*
+    $HADOOP_HOME/bin/hdfs namenode -format
+    #$HADOOP_HOME/sbin/start-dfs.sh
+    #$HADOOP_HOME/sbin/start-yarn.sh
+    $HADOOP_HOME/sbin/start-all.sh
+}
+
+stop_hadoop()
+{
+	stop-all.sh &
+}
+
+monit_hadoop()
+{
+    process_monit hdfs
+}

+ 27 - 0
conf/docker/java/hbase/Dockerfile

@@ -0,0 +1,27 @@
+FROM docker.shemic.com/java/base:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+# set environment variable
+ENV HADOOP_HOME=/share/lib/hadoop
+ENV HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
+ENV YARN_CONF_DIR=$HADOOP_CONF_DIR
+ENV PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
+
+ENV HBASE_HOME=/usr/local/hbase
+ENV HBASE_VERSION=1.2.6
+ENV HBASE_MANAGES_ZK=false
+ENV HBASE_LOG_DIR=/root/hbase/logs
+ENV PATH=$PATH:$HBASE_HOME/bin
+
+# install
+RUN curl -O ${MIRRORS}apache/hbase/$HBASE_VERSION/hbase-$HBASE_VERSION-bin.tar.gz && \
+	tar -xzvf hbase-$HBASE_VERSION-bin.tar.gz && \
+	mv hbase-$HBASE_VERSION $HBASE_HOME && \
+	rm hbase-$HBASE_VERSION-bin.tar.gz && \
+	mkdir -p $HBASE_LOG_DIR && \
+	rm -rf $HBASE_HOME/docs/
+
+COPY hbase.sh /entrypoint/hbase.sh
+
+VOLUME ["/usr/local/hbase/conf"]

+ 25 - 0
conf/docker/java/hbase/hbase.sh

@@ -0,0 +1,25 @@
+#!/usr/bin/env sh
+set -e
+
+start_share_hbase()
+{
+    share $HBASE_HOME hbase
+}
+
+stop_share_hbase()
+{
+    true
+}
+
+start_hbase()
+{
+	#share $HBASE_HOME hbase
+	check hadoop
+	hadoop_mkdir hbase
+    start-hbase.sh
+}
+
+stop_hbase()
+{
+	stop-hbase.sh &
+}

+ 23 - 0
conf/docker/java/hive/Dockerfile

@@ -0,0 +1,23 @@
+FROM docker.shemic.com/java/base:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+# set environment variable
+ENV HADOOP_HOME=/share/lib/hadoop
+ENV SPARK_HOME=/share/lib/spark
+ENV PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$SPARK_HOME/bin
+
+ENV HIVE_HOME=/usr/local/hive
+ENV HIVE_VERSION=2.1.1
+ENV PATH=$PATH:$HIVE_HOME/bin
+
+# install
+RUN curl -O ${MIRRORS}apache/hive/hive-$HIVE_VERSION/apache-hive-$HIVE_VERSION-bin.tar.gz && \
+	tar -xzvf apache-hive-$HIVE_VERSION-bin.tar.gz && \
+	mv apache-hive-$HIVE_VERSION-bin $HIVE_HOME && \
+	rm apache-hive-$HIVE_VERSION-bin.tar.gz && \
+	mkdir -p /root/hive/tmp && \
+	mkdir -p /root/hive/logs
+
+#COPY apache-hive-$HIVE_VERSION-bin $HIVE_HOME
+COPY hive.sh /entrypoint/hive.sh
+COPY plugin/* $HIVE_HOME/lib/
+
+VOLUME ["/usr/local/hive/conf"]

+ 18 - 0
conf/docker/java/hive/hive.sh

@@ -0,0 +1,18 @@
+#!/usr/bin/env sh
+set -e
+
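+# create the warehouse/tmp dirs in HDFS, then run the metastore and HiveServer2 in the background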
+start_hive()
+{
+	check hadoop
+	hadoop fs -mkdir -p /hive/warehouse
+	hadoop fs -mkdir -p /hive/tmp
+	hadoop fs -chmod -R 777 /hive
+	hive --service metastore &
+	hive --service hiveserver2 &
+    echo "start hive"
+}
+
+stop_hive()
+{
+	echo "stop hive"
+}

BIN
conf/docker/java/hive/plugin/mysql-connector-java-5.1.42.jar


+ 27 - 0
conf/docker/java/spark/Dockerfile

@@ -0,0 +1,27 @@
+FROM docker.shemic.com/java/base:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+# set environment variable
+ENV HADOOP_HOME=/share/lib/hadoop
+ENV HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
+ENV YARN_CONF_DIR=$HADOOP_CONF_DIR
+ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HADOOP_HOME/lib/native
+ENV PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
+
+ENV SPARK_HOME=/usr/local/spark
+ENV SPARK_VERSION=2.1.1
+ENV SPARK_HADOOP=hadoop2.7
+ENV PATH=$PATH:$SPARK_HOME/bin
+
+# install spark
+RUN  apk add --no-cache --update python && \
+	curl -O ${MIRRORS}apache/spark/spark-$SPARK_VERSION/spark-$SPARK_VERSION-bin-$SPARK_HADOOP.tgz && \
+	tar -xzvf spark-$SPARK_VERSION-bin-$SPARK_HADOOP.tgz && \
+	mv spark-$SPARK_VERSION-bin-$SPARK_HADOOP $SPARK_HOME && \
+	rm spark-$SPARK_VERSION-bin-$SPARK_HADOOP.tgz && \
+	mkdir -p /root/spark/logs
+
+COPY spark.sh /entrypoint/spark.sh
+
+VOLUME ["/usr/local/spark/conf"]

+ 45 - 0
conf/docker/java/spark/spark.sh

@@ -0,0 +1,45 @@
+#!/usr/bin/env sh
+set -e
+
+start_share_spark()
+{
+    share $SPARK_HOME spark
+}
+
+stop_share_spark()
+{
+    true
+}
+
+start_spark()
+{
+    if [ "$1" == "share" ]; then
+        start_share_spark
+    fi
+    $SPARK_HOME/sbin/start-all.sh
+}
+
+stop_spark()
+{
+    $SPARK_HOME/sbin/stop-all.sh &
+}
+
+start_spark_log()
+{
+    if [ "$1" == "share" ]; then
+        start_share_spark
+    fi
+    check hadoop
+    hadoop_mkdir spark spark/jars spark/log
+    hadoop fs -put -f /usr/local/spark/jars/* /spark/jars/ &
+}
+
+stop_spark_log()
+{
+    true
+}
+
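+# smoke test: submit the bundled SparkPi example to YARN in client mode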
+spark_test_pi()
+{
+	spark-submit --class org.apache.spark.examples.SparkPi --master yarn --deploy-mode client /usr/local/spark/examples/jars/spark-examples_2.11-2.1.1.jar
+}

+ 16 - 0
conf/docker/java/thrift/Dockerfile

@@ -0,0 +1,16 @@
+FROM docker.shemic.com/java/base:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+ENV THRIFT_HOME=/usr/local/thrift \
+	THRIFT_VERSION=0.10.0
+ENV PATH=$PATH:$THRIFT_HOME/bin
+
+RUN apk add --no-cache --update automake bison g++ git libevent-dev libressl-dev libtool make pkgconf php7-dev && \
+	curl -O ${MIRRORS}apache/thrift/$THRIFT_VERSION/thrift-$THRIFT_VERSION.tar.gz && \
+	tar -zxvf thrift-$THRIFT_VERSION.tar.gz && \
+	cd thrift-$THRIFT_VERSION && \
+	./configure --prefix=$THRIFT_HOME && make && make install && \
+	cd .. && \
+	rm -rf thrift-$THRIFT_VERSION* && \
+	apk del automake bison g++ git libevent-dev libressl-dev libtool make

+ 22 - 0
conf/docker/java/zeppelin/Dockerfile

@@ -0,0 +1,22 @@
+FROM docker.shemic.com/java/base:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+# set environment variable
+ENV HADOOP_HOME=/share/lib/hadoop
+ENV SPARK_HOME=/share/lib/spark
+ENV PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$SPARK_HOME/bin
+
+ENV ZEPPELIN_HOME=/usr/local/zeppelin
+ENV ZEPPELIN_VERSION=0.7.2
+ENV ZEPPELIN_TYPE=all
+ENV PATH=$PATH:$ZEPPELIN_HOME/bin
+
+# install zeppelin
+#RUN curl -O ${MIRRORS}apache/zeppelin/zeppelin-$ZEPPELIN_VERSION/zeppelin-$ZEPPELIN_VERSION-bin-$ZEPPELIN_TYPE.tgz && tar -xzvf zeppelin-$ZEPPELIN_VERSION-bin-$ZEPPELIN_TYPE.tgz && mv zeppelin-$ZEPPELIN_VERSION-bin-$ZEPPELIN_TYPE $ZEPPELIN_HOME && rm zeppelin-$ZEPPELIN_VERSION-bin-$ZEPPELIN_TYPE.tgz
+
+COPY zeppelin-$ZEPPELIN_VERSION-bin-$ZEPPELIN_TYPE $ZEPPELIN_HOME
+
+COPY zeppelin.sh /entrypoint/zeppelin.sh
+
+VOLUME ["/usr/local/zeppelin/conf"]

+ 20 - 0
conf/docker/java/zeppelin/me/Dockerfile

@@ -0,0 +1,20 @@
+FROM docker.shemic.com/dev/java:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+# set environment variable
+ENV ZEPPELIN_HOME=/usr/local/zeppelin \
+	ZEPPELIN_VERSION=0.7.2 \
+	ZEPPELIN_TYPE=dever \
+	PATH=$PATH:$ZEPPELIN_HOME/bin
+
+COPY zeppelin-$ZEPPELIN_VERSION-bin-$ZEPPELIN_TYPE $ZEPPELIN_HOME
+COPY entrypoint.sh /entrypoint.sh
+
+RUN apk update && apk add --no-cache bash
+
+VOLUME ["/usr/local/zeppelin/conf"]
+
+ENTRYPOINT ["/entrypoint.sh"]
+
+CMD ["sh"]

+ 6 - 0
conf/docker/java/zeppelin/me/entrypoint.sh

@@ -0,0 +1,6 @@
+#!/bin/sh
+set -e
+
+$ZEPPELIN_HOME/bin/zeppelin-daemon.sh start
+
+exec sh

+ 12 - 0
conf/docker/java/zeppelin/zeppelin.sh

@@ -0,0 +1,12 @@
+#!/bin/sh
+set -e
+
+start_zeppelin()
+{
+    $ZEPPELIN_HOME/bin/zeppelin-daemon.sh start
+}
+
+stop_zeppelin()
+{
+	echo "end"
+}

+ 19 - 0
conf/docker/java/zookeeper/Dockerfile

@@ -0,0 +1,19 @@
+FROM docker.shemic.com/java/base:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+# set environment variable
+ENV ZOOKEEPER_HOME=/usr/local/zookeeper
+ENV ZOOKEEPER_VERSION=3.5.2-alpha
+ENV PATH=$PATH:$ZOOKEEPER_HOME/bin
+
+# install
+RUN curl -O ${MIRRORS}apache/zookeeper/zookeeper-$ZOOKEEPER_VERSION/zookeeper-$ZOOKEEPER_VERSION.tar.gz && \
+	tar -xzvf zookeeper-$ZOOKEEPER_VERSION.tar.gz && \
+	mv zookeeper-$ZOOKEEPER_VERSION $ZOOKEEPER_HOME && \
+	rm zookeeper-$ZOOKEEPER_VERSION.tar.gz && \
+	mkdir -p /root/zookeeper/tmp
+
+COPY zookeeper.sh /entrypoint/zookeeper.sh
+
+VOLUME ["/usr/local/zookeeper/conf"]

+ 19 - 0
conf/docker/java/zookeeper/zookeeper.sh

@@ -0,0 +1,19 @@
+#!/bin/bash
+set -e
+
+start_zookeeper()
+{
+	check spark
+	#ln -s /share/lib/hbase /usr/local/hbase
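+    # when an id is passed, write it to the myid file (assumes zoo.cfg points dataDir at /root/zookeeper/tmp)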
+    if [ "$1" != '' ]; then
+        echo $1 > /root/zookeeper/tmp/myid
+    fi
+    zkServer.sh start
+    #hdfs zkfc -formatZK
+}
+
+stop_zookeeper()
+{
+	#rm -rf /usr/local/hbase
+	zkServer.sh stop
+}

+ 20 - 0
conf/docker/mq/kafka/Dockerfile

@@ -0,0 +1,20 @@
+FROM docker.shemic.com/java/base:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+# set environment variable
+ENV KAFKA_HOME=/usr/local/kafka
+ENV KAFKA_VERSION=2.11
+ENV KAFKA_PATH=0.9.0.1
+ENV PATH=$PATH:$KAFKA_HOME/bin
+
+# install
+RUN curl -O ${MIRRORS}apache/kafka/$KAFKA_PATH/kafka_$KAFKA_VERSION-$KAFKA_PATH.tgz && \
+	tar -xzvf kafka_$KAFKA_VERSION-$KAFKA_PATH.tgz && \
+	mv kafka_$KAFKA_VERSION-$KAFKA_PATH $KAFKA_HOME && \
+	rm kafka_$KAFKA_VERSION-$KAFKA_PATH.tgz && \
+	mkdir -p /root/kafka/logs
+
+COPY kafka.sh /entrypoint/kafka.sh
+
+VOLUME ["/usr/local/kafka/config"]

+ 12 - 0
conf/docker/mq/kafka/kafka.sh

@@ -0,0 +1,12 @@
+#!/bin/bash
+set -e
+
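+# assumes the stock server.properties shipped under $KAFKA_HOME/config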
+start_kafka()
+{
+	kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
+}
+
+stop_kafka()
+{
+	kafka-server-stop.sh
+}

+ 20 - 0
conf/docker/mq/rabbitmq/Dockerfile

@@ -0,0 +1,20 @@
+FROM docker.shemic.com/java/base:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+# set environment variable
+ENV KAFKA_HOME=/usr/local/kafka
+ENV KAFKA_VERSION=2.11
+ENV KAFKA_PATH=0.9.0.1
+ENV PATH=$PATH:$KAFKA_HOME/bin
+
+# install
+RUN curl -O ${MIRRORS}apache/kafka/$KAFKA_PATH/kafka_$KAFKA_VERSION-$KAFKA_PATH.tgz && \
+	tar -xzvf kafka_$KAFKA_VERSION-$KAFKA_PATH.tgz && \
+	mv kafka_$KAFKA_VERSION-$KAFKA_PATH $KAFKA_HOME && \
+	rm kafka_$KAFKA_VERSION-$KAFKA_PATH.tgz && \
+	mkdir -p /root/kafka/logs
+
+COPY kafka.sh /entrypoint/kafka.sh
+
+VOLUME ["/usr/local/kafka/config"]

+ 12 - 0
conf/docker/mq/rabbitmq/rabbitmq.sh

@@ -0,0 +1,12 @@
+#!/bin/bash
+set -e
+
+start_kafka()
+{
+	kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
+}
+
+stop_kafka()
+{
+	kafka-server-stop.sh
+}

+ 20 - 0
conf/docker/mq/rocketmq/Dockerfile

@@ -0,0 +1,20 @@
+FROM docker.shemic.com/java/base:latest
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+# set environment variable
+ENV KAFKA_HOME=/usr/local/kafka
+ENV KAFKA_VERSION=2.11
+ENV KAFKA_PATH=0.9.0.1
+ENV PATH=$PATH:$KAFKA_HOME/bin
+
+# install
+RUN curl -O ${MIRRORS}apache/kafka/$KAFKA_PATH/kafka_$KAFKA_VERSION-$KAFKA_PATH.tgz && \
+	tar -xzvf kafka_$KAFKA_VERSION-$KAFKA_PATH.tgz && \
+	mv kafka_$KAFKA_VERSION-$KAFKA_PATH $KAFKA_HOME && \
+	rm kafka_$KAFKA_VERSION-$KAFKA_PATH.tgz && \
+	mkdir -p /root/kafka/logs
+
+COPY kafka.sh /entrypoint/kafka.sh
+
+VOLUME ["/usr/local/kafka/config"]

+ 12 - 0
conf/docker/mq/rocketmq/rocketmq.sh

@@ -0,0 +1,12 @@
+#!/bin/bash
+set -e
+
+start_kafka()
+{
+	kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
+}
+
+stop_kafka()
+{
+	kafka-server-stop.sh
+}

+ 16 - 0
conf/docker/os/alpine/Dockerfile

@@ -0,0 +1,16 @@
+FROM alpine:3.6
+
+MAINTAINER Rabin "https://github.com/shemic"
+
+ENV CHINA_MIRRORS=http://mirrors.ustc.edu.cn/
+ENV FAU_MIRRORS=http://ftp.fau.de/
+ENV MIRRORS=$CHINA_MIRRORS
+
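+# switch apk to the USTC mirror and pin the timezone to Asia/Shanghai (tzdata is only needed at build time)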
+RUN sed -i "s/http:\/\/dl-cdn.alpinelinux.org/http:\/\/mirrors.ustc.edu.cn/g" /etc/apk/repositories && \
+	apk --no-cache --update add tzdata && \
+	cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
+	date && \
+	apk del tzdata && \
+	mkdir /entrypoint && mkdir /share
+
+VOLUME ["/share"]
+
+COPY v3.4/entrypoint.sh /entrypoint.sh
+COPY v3.4/process.sh /entrypoint/process.sh
+
+ENTRYPOINT ["/entrypoint.sh"]

Some files were not shown because too many files changed in this diff