#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# syntax: [instance].sink|source.[name].[options]=[value]

# This file configures Spark's internal metrics system. The metrics system is
# divided into instances which correspond to internal components.
# Each instance can be configured to report its metrics to one or more sinks.
# Accepted values for [instance] are "master", "worker", "executor", "driver",
# and "applications". A wildcard "*" can be used as an instance name, in
# which case all instances will inherit the supplied property.
#
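# As an illustration of the syntax: in a line such as
#
#   worker.sink.csv.period=10
#
# the instance is "worker", the field is "sink", the name is "csv", and the
# option is "period".
#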
# Within an instance, a "source" specifies a particular set of grouped metrics.
# There are two kinds of sources:
#   1. Spark internal sources, like MasterSource, WorkerSource, etc., which
#   collect a Spark component's internal state. Each instance is paired with a
#   Spark source that is added automatically.
#   2. Common sources, like JvmSource, which collect low-level state.
#   These can be added through configuration options and are then loaded
#   using reflection.
#
# A "sink" specifies where metrics are delivered to. Each instance can be
# assigned one or more sinks.
#
# The sink|source field specifies whether the property relates to a sink or
# a source.
#
# The [name] field specifies the name of the source or sink.
#
# The [options] field is a specific property of the source or sink. The
# source or sink is responsible for parsing this property.
#
# Notes:
#   1. To add a new sink, set the "class" option to a fully qualified class
#   name (see examples below).
#   2. Some sinks involve a polling period. The minimum allowed polling period
#   is 1 second.
#   3. Wildcard properties can be overridden by more specific properties.
#   For example, master.sink.console.period takes precedence over
#   *.sink.console.period.
#   4. A metrics-specific configuration
#   "spark.metrics.conf=${SPARK_HOME}/conf/metrics.properties" should be
#   added to the Java properties using -Dspark.metrics.conf=xxx if you want
#   to customize the metrics system. You can also put the file in
#   ${SPARK_HOME}/conf and it will be loaded automatically (see the
#   submit-time sketch after these notes).
#   5. The MetricsServlet sink is added by default as a sink in the master,
#   worker and driver, and you can send HTTP requests to the "/metrics/json"
#   endpoint to get a snapshot of all the registered metrics in JSON format.
#   For the master, requests to the "/metrics/master/json" and
#   "/metrics/applications/json" endpoints can be sent separately to get
#   metrics snapshots of the master instance and the applications. This
#   MetricsServlet does not have to be configured (see the request sketch
#   after these notes).
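#
# As an illustration of note 4, a custom metrics configuration can be passed
# to the driver at submit time via the standard spark.driver.extraJavaOptions
# setting; the file path, class, and jar names below are hypothetical
# placeholders:
#
#   spark-submit \
#     --conf "spark.driver.extraJavaOptions=-Dspark.metrics.conf=/opt/spark/metrics.properties" \
#     --class com.example.MyApp my-app.jar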
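#
# And as an illustration of note 5, a metrics snapshot can be fetched with a
# plain HTTP client such as curl. The hosts and ports below assume the default
# web UI addresses (master UI on 8080, driver UI on 4040) and are only
# placeholders:
#
#   curl http://localhost:8080/metrics/master/json        # master metrics
#   curl http://localhost:8080/metrics/applications/json  # application metrics
#   curl http://localhost:4040/metrics/json               # driver metrics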

## List of available common sources and their properties.

# org.apache.spark.metrics.source.JvmSource
#   Note: Currently, JvmSource is the only available common source.
#         It can be added to an instance by setting the "class" option to its
#         fully qualified class name (see examples below).

## List of available sinks and their properties.

# org.apache.spark.metrics.sink.ConsoleSink
#   Name:    Default:   Description:
#   period   10         Poll period
#   unit     seconds    Unit of the poll period

# org.apache.spark.metrics.sink.CsvSink
#   Name:      Default:   Description:
#   period     10         Poll period
#   unit       seconds    Unit of the poll period
#   directory  /tmp       Where to store CSV files

# org.apache.spark.metrics.sink.GangliaSink
#   Name:     Default:   Description:
#   host      NONE       Hostname or multicast group of the Ganglia server,
#                        must be set
#   port      NONE       Port of the Ganglia server(s), must be set
#   period    10         Poll period
#   unit      seconds    Unit of the poll period
#   ttl       1          TTL of messages sent by Ganglia
#   dmax      0          Lifetime in seconds of metrics (0 = never expire)
#   mode      multicast  Ganglia network mode ('unicast' or 'multicast')
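#
# A sketch of a GangliaSink configuration; the multicast group and port below
# are conventional Ganglia defaults used here only as placeholders:
#
#   *.sink.ganglia.class=org.apache.spark.metrics.sink.GangliaSink
#   *.sink.ganglia.host=239.2.11.71
#   *.sink.ganglia.port=8649
#   *.sink.ganglia.mode=multicast
#   *.sink.ganglia.period=10
#   *.sink.ganglia.unit=seconds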

# org.apache.spark.metrics.sink.JmxSink

# org.apache.spark.metrics.sink.MetricsServlet
#   Name:    Default:   Description:
#   path     VARIES*    Path prefix from the web server root
#   sample   false      Whether to show the entire set of samples for
#                       histograms ('false' or 'true')
#
# * Default path is /metrics/json for all instances except the master. The
#   master has two paths:
#     /metrics/applications/json  # App information
#     /metrics/master/json        # Master information
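#
# A sketch of overriding a MetricsServlet option, assuming the default sink
# name "servlet" used by Spark's built-in configuration; the value is
# illustrative:
#
#   *.sink.servlet.sample=true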

# org.apache.spark.metrics.sink.GraphiteSink
#   Name:      Default:      Description:
#   host       NONE          Hostname of the Graphite server, must be set
#   port       NONE          Port of the Graphite server, must be set
#   period     10            Poll period
#   unit       seconds       Unit of the poll period
#   prefix     EMPTY STRING  Prefix to prepend to every metric's name
#   protocol   tcp           Protocol ("tcp" or "udp") to use
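#
# A sketch of a GraphiteSink configuration; the hostname is a hypothetical
# placeholder and 2003 is the conventional Graphite plaintext port:
#
#   *.sink.graphite.class=org.apache.spark.metrics.sink.GraphiteSink
#   *.sink.graphite.host=graphite.example.com
#   *.sink.graphite.port=2003
#   *.sink.graphite.prefix=spark
#   *.sink.graphite.protocol=tcp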

## Examples

# Enable JmxSink for all instances by class name
#*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink

# Enable ConsoleSink for all instances by class name
#*.sink.console.class=org.apache.spark.metrics.sink.ConsoleSink
# Polling period for the ConsoleSink
#*.sink.console.period=10
# Unit of the polling period for the ConsoleSink
#*.sink.console.unit=seconds

# Polling period for the ConsoleSink specific to the master instance
#master.sink.console.period=15
# Unit of the polling period for the ConsoleSink specific to the master
# instance
#master.sink.console.unit=seconds

# Enable CsvSink for all instances by class name
#*.sink.csv.class=org.apache.spark.metrics.sink.CsvSink
# Polling period for the CsvSink
#*.sink.csv.period=1
# Unit of the polling period for the CsvSink
#*.sink.csv.unit=minutes
# Output directory for the CsvSink
#*.sink.csv.directory=/tmp/

# Polling period for the CsvSink specific to the worker instance
#worker.sink.csv.period=10
# Unit of the polling period for the CsvSink specific to the worker instance
#worker.sink.csv.unit=minutes

# Enable Slf4jSink for all instances by class name
#*.sink.slf4j.class=org.apache.spark.metrics.sink.Slf4jSink
# Polling period for the Slf4jSink
#*.sink.slf4j.period=1
# Unit of the polling period for the Slf4jSink
#*.sink.slf4j.unit=minutes

# Enable JvmSource for the master, worker, driver, and executor instances
#master.source.jvm.class=org.apache.spark.metrics.source.JvmSource
#worker.source.jvm.class=org.apache.spark.metrics.source.JvmSource
#driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource
#executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource