  1. <?xml version="1.0"?>
  2. <configuration>
  3. <property>
  4. <name>yarn.resourcemanager.hostname</name>
  5. <value>data-hadoop</value>
  6. <description>The hostname of the RM.</description>
  7. </property>
  8. <property>
  9. <name>yarn.nodemanager.resource.cpu-vcores</name>
  10. <value>2</value>
  11. <discription>cpu核心数,务必根据实际情况修改</discription>
  12. </property>
  13. <property>
  14. <name>yarn.nodemanager.resource.memory-mb</name>
  15. <value>1024</value>
  16. <discription>每个节点可用内存,单位MB,如果一直提示state: ACCEPTED,请增大此值</discription>
  17. </property>
  18. <property>
  19. <name>yarn.scheduler.minimum-allocation-mb</name>
  20. <value>512</value>
  21. <discription>单个任务可申请最少内存,默认1024MB</discription>
  22. </property>
  23. <property>
  24. <name>yarn.scheduler.maximum-allocation-mb</name>
  25. <value>1024</value>
  26. <discription>单个任务可申请最大内存,默认8192MB</discription>
  27. </property>
  28. <property>
  29. <name>yarn.app.mapreduce.am.resource.mb</name>
  30. <value>1024</value>
  31. <discription>AM能够申请的最大内存,默认值为1536MB</discription>
  32. </property>
  33. <property>
  34. <name>yarn.nodemanager.pmem-check-enabled</name>
  35. <value>false</value>
  36. </property>
  37. <property>
  38. <name>yarn.nodemanager.vmem-check-enabled</name>
  39. <value>false</value>
  40. </property>
  41. <!--property>
  42. <name>yarn.resourcemanager.scheduler.class</name>
  43. <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
  44. </property>
  45. <property>
  46. <name>yarn.resourcemanager.address</name>
  47. <value>8032</value>
  48. </property>
  49. <property>
  50. <name>yarn.resourcemanager.scheduler.address</name>
  51. <value>8030</value>
  52. </property>
  53. <property>
  54. <name>yarn.resourcemanager.resource-tracker.address</name>
  55. <value>8031</value>
  56. </property>
  57. <property>
  58. <name>yarn.resourcemanager.admin.address</name>
  59. <value>8033</value>
  60. </property>
  61. <property>
  62. <name>yarn.resourcemanager.webapp.address</name>
  63. <value>8088</value>
  64. </property>
  65. <property>
  66. <name>yarn.nodemanager.localizer.address</name>
  67. <value>8040</value>
  68. </property>
  69. <property>
  70. <name>yarn.nodemanager.address</name>
  71. <value>8041</value>
  72. </property>
  73. <property>
  74. <name>yarn.nodemanager.webapp.address</name>
  75. <value>8042</value>
  76. </property>
  77. <property>
  78. <name>mapreduce.jobhistory.address</name>
  79. <value>10020</value>
  80. </property>
  81. <property>
  82. <name>mapreduce.jobhistory.webapp.address</name>
  83. <value>19888</value>
  84. </property>
  85. <property>
  86. <name>yarn.nodemanager.aux-services</name>
  87. <value>mapreduce_shuffle</value>
  88. </property>
  89. <property>
  90. <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
  91. <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  92. </property>
  93. <property>
  94. <name>yarn.resourcemanager.scheduler.class</name>
  95. <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
  96. </property>
  97. <property>
  98. <name>yarn.scheduler.fair.preemption</name>
  99. <value>true</value>
  100. </property>
  101. <property>
  102. <name>yarn.scheduler.fair.preemption.cluster-utilization-threshold</name>
  103. <value>1.0</value>
  104. </property-->
  105. <property>
  106. <name>yarn.resourcemanager.am.max-attempts</name>
  107. <value>10000</value>
  108. </property>
  109. </configuration>