Skip to content

Commit

Permalink
AMBARI-23243. Adding missing properties to OneFS mpack (amagyar)
Browse files Browse the repository at this point in the history
  • Loading branch information
zeroflag committed Mar 23, 2018
1 parent 75670a8 commit 8b7422c
Show file tree
Hide file tree
Showing 4 changed files with 42 additions and 1 deletion.
Original file line number Diff line number Diff line change
Expand Up @@ -56,10 +56,31 @@
</property>
</depends-on>
</property>
<property>
<name>dfs.datanode.http.address</name>
<value>0.0.0.0:8082</value>
<description>The datanode http server address and port.</description>
<on-ambari-upgrade add="false"/>
</property>
<property>
<name>dfs.datanode.https.address</name>
<value>0.0.0.0:8080</value>
<description>The datanode https server address and port.</description>
<on-ambari-upgrade add="false"/>
</property>
<property>
<name>dfs.client-write-packet-size</name>
<value>131072</value>
<description>Packet size for clients to write</description>
<on-ambari-upgrade add="false"/>
</property>
<property>
<name>dfs.checksum.type</name>
<value>NULL</value>
<description>The checksum method to be used by default. Set to NULL here to
disable client-side block checksums for this stack, since the backing
storage service performs its own data-integrity checks; note this differs
from the stock Hadoop default of CRC32C.</description>
<on-ambari-upgrade add="false"/>
</property>
</configuration>
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,8 @@
"core-site": {
"hadoop.security.authentication": "kerberos",
"hadoop.security.authorization": "true",
"hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}"
"hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}",
"hadoop.security.token.service.use_ip" : "false"
}
},
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,7 @@
<config-type>hdfs-site</config-type>
<config-type>onefs</config-type>
<config-type>hadoop-env</config-type>
<config-type>capacity-scheduler</config-type>
</configuration-dependencies>
<restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,24 @@ def getServiceConfigurationRecommendations(self, configs, clusterData, services,
putCoreSiteProperty("fs.defaultFS", Uri.default_fs(services).fix_host(onefs_host))
putHdfsSiteProperty("dfs.namenode.http-address", Uri.http_namenode(services).fix_host(onefs_host))
putHdfsSiteProperty("dfs.namenode.https-address", Uri.https_namenode(services).fix_host(onefs_host))
# self.updateYarnConfig(configs, services) TODO doesn't work possibly due to a UI bug (Couldn't retrieve 'capacity-scheduler' from services)

def updateYarnConfig(self, configs, services):
  """Turn off node-locality delay in capacity-scheduler when YARN is present.

  The capacity-scheduler config may arrive either as a single concatenated
  'capacity-scheduler' blob or as individual key/value properties; write the
  override back in whichever form it was received.
  """
  if 'YARN' not in self.installedServices(services):
    return
  scheduler_props, received_as_blob = self.getCapacitySchedulerProperties(services)
  if not scheduler_props:
    return
  put_cap_sched_property = self.putProperty(configs, 'capacity-scheduler', services)
  if received_as_blob:
    # Single-blob form: patch the dict and re-serialize the whole config.
    scheduler_props['yarn.scheduler.capacity.node-locality-delay'] = '0'
    put_cap_sched_property('capacity-scheduler', self.concatenated(scheduler_props))
  else:
    # Individual-property form: set just the one key.
    put_cap_sched_property('yarn.scheduler.capacity.node-locality-delay', '0')

def concatenated(self, capacity_scheduler_dict):
  """Serialize a capacity-scheduler dict into newline-terminated key=value lines."""
  lines = ['%s=%s\n' % (key, value) for key, value in capacity_scheduler_dict.items()]
  return ''.join(lines)

def installedServices(self, services):
  """Return the list of installed service names from the services payload."""
  names = []
  for service in services['services']:
    names.append(service['StackServices']['service_name'])
  return names

def getServiceConfigurationsValidationItems(self, configs, recommendedDefaults, services, hosts):
validation_errors = []
Expand Down

0 comments on commit 8b7422c

Please sign in to comment.