ranger-nifi-policymgr-ssl { "configurations" : [ { "hdfs-logsearch-conf" : { "properties_attributes" : { }, "properties" : { "component_mappings" : "NAMENODE:hdfs_namenode;DATANODE:hdfs_datanode;SECONDARY_NAMENODE:hdfs_secondarynamenode;JOURNALNODE:hdfs_journalnode;ZKFC:hdfs_zkfc;NFS_GATEWAY:hdfs_nfs3", "content" : "\n{\n \"input\":[\n {\n \"type\":\"hdfs_datanode\",\n \"rowtype\":\"service\",\n \"path\":\"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-datanode-*.log\"\n },\n {\n \"type\":\"hdfs_namenode\",\n \"rowtype\":\"service\",\n \"path\":\"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-namenode-*.log\"\n },\n {\n \"type\":\"hdfs_journalnode\",\n \"rowtype\":\"service\",\n \"path\":\"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-journalnode-*.log\"\n },\n {\n \"type\":\"hdfs_secondarynamenode\",\n \"rowtype\":\"service\",\n \"path\":\"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-secondarynamenode-*.log\"\n },\n {\n \"type\":\"hdfs_zkfc\",\n \"rowtype\":\"service\",\n \"path\":\"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-zkfc-*.log\"\n },\n {\n \"type\":\"hdfs_nfs3\",\n \"rowtype\":\"service\",\n \"path\":\"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-nfs3-*.log\"\n },\n {\n \"type\":\"hdfs_audit\",\n \"rowtype\":\"audit\",\n \"is_enabled\":\"true\",\n \"add_fields\":{\n \"logType\":\"HDFSAudit\",\n \"enforcer\":\"hadoop-acl\",\n \"repoType\":\"1\",\n \"repo\":\"hdfs\"\n },\n \"path\":\"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hdfs-audit.log\"\n }\n ],\n \"filter\":[\n {\n \"filter\":\"grok\",\n \"conditions\":{\n \"fields\":{\n \"type\":[\n \"hdfs_datanode\",\n \"hdfs_journalnode\",\n \"hdfs_secondarynamenode\",\n \"hdfs_namenode\",\n \"hdfs_zkfc\",\n \"hdfs_nfs3\"\n ]\n }\n },\n \"log4j_format\":\"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\",\n \"multiline_pattern\":\"^(%{TIMESTAMP_ISO8601:logtime})\",\n \"message_pattern\":\"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\\\(%{INT:line_number}\\\\)\\\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}\",\n \"post_map_values\":{\n \"logtime\":{\n \"map_date\":{\n \"target_date_pattern\":\"yyyy-MM-dd HH:mm:ss,SSS\"\n }\n }\n }\n },\n {\n \"filter\":\"grok\",\n \"conditions\":{\n \"fields\":{\n \"type\":[\n \"hdfs_audit\"\n ]\n }\n },\n \"log4j_format\":\"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\",\n \"multiline_pattern\":\"^(%{TIMESTAMP_ISO8601:evtTime})\",\n 
\"message_pattern\":\"(?m)^%{TIMESTAMP_ISO8601:evtTime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}\",\n \"post_map_values\":{\n \"evtTime\":{\n \"map_date\":{\n \"target_date_pattern\":\"yyyy-MM-dd HH:mm:ss,SSS\"\n }\n }\n }\n },\n {\n \"filter\":\"keyvalue\",\n \"sort_order\":1,\n \"conditions\":{\n \"fields\":{\n \"type\":[\n \"hdfs_audit\"\n ]\n }\n },\n \"source_field\":\"log_message\",\n \"value_split\":\"=\",\n \"field_split\":\"\\t\",\n \"post_map_values\":{\n \"src\":{\n \"map_fieldname\":{\n \"new_fieldname\":\"resource\"\n }\n },\n \"ip\":{\n \"map_fieldname\":{\n \"new_fieldname\":\"cliIP\"\n }\n },\n \"allowed\":[\n {\n \"map_fieldvalue\":{\n \"pre_value\":\"true\",\n \"post_value\":\"1\"\n }\n },\n {\n \"map_fieldvalue\":{\n \"pre_value\":\"false\",\n \"post_value\":\"0\"\n }\n },\n {\n \"map_fieldname\":{\n \"new_fieldname\":\"result\"\n }\n }\n ],\n \"cmd\":{\n \"map_fieldname\":{\n \"new_fieldname\":\"action\"\n }\n },\n \"proto\":{\n \"map_fieldname\":{\n \"new_fieldname\":\"cliType\"\n }\n },\n \"callerContext\":{\n \"map_fieldname\":{\n \"new_fieldname\":\"req_caller_id\"\n }\n }\n }\n },\n {\n \"filter\":\"grok\",\n \"sort_order\":2,\n \"source_field\":\"ugi\",\n \"remove_source_field\":\"false\",\n \"conditions\":{\n \"fields\":{\n \"type\":[\n \"hdfs_audit\"\n ]\n }\n },\n \"message_pattern\":\"%{USERNAME:p_user}.+auth:%{USERNAME:p_authType}.+via %{USERNAME:k_user}.+auth:%{USERNAME:k_authType}|%{USERNAME:user}.+auth:%{USERNAME:authType}|%{USERNAME:x_user}\",\n \"post_map_values\":{\n \"user\":{\n \"map_fieldname\":{\n \"new_fieldname\":\"reqUser\"\n }\n },\n \"x_user\":{\n \"map_fieldname\":{\n \"new_fieldname\":\"reqUser\"\n }\n },\n \"p_user\":{\n \"map_fieldname\":{\n \"new_fieldname\":\"reqUser\"\n }\n },\n \"k_user\":{\n \"map_fieldname\":{\n \"new_fieldname\":\"proxyUsers\"\n }\n },\n \"p_authType\":{\n \"map_fieldname\":{\n \"new_fieldname\":\"authType\"\n }\n },\n \"k_authType\":{\n \"map_fieldname\":{\n \"new_fieldname\":\"proxyAuthType\"\n }\n }\n }\n }\n ]\n }", "service_name" : "HDFS" } } ranger-nifi-policymgr-ssl How to get it for free? 
}, { "hive-logsearch-conf" : { "properties_attributes" : { }, "properties" : { "component_mappings" : "HIVE_METASTORE:hive_metastore;HIVE_SERVER:hive_hiveserver2;WEBHCAT_SERVER:webhcat_server", "content" : "\n{\n \"input\":[\n {\n \"type\":\"hive_hiveserver2\",\n \"rowtype\":\"service\",\n \"path\":\"{{default('/configurations/hive-env/hive_log_dir', '/var/log/hive')}}/hiveserver2.log\"\n },\n {\n \"type\":\"hive_metastore\",\n \"rowtype\":\"service\",\n \"path\":\"{{default('/configurations/hive-env/hive_log_dir', '/var/log/hive')}}/hivemetastore.log\"\n },\n {\n \"type\": \"webhcat_server\",\n \"rowtype\":\"service\",\n \"path\":\"{{default('/configurations/hive-env/hcat_log_dir', '/var/log/webhcat')}}/webhcat.log\"\n }\n ],\n \"filter\":[\n {\n \"filter\":\"grok\",\n \"conditions\":{\n \"fields\":{\n \"type\":[\n \"hive_hiveserver2\",\n \"hive_metastore\"\n ]\n }\n },\n \"log4j_format\":\"%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n\",\n \"multiline_pattern\":\"^(%{TIMESTAMP_ISO8601:logtime})\",\n \"message_pattern\":\"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}\\\\[%{DATA:thread_name}\\\\]:%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\\\(%{INT:line_number}\\\\)\\\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}\",\n \"post_map_values\":{\n \"logtime\":{\n \"map_date\":{\n \"target_date_pattern\":\"yyyy-MM-dd HH:mm:ss,SSS\"\n }\n }\n }\n },\n {\n \"filter\":\"grok\",\n \"conditions\":{\n \"fields\":{\n \"type\":[\n \"webhcat_server\"\n ]\n }\n },\n \"log4j_format\":\" %-5p | %d{DATE} | %c | %m%n\",\n \"multiline_pattern\":\"^(%{SPACE}%{LOGLEVEL:level}%{CUSTOM_SEPARATOR}%{CUSTOM_DATESTAMP:logtime})\",\n \"message_pattern\":\"(?m)^%{SPACE}%{LOGLEVEL:level}%{CUSTOM_SEPARATOR}%{CUSTOM_DATESTAMP:logtime}%{CUSTOM_SEPARATOR}%{JAVACLASS:file}%{CUSTOM_SEPARATOR}%{GREEDYDATA:log_message}\",\n \"post_map_values\":{\n \"logtime\":{\n \"map_date\":{\n \"target_date_pattern\":\"dd MMM yyyy HH:mm:ss,SSS\"\n }\n },\n \"level\":{\n \"map_fieldvalue\":{\n \"pre_value\":\"WARNING\",\n \"post_value\":\"WARN\"\n }\n }\n }\n }\n ]\n }", "service_name" : "Hive" } } }, { "zoo.cfg" : { "properties_attributes" : { }, "properties" : { "autopurge.purgeInterval" : "24", "dataDir" : "/hadoop/zookeeper", "autopurge.snapRetainCount" : "30", "clientPort" : "2181", "initLimit" : "10", "tickTime" : "3000", "syncLimit" : "5" } } }, { "ams-hbase-site" : { "properties_attributes" : { "final" : { "hbase.zookeeper.quorum" : "true" } }, "properties" : { "hfile.block.cache.size" : "0.3", "phoenix.query.spoolThresholdBytes" : "20971520", "hbase.zookeeper.property.dataDir" : "${hbase.tmp.dir}/zookeeper", "hbase.regionserver.thread.compaction.large" : "2", "hbase.master.port" : "61300", "zookeeper.session.timeout.localHBaseCluster" : "120000", "phoenix.query.keepAliveMs" : "300000",
ranger-nifi-policymgr-ssl "hbase.local.dir" : "${hbase.tmp.dir}/local", "phoenix.query.timeoutMs" : "300000", "hbase.regionserver.thread.compaction.small" : "3", "hbase.zookeeper.quorum" : "{{zookeeper_quorum_hosts}}", "hbase.client.scanner.timeout.period" : "300000", "hbase.regionserver.info.port" : "61330", "phoenix.query.rowKeyOrderSaltedTable" : "true", "phoenix.mutate.batchSize" : "10000", "zookeeper.znode.parent" : "/ams-hbase-unsecure", "hbase.master.info.port" : "61310", ranger-nifi-policymgr-ssl How to get it? ranger-nifi-policymgr-ssl "hbase.rootdir" : "file:///var/lib/ambari-metrics-collector/hbase", "hbase.cluster.distributed" : "false", "hbase.hregion.majorcompaction" : "0", "hbase.hstore.flusher.count" : "2", "hbase.master.normalizer.class" : "org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer", "hbase.snapshot.enabled" : "false", "phoenix.spool.directory" : "${hbase.tmp.dir}/phoenix-spool", "phoenix.coprocessor.maxMetaDataCacheSize" : "20480000", "hbase.zookeeper.property.tickTime" : "6000", "hbase.zookeeper.leaderport" : "61388", ranger-nifi-policymgr-ssl How to use it? ranger-nifi-policymgr-ssl "phoenix.sequence.saltBuckets" : "2", "hbase.hstore.blockingStoreFiles" : "200", "zookeeper.session.timeout" : "120000", "phoenix.coprocessor.maxServerCacheTimeToLiveMs" : "60000", "hbase.master.info.bindAddress" : "0.0.0.0", "hbase.regionserver.global.memstore.lowerLimit" : "0.3", "dfs.client.read.shortcircuit" : "true", "phoenix.query.maxGlobalMemoryPercentage" : "15", "hbase.hregion.memstore.flush.size" : "134217728", "hbase.hregion.max.filesize" : "4294967296", ranger-nifi-policymgr-ssl How to dowload it? ranger-nifi-policymgr-ssl "phoenix.groupby.maxCacheSize" : "307200000", "hbase.master.wait.on.regionservers.mintostart" : "1", "hbase.regionserver.global.memstore.upperLimit" : "0.35", "hbase.normalizer.period" : "600000", "hbase.tmp.dir" : "/var/lib/ambari-metrics-collector/hbase-tmp", "hbase.client.scanner.caching" : "10000", "hbase.replication" : "false", "hbase.rpc.timeout" : "300000", "hbase.superuser" : "activity_explorer,activity_analyzer", "hbase.zookeeper.peerport" : "61288", ranger-nifi-policymgr-ssl How to use it? ranger-nifi-policymgr-ssl "hbase.regionserver.port" : "61320", "hbase.zookeeper.property.clientPort" : "{{zookeeper_clientPort}}", "hbase.hregion.memstore.block.multiplier" : "4", "hbase.normalizer.enabled" : "false" } } }, { "zookeeper-log4j" : { "properties_attributes" : { }, ranger-nifi-policymgr-ssl How to dowload it? ranger-nifi-policymgr-ssl "properties" : { "zookeeper_log_max_backup_size" : "10", "zookeeper_log_number_of_backup_files" : "10", "content" : "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file\n#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File={{zk_log_dir}}/zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize={{zookeeper_log_max_backup_size}}MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex={{zookeeper_log_number_of_backup_files}}\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n" } } }, { "pig-env" : { "properties_attributes" : { }, "properties" : { "content" : "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\nif [ -d \"/usr/lib/tez\" ]; then\n PIG_OPTS=\"$PIG_OPTS -Dmapreduce.framework.name=yarn\"\nfi" } } }, { "ranger-hdfs-policymgr-ssl" : { "properties_attributes" : { }, "properties" : { } } }, { "activity-log4j" : { "properties_attributes" : { }, "properties" : { "activity_max_file_size" : "30", "activity-log4j-content" : "\n# Copyright (c) 2011-2017, Hortonworks Inc. All rights reserved.\n# Except as expressly permitted in a written agreement between you\n# or your company and Hortonworks, Inc, any use, reproduction,\n# modification, redistribution, sharing, lending or other exploitation\n# of all or any part of the contents of this file is strictly prohibited.\n\n# Define some default values that can be overridden by system properties\n# Root logger option\nlog4j.rootLogger=INFO,file\n\nlog4j.appender.file=org.apache.log4j.RollingFileAppender\nlog4j.appender.file.File={{activity_log_dir}}/${log.file.name}\nlog4j.appender.file.MaxFileSize={{activity_max_file_size}}MB\nlog4j.appender.file.MaxBackupIndex={{activity_max_backup_index}}\nlog4j.appender.file.layout=org.apache.log4j.PatternLayout\nlog4j.appender.file.layout.ConversionPattern=%d{ISO8601} %5p [%t] %c{1}:%L - %m%n", "activity_log_dir" : "/var/log/smartsense-activity", "activity_max_backup_index" : "10" }
} }, { "ams-logsearch-conf" : { "properties_attributes" : { }, "properties" : { "component_mappings" : "METRICS_COLLECTOR:ams_collector,ams_hbase_master,ams_hbase_regionserver;METRICS_MONITOR:ams_monitor;METRICS_GRAFANA:ams_grafana", "content" : "\n{\n \"input\":[\n {\n \"type\":\"ams_hbase_master\",\n \"rowtype\":\"service\",\n \"path\":\"{{default('/configurations/ams-env/metrics_collector_log_dir', '/var/log/ambari-metrics-collector')}}/hbase-ams-master-*.log\"\n },\n {\n \"type\":\"ams_hbase_regionserver\",\n \"rowtype\":\"service\",\n \"path\":\"{{default('/configurations/ams-env/metrics_collector_log_dir', '/var/log/ambari-metrics-collector')}}/hbase-ams-regionserver-*.log\"\n },\n {\n \"type\":\"ams_collector\",\n \"rowtype\":\"service\",\n \"path\":\"{{default('/configurations/ams-env/metrics_collector_log_dir', '/var/log/ambari-metrics-collector')}}/ambari-metrics-collector.log\"\n },\n {\n \"type\":\"ams_monitor\",\n \"rowtype\":\"service\",\n \"path\":\"{{default('/configurations/ams-env/metrics_monitor_log_dir', '/var/log/ambari-metrics-monitor')}}/ambari-metrics-monitor.out\"\n },\n {\n \"type\":\"ams_grafana\",\n \"rowtype\":\"service\",\n \"path\":\"{{default('/configurations/ams-grafana-env/metrics_grafana_log_dir', '/var/log/ambari-metrics-grafana')}}/grafana.log\"\n }\n ],\n \"filter\":[\n {\n \"filter\":\"grok\",\n \"conditions\":{\n \"fields\":{\n \"type\":[\n \"ams_collector\"\n ]\n }\n },\n \"log4j_format\":\"%d{ISO8601} %p %c: %m%n\",\n \"multiline_pattern\":\"^(%{TIMESTAMP_ISO8601:logtime})\",\n \"message_pattern\":\"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}\",\n \"post_map_values\":{\n \"logtime\":{\n \"map_date\":{\n \"target_date_pattern\":\"yyyy-MM-dd HH:mm:ss,SSS\"\n }\n }\n }\n },\n {\n \"filter\":\"grok\",\n \"conditions\":{\n \"fields\":{\n \"type\":[\n \"ams_hbase_master\",\n \"ams_hbase_regionserver\"\n ]\n }\n },\n \"log4j_format\":\"%d{ISO8601} %-5p [%t] %c{2}: %m%n\",\n \"multiline_pattern\":\"^(%{TIMESTAMP_ISO8601:logtime})\",\n \"message_pattern\":\"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}\\\\[%{DATA:thread_name}\\\\]%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}\",\n \"post_map_values\":{\n \"logtime\":{\n \"map_date\":{\n \"target_date_pattern\":\"yyyy-MM-dd HH:mm:ss,SSS\"\n }\n }\n }\n },\n {\n \"filter\":\"grok\",\n \"conditions\":{\n \"fields\":{\n \"type\":[\n \"ams_grafana\"\n ]\n }\n },\n \"log4j_format\":\"%d{ISO8601} %-5p [%t] %c{2}: %m%n\",\n \"multiline_pattern\":\"^(%{DATESTAMP:logtime})\",\n \"message_pattern\":\"(?m)^%{DATESTAMP:logtime}%{SPACE}\\\\[%{WORD:level}\\\\]%{SPACE}%{GREEDYDATA:log_message}\",\n \"post_map_values\":{\n \"logtime\":{\n \"map_date\":{\n \"target_date_pattern\":\"yyyy/MM/dd HH:mm:ss\"\n }\n },\n \"level\":[\n {\n \"map_fieldvalue\":{\n \"pre_value\":\"I\",\n \"post_value\":\"INFO\"\n }\n },\n {\n \"map_fieldvalue\":{\n \"pre_value\":\"W\",\n \"post_value\":\"WARN\"\n }\n },\n {\n \"map_fieldvalue\":{\n \"pre_value\":\"D\",\n \"post_value\":\"DEBUG\"\n }\n },\n {\n \"map_fieldvalue\":{\n \"pre_value\":\"E\",\n \"post_value\":\"ERROR\"\n }\n },\n {\n \"map_fieldvalue\":{\n \"pre_value\":\"F\",\n \"post_value\":\"FATAL\"\n }\n }\n ]\n }\n },\n {\n \"filter\":\"grok\",\n \"conditions\":{\n \"fields\":{\n \"type\":[\n \"ams_monitor\"\n ]\n }\n },\n \"log4j_format\":\"\",\n \"multiline_pattern\":\"^(%{TIMESTAMP_ISO8601:logtime})\",\n 
\"message_pattern\":\"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}\\\\[%{LOGLEVEL:level}\\\\]%{SPACE}%{JAVAFILE:file}:%{INT:line_number}%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}\",\n \"post_map_values\":{\n \"logtime\":{\n \"map_date\":{\n \"target_date_pattern\":\"yyyy-MM-dd HH:mm:ss,SSS\"\n }\n }\n },\n \"level\":[\n {\n \"map_fieldvalue\":{\n \"pre_value\":\"WARNING\",\n \"post_value\":\"WARN\"\n }\n }\n ]\n }\n ]\n }", "service_name" : "AMS" } ranger-nifi-policymgr-ssl How to get it for free? ranger-nifi-policymgr-ssl } }, { "ranger-yarn-policymgr-ssl" : { "properties_attributes" : { }, "properties" : { } } }, { "hive-log4j2" : { ranger-nifi-policymgr-ssl How to use it? ranger-nifi-policymgr-ssl "properties_attributes" : { }, "properties" : { "hive2_log_maxfilesize" : "256", "hive2_log_maxbackupindex" : "30", "content" : "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nstatus = INFO\nname = HiveLog4j2\npackages = org.apache.hadoop.hive.ql.log\n\n# list of properties\nproperty.hive.log.level = {{hive_log_level}}\nproperty.hive.root.logger = DRFA\nproperty.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}\nproperty.hive.log.file = hive.log\n\n# list of all appenders\nappenders = console, DRFA\n\n# console appender\nappender.console.type = Console\nappender.console.name = console\nappender.console.target = SYSTEM_ERR\nappender.console.layout.type = PatternLayout\nappender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n\n\n# daily rolling file appender\nappender.DRFA.type = RollingFile\nappender.DRFA.name = DRFA\nappender.DRFA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}\n# Use %pid in the filePattern to append process-id@host-name to the filename if you want separate log files for different CLI session\nappender.DRFA.filePattern = ${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}_%i.gz\nappender.DRFA.layout.type = PatternLayout\nappender.DRFA.layout.pattern = %d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n\nappender.DRFA.policies.type = Policies\nappender.DRFA.policies.time.type = TimeBasedTriggeringPolicy\nappender.DRFA.policies.time.interval = 1\nappender.DRFA.policies.time.modulate = true\nappender.DRFA.strategy.type = DefaultRolloverStrategy\nappender.DRFA.strategy.max = {{hive2_log_maxbackupindex}}\nappender.DRFA.policies.fsize.type = SizeBasedTriggeringPolicy\nappender.DRFA.policies.fsize.size = {{hive2_log_maxfilesize}}MB\n\n# list of all loggers\nloggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX\n\nlogger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn\nlogger.NIOServerCnxn.level = WARN\n\nlogger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO\nlogger.ClientCnxnSocketNIO.level = WARN\n\nlogger.DataNucleus.name = DataNucleus\nlogger.DataNucleus.level 
= ERROR\n\nlogger.Datastore.name = Datastore\nlogger.Datastore.level = ERROR\n\nlogger.JPOX.name = JPOX\nlogger.JPOX.level = ERROR\n\n# root logger\nrootLogger.level = ${sys:hive.log.level}\nrootLogger.appenderRefs = root\nrootLogger.appenderRef.root.ref = ${sys:hive.root.logger}" } } }, { "mapred-logsearch-conf" : { "properties_attributes" : { }, "properties" : { "component_mappings" : "HISTORYSERVER:mapred_historyserver", "content" : "\n{\n \"input\":[\n {\n \"type\":\"mapred_historyserver\",\n \"rowtype\":\"service\",\n \"path\":\"{{default('/configurations/mapred-env/mapred_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/mapred-env/mapred_user', 'mapred')}}/mapred-{{default('configurations/mapred-env/mapred_user', 'mapred')}}-historyserver*.log\"\n }\n ],\n \"filter\":[\n {\n \"filter\":\"grok\",\n \"conditions\":{\n \"fields\":{\n \"type\":[\n \"mapred_historyserver\"\n ]\n }\n },\n \"log4j_format\":\"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\",\n \"multiline_pattern\":\"^(%{TIMESTAMP_ISO8601:logtime})\",\n \"message_pattern\":\"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\\\(%{INT:line_number}\\\\)\\\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}\",\n \"post_map_values\":{\n \"logtime\":{\n \"map_date\":{\n \"target_date_pattern\":\"yyyy-MM-dd HH:mm:ss,SSS\"\n }\n }\n }\n }\n ]\n }", "service_name" : "MapReduce" } } }, { "ranger-hive-policymgr-ssl" : { "properties_attributes" : { }, "properties" : { } } }, { "zookeeper-logsearch-conf" : { "properties_attributes" : { }, "properties" : { "component_mappings" : "ZOOKEEPER_SERVER:zookeeper", "content" : "\n{\n \"input\":[\n {\n \"type\":\"zookeeper\",\n \"rowtype\":\"service\",\n \"path\":\"{{default('/configurations/zookeeper-env/zk_log_dir', '/var/log/zookeeper')}}/zookeeper*.log\"\n }\n ],\n \"filter\":[\n {\n \"filter\":\"grok\",\n \"conditions\":{\n \"fields\":{\"type\":[\"zookeeper\"]}\n },\n \"log4j_format\":\"%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\",\n \"multiline_pattern\":\"^(%{TIMESTAMP_ISO8601:logtime})\",\n \"message_pattern\":\"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}-%{SPACE}%{LOGLEVEL:level}%{SPACE}\\\\[%{DATA:thread_name}\\\\@%{INT:line_number}\\\\]%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}\",\n \"post_map_values\": {\n \"logtime\": {\n \"map_date\":{\n \"target_date_pattern\":\"yyyy-MM-dd HH:mm:ss,SSS\"\n }\n }\n }\n }\n ]\n}", "service_name" : "Zookeeper" } } }, { "hcat-env" : { "properties_attributes" : { }, "properties" : { "content" : "\n # Licensed to the Apache Software Foundation (ASF) under one\n # or more contributor license agreements. See the NOTICE file\n # distributed with this work for additional information\n # regarding copyright ownership. The ASF licenses this file\n # to you under the Apache License, Version 2.0 (the\n # \"License\"); you may not use this file except in compliance\n # with the License. 
You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n\n JAVA_HOME={{java64_home}}\n HCAT_PID_DIR={{hcat_pid_dir}}/\n HCAT_LOG_DIR={{hcat_log_dir}}/\n HCAT_CONF_DIR={{hcat_conf_dir}}\n HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n #DBROOT is the path where the connector jars are downloaded\n DBROOT={{hcat_dbroot}}\n USER={{hcat_user}}\n METASTORE_PORT={{hive_metastore_port}}" } } }, { "ssl-server" : { "properties_attributes" : { }, "properties" : { "ssl.server.truststore.location" : "/etc/security/serverKeys/all.jks", "ssl.server.truststore.reload.interval" : "10000", "ssl.server.truststore.type" : "jks", "ssl.server.keystore.location" : "/etc/security/serverKeys/keystore.jks", "ssl.server.keystore.type" : "jks" } } }, { "pig-properties" : { "properties_attributes" : { }, "properties" : { "content" : "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.\n# see bin/pig -help\n\n# brief logging (no timestamps)\nbrief=false\n\n# debug level, INFO is default\ndebug=INFO\n\n# verbose print all log messages to screen (default to print only INFO and above to screen)\nverbose=false\n\n# exectype local|mapreduce|tez, mapreduce is default\nexectype=tez\n\n# Enable insertion of information about script into hadoop job conf \npig.script.info.enabled=true\n\n# Do not spill temp files smaller than this size (bytes)\npig.spill.size.threshold=5000000\n\n# EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)\n# This should help reduce the number of files being spilled.\npig.spill.gc.activation.size=40000000\n\n# the following two parameters are to help estimate the reducer number\npig.exec.reducers.bytes.per.reducer=1000000000\npig.exec.reducers.max=999\n\n# Temporary location to store the intermediate data.\npig.temp.dir=/tmp/\n\n# Threshold for merging FRJoin fragment files\npig.files.concatenation.threshold=100\npig.optimistic.files.concatenation=false;\n\npig.disable.counter=false\n\nhcat.bin=/usr/bin/hcat" }
} }, { "hst-agent-conf" : { "properties_attributes" : { }, "properties" : { "security.anonymization.max.heap" : "2048", "agent.tmp_dir" : "/var/lib/smartsense/hst-agent/data/tmp", "upload.retry_count" : "100", "server.connection_retry_interval" : "10", "agent.version" : "1.4.2.2.5.2.0-298", "upload.retry_interval" : "15", "agent.loglevel" : "INFO", "bundle.logs_to_capture" : "(.*).log$,(.*).out$", "server.connection_retry_count" : "100" } } }, { "ranger-hive-plugin-properties" : { "properties_attributes" : { }, "properties" : { } } }, { "ranger-yarn-security" : { "properties_attributes" : { }, "properties" : { } } }, { "hadoop-policy" : { "properties_attributes" : { }, "properties" : { "security.inter.datanode.protocol.acl" : "*", "security.refresh.usertogroups.mappings.protocol.acl" : "hadoop", "security.admin.operations.protocol.acl" : "hadoop", "security.client.datanode.protocol.acl" : "*", "security.datanode.protocol.acl" : "*", "security.inter.tracker.protocol.acl" : "*", "security.job.client.protocol.acl" : "*", "security.client.protocol.acl" : "*", "security.job.task.protocol.acl" : "*", "security.refresh.policy.protocol.acl" : "hadoop", "security.namenode.protocol.acl" : "*" } } }, { "ranger-yarn-audit" : { "properties_attributes" : { }, "properties" : { } } }, { "tez-interactive-site" : { "properties_attributes" : { }, "properties" : { "tez.runtime.pipelined.sorter.lazy-allocate.memory" : "true", "tez.runtime.report.partition.stats" : "true", "tez.am.client.heartbeat.timeout.secs" : "90", "tez.runtime.shuffle.connect.timeout" : "30000", "tez.runtime.enable.final-merge.in.output" : "false", "tez.am.client.heartbeat.poll.interval.millis" : "6000", "tez.container.max.java.heap.fraction" : "-1", "tez.runtime.shuffle.memory.limit.percent" : "0.25", "tez.session.am.dag.submit.timeout.secs" : "1209600", "tez.runtime.shuffle.read.timeout" : "30000", "tez.am.am-rm.heartbeat.interval-ms.max" : "10000", "tez.lib.uris" : "/hdp/apps/${hdp.version}/tez_hive2/tez.tar.gz", "tez.am.task.listener.thread-count" : "1", "tez.runtime.shuffle.fetch.buffer.percent" : "0.6", "tez.task.timeout-ms" : "90000", "tez.am.node-blacklisting.enabled" : "false", "tez.history.logging.taskattempt-filters" : "SERVICE_BUSY,EXTERNAL_PREEMPTION", "tez.am.task.reschedule.higher.priority" : "false", "tez.runtime.unordered.output.buffer.size-mb" : "100", "tez.runtime.pipelined-shuffle.enabled" : "false", "tez.runtime.shuffle.fetch.verify-disk-checksum" : "false", "tez.runtime.shuffle.parallel.copies" : "8", "tez.dag.recovery.enabled" : "false", "tez.task.heartbeat.timeout.check-ms" : "15000", "tez.am.resource.memory.mb" : "1536", "tez.history.logging.timeline.num-dags-per-group" : "5", "tez.grouping.node.local.only" : "true", "tez.runtime.io.sort.mb" : "512", "tez.runtime.shuffle.keep-alive.enabled" : "true" } } },
ranger-nifi-policymgr-ssl { "anonymization-rules" : { "properties_attributes" : { }, "properties" : { "anonymization-rules-content" : "{\n\"rules\":[\n {\n \"name\": \"IP\",\n \"rule_id\": \"Pattern\",\n \"patterns\": [\"(? 7 %}-Xss512k -XX:+UseG1GC -XX:TLABSize=8m -XX:+ResizeTLAB -XX:+UseNUMA -XX:+AggressiveOpts -XX:InitiatingHeapOccupancyPercent=40 -XX:G1ReservePercent=20 -XX:MaxGCPauseMillis=200{% else %}-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC{% endif %}{{heap_dump_opts}}" ranger-nifi-policymgr-ssl How to get it? ranger-nifi-policymgr-ssl } } }, { "hiveserver2-interactive-site" : { "properties_attributes" : { }, "properties" : { "hive.async.log.enabled" : "false", "hive.service.metrics.hadoop2.component" : "hiveserver2", "hive.metastore.metrics.enabled" : "true", ranger-nifi-policymgr-ssl PasteShr ranger-nifi-policymgr-ssl "hive.service.metrics.reporter" : "HADOOP2" } } }, { "ams-site" : { "properties_attributes" : { }, "properties" : { "timeline.metrics.cluster.aggregator.hourly.interval" : "3600", "timeline.metrics.cluster.aggregator.minute.checkpointCutOffMultiplier" : "2", ranger-nifi-policymgr-ssl How to get it for free? ranger-nifi-policymgr-ssl "timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier" : "2", "timeline.metrics.host.aggregator.hourly.interval" : "3600", "timeline.metrics.aggregators.skip.blockcache.enabled" : "false", "timeline.metrics.service.rpc.address" : "0.0.0.0:60200", "timeline.metrics.service.operation.mode" : "embedded", "timeline.metrics.aggregator.checkpoint.dir" : "/var/lib/ambari-metrics-collector/checkpoint", "failover.strategy" : "round-robin", "timeline.metrics.cluster.aggregator.second.checkpointCutOffMultiplier" : "2", "timeline.metrics.service.http.policy" : "HTTP_ONLY", "timeline.metrics.downsampler.topn.value" : "10", ranger-nifi-policymgr-ssl How to get it for free? ranger-nifi-policymgr-ssl "timeline.metrics.host.aggregator.minute.checkpointCutOffMultiplier" : "2", "timeline.metrics.service.watcher.timeout" : "30", "timeline.metrics.service.checkpointDelay" : "60", "timeline.metrics.cluster.aggregator.second.interval" : "120", "timeline.metrics.service.webapp.address" : "0.0.0.0:6188", "timeline.metrics.host.aggregator.daily.ttl" : "31536000", "timeline.metrics.service.watcher.delay" : "30", "timeline.metrics.service.watcher.disabled" : "false", "timeline.metrics.hbase.init.check.enabled" : "true", "timeline.metrics.host.aggregator.hourly.disabled" : "false", ranger-nifi-policymgr-ssl How to get it? ranger-nifi-policymgr-ssl "timeline.metrics.service.cluster.aggregator.appIds" : "datanode,nodemanager,hbase", "timeline.metrics.cluster.aggregator.hourly.checkpointCutOffMultiplier" : "2", "timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier" : "2", "timeline.metrics.service.resultset.fetchSize" : "2000", "timeline.metrics.cluster.aggregator.hourly.ttl" : "31536000", "cluster.zookeeper.quorum" : "{{cluster_zookeeper_quorum_hosts}}", "timeline.metrics.downsampler.topn.function" : "max", "timeline.metrics.host.aggregator.ttl" : "86400", "phoenix.spool.directory" : "/tmp", "timeline.metrics.host.aggregate.splitpoints" : "load_fifteen", ranger-nifi-policymgr-ssl How to get it for free? 
ranger-nifi-policymgr-ssl "timeline.metrics.service.handler.thread.count" : "20", "timeline.metrics.cache.size" : "100", "timeline.metrics.cluster.aggregator.minute.interval" : "300", "timeline.metrics.cluster.aggregator.minute.ttl" : "2592000", "timeline.metrics.host.aggregator.minute.interval" : "300", "timeline.metrics.cluster.aggregator.interpolation.enabled" : "true", "timeline.metrics.cache.commit.interval" : "10", "timeline.metrics.host.aggregator.minute.disabled" : "false", "timeline.metrics.service.metadata.filters" : "ContainerResource", "timeline.metrics.cache.enabled" : "true", ranger-nifi-policymgr-ssl How to dowload it? ranger-nifi-policymgr-ssl "timeline.metrics.cluster.aggregate.splitpoints" : "load_fifteen", "timeline.metrics.cluster.aggregator.minute.disabled" : "false", "timeline.metrics.service.use.groupBy.aggregators" : "true", "phoenix.query.maxGlobalMemoryPercentage" : "25", "timeline.metrics.service.default.result.limit" : "15840", "timeline.metrics.hbase.compression.scheme" : "SNAPPY", "timeline.metrics.cluster.aggregator.daily.ttl" : "63072000", "cluster.zookeeper.property.clientPort" : "{{cluster_zookeeper_clientPort}}", "timeline.metrics.sink.report.interval" : "60", "timeline.metrics.cluster.aggregator.second.timeslice.interval" : "30", ranger-nifi-policymgr-ssl PasteShr ranger-nifi-policymgr-ssl "timeline.metrics.cluster.aggregation.sql.filters" : "sdisk\\_%,boottime", "timeline.metrics.downsampler.topn.metric.patterns" : "dfs.NNTopUserOpCounts.windowMs=60000.op=__%.user=%,dfs.NNTopUserOpCounts.windowMs=300000.op=__%.user=%,dfs.NNTopUserOpCounts.windowMs=1500000.op=__%.user=%", "timeline.metrics.host.aggregator.hourly.ttl" : "2592000", "timeline.metrics.cluster.aggregator.daily.interval" : "86400", "timeline.metrics.host.aggregator.daily.disabled" : "false", "timeline.metrics.cluster.aggregator.daily.disabled" : "false", "timeline.metrics.cluster.aggregator.hourly.disabled" : "false", "timeline.metrics.service.watcher.initial.delay" : "600", "timeline.metrics.host.aggregator.minute.ttl" : "604800", "timeline.metrics.hbase.data.block.encoding" : "FAST_DIFF", ranger-nifi-policymgr-ssl How to use it? ranger-nifi-policymgr-ssl "timeline.metrics.cluster.aggregator.second.disabled" : "false", "timeline.metrics.sink.collection.period" : "10", "timeline.metrics.host.aggregator.hourly.checkpointCutOffMultiplier" : "2", "timeline.metrics.daily.aggregator.minute.interval" : "86400", "timeline.metrics.cluster.aggregator.second.ttl" : "259200" } } }, { "mapred-site" : { ranger-nifi-policymgr-ssl How to get it for free? ranger-nifi-policymgr-ssl "properties_attributes" : { }, "properties" : { "mapreduce.map.speculative" : "false", "mapreduce.jobhistory.recovery.store.class" : "org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService", "mapreduce.job.counters.max" : "130", "mapreduce.reduce.log.level" : "INFO", "mapreduce.shuffle.port" : "13562", "yarn.app.mapreduce.am.admin-command-opts" : "-Dhdp.version=${hdp.version}", "mapreduce.job.emit-timeline-data" : "false", "mapreduce.jobhistory.recovery.enable" : "true", ranger-nifi-policymgr-ssl How to dowload it? 
ranger-nifi-policymgr-ssl "mapreduce.map.log.level" : "INFO", "yarn.app.mapreduce.am.staging-dir" : "/user", "mapreduce.reduce.shuffle.merge.percent" : "0.66", "mapreduce.output.fileoutputformat.compress" : "false", "mapreduce.admin.map.child.java.opts" : "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", "mapreduce.job.reduce.slowstart.completedmaps" : "0.05", "mapreduce.jobhistory.http.policy" : "HTTP_ONLY", "mapreduce.job.queuename" : "default", "mapreduce.application.framework.path" : "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework", "mapreduce.application.classpath" : "$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure:/usr/hdp/current/ext/hadoop/*", ranger-nifi-policymgr-ssl PasteShr ranger-nifi-policymgr-ssl "mapreduce.reduce.java.opts" : "-Xmx3276m", "mapreduce.map.output.compress" : "false", "mapreduce.reduce.input.buffer.percent" : "0.0", "mapreduce.jobhistory.intermediate-done-dir" : "/mr-history/tmp", "yarn.app.mapreduce.am.log.level" : "INFO", "mapreduce.reduce.shuffle.fetch.retry.interval-ms" : "1000", "mapreduce.reduce.shuffle.input.buffer.percent" : "0.7", "mapreduce.reduce.speculative" : "false", "mapreduce.output.fileoutputformat.compress.type" : "BLOCK", "mapreduce.task.io.sort.mb" : "1146", ranger-nifi-policymgr-ssl How to use it? ranger-nifi-policymgr-ssl "mapreduce.cluster.administrators" : " hadoop", "mapreduce.reduce.shuffle.fetch.retry.timeout-ms" : "30000", "mapreduce.jobhistory.address" : "%HOSTGROUP::host_group_1%:10020", "mapreduce.map.sort.spill.percent" : "0.7", "mapreduce.map.memory.mb" : "2048", "mapreduce.admin.reduce.child.java.opts" : "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", "mapreduce.jobhistory.bind-host" : "0.0.0.0", "mapreduce.task.timeout" : "300000", "mapreduce.admin.user.env" : "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-{{architecture}}-64", "mapreduce.am.max-attempts" : "2", ranger-nifi-policymgr-ssl PasteShr ranger-nifi-policymgr-ssl "mapreduce.reduce.memory.mb" : "4096", "mapreduce.framework.name" : "yarn", "mapreduce.reduce.shuffle.parallelcopies" : "30", "mapreduce.jobhistory.done-dir" : "/mr-history/done", "mapreduce.jobhistory.webapp.address" : "%HOSTGROUP::host_group_1%:19888", "mapreduce.reduce.shuffle.fetch.retry.enabled" : "1", "mapreduce.task.io.sort.factor" : "100", "mapreduce.jobhistory.recovery.store.leveldb.path" : "/hadoop/mapreduce/jhs", "yarn.app.mapreduce.client.job.max-retries" : "60", "yarn.app.mapreduce.am.command-opts" : "-Xmx1638m -Dhdp.version=${hdp.version}", ranger-nifi-policymgr-ssl How to dowload it? ranger-nifi-policymgr-ssl "mapreduce.map.java.opts" : "-Xmx1638m", "yarn.app.mapreduce.am.resource.mb" : "2048" } } }, { "parquet-logging" : { "properties_attributes" : { }, "properties" : { "content" : "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. 
See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Properties file which configures the operation of the JDK\n# logging facility.\n\n# The system will look for this config file, first using\n# a System property specified at startup:\n#\n# >java -Djava.util.logging.config.file=myLoggingConfigFilePath\n#\n# If this property is not specified, then the config file is\n# retrieved from its default location at:\n#\n# JDK_HOME/jre/lib/logging.properties\n\n# Global logging properties.\n# ------------------------------------------\n# The set of handlers to be loaded upon startup.\n# Comma-separated list of class names.\n# (? LogManager docs say no comma here, but JDK example has comma.)\n# handlers=java.util.logging.ConsoleHandler\norg.apache.parquet.handlers= java.util.logging.FileHandler\n\n# Default global logging level.\n# Loggers and Handlers may override this level\n.level=INFO\n\n# Handlers\n# -----------------------------------------\n\n# --- ConsoleHandler ---\n# Override of global logging level\njava.util.logging.ConsoleHandler.level=INFO\njava.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter\njava.util.logging.SimpleFormatter.format=[%1$tc] %4$s: %2$s - %5$s %6$s%n\n\n# --- FileHandler ---\n# Override of global logging level\njava.util.logging.FileHandler.level=ALL\n\n# Naming style for the output file:\n# (The output file is placed in the system temporary directory.\n# %u is used to provide unique identifier for the file.\n# For more information refer\n# https://docs.oracle.com/javase/7/docs/api/java/util/logging/FileHandler.html)\njava.util.logging.FileHandler.pattern=%t/parquet-%u.log\n\n# Limiting size of output file in bytes:\njava.util.logging.FileHandler.limit=50000000\n\n# Number of output files to cycle through, by appending an\n# integer to the base file name:\njava.util.logging.FileHandler.count=1\n\n# Style of output (Simple or XML):\njava.util.logging.FileHandler.formatter=java.util.logging.SimpleFormatter" } } }, { "hdfs-log4j" : { "properties_attributes" : { }, "properties" : { "hadoop_security_log_max_backup_size" : "256", "hadoop_security_log_number_of_backup_files" : "20", "hadoop_log_number_of_backup_files" : "10", "content" : "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize={{hadoop_security_log_max_backup_size}}MB\nhadoop.security.log.maxbackupindex={{hadoop_security_log_number_of_backup_files}}\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.additivity.SecurityLogger=false\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit 
logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# NameNode metrics logging.\n# The default is to retain two namenode-metrics.log files up to 64MB each.\n#\nnamenode.metrics.logger=INFO,NullAppender\nlog4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}\nlog4j.additivity.NameNodeMetricsLog=false\nlog4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log\nlog4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n\nlog4j.appender.NNMETRICSRFA.MaxBackupIndex=1\nlog4j.appender.NNMETRICSRFA.MaxFileSize=64MB\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize={{hadoop_log_max_backup_size}}MB\nlog4j.appender.RFA.MaxBackupIndex={{hadoop_log_number_of_backup_files}}\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateChange=WARN", "hadoop_log_max_backup_size" : "256" } } }, { "hive-site" : { "properties_attributes" : { "hidden" : { "javax.jdo.option.ConnectionPassword" : "HIVE_CLIENT,WEBHCAT_SERVER,HCAT,CONFIG_DOWNLOAD"
ranger-nifi-policymgr-ssl } }, "properties" : { "hive.exec.reducers.bytes.per.reducer" : "67108864", "hive.metastore.pre.event.listeners" : "org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener", "hive.optimize.reducededuplication" : "true", "hive.metastore.sasl.enabled" : "false", "hive.vectorized.execution.enabled" : "true", "hive.security.authorization.manager" : "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory", "hive.auto.convert.join.noconditionaltask" : "true", ranger-nifi-policymgr-ssl How to get it? ranger-nifi-policymgr-ssl "hive.exec.orc.compression.strategy" : "SPEED", "hive.server2.allow.user.substitution" : "true", "hive.limit.pushdown.memory.usage" : "0.04", "hive.merge.rcfile.block.level" : "true", "hive.stats.fetch.column.stats" : "true", "hive.tez.dynamic.partition.pruning" : "true", "hive.exec.max.dynamic.partitions.pernode" : "2000", "hive.exec.orc.default.compress" : "ZLIB", "hive.merge.mapredfiles" : "false", "hive.compactor.abortedtxn.threshold" : "1000", ranger-nifi-policymgr-ssl How to dowload it? ranger-nifi-policymgr-ssl "hive.map.aggr" : "true", "hive.tez.dynamic.partition.pruning.max.data.size" : "104857600", "hive.compactor.initiator.on" : "false", "hive.smbjoin.cache.rows" : "10000", "hive.exec.max.dynamic.partitions" : "5000", "hive.auto.convert.join" : "true", "hive.server2.support.dynamic.service.discovery" : "true", "hive.tez.log.level" : "INFO", "hive.compactor.worker.timeout" : "86400L", "hive.custom-extensions.root" : "/hdp/ext/{{major_stack_version}}/hive", ranger-nifi-policymgr-ssl How to get it for free? ranger-nifi-policymgr-ssl "hive.metastore.authorization.storage.checks" : "false", "hive.merge.mapfiles" : "true", "hive.exec.post.hooks" : "org.apache.hadoop.hive.ql.hooks.ATSHook", "hive.server2.transport.mode" : "binary", "hive.server2.thrift.http.path" : "cliservice", "hive.enforce.sortmergebucketmapjoin" : "true", "hive.metastore.execute.setugi" : "true", "hive.tez.smb.number.waves" : "0.5", "hive.server2.authentication.spnego.keytab" : "HTTP/_HOST@EXAMPLE.COM", "hive.zookeeper.quorum" : "%HOSTGROUP::host_group_1%:2181", ranger-nifi-policymgr-ssl How to get it? ranger-nifi-policymgr-ssl "hive.mapjoin.bucket.cache.size" : "10000", "hive.metastore.kerberos.principal" : "hive/_HOST@EXAMPLE.COM", "hive.tez.auto.reducer.parallelism" : "true", "hive.security.metastore.authorization.manager" : "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider", "datanucleus.autoCreateSchema" : "false", "datanucleus.fixedDatastore" : "true", "hive.compute.query.using.stats" : "true", "hive.security.authorization.enabled" : "false", "hive.server2.thrift.sasl.qop" : "auth", "hive.merge.orcfile.stripe.level" : "true", ranger-nifi-policymgr-ssl How to use it? ranger-nifi-policymgr-ssl "hive.orc.splits.include.file.footer" : "false", "hive.exec.compress.output" : "false", "hive.user.install.directory" : "/user/", "hive.prewarm.enabled" : "false", "hive.compactor.delta.num.threshold" : "10", "hive.orc.compute.splits.num.threads" : "10", "hive.vectorized.groupby.checkinterval" : "4096", "hive.txn.manager" : "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager", "datanucleus.cache.level2.type" : "none", "hive.map.aggr.hash.percentmemory" : "0.5", ranger-nifi-policymgr-ssl How to get it? 
ranger-nifi-policymgr-ssl "hive.optimize.bucketmapjoin" : "true", "hive.tez.max.partition.factor" : "2.0", "hive.server2.thrift.port" : "10000", "hive.exec.failure.hooks" : "org.apache.hadoop.hive.ql.hooks.ATSHook", "hive.exec.compress.intermediate" : "false", "hive.exec.max.created.files" : "100000", "hive.mapred.reduce.tasks.speculative.execution" : "false", "hive.vectorized.groupby.flush.percent" : "0.1", "hive.metastore.client.socket.timeout" : "1800s", "hive.server2.tez.initialize.default.sessions" : "false", ranger-nifi-policymgr-ssl How to get it for free? ranger-nifi-policymgr-ssl "atlas.hook.hive.minThreads" : "1", "hive.stats.autogather" : "true", "hive.optimize.sort.dynamic.partition" : "false", "hive.txn.max.open.batch" : "1000", "hive.default.fileformat" : "TextFile", "hive.mapjoin.optimized.hashtable" : "true", "hive.vectorized.groupby.maxentries" : "100000", "hive.compactor.check.interval" : "300L", "hive.security.authenticator.manager" : "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator", "hive.security.metastore.authorization.auth.reads" : "true", ranger-nifi-policymgr-ssl How to get it? ranger-nifi-policymgr-ssl "hive.compactor.delta.pct.threshold" : "0.1f", "hive.vectorized.execution.reduce.enabled" : "false", "javax.jdo.option.ConnectionDriverName" : "com.mysql.jdbc.Driver", "hive.stats.dbclass" : "fs", "hive.exec.parallel" : "false", "hive.compactor.worker.threads" : "0", "hive.exec.submitviachild" : "false", "hive.fetch.task.conversion" : "more", "hive.server2.authentication" : "NONE", "hive.map.aggr.hash.force.flush.memory.threshold" : "0.9", ranger-nifi-policymgr-ssl How to use it? ranger-nifi-policymgr-ssl "hive.start.cleanup.scratchdir" : "false", "hive.merge.tezfiles" : "false", "hive.metastore.cache.pinobjtypes" : "Table,Database,Type,FieldSchema,Order", "hive.optimize.index.filter" : "true", "hive.server2.authentication.spnego.principal" : "/etc/security/keytabs/spnego.service.keytab", "hive.convert.join.bucket.mapjoin.tez" : "false", "hive.metastore.warehouse.dir" : "/apps/hive/warehouse", "hive.execution.engine" : "tez", "atlas.hook.hive.maxThreads" : "1", "hive.tez.dynamic.partition.pruning.max.event.size" : "1048576", ranger-nifi-policymgr-ssl How to use it? ranger-nifi-policymgr-ssl "hive.cbo.enable" : "true", "hive.exec.orc.encoding.strategy" : "SPEED", "hive.optimize.constant.propagation" : "true", "hive.tez.container.size" : "2048", "hive.metastore.connect.retries" : "24", "hive.optimize.reducededuplication.min.reducer" : "4", "hive.tez.input.format" : "org.apache.hadoop.hive.ql.io.HiveInputFormat", "hive.cluster.delegation.token.store.zookeeper.connectString" : "%HOSTGROUP::host_group_1%:2181", "hive.metastore.uris" : "thrift://%HOSTGROUP::host_group_1%:9083", "hive.server2.max.start.attempts" : "5", ranger-nifi-policymgr-ssl How to dowload it? 
ranger-nifi-policymgr-ssl "hive.exec.dynamic.partition.mode" : "strict", "hive.server2.thrift.max.worker.threads" : "500", "hive.server2.use.SSL" : "false", "hive.tez.java.opts" : "-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps", "hive.exec.submit.local.task.via.child" : "true", "hive.optimize.null.scan" : "true", "hive.exec.orc.default.stripe.size" : "67108864", "hive.limit.optimize.enable" : "true", "hive.cluster.delegation.token.store.zookeeper.znode" : "/hive/cluster/delegation", "hive.exec.pre.hooks" : "org.apache.hadoop.hive.ql.hooks.ATSHook", ranger-nifi-policymgr-ssl PasteShr ranger-nifi-policymgr-ssl "hive.cluster.delegation.token.store.class" : "org.apache.hadoop.hive.thrift.ZooKeeperTokenStore", "ambari.hive.db.schema.name" : "hive", "hive.zookeeper.client.port" : "2181", "hive.enforce.sorting" : "true", "hive.tez.cpu.vcores" : "-1", "hive.metastore.client.connect.retry.delay" : "5s", "hive.server2.tez.default.queues" : "default", "hive.server2.tez.sessions.per.default.queue" : "1", "hive.server2.thrift.http.port" : "10001", "hive.server2.logging.operation.log.location" : "/tmp/hive/operation_logs", ranger-nifi-policymgr-ssl How to get it for free? ranger-nifi-policymgr-ssl "javax.jdo.option.ConnectionURL" : "jdbc:mysql://%HOSTGROUP::host_group_1%/hive?createDatabaseIfNotExist=true", "hive.map.aggr.hash.min.reduction" : "0.5", "hive.merge.size.per.task" : "256000000", "hive.merge.smallfiles.avgsize" : "16000000", "hive.exec.reducers.max" : "1009", "hive.optimize.metadataonly" : "true", "hive.fetch.task.conversion.threshold" : "1073741824", "hive.prewarm.numcontainers" : "3", "hive.tez.min.partition.factor" : "0.25", "hive.auto.convert.join.noconditionaltask.size" : "572662306", ranger-nifi-policymgr-ssl PasteShr ranger-nifi-policymgr-ssl "hive.server2.logging.operation.enabled" : "true", "hive.metastore.kerberos.keytab.file" : "/etc/security/keytabs/hive.service.keytab", "hive.exec.parallel.thread.number" : "8", "hive.support.concurrency" : "false", "javax.jdo.option.ConnectionUserName" : "hive", "hive.conf.restricted.list" : "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role", "hive.auto.convert.sortmerge.join.to.mapjoin" : "false", "hive.fetch.task.aggr" : "false", "hive.cli.print.header" : "false", "hive.server2.table.type.mapping" : "CLASSIC", ranger-nifi-policymgr-ssl How to use it? ranger-nifi-policymgr-ssl "hive.txn.timeout" : "300", "hive.warehouse.subdir.inherit.perms" : "true", "hive.stats.fetch.partition.stats" : "true", "hive.optimize.bucketmapjoin.sortedmerge" : "false", "hive.security.metastore.authenticator.manager" : "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator", "hive.server2.enable.doAs" : "true", "hive.server2.zookeeper.namespace" : "hiveserver2", "hive.default.fileformat.managed" : "TextFile", "hive.enforce.bucketing" : "true", "hive.exec.scratchdir" : "/tmp/hive", ranger-nifi-policymgr-ssl How to get it? 
ranger-nifi-policymgr-ssl "hive.exec.dynamic.partition" : "true", "hive.metastore.server.max.threads" : "100000", "hive.metastore.failure.retries" : "24", "hive.auto.convert.sortmerge.join" : "true", "hive.zookeeper.namespace" : "hive_zookeeper_namespace" } } }, { "activity-conf" : { ranger-nifi-policymgr-ssl PasteShr ranger-nifi-policymgr-ssl "properties_attributes" : { }, "properties" : { "global.activity.processing.parallelism" : "8", "yarn_app.activity.watcher.enabled" : "true", "mr_job.max.job.size.mb.for.parallel.execution" : "500", "global.activity.processor.pool.max.wait.seconds" : "60", "phoenix.sink.batch.size" : "100", "phoenix.sink.flush.interval.seconds" : "30", "hdfs.activity.watcher.enabled" : "true", "global.activity.analyzer.user" : "activity_analyzer", ranger-nifi-policymgr-ssl How to dowload it? ranger-nifi-policymgr-ssl "tez_job.activity.watcher.enabled" : "true", "activity.explorer.user" : "activity_explorer", "mr_job.activity.watcher.enabled" : "true", "tez_job.tmp.dir" : "/var/lib/smartsense/activity-analyzer/tez/tmp/" } } }, { "tez-site" : { "properties_attributes" : { }, ranger-nifi-policymgr-ssl How to dowload it? ranger-nifi-policymgr-ssl "properties" : { "tez.am.launch.cmd-opts" : "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC{{heap_dump_opts}}", "tez.runtime.sorter.class" : "PIPELINED", "tez.counters.max.groups" : "3000", "tez.am.view-acls" : "*", "tez.runtime.shuffle.memory.limit.percent" : "0.25", "tez.staging-dir" : "/tmp/${user.name}/staging", "tez.am.container.reuse.locality.delay-allocation-millis" : "250", "tez.runtime.compress" : "true", "tez.am.am-rm.heartbeat.interval-ms.max" : "250", ranger-nifi-policymgr-ssl How to use it? ranger-nifi-policymgr-ssl "tez.am.container.idle.release-timeout-min.millis" : "10000", "tez.task.launch.cmd-opts" : "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC{{heap_dump_opts}}", "tez.lib.uris" : "/hdp/apps/${hdp.version}/tez/tez.tar.gz", "tez.counters.max" : "10000", "tez.generate.debug.artifacts" : "false", "tez.runtime.convert.user-payload.to.history-text" : "false", "tez.shuffle-vertex-manager.max-src-fraction" : "0.4", "tez.am.log.level" : "INFO", "tez.task.am.heartbeat.counter.interval-ms.max" : "4000", "tez.am.container.idle.release-timeout-max.millis" : "20000", ranger-nifi-policymgr-ssl How to dowload it? ranger-nifi-policymgr-ssl "tez.am.resource.memory.mb" : "4096", "tez.am.max.app.attempts" : "2", "tez.cluster.additional.classpath.prefix" : "/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure", "tez.am.container.reuse.non-local-fallback.enabled" : "false", "tez.am.container.reuse.rack-fallback.enabled" : "true", "tez.task.get-task.sleep.interval-ms.max" : "200", "tez.runtime.io.sort.mb" : "540", "tez.task.resource.memory.mb" : "2048", "tez.runtime.optimize.local.fetch" : "true", "tez.runtime.pipelined.sorter.sort.threads" : "2", ranger-nifi-policymgr-ssl How to get it? 
ranger-nifi-policymgr-ssl "tez.shuffle-vertex-manager.min-src-fraction" : "0.2", "tez.session.am.dag.submit.timeout.secs" : "600", "tez.session.client.timeout.secs" : "-1", "tez.am.launch.cluster-default.cmd-opts" : "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", "tez.am.maxtaskfailures.per.node" : "10", "tez.am.tez-ui.history-url.template" : "__HISTORY_URL_BASE__?viewPath=%2F%23%2Ftez-app%2F__APPLICATION_ID__", "tez.history.logging.timeline-cache-plugin.old-num-dags-per-group" : "5", "tez.runtime.shuffle.fetch.buffer.percent" : "0.6", "tez.task.launch.env" : "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-{{architecture}}-64", "tez.use.cluster.hadoop-libs" : "false", ranger-nifi-policymgr-ssl How to get it for free? ranger-nifi-policymgr-ssl "tez.queue.name" : "default", "tez.runtime.unordered.output.buffer.size-mb" : "153", "tez.am.container.reuse.enabled" : "true", "tez.grouping.split-waves" : "1.7", "tez.grouping.max-size" : "1073741824", "tez.task.launch.cluster-default.cmd-opts" : "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", "tez.task.max-events-per-heartbeat" : "500", "tez.history.logging.service.class" : "org.apache.tez.dag.history.logging.ats.ATSV15HistoryLoggingService", "tez.grouping.min-size" : "16777216", "tez.task.generate.counters.per.io" : "true", ranger-nifi-policymgr-ssl PasteShr ranger-nifi-policymgr-ssl "tez.am.launch.env" : "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-{{architecture}}-64", "tez.runtime.compress.codec" : "org.apache.hadoop.io.compress.SnappyCodec" } } }, { "activity-zeppelin-site" : { "properties_attributes" : { }, "properties" : { "zeppelin.notebook.dir" : "/var/lib/smartsense/activity-explorer/notebook", ranger-nifi-policymgr-ssl How to get it? ranger-nifi-policymgr-ssl "zeppelin.server.context.path" : "/", "zeppelin.ssl.keystore.type" : "JKS", "zeppelin.ssl.truststore.path" : "/var/lib/smartsense/activity-explorer/truststore", "zeppelin.war.tempdir" : "/var/lib/smartsense/activity-explorer/webapp", "zeppelin.websocket.max.text.message.size" : "1024000", "zeppelin.notebook.homescreen.hide" : "false", "zeppelin.interpreters" : "org.apache.zeppelin.phoenix.PhoenixInterpreter", "zeppelin.server.port" : "9060", "zeppelin.ssl" : "false", "zeppelin.notebook.storage" : "org.apache.zeppelin.notebook.repo.VFSNotebookRepo", ranger-nifi-policymgr-ssl How to get it? ranger-nifi-policymgr-ssl "zeppelin.interpreter.connect.timeout" : "30000", "zeppelin.anonymous.allowed" : "false", "zeppelin.interpreter.dir" : "/usr/hdp/share/hst/activity-explorer/interpreter", "zeppelin.ssl.truststore.type" : "JKS", "zeppelin.ssl.keystore.path" : "/var/lib/smartsense/activity-explorer/keystore", "zeppelin.server.allowed.origins" : "*", "zeppelin.server.addr" : "0.0.0.0", "zeppelin.ssl.client.auth" : "false" } } ranger-nifi-policymgr-ssl How to get it? ranger-nifi-policymgr-ssl }, { "webhcat-site" : { "properties_attributes" : { }, "properties" : { "templeton.zookeeper.hosts" : "%HOSTGROUP::host_group_1%:2181", "webhcat.proxyuser.root.groups" : "*", "webhcat.proxyuser.root.hosts" : "sandbox-hdp.hortonworks.com", "templeton.hadoop.queue.name" : "default", "templeton.hadoop" : "/usr/hdp/${hdp.version}/hadoop/bin/hadoop", ranger-nifi-policymgr-ssl How to use it? 
ranger-nifi-policymgr-ssl "templeton.hive.extra.files" : "/usr/hdp/${hdp.version}/tez/conf/tez-site.xml,/usr/hdp/${hdp.version}/tez,/usr/hdp/${hdp.version}/tez/lib", "templeton.hcat.home" : "hive.tar.gz/hive/hcatalog", "templeton.override.enabled" : "false", "templeton.hive.properties" : "hive.metastore.local=false,hive.metastore.uris=thrift://%HOSTGROUP::host_group_1%:9083,hive.metastore.sasl.enabled=false,hive.metastore.execute.setugi=true", "templeton.python" : "${env.PYTHON_CMD}", "templeton.storage.class" : "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage", "templeton.streaming.jar" : "hdfs:///hdp/apps/${hdp.version}/mapreduce/hadoop-streaming.jar", "templeton.hadoop.conf.dir" : "/etc/hadoop/conf", "templeton.hcat" : "/usr/hdp/${hdp.version}/hive/bin/hcat", "templeton.jar" : "/usr/hdp/${hdp.version}/hive/share/webhcat/svr/lib/hive-webhcat-*.jar", ranger-nifi-policymgr-ssl PasteShr ranger-nifi-policymgr-ssl "templeton.port" : "50111", "templeton.libjars" : "/usr/hdp/${hdp.version}/zookeeper/zookeeper.jar,/usr/hdp/${hdp.version}/hive/lib/hive-common.jar", "templeton.hive.path" : "hive.tar.gz/hive/bin/hive", "templeton.exec.timeout" : "60000", "templeton.sqoop.home" : "sqoop.tar.gz/sqoop", "templeton.sqoop.archive" : "hdfs:///hdp/apps/${hdp.version}/sqoop/sqoop.tar.gz", "templeton.hive.archive" : "hdfs:///hdp/apps/${hdp.version}/hive/hive.tar.gz", "templeton.hive.home" : "hive.tar.gz/hive", "templeton.pig.archive" : "hdfs:///hdp/apps/${hdp.version}/pig/pig.tar.gz", "templeton.pig.path" : "pig.tar.gz/pig/bin/pig", ranger-nifi-policymgr-ssl How to use it? ranger-nifi-policymgr-ssl "templeton.sqoop.path" : "sqoop.tar.gz/sqoop/bin/sqoop" } } }, { "slider-env" : { "properties_attributes" : { }, "properties" : { "content" : "\n# Set Slider-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java64_home}}\n# The hadoop conf directory. Optional as slider-client.xml can be edited to add properties.\nexport HADOOP_CONF_DIR={{hadoop_conf_dir}}" } ranger-nifi-policymgr-ssl How to get it? ranger-nifi-policymgr-ssl } }, { "ams-hbase-env" : { "properties_attributes" : { }, "properties" : { "hbase_master_maxperm_size" : "128", "max_open_files_limit" : "32768", "regionserver_xmn_size" : "128", "hbase_master_heapsize" : "640", ranger-nifi-policymgr-ssl PasteShr ranger-nifi-policymgr-ssl "hbase_regionserver_xmn_ratio" : "0.2", "hbase_master_xmn_size" : "192", "content" : "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6+ required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nadditional_cp={{hbase_classpath_additional}}\nif [ -n \"$additional_cp\" ];\nthen\n export HBASE_CLASSPATH=${HBASE_CLASSPATH}:$additional_cp\nelse\n export HBASE_CLASSPATH=${HBASE_CLASSPATH}\nfi\n\n# The maximum amount of heap to use for hbase shell.\nexport HBASE_SHELL_OPTS=\"-Xmx256m\"\n\n# Extra Java runtime options.\n# Below are what we set by default. 
May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport HBASE_OPTS=\"-XX:+UseConcMarkSweepGC -XX:ErrorFile={{hbase_log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{hbase_tmp_dir}}\"\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{hbase_log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n\n{% if java_version < 8 %}\nexport HBASE_MASTER_OPTS=\" -XX:PermSize=64m -XX:MaxPermSize={{hbase_master_maxperm_size}} -Xms{{hbase_heapsize}} -Xmx{{hbase_heapsize}} -Xmn{{hbase_master_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly\"\nexport HBASE_REGIONSERVER_OPTS=\"-XX:MaxPermSize=128m -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% else %}\nexport HBASE_MASTER_OPTS=\" -Xms{{hbase_heapsize}} -Xmx{{hbase_heapsize}} -Xmn{{hbase_master_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly\"\nexport HBASE_REGIONSERVER_OPTS=\" -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% endif %}\n\n\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{hbase_log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{hbase_pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. 
This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}} -Djavax.security.auth.useSubjectCredsOnly=false\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}} -Djavax.security.auth.useSubjectCredsOnly=false\"\nexport HBASE_ZOOKEEPER_OPTS=\"$HBASE_ZOOKEEPER_OPTS -Djava.security.auth.login.config={{ams_zookeeper_jaas_config_file}}\"\n{% endif %}\n\n# use embedded native libs\n_HADOOP_NATIVE_LIB=\"/usr/lib/ams-hbase/lib/hadoop-native/\"\nexport HBASE_OPTS=\"$HBASE_OPTS -Djava.library.path=${_HADOOP_NATIVE_LIB}\"\n\n# Unset HADOOP_HOME to avoid importing HADOOP installed cluster related configs like: /usr/hdp/2.2.0.0-2041/hadoop/conf/\nexport HADOOP_HOME={{ams_hbase_home_dir}}\n\n# Explicitly Setting HBASE_HOME for AMS HBase so that there is no conflict\nexport HBASE_HOME={{ams_hbase_home_dir}}", "hbase_pid_dir" : "/var/run/ambari-metrics-collector/", "hbase_regionserver_shutdown_timeout" : "30", "hbase_log_dir" : "/var/log/ambari-metrics-collector", "hbase_regionserver_heapsize" : "768", "hbase_classpath_additional" : "" } } },
{ "ams-log4j" : { "properties_attributes" : { }, "properties" : { "ams_log_number_of_backup_files" : "60", "ams_log_max_backup_size" : "80", "content" : "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# Define some default values that can be overridden by system properties\nams.log.dir=.\nams.log.file=ambari-metrics-collector.log\n\n# Root logger option\nlog4j.rootLogger=INFO,file\n\n# Direct log messages to a log file\nlog4j.appender.file=org.apache.log4j.RollingFileAppender\nlog4j.appender.file.File=${ams.log.dir}/${ams.log.file}\nlog4j.appender.file.MaxFileSize={{ams_log_max_backup_size}}MB\nlog4j.appender.file.MaxBackupIndex={{ams_log_number_of_backup_files}}\nlog4j.appender.file.layout=org.apache.log4j.PatternLayout\nlog4j.appender.file.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n" } } },
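With an 80 MB cap per file and 60 backups, the ams-log4j RollingFileAppender above can occupy roughly 4.9 GB of disk at worst; a quick check in Python:

# Worst-case disk footprint of the collector log under the settings above:
# the active file plus ams_log_number_of_backup_files rolled backups.
max_file_mb = 80   # ams_log_max_backup_size
backups = 60       # ams_log_number_of_backup_files
print((backups + 1) * max_file_mb, "MB")  # 4880 MB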
{ "hivemetastore-site" : { "properties_attributes" : { }, "properties" : { "hive.service.metrics.hadoop2.component" : "hivemetastore", "hive.metastore.metrics.enabled" : "true", "hive.service.metrics.reporter" : "HADOOP2" } } },
{ "ams-ssl-server" : { "properties_attributes" : { }, "properties" : { "ssl.server.truststore.location" : "/etc/security/serverKeys/all.jks", "ssl.server.truststore.reload.interval" : "10000", "ssl.server.truststore.type" : "jks", "ssl.server.keystore.location" : "/etc/security/serverKeys/keystore.jks", "ssl.server.keystore.type" : "jks" } } },
{ "hive-exec-log4j2" : { "properties_attributes" : { }, "properties" : { "content" : "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nstatus = INFO\nname = HiveExecLog4j2\npackages = org.apache.hadoop.hive.ql.log\n\n# list of properties\nproperty.hive.log.level = {{hive_log_level}}\nproperty.hive.root.logger = FA\nproperty.hive.query.id = hadoop\nproperty.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}\nproperty.hive.log.file = ${sys:hive.query.id}.log\n\n# list of all appenders\nappenders = console, FA\n\n# console appender\nappender.console.type = Console\nappender.console.name = console\nappender.console.target = SYSTEM_ERR\nappender.console.layout.type = PatternLayout\nappender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n\n\n# simple file appender\nappender.FA.type = File\nappender.FA.name = FA\nappender.FA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}\nappender.FA.layout.type = PatternLayout\nappender.FA.layout.pattern = %d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n\n\n# list of all loggers\nloggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX\n\nlogger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn\nlogger.NIOServerCnxn.level = WARN\n\nlogger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO\nlogger.ClientCnxnSocketNIO.level = WARN\n\nlogger.DataNucleus.name = DataNucleus\nlogger.DataNucleus.level = ERROR\n\nlogger.Datastore.name = Datastore\nlogger.Datastore.level = ERROR\n\nlogger.JPOX.name = JPOX\nlogger.JPOX.level = ERROR\n\n# root logger\nrootLogger.level = ${sys:hive.log.level}\nrootLogger.appenderRefs = root\nrootLogger.appenderRef.root.ref = ${sys:hive.root.logger}" } } },
{ "ams-hbase-log4j" : { "properties_attributes" : { }, "properties" : { "ams_hbase_log_maxfilesize" : "256", "content" : "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.
See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# Define some default values that can be overridden by system properties\nhbase.root.logger=INFO,console\nhbase.security.logger=INFO,console\nhbase.log.dir=.\nhbase.log.file=hbase.log\n\n# Define the root logger to the system property \"hbase.root.logger\".\nlog4j.rootLogger=${hbase.root.logger}\n\n# Logging Threshold\nlog4j.threshold=ALL\n\n#\n# Daily Rolling File Appender\n#\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n# Rolling File Appender properties\nhbase.log.maxfilesize={{ams_hbase_log_maxfilesize}}MB\nhbase.log.maxbackupindex={{ams_hbase_log_maxbackupindex}}\n\n# Rolling File Appender\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}\n\nlog4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}\nlog4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n#\n# Security audit appender\n#\nhbase.security.log.file=SecurityAuth.audit\nhbase.security.log.maxfilesize={{ams_hbase_security_log_maxfilesize}}MB\nhbase.security.log.maxbackupindex={{ams_hbase_security_log_maxbackupindex}}\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}\nlog4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.category.SecurityLogger=${hbase.security.logger}\nlog4j.additivity.SecurityLogger=false\n#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE\n\n#\n# Null Appender\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n# Custom Logging levels\n\nlog4j.logger.org.apache.zookeeper=INFO\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.hbase=INFO\n# Make these two classes INFO-level. 
Make them DEBUG to see more zk debug.\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO\n#log4j.logger.org.apache.hadoop.dfs=DEBUG\n# Set this class to log INFO only otherwise its OTT\n# Enable this to get detailed connection error/retry logging.\n# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE\n\n\n# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)\n#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG\n\n# Uncomment the below if you want to remove logging of client region caching'\n# and scan of .META. messages\n# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO\n# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO", "ams_hbase_log_maxbackupindex" : "20", "ams_hbase_security_log_maxbackupindex" : "20", "ams_hbase_security_log_maxfilesize" : "256"
} } }, { "ssl-client" : { "properties_attributes" : { }, "properties" : { "ssl.client.truststore.reload.interval" : "10000", "ssl.client.keystore.location" : "/etc/security/clientKeys/keystore.jks", "ssl.client.truststore.location" : "/etc/security/clientKeys/all.jks",
"ssl.client.truststore.type" : "jks", "ssl.client.keystore.type" : "jks" } } }, { "ams-ssl-client" : { "properties_attributes" : { }, "properties" : { "ssl.client.truststore.location" : "/etc/security/clientKeys/all.jks",
"ssl.client.truststore.type" : "jks", "ssl.client.truststore.alias" : "" } } }, { "hive-atlas-application.properties" : { "properties_attributes" : { }, "properties" : { "atlas.hook.hive.maxThreads" : "5",
"atlas.hook.hive.keepAliveTime" : "10", "atlas.hook.hive.synchronous" : "false", "atlas.hook.hive.minThreads" : "5", "atlas.hook.hive.numRetries" : "3", "atlas.hook.hive.queueSize" : "1000" } } }, { "ams-hbase-policy" : {
"properties_attributes" : { }, "properties" : { "security.masterregion.protocol.acl" : "*", "security.admin.protocol.acl" : "*", "security.client.protocol.acl" : "*" } } }, { "ranger-hdfs-security" : {
"properties_attributes" : { }, "properties" : { } } },
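Several blocks above point at JKS stores under /etc/security. A small Python sketch that lists every *store.location value in the export and whether the file exists on the local host (same hypothetical blueprint.json as earlier):

import json, os

with open("blueprint.json") as f:  # hypothetical file name
    blueprint = json.load(f)

# Walk every config block and flag keystore/truststore paths.
for entry in blueprint["configurations"]:
    for config_type, body in entry.items():
        for key, value in body.get("properties", {}).items():
            if key.endswith("store.location"):
                print(config_type, key, value, os.path.exists(value))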
ranger-nifi-policymgr-ssl "port" : "3000", "cert_file" : "/etc/ambari-metrics-grafana/conf/ams-grafana.crt", "ca_cert" : "", "content" : "\n##################### Grafana Configuration Example #####################\n#\n# Everything has defaults so you only need to uncomment things you want to\n# change\n\n# possible values : production, development\n; app_mode = production\n\n#################################### Paths ####################################\n[paths]\n# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)\n#\n;data = /var/lib/grafana\ndata = {{ams_grafana_data_dir}}\n#\n# Directory where grafana can store logs\n#\n;logs = /var/log/grafana\nlogs = {{ams_grafana_log_dir}}\n\n\n#################################### Server ####################################\n[server]\n# Protocol (http or https)\n;protocol = http\nprotocol = {{ams_grafana_protocol}}\n# The ip address to bind to, empty will bind to all interfaces\n;http_addr =\n\n# The http port to use\n;http_port = 3000\nhttp_port = {{ams_grafana_port}}\n\n# The public facing domain name used to access grafana from a browser\n;domain = localhost\n\n# Redirect to correct domain if host header does not match domain\n# Prevents DNS rebinding attacks\n;enforce_domain = false\n\n# The full public facing url\n;root_url = %(protocol)s://%(domain)s:%(http_port)s/\n\n# Log web requests\n;router_logging = false\n\n# the path relative working path\n;static_root_path = public\nstatic_root_path = /usr/lib/ambari-metrics-grafana/public\n\n# enable gzip\n;enable_gzip = false\n\n# https certs & key file\n;cert_file =\n;cert_key =\ncert_file = {{ams_grafana_cert_file}}\ncert_key = {{ams_grafana_cert_key}}\n\n#################################### Database ####################################\n[database]\n# Either \"mysql\", \"postgres\" or \"sqlite3\", it's your choice\n;type = sqlite3\n;host = 127.0.0.1:3306\n;name = grafana\n;user = root\n;password =\n\n# For \"postgres\" only, either \"disable\", \"require\" or \"verify-full\"\n;ssl_mode = disable\n\n# For \"sqlite3\" only, path relative to data_path setting\n;path = grafana.db\n\n#################################### Session ####################################\n[session]\n# Either \"memory\", \"file\", \"redis\", \"mysql\", \"postgres\", default is \"file\"\n;provider = file\n\n# Provider config options\n# memory: not have any config yet\n# file: session dir path, is relative to grafana data_path\n# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`\n# mysql: go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1:3306)/database_name`\n# postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable\n;provider_config = sessions\n\n# Session cookie name\n;cookie_name = grafana_sess\n\n# If you use session in https only, default is false\n;cookie_secure = false\n\n# Session life time, default is 86400\n;session_life_time = 86400\n\n#################################### Analytics ####################################\n[analytics]\n# Server reporting, sends usage counters to stats.grafana.org every 24 hours.\n# No ip addresses are being tracked, only simple counters to track\n# running instances, dashboard and error counts. 
It is very helpful to us.\n# Change this option to false to disable reporting.\n;reporting_enabled = true\n\n# Google Analytics universal tracking code, only enabled if you specify an id here\n;google_analytics_ua_id =\n\n#################################### Security ####################################\n[security]\n# default admin user, created on startup\nadmin_user = {{ams_grafana_admin_user}}\n\n# default admin password, can be changed before first start of grafana, or in profile settings\n;admin_password =\n\n# used for signing\n;secret_key = SW2YcwTIb9zpOOhoPsMm\n\n# Auto-login remember days\n;login_remember_days = 7\n;cookie_username = grafana_user\n;cookie_remember_name = grafana_remember\n\n# disable gravatar profile images\n;disable_gravatar = false\n\n# data source proxy whitelist (ip_or_domain:port seperated by spaces)\n;data_source_proxy_whitelist =\n\n#################################### Users ####################################\n[users]\n# disable user signup / registration\n;allow_sign_up = true\n\n# Allow non admin users to create organizations\n;allow_org_create = true\n\n# Set to true to automatically assign new users to the default organization (id 1)\n;auto_assign_org = true\n\n# Default role new users will be automatically assigned (if disabled above is set to true)\n;auto_assign_org_role = Viewer\n\n# Background text for the user field on the login page\n;login_hint = email or username\n\n#################################### Anonymous Auth ##########################\n[auth.anonymous]\n# enable anonymous access\nenabled = true\n\n# specify organization name that should be used for unauthenticated users\norg_name = Main Org.\n\n# specify role for unauthenticated users\n;org_role = Admin\n\n#################################### Github Auth ##########################\n[auth.github]\n;enabled = false\n;allow_sign_up = false\n;client_id = some_id\n;client_secret = some_secret\n;scopes = user:email,read:org\n;auth_url = https://github.com/login/oauth/authorize\n;token_url = https://github.com/login/oauth/access_token\n;api_url = https://api.github.com/user\n;team_ids =\n;allowed_organizations =\n\n#################################### Google Auth ##########################\n[auth.google]\n;enabled = false\n;allow_sign_up = false\n;client_id = some_client_id\n;client_secret = some_client_secret\n;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email\n;auth_url = https://accounts.google.com/o/oauth2/auth\n;token_url = https://accounts.google.com/o/oauth2/token\n;api_url = https://www.googleapis.com/oauth2/v1/userinfo\n;allowed_domains =\n\n#################################### Auth Proxy ##########################\n[auth.proxy]\n;enabled = false\n;header_name = X-WEBAUTH-USER\n;header_property = username\n;auto_sign_up = true\n\n#################################### Basic Auth ##########################\n[auth.basic]\n;enabled = true\n\n#################################### Auth LDAP ##########################\n[auth.ldap]\n;enabled = false\n;config_file = /etc/grafana/ldap.toml\n\n#################################### SMTP / Emailing ##########################\n[smtp]\n;enabled = false\n;host = localhost:25\n;user =\n;password =\n;cert_file =\n;key_file =\n;skip_verify = false\n;from_address = admin@grafana.localhost\n\n[emails]\n;welcome_email_on_sign_up = false\n\n#################################### Logging ##########################\n[log]\n# Either \"console\", \"file\", default is \"console\"\n# Use comma to separate 
multiple modes, e.g. \"console, file\"\n;mode = console, file\n\n# Buffer length of channel, keep it as it is if you don't know what it is.\n;buffer_len = 10000\n\n# Either \"Trace\", \"Debug\", \"Info\", \"Warn\", \"Error\", \"Critical\", default is \"Trace\"\n;level = Info\n\n# For \"console\" mode only\n[log.console]\n;level =\n\n# For \"file\" mode only\n[log.file]\n;level =\n# This enables automated log rotate(switch of following options), default is true\n;log_rotate = true\n\n# Max line number of single file, default is 1000000\n;max_lines = 1000000\n\n# Max size shift of single file, default is 28 means 1 << 28, 256MB\n;max_lines_shift = 28\n\n# Segment log daily, default is true\n;daily_rotate = true\n\n# Expired days of log file(delete after max days), default is 7\n;max_days = 7\n\n#################################### AMPQ Event Publisher ##########################\n[event_publisher]\n;enabled = false\n;rabbitmq_url = amqp://localhost/\n;exchange = grafana_events\n\n;#################################### Dashboard JSON files ##########################\n[dashboards.json]\n;enabled = false\n;path = /var/lib/grafana/dashboards\npath = /usr/lib/ambari-metrics-grafana/public/dashboards" } } },
{ "hadoop-env" : { "properties_attributes" : { },
"properties" : { "hadoop_heapsize" : "1024", "proxyuser_group" : "users", "hadoop_root_logger" : "INFO,RFA", "dtnode_heapsize" : "1024m", "namenode_backup_dir" : "/tmp/upgrades", "hdfs_user" : "hdfs", "hadoop_pid_dir_prefix" : "/var/run/hadoop", "content" : "\n # Set Hadoop-specific environment variables here.\n\n # The only required environment variable is JAVA_HOME. All others are\n # optional. When running a distributed configuration it is best to\n # set JAVA_HOME in this file, so that it is correctly defined on\n # remote nodes.\n\n # The java implementation to use. Required.\n export JAVA_HOME={{java_home}}\n export HADOOP_HOME_WARN_SUPPRESS=1\n\n # Hadoop home directory\n export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n # Hadoop Configuration Directory\n\n {# this is different for HDP1 #}\n # Path to jsvc required by secure HDP 2.0 datanode\n export JSVC_HOME={{jsvc_path}}\n\n\n # The maximum amount of heap to use, in MB. Default is 1000.\n export HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\n export HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n # Extra Java runtime options.
Empty by default.\n export HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n # Command specific options appended to HADOOP_OPTS when specified\n HADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\n HADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n {% if java_version < 8 %}\n SHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\n export HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\n export HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly\"\n\n export HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n # The following applies to multiple commands (fs, dfs, fsck, distcp etc)\n export HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n {% else %}\n SHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\n export HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\n export 
HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly\"\n\n export HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n # The following applies to multiple commands (fs, dfs, fsck, distcp etc)\n export HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n {% endif %}\n\n HADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\n HADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n # On secure datanodes, user to run the datanode as after dropping privileges\n export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n # Extra ssh options. Empty by default.\n export HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n # Where log files are stored. $HADOOP_HOME/logs by default.\n export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n # History server logs\n export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n # Where log files are stored in the secure data environment.\n export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n # File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n # export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n # host:path where hadoop code should be rsync'd from. Unset by default.\n # export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n # Seconds to sleep between slave commands. Unset by default. This\n # can be useful in large clusters, where, e.g., slave rsyncs can\n # otherwise arrive faster than the master can service them.\n # export HADOOP_SLAVE_SLEEP=0.1\n\n # The directory where pid files are stored. /tmp by default.\n export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\n export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n # History server pid\n export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\n YARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n # A string representing this instance of hadoop. $USER by default.\n export HADOOP_IDENT_STRING=$USER\n\n # The scheduling priority for daemon processes. 
See 'man nice'.\n\n # export HADOOP_NICENESS=10\n\n # Add database libraries\n JAVA_JDBC_LIBS=\"\"\n if [ -d \"/usr/share/java\" ]; then\n for jarFile in `ls /usr/share/java | grep -E \"(mysql|ojdbc|postgresql|sqljdbc)\" 2>/dev/null`\n do\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\n done\n fi\n\n # Add libraries to the hadoop classpath - some may not need a colon as they already include it\n export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}\n\n # Setting path to hdfs command line\n export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n # Mostly required for hadoop 2.0\n export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:{{hadoop_lib_home}}/native/Linux-{{architecture}}-64\n\n export HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n\n\n # Fix temporary bug, when ulimit from conf files is not picked up, without full relogin.\n # Makes sense to fix only when runing DN as root\n if [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n {% if is_datanode_max_locked_memory_set %}\n ulimit -l {{datanode_max_locked_memory}}\n {% endif %}\n ulimit -n {{hdfs_user_nofile_limit}}\n fi\n\n {% if hadoop_custom_extensions_enabled %}\n #Enable custom extensions\n export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:{{stack_root}}/current/ext/hadoop/*\n {% endif %}\n\n # Enable ACLs on zookeper znodes if required\n {% if hadoop_zkfc_opts is defined %}\n export HADOOP_ZKFC_OPTS=\"{{hadoop_zkfc_opts}} $HADOOP_ZKFC_OPTS\"\n {% endif %}", "hdfs_log_dir_prefix" : "/var/log/hadoop",
"namenode_opt_newsize" : "128m", "namenode_heapsize" : "1024m", "hdfs_tmp_dir" : "/tmp", "namenode_opt_maxpermsize" : "256m", "nfsgateway_heapsize" : "1024", "hdfs_user_nofile_limit" : "128000", "keyserver_host" : " ", "keyserver_port" : "", "hdfs_user_nproc_limit" : "65536", "namenode_opt_maxnewsize" : "128m",
"namenode_opt_permsize" : "128m" } } },
{ "zookeeper-env" : { "properties_attributes" : { }, "properties" : { "zk_log_dir" : "/var/log/zookeeper", "zk_user" : "zookeeper",
"zk_pid_dir" : "/var/run/zookeeper", "content" : "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}" } } },
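The "content" strings above carry {{variable}} and {% if %} markers, which look like Jinja2 syntax; Ambari substitutes the matching property values when it writes the file to disk. A sketch of the same substitution in Python (jinja2 is a third-party package; the sample values come from zookeeper-env above):

from jinja2 import Template

# Render a fragment of the zookeeper-env content template.
snippet = ("export ZOO_LOG_DIR={{zk_log_dir}}\n"
           "{% if security_enabled %}export SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS ...\"\n{% endif %}")
print(Template(snippet).render(zk_log_dir="/var/log/zookeeper",
                               security_enabled=False))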
ranger-nifi-policymgr-ssl "yarn_rm_summary_log_number_of_backup_files" : "20", "content" : "\n#Relative to Yarn Log Dir Prefix\nyarn.log.dir=.\n#\n# Job Summary Appender\n#\n# Use following logger to send summary to separate file defined by\n# hadoop.mapreduce.jobsummary.log.file rolled daily:\n# hadoop.mapreduce.jobsummary.logger=INFO,JSA\n#\nhadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}\nhadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log\nlog4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender\n# Set the ResourceManager summary log filename\nyarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log\n# Set the ResourceManager summary log level and appender\nyarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}\n#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\n\n# To enable AppSummaryLogging for the RM,\n# set yarn.server.resourcemanager.appsummary.logger to\n# LEVEL,RMSUMMARY in hadoop-env.sh\n\n# Appender for ResourceManager Application Summary Log\n# Requires the following properties to be set\n# - hadoop.log.dir (Hadoop Log directory)\n# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)\n# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)\nlog4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender\nlog4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}\nlog4j.appender.RMSUMMARY.MaxFileSize={{yarn_rm_summary_log_max_backup_size}}MB\nlog4j.appender.RMSUMMARY.MaxBackupIndex={{yarn_rm_summary_log_number_of_backup_files}}\nlog4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.JSA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\nlog4j.appender.JSA.DatePattern=.yyyy-MM-dd\nlog4j.appender.JSA.layout=org.apache.log4j.PatternLayout\nlog4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}\nlog4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false\n\n# Appender for viewing information for errors and warnings\nyarn.ewma.cleanupInterval=300\nyarn.ewma.messageAgeLimitSeconds=86400\nyarn.ewma.maxUniqueMessages=250\nlog4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender\nlog4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}\nlog4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}\nlog4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}\n\n# Audit logging for ResourceManager\nrm.audit.logger=${hadoop.root.logger}\nlog4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}\nlog4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false\nlog4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log\nlog4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd\n\n# Audit logging for 
NodeManager\nnm.audit.logger=${hadoop.root.logger}\nlog4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}\nlog4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false\nlog4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log\nlog4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd" } } },
{ "mapred-env" : { "properties_attributes" : { }, "properties" : { "jobhistory_heapsize" : "900",
"mapred_user_nofile_limit" : "32768", "mapred_user" : "mapred", "mapred_user_nproc_limit" : "65536", "mapred_pid_dir_prefix" : "/var/run/hadoop-mapreduce", "mapred_log_dir_prefix" : "/var/log/hadoop-mapreduce", "content" : "\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n{% if security_enabled %}\nexport HADOOP_JOB_HISTORYSERVER_OPTS=\"-Djava.security.auth.login.config={{mapred_jaas_file}} -Djavax.security.auth.useSubjectCredsOnly=false\"\n{% endif %}\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\nexport HADOOP_OPTS=\"-Djava.io.tmpdir={{hadoop_java_io_tmpdir}} $HADOOP_OPTS\"\nexport JAVA_LIBRARY_PATH=\"${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}\"" } } },
{ "activity-zeppelin-interpreter" : { "properties_attributes" : { }, "properties" : { "activity-zeppelin-interpreter-content" : "{\n \"interpreterSettings\": {\n \"2BJB693M8\": {\n \"id\": \"2BJB693M8\",\n \"name\": \"phoenix\",\n \"group\": \"phoenix\",\n \"properties\": {\n \"phoenix.jdbc.url\": \"{{activity_explorer_jdbc_url}}\",\n \"phoenix.user\": \"\",\n \"phoenix.password\": \"\",\n \"phoenix.max.result\": \"1000\",\n \"phoenix.driver.name\": \"org.apache.phoenix.jdbc.PhoenixDriver\"\n },\n \"interpreterGroup\": [\n {\n \"class\": \"org.apache.zeppelin.phoenix.PhoenixInterpreter\",\n \"name\": \"sql\"\n }\n ],\n \"dependencies\": [],\n \"option\": {\n \"remote\": true,\n \"perNoteSession\": false\n }\n }\n },\n\"interpreterBindings\": {\n \"2BNVQJUBK\": [\n \"2BJB693M8\"\n ],\n \"2BPD7951H\": [\n \"2BJB693M8\"\n ],\n \"2BQH91X36\": [\n \"2BJB693M8\"\n ],\n \"2BTCVPTMH\": [\n \"2BJB693M8\"\n ]\n },\n\"interpreterRepositories\": [\n {\n \"id\": \"central\",\n \"type\": \"default\",\n \"url\": \"http://repo1.maven.org/maven2/\",\n \"releasePolicy\": {\n \"enabled\": true,\n \"updatePolicy\": \"daily\",\n \"checksumPolicy\": \"warn\"\n },\n \"snapshotPolicy\": {\n \"enabled\": true,\n \"updatePolicy\": \"daily\",\n \"checksumPolicy\": \"warn\"\n },\n \"mirroredRepositories\": [],\n \"repositoryManager\": false\n }\n ]\n}" } } },
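The interpreter settings above are JSON serialized inside a JSON string, so reading them takes a second decode. A Python sketch (same hypothetical blueprint.json as earlier):

import json

with open("blueprint.json") as f:  # hypothetical file name
    blueprint = json.load(f)

for entry in blueprint["configurations"]:
    if "activity-zeppelin-interpreter" in entry:
        raw = entry["activity-zeppelin-interpreter"]["properties"][
            "activity-zeppelin-interpreter-content"]
        settings = json.loads(raw)  # second decode of the embedded document
        print(list(settings["interpreterSettings"]))  # ['2BJB693M8']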
ranger-nifi-policymgr-ssl "properties" : { "hide_yarn_memory_widget" : "false", "enable_external_ranger" : "false", "recovery_max_count" : "6", "manage_dirs_on_root" : "true", "managed_hdfs_resource_property_names" : "", "recovery_retry_interval" : "5", "stack_features" : "{\n \"HDP\": {\n \"stack_features\": [\n {\n \"name\": \"snappy\",\n \"description\": \"Snappy compressor/decompressor support\",\n \"min_version\": \"2.0.0.0\",\n \"max_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"lzo\",\n \"description\": \"LZO libraries support\",\n \"min_version\": \"2.2.1.0\"\n },\n {\n \"name\": \"express_upgrade\",\n \"description\": \"Express upgrade support\",\n \"min_version\": \"2.1.0.0\"\n },\n {\n \"name\": \"rolling_upgrade\",\n \"description\": \"Rolling upgrade support\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"kafka_acl_migration_support\",\n \"description\": \"ACL migration support\",\n \"min_version\": \"2.3.4.0\"\n },\n {\n \"name\": \"secure_zookeeper\",\n \"description\": \"Protect ZNodes with SASL acl in secure clusters\",\n \"min_version\": \"2.6.0.0\"\n },\n {\n \"name\": \"config_versioning\",\n \"description\": \"Configurable versions support\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"datanode_non_root\",\n \"description\": \"DataNode running as non-root support (AMBARI-7615)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"remove_ranger_hdfs_plugin_env\",\n \"description\": \"HDFS removes Ranger env files (AMBARI-14299)\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"ranger\",\n \"description\": \"Ranger Service support\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"ranger_tagsync_component\",\n \"description\": \"Ranger Tagsync component support (AMBARI-14383)\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"phoenix\",\n \"description\": \"Phoenix Service support\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"nfs\",\n \"description\": \"NFS support\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"tez_for_spark\",\n \"description\": \"Tez dependency for Spark\",\n \"min_version\": \"2.2.0.0\",\n \"max_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"timeline_state_store\",\n \"description\": \"Yarn application timeline-service supports state store property (AMBARI-11442)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"copy_tarball_to_hdfs\",\n \"description\": \"Copy tarball to HDFS support (AMBARI-12113)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"spark_16plus\",\n \"description\": \"Spark 1.6+\",\n \"min_version\": \"2.4.0.0\"\n },\n {\n \"name\": \"spark_thriftserver\",\n \"description\": \"Spark Thrift Server\",\n \"min_version\": \"2.3.2.0\"\n },\n {\n \"name\": \"storm_kerberos\",\n \"description\": \"Storm Kerberos support (AMBARI-7570)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"storm_ams\",\n \"description\": \"Storm AMS integration (AMBARI-10710)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"create_kafka_broker_id\",\n \"description\": \"Ambari should create Kafka Broker Id (AMBARI-12678)\",\n \"min_version\": \"2.2.0.0\",\n \"max_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"kafka_listeners\",\n \"description\": \"Kafka listeners (AMBARI-10984)\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"kafka_kerberos\",\n \"description\": \"Kafka Kerberos support (AMBARI-10984)\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"pig_on_tez\",\n \"description\": \"Pig on Tez support (AMBARI-7863)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n 
\"name\": \"ranger_usersync_non_root\",\n \"description\": \"Ranger Usersync as non-root user (AMBARI-10416)\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"ranger_audit_db_support\",\n \"description\": \"Ranger Audit to DB support\",\n \"min_version\": \"2.2.0.0\",\n \"max_version\": \"2.4.99.99\"\n },\n {\n \"name\": \"accumulo_kerberos_user_auth\",\n \"description\": \"Accumulo Kerberos User Auth (AMBARI-10163)\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"knox_versioned_data_dir\",\n \"description\": \"Use versioned data dir for Knox (AMBARI-13164)\",\n \"min_version\": \"2.3.2.0\"\n },\n {\n \"name\": \"knox_sso_topology\",\n \"description\": \"Knox SSO Topology support (AMBARI-13975)\",\n \"min_version\": \"2.3.8.0\"\n },\n {\n \"name\": \"atlas_rolling_upgrade\",\n \"description\": \"Rolling upgrade support for Atlas\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"oozie_admin_user\",\n \"description\": \"Oozie install user as an Oozie admin user (AMBARI-7976)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"oozie_create_hive_tez_configs\",\n \"description\": \"Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"oozie_setup_shared_lib\",\n \"description\": \"Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"oozie_host_kerberos\",\n \"description\": \"Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)\",\n \"min_version\": \"2.0.0.0\"\n },\n {\n \"name\": \"falcon_extensions\",\n \"description\": \"Falcon Extension\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"hive_metastore_upgrade_schema\",\n \"description\": \"Hive metastore upgrade schema support (AMBARI-11176)\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"hive_server_interactive\",\n \"description\": \"Hive server interactive support (AMBARI-15573)\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"hive_webhcat_specific_configs\",\n \"description\": \"Hive webhcat specific configurations support (AMBARI-12364)\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"hive_purge_table\",\n \"description\": \"Hive purge table support (AMBARI-12260)\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"hive_server2_kerberized_env\",\n \"description\": \"Hive server2 working on kerberized environment (AMBARI-13749)\",\n \"min_version\": \"2.2.3.0\",\n \"max_version\": \"2.2.5.0\"\n },\n {\n \"name\": \"hive_env_heapsize\",\n \"description\": \"Hive heapsize property defined in hive-env (AMBARI-12801)\",\n \"min_version\": \"2.2.0.0\"\n },\n {\n \"name\": \"ranger_kms_hsm_support\",\n \"description\": \"Ranger KMS HSM support (AMBARI-15752)\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"ranger_log4j_support\",\n \"description\": \"Ranger supporting log-4j properties (AMBARI-15681)\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"ranger_kerberos_support\",\n \"description\": \"Ranger Kerberos support\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"hive_metastore_site_support\",\n \"description\": \"Hive Metastore site support\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"ranger_usersync_password_jceks\",\n \"description\": \"Saving Ranger Usersync credentials in jceks\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"ranger_install_infra_client\",\n \"description\": \"Ambari Infra Service support\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": 
\"falcon_atlas_support_2_3\",\n \"description\": \"Falcon Atlas integration support for 2.3 stack\",\n \"min_version\": \"2.3.99.0\",\n \"max_version\": \"2.4.0.0\"\n },\n {\n \"name\": \"falcon_atlas_support\",\n \"description\": \"Falcon Atlas integration\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"hbase_home_directory\",\n \"description\": \"Hbase home directory in HDFS needed for HBASE backup\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"spark_livy\",\n \"description\": \"Livy as slave component of spark\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"spark_livy2\",\n \"description\": \"Livy as slave component of spark\",\n \"min_version\": \"2.6.0.0\"\n },\n {\n \"name\": \"atlas_ranger_plugin_support\",\n \"description\": \"Atlas Ranger plugin support\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"atlas_conf_dir_in_path\",\n \"description\": \"Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon\",\n \"min_version\": \"2.3.0.0\",\n \"max_version\": \"2.4.99.99\"\n },\n {\n \"name\": \"atlas_upgrade_support\",\n \"description\": \"Atlas supports express and rolling upgrades\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"atlas_hook_support\",\n \"description\": \"Atlas support for hooks in Hive, Storm, Falcon, and Sqoop\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"ranger_pid_support\",\n \"description\": \"Ranger Service support pid generation AMBARI-16756\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"ranger_kms_pid_support\",\n \"description\": \"Ranger KMS Service support pid generation\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"ranger_admin_password_change\",\n \"description\": \"Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"ranger_setup_db_on_start\",\n \"description\": \"Allows setup of ranger db and java patches to be called multiple times on each START\",\n \"min_version\": \"2.6.0.0\"\n },\n {\n \"name\": \"storm_metrics_apache_classes\",\n \"description\": \"Metrics sink for Storm that uses Apache class names\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"spark_java_opts_support\",\n \"description\": \"Allow Spark to generate java-opts file\",\n \"min_version\": \"2.2.0.0\",\n \"max_version\": \"2.4.0.0\"\n },\n {\n \"name\": \"atlas_hbase_setup\",\n \"description\": \"Use script to create Atlas tables in Hbase and set permissions for Atlas user.\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"ranger_hive_plugin_jdbc_url\",\n \"description\": \"Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"zkfc_version_advertised\",\n \"description\": \"ZKFC advertise version\",\n \"min_version\": \"2.5.0.0\"\n },\n {\n \"name\": \"phoenix_core_hdfs_site_required\",\n \"description\": \"HDFS and CORE site required for Phoenix\",\n \"max_version\": \"2.5.9.9\"\n },\n {\n \"name\": \"ranger_tagsync_ssl_xml_support\",\n \"description\": \"Ranger Tagsync ssl xml support.\",\n \"min_version\": \"2.6.0.0\"\n },\n {\n \"name\": \"ranger_xml_configuration\",\n \"description\": \"Ranger code base support xml configurations\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"kafka_ranger_plugin_support\",\n \"description\": \"Ambari stack changes for Ranger Kafka Plugin (AMBARI-11299)\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"yarn_ranger_plugin_support\",\n 
\"description\": \"Implement Stack changes for Ranger Yarn Plugin integration (AMBARI-10866)\",\n \"min_version\": \"2.3.0.0\"\n },\n {\n \"name\": \"ranger_solr_config_support\",\n \"description\": \"Showing Ranger solrconfig.xml on UI\",\n \"min_version\": \"2.6.0.0\"\n },\n {\n \"name\": \"hive_interactive_atlas_hook_required\",\n \"description\": \"Registering Atlas Hook for Hive Interactive.\",\n \"min_version\": \"2.6.0.0\"\n },\n {\n \"name\": \"core_site_for_ranger_plugins\",\n \"description\": \"Adding core-site.xml in when Ranger plugin is enabled for Storm, Kafka, and Knox.\",\n \"min_version\": \"2.6.0.0\"\n },\n {\n \"name\": \"atlas_install_hook_package_support\",\n \"description\": \"Stop installing packages from 2.6\",\n \"max_version\": \"2.5.9.9\"\n },\n {\n \"name\": \"atlas_hdfs_site_on_namenode_ha\",\n \"description\": \"Need to create hdfs-site under atlas-conf dir when Namenode-HA is enabled.\",\n \"min_version\": \"2.6.0.0\"\n },\n {\n \"name\": \"hive_interactive_ga\",\n \"description\": \"Hive Interactive GA support\",\n \"min_version\": \"2.6.0.0\"\n },\n {\n \"name\": \"secure_ranger_ssl_password\",\n \"description\": \"Securing Ranger Admin and Usersync SSL and Trustore related passwords in jceks\",\n \"min_version\": \"2.6.0.0\"\n },\n {\n \"name\": \"ranger_kms_ssl\",\n \"description\": \"Ranger KMS SSL properties in ambari stack\",\n \"min_version\": \"2.6.0.0\"\n },\n {\n \"name\": \"nifi_encrypt_config\",\n \"description\": \"Encrypt sensitive properties written to nifi property file\",\n \"min_version\": \"2.6.0.0\"\n },\n {\n \"name\": \"toolkit_config_update\",\n \"description\": \"Support separate input and output for toolkit configuration\",\n \"min_version\": \"2.6.0.0\"\n },\n {\n \"name\": \"admin_toolkit_support\",\n \"description\": \"Supports the nifi admin toolkit\",\n \"min_version\": \"2.6.0.0\"\n },\n {\n \"name\": \"tls_toolkit_san\",\n \"description\": \"Support subject alternative name flag\",\n \"min_version\": \"2.6.0.0\"\n },\n {\n \"name\": \"nifi_jaas_conf_create\",\n \"description\": \"Create NIFI jaas configuration when kerberos is enabled\",\n \"min_version\": \"2.6.0.0\"\n },\n {\n \"name\": \"atlas_core_site_support\",\n \"description\": \"Need to create core-site under Atlas conf directory.\",\n \"min_version\": \"2.6.0.0\"\n },\n {\n \"name\": \"hadoop_custom_extensions\",\n \"description\": \"Support hadoop custom extensions\",\n \"min_version\": \"2.6.0.0\"\n }\n ]\n }\n}", "stack_tools" : "{\n \"HDP\": {\n \"stack_selector\": [\n \"hdp-select\",\n \"/usr/bin/hdp-select\",\n \"hdp-select\"\n ],\n \"conf_selector\": [\n \"conf-select\",\n \"/usr/bin/conf-select\",\n \"conf-select\"\n ]\n }\n}", "repo_ubuntu_template" : "{{package_type}} {{base_url}} {{components}}", ranger-nifi-policymgr-ssl How to use it? ranger-nifi-policymgr-ssl "stack_root" : "{\"HDP\":\"/usr/hdp\"}", "fetch_nonlocal_groups" : "true", "one_dir_per_partition" : "false", "kerberos_domain" : "EXAMPLE.COM", "ignore_bad_mounts" : "false", "user_group" : "hadoop", "sysprep_skip_setup_jce" : "false", "override_uid" : "true", "security_enabled" : "false", "sysprep_skip_copy_fast_jar_hdfs" : "false", ranger-nifi-policymgr-ssl How to get it for free? 
ranger-nifi-policymgr-ssl "agent_mounts_ignore_list" : "", "recovery_lifetime_max_count" : "1024", "recovery_type" : "AUTO_START", "repo_suse_rhel_template" : "[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0", "sysprep_skip_create_users_and_groups" : "false", "sysprep_skip_copy_tarballs_hdfs" : "false", "sysprep_skip_copy_oozie_share_lib_to_hdfs" : "false", "recovery_enabled" : "true", "smokeuser" : "ambari-qa", "smokeuser_keytab" : "/etc/security/keytabs/smokeuser.headless.keytab", ranger-nifi-policymgr-ssl How to get it? ranger-nifi-policymgr-ssl "alerts_repeat_tolerance" : "1", "recovery_window_in_minutes" : "60", "stack_name" : "HDP", "ignore_groupsusers_create" : "false" } } }, { "hive-env" : { "properties_attributes" : { }, ranger-nifi-policymgr-ssl How to get it for free? ranger-nifi-policymgr-ssl "properties" : { "webhcat_user" : "hcat", "hive_log_dir" : "/var/log/hive", "hive_security_authorization" : "None", "hive_user_nproc_limit" : "16000", "content" : "\nexport HADOOP_USER_CLASSPATH_FIRST=true #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB\nif [ \"$SERVICE\" = \"cli\" ]; then\n if [ -z \"$DEBUG\" ]; then\n export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit\"\n else\n export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n fi\nfi\n\n# The heap size of the jvm stared by hive shell script can be controlled via:\n\nif [ \"$SERVICE\" = \"metastore\" ]; then\n export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore\nelse\n export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\nfi\n\nexport HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS -Xmx${HADOOP_HEAPSIZE}m\"\nexport HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS{{heap_dump_opts}}\"\n\n# Larger heap size may be required when running queries over large number of files or partitions.\n# By default hive shell scripts use a heap size of 256 (MB). 
Larger heap size would also be\n# appropriate for hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\nexport HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}}\n\n# Hive Configuration Directory can be controlled by:\nexport HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}\n\n# Folder containing extra libraries required for hive compilation/execution can be controlled by:\nexport HIVE_AUX_JARS_PATH={{stack_root}}/current/ext/hive\nif [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n if [ -f \"${HIVE_AUX_JARS_PATH}\" ] || [ -d \"${HIVE_AUX_JARS_PATH}\" ] ; then\n export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\n elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n fi\nelif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\nfi\n\nexport METASTORE_PORT={{hive_metastore_port}}\n\n{% if sqla_db_used or lib_dir_available %}\nexport LD_LIBRARY_PATH=\"$LD_LIBRARY_PATH:{{jdbc_libs_dir}}\"\nexport JAVA_LIBRARY_PATH=\"$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}\"\n{% endif %}", "hive.atlas.hook" : "false", "hive_txn_acid" : "off", "hive.client.heapsize" : "1024", "hive.heapsize" : "3741", ranger-nifi-policymgr-ssl How to get it for free? ranger-nifi-policymgr-ssl "hive_database_type" : "mysql", "hive_user_nofile_limit" : "32000", "alert_ldap_username" : "", "hcat_pid_dir" : "/var/run/webhcat", "heap_dump_location" : "/tmp", "hive_database" : "New MySQL Database", "hcat_log_dir" : "/var/log/webhcat", "hive_exec_orc_storage_strategy" : "SPEED", "hive_user" : "hive", "hive.metastore.heapsize" : "1247", ranger-nifi-policymgr-ssl PasteShr ranger-nifi-policymgr-ssl "hive_timeline_logging_enabled" : "true", "hive.log.level" : "INFO", "hive_database_name" : "hive", "enable_heap_dump" : "false", "hive_pid_dir" : "/var/run/hive", "hcat_user" : "hcat", "hive_ambari_database" : "MySQL" } } }, ranger-nifi-policymgr-ssl How to dowload it? ranger-nifi-policymgr-ssl { "ams-grafana-env" : { "properties_attributes" : { }, "properties" : { "metrics_grafana_log_dir" : "/var/log/ambari-metrics-grafana", "metrics_grafana_pid_dir" : "/var/run/ambari-metrics-grafana", "content" : "\n# Set environment variables here.\n\n# AMS UI Server Home Dir\nexport AMS_GRAFANA_HOME_DIR={{ams_grafana_home_dir}}\n\n# AMS UI Server Data Dir\nexport AMS_GRAFANA_DATA_DIR={{ams_grafana_data_dir}}\n\n# AMS UI Server Log Dir\nexport AMS_GRAFANA_LOG_DIR={{ams_grafana_log_dir}}\n\n# AMS UI Server PID Dir\nexport AMS_GRAFANA_PID_DIR={{ams_grafana_pid_dir}}", "metrics_grafana_data_dir" : "/var/lib/ambari-metrics-grafana", "metrics_grafana_username" : "admin" } ranger-nifi-policymgr-ssl How to get it for free? ranger-nifi-policymgr-ssl } }, { "hive-interactive-site" : { "properties_attributes" : { }, "properties" : { "hive.vectorized.groupby.maxentries" : "1000000", "hive.llap.client.consistent.splits" : "true", "hive.llap.daemon.yarn.container.mb" : "0", "hive.llap.io.memory.size" : "0", ranger-nifi-policymgr-ssl How to dowload it? 
ranger-nifi-policymgr-ssl "hive.llap.daemon.am.liveness.heartbeat.interval.ms" : "10000ms", "hive.llap.task.scheduler.locality.delay" : "-1", "hive.vectorized.execution.reduce.enabled" : "true", "hive.llap.daemon.rpc.port" : "0", "hive.execution.mode" : "llap", "hive.exec.orc.split.strategy" : "HYBRID", "hive.llap.io.enabled" : "true", "hive.vectorized.execution.mapjoin.native.fast.hashtable.enabled" : "true", "hive.optimize.dynamic.partition.hashjoin" : "true", "hive.mapjoin.hybridgrace.hashtable" : "false", ranger-nifi-policymgr-ssl How to use it? ranger-nifi-policymgr-ssl "hive.execution.engine" : "tez", "hive.llap.object.cache.enabled" : "true", "hive.llap.daemon.queue.name" : "default", "hive.tez.container.size" : "682", "hive.metastore.uris" : "", "hive.server2.tez.sessions.restricted.configs" : "hive.execution.mode,hive.execution.engine", "hive.tez.cartesian-product.enabled" : "true", "hive.llap.auto.allow.uber" : "false", "hive.server2.webui.port" : "10502", "hive.llap.daemon.num.executors" : "1", ranger-nifi-policymgr-ssl How to get it? ranger-nifi-policymgr-ssl "hive.llap.daemon.task.scheduler.enable.preemption" : "true", "hive.llap.io.use.lrfu" : "true", "hive.vectorized.execution.mapjoin.native.enabled" : "true", "hive.metastore.event.listeners" : "", "hive.server2.tez.default.queues" : "default", "hive.llap.zk.sm.connectionString" : "%HOSTGROUP::host_group_1%:2181", "hive.server2.tez.sessions.custom.queue.allowed" : "ignore", "hive.server2.tez.sessions.per.default.queue" : "1", "hive.server2.webui.use.ssl" : "false", "hive.llap.management.rpc.port" : "15004", ranger-nifi-policymgr-ssl PasteShr ranger-nifi-policymgr-ssl "hive.server2.thrift.http.port" : "10501", "hive.map.aggr.hash.min.reduction" : "0.99", "hive.merge.nway.joins" : "false", "hive.llap.daemon.logger" : "query-routing", "hive.prewarm.enabled" : "false", "hive.auto.convert.join.noconditionaltask.size" : "1000000000", "llap.shuffle.connection-keep-alive.timeout" : "60", "hive.llap.execution.mode" : "only", "hive.llap.daemon.yarn.shuffle.port" : "15551", "hive.llap.enable.grace.join.in.llap" : "false", ranger-nifi-policymgr-ssl PasteShr ranger-nifi-policymgr-ssl "hive.llap.daemon.vcpus.per.instance" : "${hive.llap.daemon.num.executors}", "hive.llap.io.memory.mode" : "", "hive.server2.thrift.port" : "10500", "hive.driver.parallel.compilation" : "true", "hive.tez.exec.print.summary" : "true", "hive.tez.input.generate.consistent.splits" : "true", "hive.vectorized.execution.mapjoin.minmax.enabled" : "true", "hive.server2.enable.doAs" : "false", "hive.server2.zookeeper.namespace" : "hiveserver2-hive2", "hive.tez.bucket.pruning" : "true", ranger-nifi-policymgr-ssl How to get it for free? ranger-nifi-policymgr-ssl "hive.llap.io.threadpool.size" : "2", "llap.shuffle.connection-keep-alive.enable" : "true", "hive.llap.daemon.service.hosts" : "@llap0", "hive.server2.tez.initialize.default.sessions" : "true" } } }, { "ranger-hive-audit" : { "properties_attributes" : { }, ranger-nifi-policymgr-ssl PasteShr ranger-nifi-policymgr-ssl "properties" : { } } }, { "ranger-hive-security" : { "properties_attributes" : { }, "properties" : { } } }, { ranger-nifi-policymgr-ssl PasteShr ranger-nifi-policymgr-ssl "slider-log4j" : { "properties_attributes" : { }, "properties" : { "content" : "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. 
The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# Define some default values that can be overridden by system properties\nlog4j.rootLogger=INFO,stdout\nlog4j.threshhold=ALL\nlog4j.appender.stdout=org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n\n# log layout skips stack-trace creation operations by avoiding line numbers and method\nlog4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n\n\n# debug edition is much more expensive\n#log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\nlog4j.appender.subprocess=org.apache.log4j.ConsoleAppender\nlog4j.appender.subprocess.layout=org.apache.log4j.PatternLayout\nlog4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n\n#log4j.logger.org.apache.slider.yarn.appmaster.SliderAppMasterer.master=INFO,subprocess\n\n# for debugging Slider\n#log4j.logger.org.apache.slider=DEBUG\n#log4j.logger.org.apache.slider=DEBUG\n\n# uncomment to debug service lifecycle issues\n#log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG\n#log4j.logger.org.apache.hadoop.yarn.service=DEBUG\n\n# uncomment for YARN operations\n#log4j.logger.org.apache.hadoop.yarn.client=DEBUG\n\n# uncomment this to debug security problems\n#log4j.logger.org.apache.hadoop.security=DEBUG\n\n#crank back on some noise\nlog4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR\nlog4j.logger.org.apache.hadoop.hdfs=WARN\n\n\nlog4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN\nlog4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN\nlog4j.logger.org.apache.zookeeper=WARN" } } }, { "product-info" : { "properties_attributes" : { }, ranger-nifi-policymgr-ssl How to dowload it? ranger-nifi-policymgr-ssl "properties" : { "product-info-content" : "\n{\n \"schemaVersion\" : \"1.0.0\",\n \"productId\": \"{{stackName}}\",\n \"componentId\": \"{{stackName}}\",\n \"productVersion\" : \"{{stackVersion}}\",\n \"type\":\"cluster\",\n \"instanceInfo\": {\n \"guid\": \"\",\n \"parentGuid\": \"\",\n \"name\":\"{{clusterName}}\",\n \"flexSubscriptionId\": \"{{flexSubscriptionId}}\",\n \"provider\": \"\",\n \"region\": \"\"\n }\n}" } } } ], "host_groups" : [ { "components" : [ { ranger-nifi-policymgr-ssl How to get it? ranger-nifi-policymgr-ssl "name" : "NODEMANAGER" }, { "name" : "HIVE_SERVER" }, { "name" : "METRICS_MONITOR" }, { "name" : "NFS_GATEWAY" ranger-nifi-policymgr-ssl How to get it for free? ranger-nifi-policymgr-ssl }, { "name" : "HIVE_METASTORE" }, { "name" : "TEZ_CLIENT" }, { "name" : "ZOOKEEPER_CLIENT" }, ranger-nifi-policymgr-ssl How to use it? ranger-nifi-policymgr-ssl { "name" : "HCAT" }, { "name" : "WEBHCAT_SERVER" }, { "name" : "ACTIVITY_ANALYZER" }, { ranger-nifi-policymgr-ssl How to use it? ranger-nifi-policymgr-ssl "name" : "SECONDARY_NAMENODE" }, { "name" : "HST_AGENT" }, { "name" : "SLIDER" }, { "name" : "ZOOKEEPER_SERVER" ranger-nifi-policymgr-ssl How to use it? 
}, { "name" : "METRICS_COLLECTOR" }, { "name" : "METRICS_GRAFANA" }, { "name" : "YARN_CLIENT" }, { "name" : "HDFS_CLIENT" }, { "name" : "HST_SERVER" }, { "name" : "MYSQL_SERVER" }, { "name" : "HISTORYSERVER" }, { "name" : "NAMENODE" }, { "name" : "PIG" }, { "name" : "ACTIVITY_EXPLORER" }, { "name" : "MAPREDUCE2_CLIENT" }, { "name" : "AMBARI_SERVER" }, { "name" : "DATANODE" }, { "name" : "APP_TIMELINE_SERVER" }, { "name" : "HIVE_CLIENT" }, { "name" : "RESOURCEMANAGER" } ], "configurations" : [ ], "name" : "host_group_1", "cardinality" : "1" } ], "settings" : [ { "recovery_settings" : [ { "recovery_enabled" : "true" } ] }, { "service_settings" : [ { "name" : "HIVE", "credential_store_enabled" : "true" }, { "recovery_enabled" : "true", "name" : "AMBARI_METRICS" } ] }, { "component_settings" : [ { "recovery_enabled" : "true", "name" : "METRICS_COLLECTOR" } ] } ], "Blueprints" : { "stack_name" : "HDP", "stack_version" : "2.6" } }
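
A blueprint export like the one above is consumed through Ambari's blueprint REST API: the blueprint is registered under a name, then a cluster is instantiated from it with a creation template that maps each host group to concrete hosts (Ambari resolves tokens such as %HOSTGROUP::host_group_1% in properties like hive.llap.zk.sm.connectionString at that point). Below is a minimal Python sketch of that flow; the server URL, credentials, blueprint/cluster names, file name, and host FQDN are illustrative assumptions, not part of the export.

# Minimal sketch: register the blueprint and create a cluster from it.
# Assumes an Ambari server at http://localhost:8080 with default
# admin/admin credentials and the cleaned JSON saved as blueprint.json.
import json
import requests

AMBARI = "http://localhost:8080/api/v1"   # assumed Ambari server
AUTH = ("admin", "admin")                  # assumed default credentials
HEADERS = {"X-Requested-By": "ambari"}     # header required by the Ambari API

# 1. Register the blueprint under a name of your choosing.
with open("blueprint.json") as f:          # assumed file holding the JSON above
    blueprint = json.load(f)
resp = requests.post(f"{AMBARI}/blueprints/hdp26-llap",
                     json=blueprint, auth=AUTH, headers=HEADERS)
resp.raise_for_status()

# 2. Instantiate a cluster with a creation template that maps the
#    blueprint's single host group (cardinality "1") to a real host.
template = {
    "blueprint": "hdp26-llap",
    "default_password": "changeme",        # assumed; seeds required service passwords
    "host_groups": [
        {"name": "host_group_1", "hosts": [{"fqdn": "node1.example.com"}]}
    ],
}
resp = requests.post(f"{AMBARI}/clusters/mycluster",
                     json=template, auth=AUTH, headers=HEADERS)
resp.raise_for_status()
print(resp.json())  # returns a request resource to poll for provisioning progress

The second POST returns immediately; provisioning itself runs asynchronously, so the returned request resource is what you poll to watch installation and start of the components listed in host_group_1.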