This article collects typical usage examples of the Python function execo_engine.logger.warn. If you are wondering how the warn function works, how to call it, or what real code using it looks like, the curated examples below should help.
The following shows 20 code examples of the warn function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
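Before the examples, here is a minimal sketch of the call they all build on. It assumes the execo/execo_engine package is installed; execo_engine exposes a module-level logger (a standard logging.Logger), so warn behaves like logging.Logger.warning:

# Minimal sketch, assuming execo_engine is installed.
from execo_engine import logger

logger.warn("Local conf dir does not exist. Using default configuration")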
Example 1: _initialize_conf

def _initialize_conf(self):
    """Merge locally-specified configuration files with default files
    from the distribution."""

    if os.path.exists(self.local_base_conf_dir):
        base_conf_files = [os.path.join(self.local_base_conf_dir, f)
                           for f in os.listdir(self.local_base_conf_dir)]
        for f in base_conf_files:
            shutil.copy(f, self.init_conf_dir)
    else:
        logger.warn(
            "Local conf dir does not exist. Using default configuration")
        base_conf_files = []

    missing_conf_files = self.conf_mandatory_files
    for f in base_conf_files:
        f_base_name = os.path.basename(f)
        if f_base_name in missing_conf_files:
            missing_conf_files.remove(f_base_name)

    logger.info("Copying missing conf files from master: " + str(
        missing_conf_files))

    remote_missing_files = [os.path.join(self.conf_dir, f)
                            for f in missing_conf_files]

    action = Get([self.master], remote_missing_files, self.init_conf_dir)
    action.run()

Developer: mliroz | Project: bigdata_dpy | Lines: 28 | Source: cluster.py
Example 2: __init__

def __init__(self, hosts, topo_list=None):
    """Create a Hadoop topology object assigning each host to the
    corresponding rack.

    Args:
      hosts (list of Host):
        The hosts to be assigned a topology.
      topo_list (list of str, optional):
        The racks to be assigned to each host. len(hosts) should be equal
        to len(topo_list).
    """

    if topo_list:
        if len(hosts) == len(topo_list):
            self.topology = topo_list
            return
        else:
            logger.warn("hosts and topology have not the same length.")

    logger.info("Discovering topology automatically")
    self.topology = {}
    for h in hosts:
        nw_adapters = get_host_attributes(h)[u'network_adapters']
        for nwa in nw_adapters:
            if (u'network_address' in nwa and
                    nwa[u'network_address'] == h.address):
                self.topology[h] = "/" + nwa[u'switch']
                break

Developer: djamelinfo | Project: hadoop_g5k | Lines: 28 | Source: objects.py
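The interesting part of this example is the guard at the top: an explicit rack list is only trusted when its length matches the host list; otherwise a warning is emitted and automatic discovery takes over. A minimal standalone sketch of that validate-or-fall-back pattern (plain strings instead of hadoop_g5k Host objects; names are illustrative, not part of the library):

from execo_engine import logger

def pick_topology(hosts, topo_list=None):
    # Use the explicit rack list only if it maps one-to-one onto the hosts.
    if topo_list:
        if len(hosts) == len(topo_list):
            return dict(zip(hosts, topo_list))
        logger.warn("hosts and topology have not the same length.")
    # Fall back to a default rack for every host.
    return {h: "/default-rack" for h in hosts}

racks = pick_topology(["node-1", "node-2"], ["/rack1"])  # mismatch -> warning + fallback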
Example 3: _copy_base_conf

def _copy_base_conf(self):
    """Copy base configuration files to tmp dir."""

    self.temp_conf_dir = tempfile.mkdtemp("", "hadoop-", "/tmp")
    if os.path.exists(self.local_base_conf_dir):
        base_conf_files = [os.path.join(self.local_base_conf_dir, f)
                           for f in os.listdir(self.local_base_conf_dir)]
        for f in base_conf_files:
            shutil.copy(f, self.temp_conf_dir)
    else:
        logger.warn(
            "Local conf dir does not exist. Using default configuration")
        base_conf_files = []

    mandatory_files = [CORE_CONF_FILE, HDFS_CONF_FILE, MR_CONF_FILE]

    missing_conf_files = mandatory_files
    for f in base_conf_files:
        f_base_name = os.path.basename(f)
        if f_base_name in missing_conf_files:
            missing_conf_files.remove(f_base_name)

    logger.info("Copying missing conf files from master: " + str(
        missing_conf_files))

    remote_missing_files = [os.path.join(self.conf_dir, f)
                            for f in missing_conf_files]

    action = Get([self.master], remote_missing_files, self.temp_conf_dir)
    action.run()

Developer: lmolina | Project: hadoop_g5k | Lines: 30 | Source: cluster.py
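Both this example and Example 1 compute which mandatory files were not found locally by removing basenames from the mandatory list; whatever remains is then pulled from the master with Get. The bookkeeping can be shown standalone with only the standard library (file and directory names are illustrative):

import os

mandatory_files = ["core-site.xml", "hdfs-site.xml", "mapred-site.xml"]
base_conf_files = ["/home/me/conf/core-site.xml", "/home/me/conf/hdfs-site.xml"]

missing_conf_files = list(mandatory_files)
for f in base_conf_files:
    f_base_name = os.path.basename(f)
    if f_base_name in missing_conf_files:
        missing_conf_files.remove(f_base_name)

print(missing_conf_files)  # ['mapred-site.xml'] -> would be fetched from the master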
Example 4: __init__

def __init__(self, jar_path, params=None, lib_paths=None):
    """Creates a new Hadoop MapReduce jar job with the given parameters.

    Args:
      jar_path (str):
        The local path of the jar containing the job.
      params (list of str, optional):
        The list of parameters of the job.
      lib_paths (list of str, optional):
        The list of local paths to the libraries used by the job.
    """

    if not params:
        params = []
    if not lib_paths:
        lib_paths = []

    # Check if the jar file exists
    if not os.path.exists(jar_path):
        logger.error("Jar file " + jar_path + " does not exist")
        raise HadoopJobException("Jar file " + jar_path + " does not exist")

    # Check if the libraries exist
    for lp in lib_paths:
        if not os.path.exists(lp):
            logger.warn("Lib file " + lp + " does not exist")
            return  # TODO - exception

    self.jar_path = jar_path
    self.params = params
    self.lib_paths = lib_paths

Developer: mliroz | Project: bigdata_dpy | Lines: 31 | Source: objects.py
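Note how the constructor treats the two checks differently: a missing jar is fatal (logger.error followed by an exception), while a missing library only triggers logger.warn and an early return, as the TODO acknowledges. A standalone sketch of that error/warn split (a hypothetical helper, not part of hadoop_g5k):

import os
from execo_engine import logger

def check_job_files(jar_path, lib_paths=()):
    # Fatal: the jar is the job itself.
    if not os.path.exists(jar_path):
        logger.error("Jar file " + jar_path + " does not exist")
        raise IOError("Jar file " + jar_path + " does not exist")
    # Non-fatal: warn about missing libraries and report the problem.
    for lp in lib_paths:
        if not os.path.exists(lp):
            logger.warn("Lib file " + lp + " does not exist")
            return False
    return True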
Example 5: clean

def clean(self):
    """Remove all files created by Cassandra."""

    if self.running:
        logger.warn("The cluster needs to be stopped before cleaning.")
        self.stop()

    self.clean_logs()

Developer: mliroz | Project: bigdata_dpy | Lines: 8 | Source: cassandra.py
Example 6: execute_job

def execute_job(self, job, node=None, verbose=True):
    """Execute the given Spark job in the specified node.

    Args:
      job (SparkJob):
        The job object.
      node (Host, optional):
        The host where the command should be executed. If not provided,
        self.master is chosen.
      verbose (bool, optional):
        If True, stdout and stderr of the remote process are displayed.

    Returns (tuple of str):
      A tuple with the standard and error outputs of the process executing
      the job.
    """

    if not self.running:
        logger.warn("The cluster was stopped. Starting it automatically")
        self.start()

    if node is None:
        node = self.master

    exec_dir = "/tmp"

    # Copy necessary files to cluster
    files_to_copy = job.get_files_to_copy()
    action = Put([node], files_to_copy, exec_dir)
    action.run()

    # Get command
    command = job.get_command(exec_dir)

    # Execute
    logger.info("Executing spark job. Command = {" + self.bin_dir +
                "/spark-submit " + command + "} in " + str(node))

    proc = SshProcess(self.bin_dir + "/spark-submit " + command, node)

    if verbose:
        red_color = '\033[01;31m'
        proc.stdout_handlers.append(sys.stdout)
        proc.stderr_handlers.append(
            ColorDecorator(sys.stderr, red_color))

    proc.start()
    proc.wait()

    # Get job info
    job.stdout = proc.stdout
    job.stderr = proc.stderr
    job.success = (proc.exit_code == 0)

    return proc.stdout, proc.stderr

Developer: djamelinfo | Project: hadoop_g5k | Lines: 56 | Source: spark.py
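When verbose is set, this example (and Example 19 below) attaches sys.stdout as a stdout handler and wraps sys.stderr in execo's ColorDecorator with an ANSI escape, so output from the remote process is echoed locally and its stderr shows up in red. The escape itself is plain ANSI and can be tried without execo (an illustrative snippet, not the ColorDecorator implementation):

import sys

RED = '\033[01;31m'    # same bold-red escape used in the example
RESET = '\033[0m'

# Write one line to stderr in the color the examples use for remote stderr.
sys.stderr.write(RED + "Error while stopping YARN" + RESET + "\n")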
Example 7: clean

def clean(self):
    """Remove all files created by Spark."""

    if self.running:
        logger.warn("The cluster needs to be stopped before cleaning.")
        self.stop()

    self.clean_conf()
    self.clean_logs()

    self.initialized = False

Developer: djamelinfo | Project: hadoop_g5k | Lines: 11 | Source: spark.py
Example 8: _create_warehouse

def _create_warehouse(self):
    """Create the Hive warehouse directories in HDFS."""

    if not self.hc.running:
        logger.warn("Hadoop must be started first")
        self.hc.start_and_wait()

    logger.info("Creating warehouse dirs in HDFS")
    self.hc.execute("fs -mkdir -p /tmp", verbose=False)
    self.hc.execute("fs -mkdir -p /user/hive/warehouse", verbose=False)
    self.hc.execute("fs -chmod g+w /tmp", verbose=False)
    self.hc.execute("fs -chmod g+w /user/hive/warehouse", verbose=False)

Developer: djamelinfo | Project: hadoop_g5k | Lines: 12 | Source: hive.py
Example 9: format_dfs

def format_dfs(self):
    """Format the distributed filesystem."""

    logger.info("Formatting HDFS")

    proc = SshProcess(self.bin_dir + "/hadoop namenode -format",
                      self.master)
    proc.run()

    if proc.finished_ok:
        logger.info("HDFS formatted successfully")
    else:
        logger.warn("Error while formatting HDFS")

Developer: lmolina | Project: hadoop_g5k | Lines: 13 | Source: cluster.py
Example 10: clean

def clean(self):
    """Remove all files created by Hadoop (logs, filesystem,
    temporary files)."""

    if self.running:
        logger.warn("The cluster needs to be stopped before cleaning.")
        self.stop()

    self.clean_conf()
    self.clean_logs()
    self.clean_data()

    self.initialized = False

Developer: lmolina | Project: hadoop_g5k | Lines: 13 | Source: cluster.py
Example 11: stop_yarn

def stop_yarn(self):
    """Stop the YARN ResourceManager and NodeManagers."""

    self._check_initialization()

    logger.info("Stopping YARN")

    proc = SshProcess(self.sbin_dir + "/stop-yarn.sh", self.master)
    proc.run()

    if not proc.finished_ok:
        logger.warn("Error while stopping YARN")
    else:
        self.running_yarn = False

Developer: mliroz | Project: bigdata_dpy | Lines: 14 | Source: cluster_v2.py
Example 12: _copy_conf

def _copy_conf(self, conf_dir, hosts=None):
    if not hosts:
        hosts = self.hosts

    conf_files = [os.path.join(conf_dir, f) for f in os.listdir(conf_dir)]

    action = TaktukPut(hosts, conf_files, self.conf_dir)
    action.run()

    if not action.finished_ok:
        logger.warn("Error while copying configuration")
        if not action.ended:
            action.kill()

Developer: mliroz | Project: bigdata_dpy | Lines: 14 | Source: mongodb.py
Example 13: stop_map_reduce

def stop_map_reduce(self):
    """Stop the JobTracker and TaskTrackers."""

    self._check_initialization()

    logger.info("Stopping MapReduce")

    proc = SshProcess(self.sbin_dir + "/stop-mapred.sh", self.master)
    proc.run()

    if not proc.finished_ok:
        logger.warn("Error while stopping MapReduce")
    else:
        self.running_map_reduce = False

Developer: lmolina | Project: hadoop_g5k | Lines: 14 | Source: cluster.py
Example 14: stop_dfs

def stop_dfs(self):
    """Stop the NameNode and DataNodes."""

    self._check_initialization()

    logger.info("Stopping HDFS")

    proc = SshProcess(self.sbin_dir + "/stop-dfs.sh", self.master)
    proc.run()

    if not proc.finished_ok:
        logger.warn("Error while stopping HDFS")
    else:
        self.running_dfs = False

Developer: lmolina | Project: hadoop_g5k | Lines: 14 | Source: cluster.py
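Examples 9, 11, 13, 14 and 18 all follow the same shape: run a Hadoop control script over SSH, then check finished_ok and either flip a state flag or emit a warning. The same shape can be tried locally with execo's Process instead of SshProcess, which avoids needing a Grid'5000 reservation (a sketch assuming execo is installed; the command is just a placeholder for the real stop script):

from execo import Process
from execo_engine import logger

proc = Process("false")   # placeholder for e.g. sbin_dir + "/stop-dfs.sh"
proc.run()

if not proc.finished_ok:
    logger.warn("Error while stopping HDFS")
else:
    print("stopped cleanly")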
Example 15: stop_spark

def stop_spark(self):
    """Stop Spark processes."""

    logger.info("Stopping Spark")

    if self.mode == STANDALONE_MODE:
        proc = SshProcess(self.sbin_dir + "/stop-slaves.sh;" +
                          self.sbin_dir + "/stop-master.sh;",
                          self.master)
        proc.run()
        if not proc.finished_ok:
            logger.warn("Error while stopping Spark")
            return

    self.running = False

Developer: djamelinfo | Project: hadoop_g5k | Lines: 15 | Source: spark.py
Example 16: start

def start(self):
    """Start Hive processes."""

    logger.info("Starting Hive")

    if self.running:
        logger.warn("Hive was already started")
        return

    if not self.hc.running:
        logger.warn("Hadoop must be started first")
        self.hc.start_and_wait()

    # Do nothing
    self.running = True

Developer: djamelinfo | Project: hadoop_g5k | Lines: 15 | Source: hive.py
Example 17: clean_logs

def clean_logs(self):
    """Remove all Hadoop logs."""

    logger.info("Cleaning logs")

    restart = False
    if self.running:
        logger.warn("The cluster needs to be stopped before cleaning.")
        self.stop()
        restart = True

    action = Remote("rm -rf " + self.logs_dir + "/*", self.hosts)
    action.run()

    if restart:
        self.start()

Developer: lmolina | Project: hadoop_g5k | Lines: 16 | Source: cluster.py
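clean_logs combines the stop-before-cleaning warning seen in Examples 5, 7 and 10 with a restart flag, so a running cluster is transparently stopped, wiped and brought back. The control flow, reduced to its skeleton (the cluster methods below are stand-ins for the real ones, not hadoop_g5k API):

from execo_engine import logger

def clean_logs(cluster):
    restart = False
    if cluster.running:
        logger.warn("The cluster needs to be stopped before cleaning.")
        cluster.stop()
        restart = True
    cluster.remove_log_files()   # stand-in for Remote("rm -rf " + logs_dir + "/*", hosts)
    if restart:
        cluster.start()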
Example 18: start_dfs_and_wait

def start_dfs_and_wait(self):
    """Start the NameNode and DataNodes and wait for safe mode to be off."""

    self._check_initialization()

    self.start_dfs()

    logger.info("Waiting for safe mode to be off")
    proc = SshProcess(self.bin_dir + "/hadoop dfsadmin -safemode wait",
                      self.master)
    proc.run()

    if not proc.finished_ok:
        logger.warn("Error while starting HDFS")
    else:
        self.running_dfs = True

Developer: lmolina | Project: hadoop_g5k | Lines: 16 | Source: cluster.py
Example 19: execute

def execute(self, command, node=None, should_be_running=True,
            verbose=True):
    """Execute the given Hadoop command in the given node.

    Args:
      command (str):
        The command to be executed.
      node (Host, optional):
        The host where the command should be executed. If not provided,
        self.master is chosen.
      should_be_running (bool, optional):
        True if the cluster needs to be running in order to execute the
        command. If so, and it is not running, it is automatically started.
      verbose (bool, optional):
        If True, stdout and stderr of the remote process are displayed.

    Returns (tuple of str):
      A tuple with the standard and error outputs of the process executing
      the command.
    """

    self._check_initialization()

    if should_be_running and not self.running:
        logger.warn("The cluster was stopped. Starting it automatically")
        self.start()

    if not node:
        node = self.master

    if verbose:
        logger.info("Executing {" + self.bin_dir + "/hadoop " +
                    command + "} in " + str(node))

    proc = SshProcess(self.bin_dir + "/hadoop " + command, node)

    if verbose:
        red_color = '\033[01;31m'
        proc.stdout_handlers.append(sys.stdout)
        proc.stderr_handlers.append(
            ColorDecorator(sys.stderr, red_color))

    proc.start()
    proc.wait()

    return (proc.stdout, proc.stderr)

Developer: lmolina | Project: hadoop_g5k | Lines: 47 | Source: cluster.py
Example 20: _configure_servers

def _configure_servers(self, hosts=None):
    """Configure servers and host-dependent parameters.

    Args:
      hosts (list of Host, optional):
        The list of hosts to take into account in the configuration. If
        not specified, all the hosts of the Hadoop cluster are used. The
        first host of this list is always used as the reference.
    """

    if not hosts:
        hosts = self.hosts

    host_attrs = get_host_attributes(hosts[0])
    num_cores = host_attrs[u'architecture'][u'smt_size']
    total_memory_mb = (int(host_attrs[u'main_memory'][u'ram_size']) /
                       (1024 * 1024)) - 2 * 1024
    mem_per_slot_mb = total_memory_mb / (num_cores - 1)

    replace_in_xml_file(os.path.join(self.temp_conf_dir, CORE_CONF_FILE),
                        "fs.default.name",
                        "hdfs://" + self.master.address + ":" +
                        str(self.hdfs_port) + "/",
                        True)
    replace_in_xml_file(os.path.join(self.temp_conf_dir, CORE_CONF_FILE),
                        "hadoop.tmp.dir",
                        self.hadoop_temp_dir, True)
    replace_in_xml_file(os.path.join(self.temp_conf_dir, CORE_CONF_FILE),
                        "topology.script.file.name",
                        self.conf_dir + "/topo.sh", True)

    replace_in_xml_file(os.path.join(self.temp_conf_dir, MR_CONF_FILE),
                        "mapred.job.tracker",
                        self.master.address + ":" +
                        str(self.mapred_port), True)
    replace_in_xml_file(os.path.join(self.temp_conf_dir, MR_CONF_FILE),
                        "mapred.tasktracker.map.tasks.maximum",
                        str(num_cores - 1), True)
    replace_in_xml_file(os.path.join(self.temp_conf_dir, MR_CONF_FILE),
                        "mapred.tasktracker.reduce.tasks.maximum",
                        str(num_cores - 1), True)
    if mem_per_slot_mb <= 0:
        logger.warn("Memory is negative, no setting")
    else:
        replace_in_xml_file(os.path.join(self.temp_conf_dir, MR_CONF_FILE),
                            "mapred.child.java.opts",
                            "-Xmx" + str(mem_per_slot_mb) + "m", True)

Developer: lmolina | Project: hadoop_g5k | Lines: 47 | Source: cluster.py
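The warning at the end guards the slot-memory arithmetic: roughly 2 GiB are reserved for the system, the rest is split across num_cores - 1 task slots, and on very small nodes the result can drop to zero or below, in which case mapred.child.java.opts is simply left at its default. A quick worked example with assumed hardware numbers (16 GiB of RAM, 8 cores; these values are illustrative, not taken from the source):

ram_size = 16 * 1024 ** 3            # bytes, as reported for the reference host
num_cores = 8

total_memory_mb = ram_size // (1024 * 1024) - 2 * 1024   # 16384 - 2048 = 14336
mem_per_slot_mb = total_memory_mb // (num_cores - 1)     # 14336 // 7 = 2048

print(mem_per_slot_mb)  # 2048 -> "-Xmx2048m"; a value <= 0 would trigger logger.warn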
Note: the execo_engine.logger.warn examples in this article are collected from source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their authors; copyright remains with the original authors, and redistribution and use are subject to each project's License. Do not repost without permission.