This article collects typical usage examples of the Python class execo.action.Remote. If you are wondering what the Remote class does, how to use it, or where to find real examples of it, the curated class code samples below should help.
Twenty code examples of the Remote class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
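Before diving into the examples, here is a minimal sketch of the pattern they all share: build a Remote action over a list of hosts, run it, then inspect the per-host processes. The host names below are placeholders and are not taken from any example on this page.

from execo import Host
from execo.action import Remote

hosts = [Host("node-1.example.com"), Host("node-2.example.com")]

# Run the same command in parallel over SSH on every host.
action = Remote("uname -r", hosts)
action.run()

if action.ok:
    for p in action.processes:  # one remote process per host
        print(p.host.address + ": " + p.stdout.strip())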
Example 1: bootstrap
def bootstrap(self, tar_file):

    # 0. Check that required packages are present
    required_packages = "openjdk-7-jre openjdk-7-jdk"
    check_packages = TaktukRemote("dpkg -s " + required_packages,
                                  self.hosts)
    for p in check_packages.processes:
        p.nolog_exit_code = p.nolog_error = True
    check_packages.run()
    if not check_packages.ok:
        logger.info("Packages not installed, trying to install")
        install_packages = TaktukRemote(
            "export DEBIAN_FRONTEND=noninteractive ; " +
            "apt-get update && apt-get install -y --force-yes " +
            required_packages, self.hosts).run()
        if not install_packages.ok:
            logger.error("Unable to install the packages")

    get_java_home = SshProcess('echo $(readlink -f /usr/bin/javac | '
                               'sed "s:/bin/javac::")', self.master)
    get_java_home.run()
    self.java_home = get_java_home.stdout.strip()

    logger.info("All required packages are present")

    # 1. Copy hadoop tar file and uncompress
    logger.info("Copy " + tar_file + " to hosts and uncompress")
    rm_dirs = TaktukRemote("rm -rf " + self.base_dir +
                           " " + self.conf_dir,
                           self.hosts)
    put_tar = TaktukPut(self.hosts, [tar_file], "/tmp")
    tar_xf = TaktukRemote(
        "tar xf /tmp/" + os.path.basename(tar_file) + " -C /tmp",
        self.hosts)
    SequentialActions([rm_dirs, put_tar, tar_xf]).run()

    # 2. Move installation to base dir
    logger.info("Create installation directories")
    mv_base_dir = TaktukRemote(
        "mv /tmp/" + os.path.basename(tar_file).replace(".tgz", "") + " " +
        self.base_dir,
        self.hosts)
    mkdirs = TaktukRemote("mkdir -p " + self.conf_dir, self.hosts)
    chmods = TaktukRemote("chmod g+w " + self.base_dir +
                          " && chmod g+w " + self.conf_dir,
                          self.hosts)
    SequentialActions([mv_base_dir, mkdirs, chmods]).run()

    # 3. Specify environment variables
    command = "cat >> " + self.conf_dir + "/spark-env.sh << EOF\n"
    command += "JAVA_HOME=" + self.java_home + "\n"
    command += "SPARK_LOG_DIR=" + self.logs_dir + "\n"
    if self.hc:
        command += "HADOOP_CONF_DIR=" + self.hc.conf_dir + "\n"
    if self.mode == YARN_MODE:
        command += "YARN_CONF_DIR=" + self.hc.conf_dir + "\n"
    command += "EOF\n"
    command += "chmod +x " + self.conf_dir + "/spark-env.sh"
    action = Remote(command, self.hosts)
    action.run()
Developer: sarlam, Project: hadoop_g5k, Lines: 60, Source file: spark.py

Example 2: _set_common_params
def _set_common_params(self, params, conf_dir, default_tuning=False):
    """Replace common parameters. Some user-specified values are
    overwritten.

    Args:
      params (str):
        Already defined parameters over all the clusters.
      conf_dir (str):
        The path of the directory with the configuration files.
      default_tuning (bool, optional):
        Whether to use automatic tuning based on some best practices or
        leave the default parameters.
    """

    defs_file = conf_dir + "/spark-defaults.conf"

    # spark-env.sh
    command = "cat >> " + self.conf_dir + "/spark-env.sh << EOF\n"
    command += "SPARK_MASTER_PORT=" + str(self.port) + "\n"
    command += "EOF\n"
    action = Remote(command, self.hosts)
    action.run()

    # Get already set parameters
    global_params = params["global"]
    exec_mem = global_params["exec_mem"]
    exec_cores = global_params["exec_cores"]
    total_execs = global_params["total_execs"]

    # Log parameters
    if self.evs_log_dir:
        write_in_props_file(defs_file,
                            "spark.eventLog.enabled", "true",
                            create_if_absent=True,
                            override=True)
        write_in_props_file(defs_file,
                            "spark.eventLog.dir", self.evs_log_dir,
                            create_if_absent=True,
                            override=True)

    write_in_props_file(defs_file,
                        "spark.logConf", "true",
                        create_if_absent=True,
                        override=False)

    if default_tuning:
        write_in_props_file(defs_file,
                            "spark.executor.memory", "%dm" % exec_mem,
                            create_if_absent=True,
                            override=False)
        write_in_props_file(defs_file,
                            "spark.executor.cores", exec_cores,
                            create_if_absent=True,
                            override=False)
        write_in_props_file(defs_file,
                            "spark.executor.instances", total_execs,
                            create_if_absent=True,
                            override=False)
Developer: djamelinfo, Project: hadoop_g5k, Lines: 60, Source file: spark.py

Example 3: _start_disk_copy
def _start_disk_copy(self, disks=None, backing_file_dir='/tmp'):
    """ """
    disks_copy = []
    if not disks:
        disks = self.backing_files

    for bf in disks:
        logger.info('Treating ' + style.emph(bf))
        logger.debug("Checking frontend disk vs host disk")
        raw_disk = '%s/orig_' % backing_file_dir + bf.split('/')[-1]
        f_disk = Process('md5sum -b ' + bf).run()
        disk_hash = f_disk.stdout.split(' ')[0]
        cmd = 'if [ -f ' + raw_disk + ' ]; ' + \
              'then md5sum -b ' + raw_disk + '; fi'
        h_disk = self.fact.get_remote(cmd, self.hosts).run()
        disk_ok = True
        for p in h_disk.processes:
            if p.stdout.split(' ')[0] != disk_hash:
                disk_ok = False
                break
        if disk_ok:
            logger.info("Disk " + style.emph(bf) +
                        " is already present, skipping copy")
        else:
            disks_copy.append(self.fact.get_fileput(self.hosts, [bf],
                              remote_location="%s" % backing_file_dir))

    if len(disks_copy) > 0:
        self.copy_actions = ParallelActions(disks_copy).start()
    else:
        self.copy_actions = Remote('ls', self.hosts[0]).run()
Developer: badock, Project: vm5k, Lines: 30, Source file: deployment.py

Example 4: clean_logs
def clean_logs(self):
    """Remove all Hadoop logs."""

    logger.info("Cleaning logs")

    restart = False
    if self.running:
        logger.warn("The cluster needs to be stopped before cleaning.")
        self.stop()
        restart = True

    action = Remote("rm -rf " + self.logs_dir + "/*", self.hosts)
    action.run()

    if restart:
        self.start()
Developer: lmolina, Project: hadoop_g5k, Lines: 16, Source file: cluster.py

Example 5: clean_history
def clean_history(self):
    """Remove history."""

    logger.info("Cleaning history")

    restart = False
    if self.running:
        logger.warn("The cluster needs to be stopped before cleaning.")
        self.stop()
        restart = True

    action = Remote("rm -rf " + self.logs_dir + "/history",
                    [self.master])
    action.run()

    if restart:
        self.start()
Developer: lmolina, Project: hadoop_g5k, Lines: 17, Source file: cluster.py

Example 6: _configure_servers
def _configure_servers(self, hosts=None):
    """Configure servers and host-dependant parameters.

    Args:
      hosts (list of Host, optional):
        The list of hosts to take into account in the configuration. If
        not specified, all the hosts of the Spark cluster are used. The
        first host of this list is always used as the reference.
    """

    if not hosts:
        hosts = self.hosts

    host_attrs = get_host_attributes(hosts[0])
    num_cores = host_attrs[u'architecture'][u'smt_size']
    total_memory_mb = (int(host_attrs[u'main_memory'][u'ram_size']) /
                       (1024 * 1024))
    memory_per_worker = int(0.75 * total_memory_mb)
    memory_per_task = int(memory_per_worker / num_cores)

    # Set memory for each worker
    command = "cat >> " + self.conf_dir + "/spark-env.sh << EOF\n"
    command += "SPARK_MASTER_PORT=" + str(self.port) + "\n"
    command += "SPARK_WORKER_MEMORY=" + str(memory_per_worker) + "m\n"
    command += "EOF\n"
    action = Remote(command, self.hosts)
    action.run()

    # Default parameters
    driver_mem = "1g"
    executor_mem = str(memory_per_task) + "m"

    with open(self.temp_conf_dir + "/spark-defaults.conf", "a") \
            as defaults_file:
        defaults_file.write("spark.executor.memory\t" + executor_mem + "\n")
        defaults_file.write("spark.driver.memory\t" + driver_mem + "\n")
        # defaults_file.write("spark.driver.maxResultSize\t1g\n")
        defaults_file.write("spark.logConf\ttrue\n")
        # defaults_file.write("spark.python.worker.memory\t512m")
        if self.evs_log_dir:
            defaults_file.write("spark.eventLog.enabled\ttrue\n")
            defaults_file.write("spark.eventLog.dir\t" +
                                self.evs_log_dir + "\n")
Developer: lmolina, Project: hadoop_g5k, Lines: 43, Source file: spark.py

Example 7: clean_data
def clean_data(self):
    """Remove all data created by MongoDB (including filesystem)."""
    if self.running:
        logger.warn("The cluster needs to be stopped before cleaning.")
        self.stop()

    logger.info("Cleaning MongoDB data")

    restart = False
    if self.running:
        self.stop()
        restart = True

    action = Remote("rm -rf " + self.data_dir + "/*", self.hosts)
    action.run()

    if restart:
        self.start()
Developer: mliroz, Project: bigdata_dpy, Lines: 19, Source file: mongodb.py

Example 8: clean_data
def clean_data(self):
    """Remove all data created by Hadoop (including filesystem)."""
    if self.running:
        logger.warn("The cluster needs to be stopped before cleaning.")
        self.stop()

    logger.info("Cleaning hadoop data")

    restart = False
    if self.running:
        self.stop()
        restart = True

    action = Remote("rm -rf " + self.hadoop_temp_dir + " /tmp/hadoop-" +
                    getpass.getuser() + "-*", self.hosts)
    action.run()

    if restart:
        self.start()
Developer: lmolina, Project: hadoop_g5k, Lines: 20, Source file: cluster.py

Example 9: bootstrap
def bootstrap(self, tar_file):
    """Install MongoDB in all cluster nodes from the specified tgz file.

    Args:
      tar_file (str):
        The file containing MongoDB binaries.
    """

    # 1. Copy mongo tar file and uncompress
    logger.info("Copy " + tar_file + " to hosts and uncompress")
    rm_files = TaktukRemote("rm -rf " + self.base_dir +
                            " " + self.conf_dir +
                            " " + self.data_dir +
                            " " + self.logs_dir,
                            self.hosts)
    put_tar = TaktukPut(self.hosts, [tar_file], "/tmp")
    tar_xf = TaktukRemote("tar xf /tmp/" + os.path.basename(tar_file) +
                          " -C /tmp", self.hosts)
    SequentialActions([rm_files, put_tar, tar_xf]).run()

    # 2. Move installation to base dir
    logger.info("Create installation directories")
    action = Remote(
        "mv /tmp/" +
        os.path.basename(tar_file).replace(".tgz", "") + " " +
        self.base_dir,
        self.hosts)
    action.run()

    # 3. Create other dirs
    mkdirs = TaktukRemote("mkdir -p " + self.data_dir +
                          " && mkdir -p " + self.conf_dir +
                          " && mkdir -p " + self.logs_dir +
                          " && touch " + os.path.join(self.conf_dir,
                                                      CONF_FILE),
                          self.hosts)
    mkdirs.run()

    # 4. Generate initial configuration
    self._initialize_conf()
Developer: mliroz, Project: bigdata_dpy, Lines: 41, Source file: mongodb.py

Example 10: _initialize_conf
def _initialize_conf(self):
    """Merge locally-specified configuration files with default files
    from the distribution."""

    action = Remote("cp " + os.path.join(self.conf_dir,
                                         SPARK_CONF_FILE + ".template ") +
                    os.path.join(self.conf_dir, SPARK_CONF_FILE),
                    self.hosts)
    action.run()

    if os.path.exists(self.local_base_conf_dir):
        base_conf_files = [os.path.join(self.local_base_conf_dir, f)
                           for f in os.listdir(self.local_base_conf_dir)]
        for f in base_conf_files:
            shutil.copy(f, self.init_conf_dir)
    else:
        logger.warn(
            "Local conf dir does not exist. Using default configuration")
        base_conf_files = []

    missing_conf_files = self.conf_mandatory_files
    for f in base_conf_files:
        f_base_name = os.path.basename(f)
        if f_base_name in missing_conf_files:
            missing_conf_files.remove(f_base_name)

    logger.info("Copying missing conf files from master: " + str(
        missing_conf_files))

    remote_missing_files = [os.path.join(self.conf_dir, f)
                            for f in missing_conf_files]

    action = Get([self.master], remote_missing_files, self.init_conf_dir)
    action.run()
Developer: djamelinfo, Project: hadoop_g5k, Lines: 34, Source file: spark.py

Example 11: get_conf
def get_conf(self, param_names):

    params = {}
    remaining_param_names = param_names[:]

    # Copy conf files from first host in the cluster
    action = Remote("ls " + self.conf_dir + "/*.xml", [self.hosts[0]])
    action.run()
    output = action.processes[0].stdout

    remote_conf_files = []
    for f in output.split():
        remote_conf_files.append(os.path.join(self.conf_dir, f))

    tmp_dir = "/tmp/mliroz_temp_hadoop/"
    if not os.path.exists(tmp_dir):
        os.makedirs(tmp_dir)

    action = Get([self.hosts[0]], remote_conf_files, tmp_dir)
    action.run()

    # Do replacements in temp file
    temp_conf_files = [os.path.join(tmp_dir, f) for f in
                       os.listdir(tmp_dir)]

    for f in temp_conf_files:
        fparams = get_xml_params(f, remaining_param_names)
        for p in fparams:
            if fparams[p]:
                params[p] = fparams[p]
                remaining_param_names.remove(p)

    return params
Developer: lmolina, Project: hadoop_g5k, Lines: 33, Source file: cluster.py

Example 12: change_conf
def change_conf(self, params, conf_file=None, default_file=MR_CONF_FILE):
    """Modify Hadoop configuration. This method copies the configuration
    files from the first host of each g5k cluster conf dir into a local
    temporary dir, does all the changes in place and broadcasts the new
    configuration files to all hosts.

    Args:
      params (dict of str:str):
        The parameters to be changed in the form key:value.
      conf_file (str, optional):
        The file where parameters should be set. If not specified, all
        files are checked for the parameter name and the parameter is set
        in the file where the property is found. If not found, the
        parameter is set in the default file.
      default_file (str, optional): The default conf file where to set the
        parameter if not found. Only applies when conf_file is not set.
    """

    for cluster in self.hw.get_clusters():
        hosts = cluster.get_hosts()

        # Copy conf files from first host in the cluster
        action = Remote("ls " + self.conf_dir + "/*.xml", [hosts[0]])
        action.run()
        output = action.processes[0].stdout

        remote_conf_files = []
        for f in output.split():
            remote_conf_files.append(os.path.join(self.conf_dir, f))

        tmp_dir = "/tmp/mliroz_temp_hadoop/"
        if not os.path.exists(tmp_dir):
            os.makedirs(tmp_dir)

        action = Get([hosts[0]], remote_conf_files, tmp_dir)
        action.run()

        # Do replacements in temp file
        if conf_file:
            f = os.path.join(tmp_dir, conf_file)
            for name, value in params.iteritems():
                replace_in_xml_file(f, name, value, True)
        else:
            temp_conf_files = [os.path.join(tmp_dir, f) for f in
                               os.listdir(tmp_dir)]

            for name, value in params.iteritems():
                for f in temp_conf_files:
                    if replace_in_xml_file(f, name, value):
                        break
                else:
                    # Property not found - add it in MR_CONF_FILE
                    logger.info("Parameter with name " + name + " has not "
                                "been found in any conf file. Setting it "
                                "in " + default_file)
                    f = os.path.join(tmp_dir, default_file)
                    replace_in_xml_file(f, name, value, True)

        # Copy back the files to all hosts
        self._copy_conf(tmp_dir, hosts)
Developer: mliroz, Project: bigdata_dpy, Lines: 60, Source file: cluster.py
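As a hedged illustration only, the method from Example 12 might be invoked on an existing cluster object roughly as follows; the cluster instance and the chosen Hadoop property are assumptions for the sketch, not part of the excerpt above.

# Hypothetical usage sketch: `cluster` is assumed to be an instance of the
# class that Example 12 belongs to.
cluster.change_conf({"mapreduce.tasktracker.map.tasks.maximum": "4"},
                    conf_file="mapred-site.xml")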
Example 13: change_conf
def change_conf(self, params):
    """Modify Hadoop configuration. This method copies the configuration
    files from the first host of each g5k cluster conf dir into a local
    temporary dir, does all the changes in place and broadcasts the new
    configuration files to all hosts.

    Args:
      params (dict of str:str):
        The parameters to be changed in the form key:value.
    """

    for g5k_cluster in self.host_clusters:
        hosts = self.host_clusters[g5k_cluster]

        # Copy conf files from first host in the cluster
        action = Remote("ls " + self.conf_dir + "/*.xml", [hosts[0]])
        action.run()
        output = action.processes[0].stdout

        remote_conf_files = []
        for f in output.split():
            remote_conf_files.append(os.path.join(self.conf_dir, f))

        tmp_dir = "/tmp/mliroz_temp_hadoop/"
        if not os.path.exists(tmp_dir):
            os.makedirs(tmp_dir)

        action = Get([hosts[0]], remote_conf_files, tmp_dir)
        action.run()

        # Do replacements in temp file
        temp_conf_files = [os.path.join(tmp_dir, f) for f in
                           os.listdir(tmp_dir)]

        for name, value in params.iteritems():
            for f in temp_conf_files:
                if replace_in_xml_file(f, name, value):
                    break
            else:
                # Property not found - provisionally add it in MR_CONF_FILE
                f = os.path.join(tmp_dir, MR_CONF_FILE)
                replace_in_xml_file(f, name, value, True)

        # Copy back the files to all hosts
        self._copy_conf(tmp_dir, hosts)
Developer: lmolina, Project: hadoop_g5k, Lines: 45, Source file: cluster.py

Example 14: _get_conf_files
def _get_conf_files(self, host):

    action = Remote("ls " + self.conf_dir + "/*.xml", [host])
    action.run()
    output = action.processes[0].stdout

    remote_conf_files = []
    for f in output.split():
        remote_conf_files.append(os.path.join(self.conf_dir, f))

    tmp_dir = "/tmp/mliroz_temp_hadoop/"
    if not os.path.exists(tmp_dir):
        os.makedirs(tmp_dir)

    action = Get([host], remote_conf_files, tmp_dir)
    action.run()

    temp_conf_files = [os.path.join(tmp_dir, f) for f in
                       os.listdir(tmp_dir)]

    return temp_conf_files
Developer: mliroz, Project: bigdata_dpy, Lines: 21, Source file: cluster.py

Example 15: bootstrap
def bootstrap(self, tar_file):

    # 0. Remove used dirs if existing
    action = Remote("rm -rf " + self.base_dir, self.hc.hosts)
    action.run()
    action = Remote("rm -rf " + self.conf_dir, self.hc.hosts)
    action.run()

    # 1. Copy Mahout tar file and uncompress
    logger.info("Copy " + tar_file + " to hosts and uncompress")
    action = Put(self.hc.hosts, [tar_file], "/tmp")
    action.run()
    action = Remote(
        "tar xf /tmp/" + os.path.basename(tar_file) + " -C /tmp",
        self.hc.hosts)
    action.run()

    # 2. Move installation to base dir
    logger.info("Create installation directories")
    action = Remote(
        "mv /tmp/" +
        os.path.basename(tar_file).replace(".tar.gz", "") + " " +
        self.base_dir,
        self.hc.hosts)
    action.run()

    # 3. Create other dirs
    action = Remote("mkdir -p " + self.conf_dir, self.hc.hosts)
    action.run()

    # 4. Include libraries in Hadoop's classpath
    list_dirs = SshProcess("ls -1 " + self.base_dir + "/*.jar",
                           self.hc.master)
    list_dirs.run()
    libs = " ".join(list_dirs.stdout.splitlines())
    action = Remote("cp " + libs + " " + self.hc.base_dir + "/lib",
                    self.hc.hosts)
    action.run()

    self.initialized = True  # No need to call initialize()
Developer: djamelinfo, Project: hadoop_g5k, Lines: 40, Source file: mahout.py

Example 16: bootstrap
def bootstrap(self, tar_file):

    # 0. Check requirements
    java_major_version = 7
    if not check_java_version(java_major_version, self.hosts):
        msg = "Java 1.%d+ required" % java_major_version
        logger.error(msg)
        raise SparkException(msg)

    self.java_home = get_java_home(self.master)

    # 1. Copy hadoop tar file and uncompress
    logger.info("Copy " + tar_file + " to hosts and uncompress")
    rm_dirs = TaktukRemote("rm -rf " + self.base_dir +
                           " " + self.conf_dir,
                           self.hosts)
    put_tar = TaktukPut(self.hosts, [tar_file], "/tmp")
    tar_xf = TaktukRemote(
        "tar xf /tmp/" + os.path.basename(tar_file) + " -C /tmp",
        self.hosts)
    rm_tar = TaktukRemote(
        "rm /tmp/" + os.path.basename(tar_file),
        self.hosts)
    SequentialActions([rm_dirs, put_tar, tar_xf, rm_tar]).run()

    # 2. Move installation to base dir
    logger.info("Create installation directories")
    mv_base_dir = TaktukRemote(
        "mv /tmp/" + os.path.basename(tar_file).replace(".tgz", "") + " " +
        self.base_dir,
        self.hosts)
    mkdirs = TaktukRemote("mkdir -p " + self.conf_dir +
                          " && mkdir -p " + self.logs_dir,
                          self.hosts)
    chmods = TaktukRemote("chmod g+w " + self.base_dir +
                          " && chmod g+w " + self.conf_dir +
                          " && chmod g+w " + self.logs_dir,
                          self.hosts)
    SequentialActions([mv_base_dir, mkdirs, chmods]).run()

    # 2.1. Create spark-events dir
    if self.evs_log_dir:
        if self.evs_log_dir.startswith("file://") or \
                "://" not in self.evs_log_dir:
            mk_evs_dir = TaktukRemote("mkdir -p " + self.evs_log_dir +
                                      " && chmod g+w " + self.evs_log_dir,
                                      self.hosts)
            mk_evs_dir.run()
        elif self.evs_log_dir.startswith("hdfs://"):
            self.hc.execute("fs -mkdir -p " + self.evs_log_dir)

    # 3. Specify environment variables
    env_file = self.conf_dir + "/spark-env.sh"

    command = "cat >> " + env_file + " << EOF\n"
    command += "JAVA_HOME=" + self.java_home + "\n"
    command += "SPARK_LOG_DIR=" + self.logs_dir + "\n"
    if self.hc:
        command += "HADOOP_CONF_DIR=" + self.hc.conf_dir + "\n"
    if self.mode == YARN_MODE:
        command += "YARN_CONF_DIR=" + self.hc.conf_dir + "\n"
    command += "EOF\n"
    command += "echo SPARK_PUBLIC_DNS=$(hostname) >> " + env_file
    command += " && chmod +x " + env_file
    action = Remote(command, self.hosts)
    action.run()

    # 4. Generate initial configuration
    self._initialize_conf()
Developer: djamelinfo, Project: hadoop_g5k, Lines: 69, Source file: spark.py

Example 17: bootstrap
def bootstrap(self, tar_file):
    """Install Hadoop in all cluster nodes from the specified tar.gz file.

    Args:
      tar_file (str):
        The file containing Hadoop binaries.
    """

    # 0. Check that required packages are present
    required_packages = "openjdk-7-jre openjdk-7-jdk"
    check_packages = TaktukRemote("dpkg -s " + required_packages,
                                  self.hosts)
    for p in check_packages.processes:
        p.nolog_exit_code = p.nolog_error = True
    check_packages.run()
    if not check_packages.ok:
        logger.info("Packages not installed, trying to install")
        install_packages = TaktukRemote(
            "export DEBIAN_FRONTEND=noninteractive ; " +
            "apt-get update && apt-get install -y --force-yes " +
            required_packages, self.hosts).run()
        if not install_packages.ok:
            logger.error("Unable to install the packages")

    get_java_home = SshProcess('echo $(readlink -f /usr/bin/javac | '
                               'sed "s:/bin/javac::")', self.master)
    get_java_home.run()
    self.java_home = get_java_home.stdout.strip()

    logger.info("All required packages are present")

    # 1. Copy hadoop tar file and uncompress
    logger.info("Copy " + tar_file + " to hosts and uncompress")
    rm_dirs = Remote("rm -rf " + self.base_dir +
                     " " + self.conf_dir +
                     " " + self.logs_dir +
                     " " + self.hadoop_temp_dir,
                     self.hosts)
    put_tar = TaktukPut(self.hosts, [tar_file], "/tmp")
    tar_xf = TaktukRemote(
        "tar xf /tmp/" + os.path.basename(tar_file) + " -C /tmp",
        self.hosts)
    SequentialActions([rm_dirs, put_tar, tar_xf]).run()

    # 2. Move installation to base dir and create other dirs
    logger.info("Create installation directories")
    mv_base_dir = TaktukRemote(
        "mv /tmp/" +
        os.path.basename(tar_file).replace(".tar.gz", "") + " " +
        self.base_dir,
        self.hosts)
    mkdirs = TaktukRemote("mkdir -p " + self.conf_dir +
                          " && mkdir -p " + self.logs_dir +
                          " && mkdir -p " + self.hadoop_temp_dir,
                          self.hosts)
    chmods = TaktukRemote("chmod g+w " + self.base_dir +
                          " && chmod g+w " + self.conf_dir +
                          " && chmod g+w " + self.logs_dir +
                          " && chmod g+w " + self.hadoop_temp_dir,
                          self.hosts)
    SequentialActions([mv_base_dir, mkdirs, chmods]).run()

    # 4. Specify environment variables
    command = "cat >> " + self.conf_dir + "/hadoop-env.sh << EOF\n"
    command += "export JAVA_HOME=" + self.java_home + "\n"
    command += "export HADOOP_LOG_DIR=" + self.logs_dir + "\n"
    command += "HADOOP_HOME_WARN_SUPPRESS=\"TRUE\"\n"
    command += "EOF"
    action = Remote(command, self.hosts)
    action.run()

    # 5. Check version
    return self._check_version_compliance()
Developer: lmolina, Project: hadoop_g5k, Lines: 73, Source file: cluster.py

Example 18: uncompress
def uncompress(file_name, host):
    if file_name.endswith("tar.gz"):
        decompression = Remote("tar xf " + file_name, [host])
        decompression.run()

        base_name = os.path.basename(file_name[:-7])
        dir_name = os.path.dirname(file_name[:-7])
        new_name = dir_name + "/data-" + base_name

        action = Remote("mv " + file_name[:-7] + " " + new_name, [host])
        action.run()
    elif file_name.endswith("gz"):
        decompression = Remote("gzip -d " + file_name, [host])
        decompression.run()

        base_name = os.path.basename(file_name[:-3])
        dir_name = os.path.dirname(file_name[:-3])
        new_name = dir_name + "/data-" + base_name

        action = Remote("mv " + file_name[:-3] + " " + new_name, [host])
        action.run()
    elif file_name.endswith("zip"):
        decompression = Remote("unzip " + file_name, [host])
        decompression.run()

        base_name = os.path.basename(file_name[:-4])
        dir_name = os.path.dirname(file_name[:-4])
        new_name = dir_name + "/data-" + base_name

        action = Remote("mv " + file_name[:-4] + " " + new_name, [host])
        action.run()
    elif file_name.endswith("bz2"):
        decompression = Remote("bzip2 -d " + file_name, [host])
        decompression.run()

        base_name = os.path.basename(file_name[:-4])
        dir_name = os.path.dirname(file_name[:-4])
        new_name = dir_name + "/data-" + base_name

        action = Remote("mv " + file_name[:-4] + " " + new_name, [host])
        action.run()
    else:
        logger.warn("Unknown extension")
        return file_name

    return new_name
Developer: sarlam, Project: hadoop_g5k, Lines: 46, Source file: util.py
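A hedged usage sketch for the helper above; the archive path and host are placeholders. The function extracts or decompresses the archive on the remote host, renames the result to <dir>/data-<basename>, and returns that new path.

from execo import Host

# Hypothetical arguments, shown only to illustrate the return value.
new_path = uncompress("/tmp/dataset.tar.gz", Host("node-1.example.com"))
print(new_path)  # "/tmp/data-dataset"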
Example 19: vm5k_deployment
#......... part of the code omitted .........
                self.hosts,
                connection_params={'taktuk_options': taktuk_conf}).run()
        self._actions_hosts(conf_ssh)

    def _start_disk_copy(self, disks=None, backing_file_dir='/tmp'):
        """ """
        disks_copy = []
        if not disks:
            disks = self.backing_files

        for bf in disks:
            logger.info('Treating ' + style.emph(bf))
            logger.debug("Checking frontend disk vs host disk")
            raw_disk = '%s/orig_' % backing_file_dir + bf.split('/')[-1]
            f_disk = Process('md5sum -b ' + bf).run()
            disk_hash = f_disk.stdout.split(' ')[0]
            cmd = 'if [ -f ' + raw_disk + ' ]; ' + \
                  'then md5sum -b ' + raw_disk + '; fi'
            h_disk = self.fact.get_remote(cmd, self.hosts).run()
            disk_ok = True
            for p in h_disk.processes:
                if p.stdout.split(' ')[0] != disk_hash:
                    disk_ok = False
                    break
            if disk_ok:
                logger.info("Disk " + style.emph(bf) +
                            " is already present, skipping copy")
            else:
                disks_copy.append(self.fact.get_fileput(self.hosts, [bf],
                                  remote_location="%s" % backing_file_dir))

        if len(disks_copy) > 0:
            self.copy_actions = ParallelActions(disks_copy).start()
        else:
            self.copy_actions = Remote('ls', self.hosts[0]).run()

    def _create_backing_file(self, disks=None, backing_file_dir='/tmp'):
        """ """
        if not self.copy_actions:
            self._start_disk_copy(disks)
        if not self.copy_actions.ended:
            logger.info("Waiting for the end of the disks copy")
            self.copy_actions.wait()

        if isinstance(self.copy_actions, ParallelActions):
            mv_actions = []
            for act in self.copy_actions.actions:
                fname = act.local_files[0].split('/')[-1]
                mv_actions.append(self.fact.get_remote(
                    "mv %s/" % backing_file_dir + fname +
                    " %s/orig_" % backing_file_dir + fname,
                    self.hosts))
            mv = ParallelActions(mv_actions).run()

        if not disks:
            disks = self.backing_files

        for bf in disks:
            raw_disk = '%s/orig_' % backing_file_dir + bf.split('/')[-1]
            to_disk = '%s/' % backing_file_dir + bf.split('/')[-1]
            self.fact.get_remote('cp ' + raw_disk + ' ' + to_disk,
                                 self.hosts).run()
            logger.info('Copying ssh key on ' + to_disk + ' ...')
            cmd = 'modprobe nbd max_part=16; ' + \
                  'qemu-nbd --connect=/dev/nbd0 ' + to_disk + \
                  ' ; sleep 3 ; partprobe /dev/nbd0 ; ' + \
                  'part=`fdisk -l /dev/nbd0 |grep dev|grep Linux| grep -v swap|cut -f 1 -d " "` ; ' + \
                  'mount $part /mnt ; mkdir -p /mnt/root/.ssh ; ' + \
                  'cat /root/.ssh/authorized_keys >> /mnt/root/.ssh/authorized_keys ; ' + \
                  'cp -r /root/.ssh/id_rsa* /mnt/root/.ssh/ ;' + \
Developer: badock, Project: vm5k, Lines: 67, Source file: deployment.py

Example 20: bootstrap
def bootstrap(self, tar_file):
    """Install Hadoop in all cluster nodes from the specified tar.gz file.

    Args:
      tar_file (str):
        The file containing Hadoop binaries.
    """

    # 0. Check requirements
    java_major_version = 7
    if not check_java_version(java_major_version, self.hosts):
        msg = "Java 1.%d+ required" % java_major_version
        logger.error(msg)
        raise HadoopException(msg)

    self.java_home = get_java_home(self.master)

    # 1. Copy hadoop tar file and uncompress
    logger.info("Copy " + tar_file + " to hosts and uncompress")
    rm_dirs = TaktukRemote("rm -rf " + self.base_dir +
                           " " + self.conf_dir +
                           " " + self.logs_dir +
                           " " + self.hadoop_temp_dir,
                           self.hosts)
    put_tar = TaktukPut(self.hosts, [tar_file], "/tmp")
    tar_xf = TaktukRemote(
        "tar xf /tmp/" + os.path.basename(tar_file) + " -C /tmp",
        self.hosts)
    rm_tar = TaktukRemote(
        "rm /tmp/" + os.path.basename(tar_file),
        self.hosts)