
Python logger.error Function Code Examples


This article collects typical usage examples of the execo_engine.logger.error function in Python. If you are wondering exactly how to call error, what it is for, or what real-world uses of it look like, the hand-picked examples below should help.



Twenty code examples of the error function are shown below, sorted by popularity by default.
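
Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: import the preconfigured logger object from execo_engine and call error with either a plain message or printf-style arguments (both forms appear in the examples). The check_path helper and the path below are invented purely for illustration.

    import os

    from execo_engine import logger

    def check_path(path):
        """Illustrative helper: log an error if the given path does not exist."""
        if not os.path.exists(path):
            # logger.error accepts printf-style formatting, like the stdlib logging module
            logger.error("The path %s does not exist", path)
            return False
        return True

    check_path("/tmp/some/dataset")  # hypothetical path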

Example 1: _exec_on_node

 def _exec_on_node(self, command, machine, log):
     logger.info(log)
     rem = ex.action.Remote(command, machine, connection_params={'user':'ci'}).run()
     if rem.ok :
         logger.info("Success")
     else:
         logger.error("Failure")
Author: Marie-Donnie, Project: misc, Lines of code: 7, Source file: os-distri-db.py


Example 2: make_reservation

 def make_reservation(self):
     """Perform a reservation of the required number of nodes"""
     logger.info('Performing reservation')
     starttime = int(time.time() + timedelta_to_seconds(datetime.timedelta(minutes=1)))
     endtime = int(starttime + timedelta_to_seconds(datetime.timedelta(days=3,
                                                              minutes=1)))
     startdate, n_nodes = self._get_nodes(starttime, endtime)
     while not n_nodes:
         logger.info('Not enough nodes found between %s and %s, ' + \
                     'increasing time window',
                     format_date(starttime), format_date(endtime))
         starttime = endtime
         endtime = int(starttime + timedelta_to_seconds(datetime.timedelta(days=3,
                                                             minutes=1)))
         startdate, n_nodes = self._get_nodes(starttime, endtime)
         if starttime > int(time.time() + timedelta_to_seconds(
                                         datetime.timedelta(weeks=6))):
             logger.error('There are not enough nodes on %s for your ' + \
                          'experiments, abort ...', self.cluster)
             exit()
     jobs_specs = get_jobs_specs({self.cluster: n_nodes},
                                 name=self.__class__.__name__)
     sub = jobs_specs[0][0]
     sub.walltime = self.options.walltime
     sub.additional_options = '-t deploy'
     sub.reservation_date = startdate
     (self.oar_job_id, self.frontend) = oarsub(jobs_specs)[0]
     logger.info('Startdate: %s, n_nodes: %s', format_date(startdate),
                 str(n_nodes))
Author: lpouillo, Project: execo-g5k-tools, Lines of code: 29, Source file: fp_hadoop.py


Example 3: __init__

    def __init__(self, params):
        """Create a static dataset with the given params.
        
        Args:
          params (dict):
            A dictionary with the parameters. This dataset needs the following
            parameters:
            - local_path: The path to the directory where the dataset is stored
                          locally.
            - pre_load_function: A function to be applied after transfers and
                                 before loading to dfs (usually decompression).
        """

        super(StaticDataset, self).__init__(params)

        local_path = params["local_path"]
        if not os.path.exists(local_path):
            logger.error("The dataset local dir does not exist")

        if "pre_load_function" in params:
            pre_load_function_name = params["pre_load_function"]
            self.pre_load_function = import_function(pre_load_function_name)
        else:
            self.pre_load_function = None

        self.local_path = local_path
Author: mliroz, Project: bigdata_dpy, Lines of code: 26, Source file: dataset.py


Example 4: load

    def load(self):
        """Load the configuration file"""

        # Load the configuration file
        try:
            with open(self.config_path) as config_file:
                config = yaml.load(config_file)
        except:
            logger.error("Error reading configuration file %s" %
                         self.config_path)
            t, value, tb = sys.exc_info()
            print("%s %s" % (str(t), str(value)))
            sys.exit(23)

        # Load g5k networks
        with open(NETWORK_FILE) as network_file:
            self.networks = yaml.load(network_file)


        self.config = {}
        self.config.update(DEFAULT_CONFIG)
        self.config.update(config)

        logger.info("Configuration file loaded : %s" % self.config_path)
        logger.info(pf(self.config))

        return self.config
Author: BeyondTheClouds, Project: kolla-g5k, Lines of code: 27, Source file: g5k_engine.py


Example 5: deploy

    def deploy(self):
        # we put the nodes in the first vlan we have
        vlan = self._get_primary_vlan()
         # Deploy all the nodes
        logger.info("Deploying %s on %d nodes %s" % (self.config['env_name'],
            len(self.nodes),
            '(forced)' if self.force_deploy else ''))

        deployed, undeployed = EX5.deploy(
        EX5.Deployment(
            self.nodes,
            env_name=self.config['env_name'],
            vlan = vlan[1]
        ), check_deployed_command=not self.force_deploy)

        # Check the deployment
        if len(undeployed) > 0:
            logger.error("%d nodes where not deployed correctly:" % len(undeployed))
            for n in undeployed:
                logger.error(style.emph(n))

        # Updating nodes names with vlans
        self.nodes = sorted(translate_to_vlan(self.nodes, vlan[1]),
                            key = lambda n: n.address)
        logger.info(self.nodes)
        self.deployed_nodes = sorted(translate_to_vlan(
                                        map(lambda n: EX.Host(n), deployed), vlan[1]),
                                key = lambda n: n.address)
        logger.info(self.deployed_nodes)
        check_nodes(
                nodes = self.deployed_nodes,
                resources = self.config['resources'],
                mode = self.config['role_distribution'])

        return deployed, undeployed
Author: BeyondTheClouds, Project: kolla-g5k, Lines of code: 35, Source file: g5k_engine.py


Example 6: bootstrap

    def bootstrap(self, tar_file):

        # 0. Check that required packages are present
        required_packages = "openjdk-7-jre openjdk-7-jdk"
        check_packages = TaktukRemote("dpkg -s " + required_packages,
                                      self.hosts)
        for p in check_packages.processes:
            p.nolog_exit_code = p.nolog_error = True
        check_packages.run()
        if not check_packages.ok:
            logger.info("Packages not installed, trying to install")
            install_packages = TaktukRemote(
                "export DEBIAN_MASTER=noninteractive ; " +
                "apt-get update && apt-get install -y --force-yes " +
                required_packages, self.hosts).run()
            if not install_packages.ok:
                logger.error("Unable to install the packages")

        get_java_home = SshProcess('echo $(readlink -f /usr/bin/javac | '
                                   'sed "s:/bin/javac::")', self.master)
        get_java_home.run()
        self.java_home = get_java_home.stdout.strip()

        logger.info("All required packages are present")

        # 1. Copy hadoop tar file and uncompress
        logger.info("Copy " + tar_file + " to hosts and uncompress")
        rm_dirs = TaktukRemote("rm -rf " + self.base_dir +
                               " " + self.conf_dir,
                               self.hosts)
        put_tar = TaktukPut(self.hosts, [tar_file], "/tmp")
        tar_xf = TaktukRemote(
            "tar xf /tmp/" + os.path.basename(tar_file) + " -C /tmp",
            self.hosts)
        SequentialActions([rm_dirs, put_tar, tar_xf]).run()

        # 2. Move installation to base dir
        logger.info("Create installation directories")
        mv_base_dir = TaktukRemote(
            "mv /tmp/" + os.path.basename(tar_file).replace(".tgz", "") + " " +
            self.base_dir,
            self.hosts)
        mkdirs = TaktukRemote("mkdir -p " + self.conf_dir, self.hosts)
        chmods = TaktukRemote("chmod g+w " + self.base_dir +
                              " && chmod g+w " + self.conf_dir,
                              self.hosts)
        SequentialActions([mv_base_dir, mkdirs, chmods]).run()

        # 3. Specify environment variables
        command = "cat >> " + self.conf_dir + "/spark-env.sh << EOF\n"
        command += "JAVA_HOME=" + self.java_home + "\n"
        command += "SPARK_LOG_DIR=" + self.logs_dir + "\n"
        if self.hc:
            command += "HADOOP_CONF_DIR=" + self.hc.conf_dir + "\n"
        if self.mode == YARN_MODE:
            command += "YARN_CONF_DIR=" + self.hc.conf_dir + "\n"
        command += "EOF\n"
        command += "chmod +x " + self.conf_dir + "/spark-env.sh"
        action = Remote(command, self.hosts)
        action.run()
Author: sarlam, Project: hadoop_g5k, Lines of code: 60, Source file: spark.py


Example 7: __init__

    def __init__(self, jar_path, params=None, lib_paths=None):
        """Creates a new Hadoop MapReduce jar job with the given parameters.

        Args:
          jar_path (str):
            The local path of the jar containing the job.
          params (list of str, optional):
            The list of parameters of the job.
          lib_paths (list of str, optional):
            The list of local paths to the libraries used by the job.
        """

        if not params:
            params = []
        if not lib_paths:
            lib_paths = []

        # Check if the jar file exists
        if not os.path.exists(jar_path):
            logger.error("Jar file " + jar_path + " does not exist")
            raise HadoopJobException("Jar file " + jar_path + " does not exist")

        # Check if the libraries exist
        for lp in lib_paths:
            if not os.path.exists(lp):
                logger.warn("Lib file " + lp + " does not exist")
                return  # TODO - exception

        self.jar_path = jar_path
        self.params = params
        self.lib_paths = lib_paths
Author: mliroz, Project: bigdata_dpy, Lines of code: 31, Source file: objects.py


Example 8: get_xml_params

def get_xml_params(f, param_names):

    if not param_names:
        return {}

    local_param_names = param_names[:]

    params = {}
    for name in local_param_names:
        params[name] = None

    with open(f) as inf:
        line = inf.readline()
        while line != "":
            for name in local_param_names:
                if "<name>" + name + "</name>" in line:
                    if "<value>" in line:
                        match = re.match('.*<value>([^<]*)</value>.*', line)
                        params[name] = match.group(1)
                    else:
                        line = inf.readline()
                        if line != "":
                            match = re.match('.*<value>([^<]*)</value>.*', line)
                            params[name] = match.group(1)
                        else:
                            logger.error("Configuration file " + f +
                                         " is not correctly formatted")

                    del(name)
                line = inf.readline()
        inf.close()

    return params
Author: lmolina, Project: hadoop_g5k, Lines of code: 33, Source file: util.py
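
A brief usage sketch for get_xml_params follows; the configuration path and property names are hypothetical, while the function itself is the one defined in Example 8.

    # Hypothetical Hadoop-style configuration file and property names.
    conf_file = "/tmp/hadoop/conf/core-site.xml"
    wanted = ["fs.defaultFS", "hadoop.tmp.dir"]

    values = get_xml_params(conf_file, wanted)
    for name, value in values.items():
        if value is None:
            # get_xml_params leaves a property at None when it is not found
            print("Property %s not found in %s" % (name, conf_file))
        else:
            print("%s = %s" % (name, value))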


Example 9: __define_ds_parameters

    def __define_ds_parameters(self, config):
        ds_parameters_names = config.options("ds_parameters")
        self.ds_parameters = {}
        ds_class_parameters = {}
        ds_classes = []
        for pn in ds_parameters_names:
            pv = config.get("ds_parameters", pn).split(",")
            if pn.startswith("ds.class."):
                ds_class_parameters[pn[len("ds.class."):]] = \
                    [v.strip() for v in pv]
            elif pn == "ds.class":
                ds_classes = [v.strip() for v in pv]
            else:
                self.ds_parameters[pn] = [v.strip() for v in pv]

        # Create ds configurations
        self.ds_config = []
        for (idx, ds_class) in enumerate(ds_classes):
            this_ds_params = {}
            for pn, pv in ds_class_parameters.iteritems():
                if len(pv) == len(ds_classes):
                    if pv[idx]:
                        this_ds_params[pn] = pv[idx]
                elif len(pv) == 1:
                    this_ds_params[pn] = pv[0]
                else:
                    logger.error("Number of ds_class does not much number of " +
                                 pn)
                    raise ParameterException("Number of ds_class does not much "
                                             "number of " + pn)

            self.ds_config.append((ds_class, this_ds_params))

        self.ds_parameters["ds.config"] = range(0, len(self.ds_config))
Author: mliroz, Project: diversity_p2p, Lines of code: 34, Source file: engine.py


Example 10: _check_initialization

    def _check_initialization(self):
        """ Check whether the cluster is initialized and raise and exception if
        not.
        """

        if not self.initialized:
            logger.error("The cluster should be initialized")
            raise ClusterNotInitializedException("The cluster should be initialized")
Author: mliroz, Project: bigdata_dpy, Lines of code: 8, Source file: cassandra.py


Example 11: _check_version_compliance

 def _check_version_compliance(self):
     if self.get_major_version() != 2:
         logger.error("Version of HadoopCluster is not compliant with the "
                     "distribution provided in the bootstrap option. Use "
                     "the appropiate parameter for --version when creating "
                     "the cluster or use another distribution.")
         return False
     else:
         return True
Author: mliroz, Project: bigdata_dpy, Lines of code: 9, Source file: cluster_v2.py


Example 12: replace_in_xml_file

def replace_in_xml_file(f, name, value, create_if_absent=False):
    """Assign the given value to variable name in xml file f.

    Args:
      f (str):
        The path of the file.
      name (str):
        The name of the variable.
      value (str):
        The new value to be assigned:
      create_if_absent (bool, optional):
        If True, the variable will be created at the end of the file in case
        it was not already present.

    Returns (bool):
      True if the assignment has been made, False otherwise.
    """

    changed = False

    (_, temp_file) = tempfile.mkstemp("", "xmlf-", "/tmp")

    inf = open(f)
    outf = open(temp_file, "w")
    line = inf.readline()
    while line != "":
        if "<name>" + name + "</name>" in line:
            if "<value>" in line:
                outf.write(__replace_line(line, value))
                changed = True
            else:
                outf.write(line)
                line = inf.readline()
                if line != "":
                    outf.write(__replace_line(line, value))
                    changed = True
                else:
                    logger.error("Configuration file " + f +
                                 " is not correctly formatted")
        else:
            if ("</configuration>" in line and
                    create_if_absent and not changed):
                outf.write("  <property><name>" + name + "</name>" +
                           "<value>" + str(value) + "</value></property>\n")
                outf.write(line)
                changed = True
            else:
                outf.write(line)
        line = inf.readline()
    inf.close()
    outf.close()

    if changed:
        shutil.copyfile(temp_file, f)
    os.remove(temp_file)

    return changed
Author: sarlam, Project: hadoop_g5k, Lines of code: 57, Source file: util.py
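
A short usage sketch for replace_in_xml_file, again with a hypothetical file path and property: the call rewrites the value in place and returns whether the assignment was made.

    # Hypothetical file and property; create_if_absent appends the property
    # before </configuration> if it was not already present.
    changed = replace_in_xml_file("/tmp/hadoop/conf/hdfs-site.xml",
                                  "dfs.replication", "3",
                                  create_if_absent=True)
    if not changed:
        logger.error("Could not set dfs.replication in hdfs-site.xml")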


Example 13: _check_version_compliance

 def _check_version_compliance(self):
     version = self.get_version()
     if not (version.startswith("Hadoop 0.") or
             version.startswith("Hadoop 1.")):
         logger.error("Version of HadoopCluster is not compliant with the "
                     "distribution provided in the bootstrap option. Use "
                     "the appropiate parameter for --version when creating "
                     "the cluster or use another distribution.")
         return False
     else:
         return True
Author: lmolina, Project: hadoop_g5k, Lines of code: 11, Source file: cluster.py


Example 14: workflow

    def workflow(self, comb):
        """
            Compute one application launch 
            using a given parameter group
        """
        comb_ok = False
        try:
            # Generate configuration file needed by MPI processes
            logger.info("Generating assembly file...")
            py = comb['cores'] / comb['px']
            prepare = Process('cd %s && python %s %d %d %d %d %d %s app.lad' % 
                (self.workingPath, self.genLadScript, comb['datasize'], comb['datasize'], 
                    comb['datasize'], comb['px'], py, comb['transposition']))
            prepare.shell = True
            prepare.run()

            # Generate the MPI host file
            mfile = self.generate_machine_file()

            # Start L2C
            lad = "./app.lad"
            logger.info("Computing...")
            res = Process("export OAR_JOB_KEY_FILE=~/.oar_key ; cd %s && l2c_loader -M,-machinefile,%s --mpi -c %d %s" % (self.workingPath, mfile, comb['cores'], lad))
            res.shell = True
            res.stdout_handlers.append(os.path.join(self.result_dir, slugify(comb) + '.out'))
            res.stdout_handlers.append(sys.stdout)
            res.stderr_handlers.append(os.path.join(self.result_dir, slugify(comb) + '.err'))
            res.stderr_handlers.append(sys.stderr)
            res.run()
            if not res.ok:
                logger.error('Bad L2C termination')
                raise Exception('Bad L2C termination')
            if len(res.stderr) > 0: # WARNING: when L2C cannot find the LAD file or something strange like this
                logger.warning('Not empty error output')

            # Clean configuration files
            logger.info("Removing assembly files...")
            res = Process('cd %s && rm -f app.lad*' % self.workingPath)
            res.shell = True
            res.run()
                
            comb_ok = True
        except Exception:
            pass
        finally:
            if comb_ok:
                self.sweeper.done(comb)
                logger.info(style.host(slugify(comb)) + ' has been done')
            else:
                self.sweeper.cancel(comb)
                logger.warning(style.host(slugify(comb)) + ' has been canceled')
        
            logger.info(style.step('%s Remaining'),
                        len(self.sweeper.get_remaining()))
Author: lpouillo, Project: execo-g5k-tools, Lines of code: 54, Source file: l2c_fft.py


Example 15: _check_initialization

    def _check_initialization(self):
        """ Check whether the cluster is initialized and raise and exception if
        not.
        
        Raises:
          HadoopNotInitializedException:
            If self.initialized = False
        """

        if not self.initialized:
            logger.error("The cluster should be initialized")
            raise HadoopNotInitializedException(
                "The cluster should be initialized")
Author: lmolina, Project: hadoop_g5k, Lines of code: 13, Source file: cluster.py


Example 16: bootstrap

    def bootstrap(self, tar_file):
        """Install Cassandra in all cluster nodes from the specified tar.gz file.

        Args:
          tar_file (str):
            The file containing Cassandra binaries.
        """

        # 0. Check that required packages are present
        required_packages = "openjdk-7-jre openjdk-7-jdk"
        check_packages = TaktukRemote("dpkg -s " + required_packages, self.hosts)
        for p in check_packages.processes:
            p.nolog_exit_code = p.nolog_error = True
        check_packages.run()
        if not check_packages.ok:
            logger.info("Packages not installed, trying to install")
            install_packages = TaktukRemote(
                "export DEBIAN_MASTER=noninteractive ; "
                + "apt-get update && apt-get install -y --force-yes "
                + required_packages,
                self.hosts,
            ).run()
            if not install_packages.ok:
                logger.error("Unable to install the packages")

        get_java_home = SshProcess("echo $(readlink -f /usr/bin/javac | " 'sed "s:/bin/javac::")', self.master)
        get_java_home.run()
        self.java_home = get_java_home.stdout.strip()

        logger.info("All required packages are present")

        # 1. Copy hadoop tar file and uncompress
        logger.info("Copy " + tar_file + " to hosts and uncompress")
        rm_dirs = TaktukRemote("rm -rf " + self.base_dir + " " + self.conf_dir + " " + self.logs_dir, self.hosts)
        put_tar = TaktukPut(self.hosts, [tar_file], "/tmp")
        tar_xf = TaktukRemote("tar xf /tmp/" + os.path.basename(tar_file) + " -C /tmp", self.hosts)
        SequentialActions([rm_dirs, put_tar, tar_xf]).run()

        # 2. Move installation to base dir and create other dirs
        logger.info("Create installation directories")
        mv_base_dir = TaktukRemote(
            "mv /tmp/" + os.path.basename(tar_file).replace(".tar.gz", "") + " " + self.base_dir, self.hosts
        )
        mkdirs = TaktukRemote("mkdir -p " + self.conf_dir + " && mkdir -p " + self.logs_dir, self.hosts)
        chmods = TaktukRemote(
            "chmod g+w " + self.base_dir + " && chmod g+w " + self.conf_dir + " && chmod g+w " + self.logs_dir,
            self.hosts,
        )
        SequentialActions([mv_base_dir, mkdirs, chmods]).run()
Author: mliroz, Project: bigdata_dpy, Lines of code: 49, Source file: cassandra.py


Example 17: make_reservation

    def make_reservation(self):
        """Perform a reservation of the required number of nodes."""

        logger.info('Performing reservation')
        now = int(time.time() +
                  timedelta_to_seconds(datetime.timedelta(minutes=1)))
        starttime = now
        endtime = int(starttime +
                      timedelta_to_seconds(datetime.timedelta(days=3,
                                                              minutes=1)))
        startdate, n_nodes = self._get_nodes(starttime, endtime)

        search_time = 3 * 24 * 60 * 60  # 3 days
        walltime_seconds = get_seconds(self.options.walltime)

        iteration = 0
        while not n_nodes:
            iteration += 1
            logger.info('Not enough nodes found between %s and %s, ' +
                        'increasing time window',
                        format_date(starttime), format_date(endtime))
            starttime = max(now, now +
                            iteration * search_time - walltime_seconds)
            endtime = int(now + (iteration + 1) * search_time)

            startdate, n_nodes = self._get_nodes(starttime, endtime)
            if starttime > int(time.time() + timedelta_to_seconds(
                    datetime.timedelta(weeks=6))):
                logger.error('There are not enough nodes on %s for your ' +
                             'experiments, abort ...', self.cluster)
                exit()

        jobs_specs = get_jobs_specs({self.cluster: n_nodes},
                                    name=self.__class__.__name__)
        sub = jobs_specs[0][0]
        sub.walltime = self.options.walltime
        if self.use_kadeploy:
            sub.additional_options = '-t deploy'
        else:
            sub.additional_options = '-t allow_classic_ssh'
        sub.reservation_date = startdate
        (self.oar_job_id, self.frontend) = oarsub(jobs_specs)[0]
        logger.info('Startdate: %s, n_nodes: %s, job_id: %s',
                    format_date(startdate),
                    str(n_nodes), str(self.oar_job_id))
Author: mliroz, Project: diversity_p2p, Lines of code: 45, Source file: engine.py


Example 18: __define_test_parameters

    def __define_test_parameters(self, config):
        if config.has_section("test_parameters"):
            test_parameters_names = config.options("test_parameters")
            if "test.stats_path" in test_parameters_names:
                self.stats_manager.stats_path = \
                    config.get("test_parameters", "test.stats_path")
                if not os.path.exists(self.stats_manager.stats_path):
                    os.makedirs(self.stats_manager.stats_path)

            if "test.summary_file" in test_parameters_names:
                self.stats_manager.summary_file_name = \
                    config.get("test_parameters", "test.summary_file")

            if "test.ds_summary_file" in test_parameters_names:
                self.stats_manager.ds_summary_file_name = \
                    config.get("test_parameters", "test.ds_summary_file")

            if "test.num_repetitions" in test_parameters_names:
                self.comb_manager.num_repetitions = \
                    int(config.get("test_parameters", "test.num_repetitions"))

            if "test.jar_file" in test_parameters_names:
                self.jar_file = config.get("test_parameters", "test.jar_file")

            if "test.remote_dir" in test_parameters_names:
                self.remote_dir = config.get("test_parameters",
                                             "test.remote_dir")

            if "test.use_kadeploy" in test_parameters_names:
                self.use_kadeploy = config.getboolean("test_parameters",
                                                      "test.use_kadeploy")

            if self.use_kadeploy:
                if "test.kadeploy.env_file" in test_parameters_names:
                    self.kadeploy_env_file = \
                        config.get("test_parameters", "test.kadeploy.env_file")
                elif "test.kadeploy.env_name" in test_parameters_names:
                    self.kadeploy_env_name = \
                        config.get("test_parameters", "test.kadeploy.env_name")
                else:
                    logger.error("Either test.kadeploy.env_file or "
                                 "test.kadeploy.env_name should be specified")
                    raise ParameterException("Either test.kadeploy.env_file or "
                                             "test.kadeploy.env_name should be "
                                             "specified")
Author: mliroz, Project: diversity_p2p, Lines of code: 45, Source file: engine.py


Example 19: prepare_bench

 def prepare_bench(self):
     """bench configuration and compilation, copy binaries to frontends
     
     return True if preparation is ok
     """
     logger.info("preparation: configure and compile benchmark")
     # the involved sites. We will do the compilation on the first of these.
     sites = list(set(map(get_cluster_site, self.parameters['cluster'])))
     # generate the bench compilation configuration
     bench_list = '\n'.join([ 'lu\t%s\t%s' % (size, n_core)
                              for n_core in self.parameters['n_core']
                              for size in self.parameters['size'] ])
     # Reserving a node because compiling on the frontend is forbidden
     # and because we need mpif77
     jobs = oarsub([(OarSubmission(resources = "nodes=1",
                                   job_type = 'allow_classic_ssh',
                                   walltime ='0:10:00'), sites[0])])
     if jobs[0][0]:
         try:
             logger.info("copying bench archive to %s" % (sites[0],))
             copy_bench = Put([sites[0]], ['NPB3.3-MPI.tar.bz2']).run()
             logger.info("extracting bench archive on %s" % (sites[0],))
             extract_bench = Remote('tar -xjf NPB3.3-MPI.tar.bz2', [sites[0]]).run()
             logger.info("waiting job start %s" % (jobs[0],))
             wait_oar_job_start(*jobs[0], prediction_callback = pred_cb)
             logger.info("getting nodes of %s" % (jobs[0],))
             nodes = get_oar_job_nodes(*jobs[0])
             logger.info("configure bench compilation")
             conf_bench = Remote('echo "%s" > ~/NPB3.3-MPI/config/suite.def' % bench_list, nodes).run()
             logger.info("compil bench")
             compilation = Remote('cd NPB3.3-MPI && make clean && make suite', nodes).run()
             logger.info("compil finished")
         except:
             logger.error("unable to compile bench")
             return False
         finally:
             oardel(jobs)
     # Copying binaries to all other frontends
     frontends = sites[1:]
     rsync = Remote('rsync -avuP ~/NPB3.3-MPI/ {{frontends}}:NPB3.3-MPI', 
                    [get_host_site(nodes[0])] * len(frontends)) 
     rsync.run()
     return compilation.ok and rsync.ok
Author: lpouillo, Project: execo-g5k-tools, Lines of code: 43, Source file: mpi_bench.py


Example 20: start_shell

    def start_shell(self, language="IPYTHON", node=None, exec_params=None):
        """Open a Spark shell.

        Args:
          language (str, optional):
            The language to be used in the shell.
          node (Host, optional):
            The host were the shell is to be started. If not provided,
            self.master is chosen.
          exec_params (str, optional):
            The list of parameters used in job execution (e.g., driver-memory).
        """

        if not node:
            node = self.master

        # Configure execution options
        if exec_params is None:
            exec_params = []

        if self.mode == YARN_MODE:
            exec_params.append("--master yarn-client")

        params_str = " " + " ".join(exec_params)

        # Execute shell
        if language.upper() == "IPYTHON":
            call("ssh -t " + node.address + " " +
                 "IPYTHON=1 " + self.bin_dir + "/pyspark" + params_str,
                 shell=True)
        elif language.upper() == "PYTHON":
            call("ssh -t " + node.address + " " +
                 self.bin_dir + "/pyspark" + params_str,
                 shell=True)
        elif language.upper() == "SCALA":
            call("ssh -t " + node.address + " " +
                 self.bin_dir + "/spark-shell" + params_str,
                 shell=True)
        else:
            logger.error("Unknown language " + language)
            return
Author: djamelinfo, Project: hadoop_g5k, Lines of code: 41, Source file: spark.py



Note: The execo_engine.logger.error examples in this article were collected from GitHub, MSDocs, and other source-code and documentation platforms. The snippets come from open-source projects contributed by their authors, and copyright remains with the original authors; consult each project's license before redistributing or using the code. Do not reproduce this article without permission.

