
Python log.warning Function Code Examples


This article collects typical usage examples of the Python twitter.common.log.warning function. If you are wondering how to use the warning function, how it is called in practice, or what real-world examples look like, the curated code samples below should help.



The sections below present 20 code examples of the warning function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
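Before the examples, here is a minimal sketch of the call pattern they all share. It assumes the twitter.common package is installed; log.init() is the assumed stand-alone initialization entry point (applications built on twitter.common.app normally have logging initialized for them), so treat the setup line as illustrative rather than authoritative:

from twitter.common import log

log.init('example_app')  # assumed initialization call for stand-alone scripts

try:
  with open('/tmp/might-not-exist.txt') as fp:
    fp.read()
except IOError as e:
  # As in the examples below, log.warning takes a single pre-formatted message string.
  log.warning('Could not read file: %s' % e)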

Example 1: allocate_port

  def allocate_port(self, name, port=None):
    if port is not None:
      if name in self._ports and self._ports[name] != port:
        raise EphemeralPortAllocator.PortConflict(
            'Port binding %s=>%s conflicts with current binding %s=>%s' % (
                name, port, name, self._ports[name]))
      else:
        self._ports[name] = port
        return port

    if name in self._ports:
      return self._ports[name]

    while True:
      rand_port = random.randint(*EphemeralPortAllocator.SOCKET_RANGE)
      # if this ever needs to be performant, make a peer set.
      if rand_port in self._ports.values():
        continue
      try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('localhost', rand_port))
        s.close()
        self._ports[name] = rand_port
        break
      except OSError as e:
        if e.errno == errno.EADDRINUSE:
          log.warning('Could not bind port: %s' % e)
          time.sleep(0.2)
          continue
        else:
          raise
    return self._ports[name]
Developer ID: adamsxu, Project: commons, Lines of code: 32, Source file: port_allocator.py
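As the code above shows, allocate_port caches bindings by name, so repeated calls with the same name return the same port. A hypothetical caller might look like the sketch below (the no-argument constructor is an assumption; it is not shown in the snippet above):

allocator = EphemeralPortAllocator()  # assumed constructor
http_port = allocator.allocate_port('http')
assert allocator.allocate_port('http') == http_port  # same name returns the cached port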


Example 2: _construct_scheduler

  def _construct_scheduler(self):
    """
      Populates:
        self._scheduler_client
        self._client
    """
    self._scheduler_client = SchedulerClient.get(self.cluster, verbose=self.verbose)
    assert self._scheduler_client, "Could not find scheduler (cluster = %s)" % self.cluster.name
    start = time.time()
    while (time.time() - start) < self.CONNECT_MAXIMUM_WAIT.as_(Time.SECONDS):
      try:
        # this can wind up generating any kind of error, because it turns into
        # a call to a dynamically set authentication module.
        self._client = self._scheduler_client.get_thrift_client()
        break
      except SchedulerClient.CouldNotConnect as e:
        log.warning('Could not connect to scheduler: %s' % e)
      except Exception as e:
        # turn any auth module exception into an auth error.
        log.debug('Warning: got an unknown exception during authentication:')
        log.debug(traceback.format_exc())
        raise self.AuthenticationError('Error connecting to scheduler: %s' % e)
    if not self._client:
      raise self.TimeoutError('Timed out trying to connect to scheduler at %s' % self.cluster.name)

    server_version = self._client.getVersion().result.getVersionResult
    if server_version != CURRENT_API_VERSION:
      raise self.APIVersionError("Client Version: %s, Server Version: %s" %
                                 (CURRENT_API_VERSION, server_version))
Developer ID: wickman, Project: incubator-aurora, Lines of code: 29, Source file: scheduler_client.py


Example 3: _drain_hosts

  def _drain_hosts(self, drainable_hosts):
    """"Drains tasks from the specified hosts.

    This will move active tasks on these hosts to the DRAINING state, causing them to be
    rescheduled elsewhere.

    :param drainable_hosts: Hosts that are in maintenance mode and ready to be drained
    :type drainable_hosts: gen.apache.aurora.ttypes.Hosts
    :rtype: set of host names failed to drain
    """
    check_and_log_response(self._client.drain_hosts(drainable_hosts))
    drainable_hostnames = [hostname for hostname in drainable_hosts.hostNames]

    total_wait = self.STATUS_POLL_INTERVAL
    not_drained_hostnames = set(drainable_hostnames)
    while not self._wait_event.is_set() and not_drained_hostnames:
      self._wait_event.wait(self.STATUS_POLL_INTERVAL.as_(Time.SECONDS))

      not_drained_hostnames = self.check_if_drained(drainable_hostnames)

      total_wait += self.STATUS_POLL_INTERVAL
      if not_drained_hostnames and total_wait > self.MAX_STATUS_WAIT:
        log.warning('Failed to move all hosts into DRAINED within %s' % self.MAX_STATUS_WAIT)
        break

    return not_drained_hostnames
Developer ID: aalzabarah, Project: incubator-aurora, Lines of code: 26, Source file: host_maintenance.py


Example 4: sample

    def sample(self):
        """ Collate and aggregate ProcessSamples for process and children
        Returns None: result is stored in self.value
    """
        try:
            last_sample, last_stamp = self._sample, self._stamp
            if self._process is None:
                self._process = Process(self._pid)
            parent = self._process
            parent_sample = process_to_sample(parent)
            new_samples = dict((proc.pid, process_to_sample(proc)) for proc in parent.get_children(recursive=True))
            new_samples[self._pid] = parent_sample

        except PsutilError as e:
            log.warning("Error during process sampling: %s" % e)
            self._sample = ProcessSample.empty()
            self._rate = 0.0

        else:
            last_stamp = self._stamp
            self._stamp = time()
            # for most stats, calculate simple sum to aggregate
            self._sample = sum(new_samples.values(), ProcessSample.empty())
            # cpu consumption is more complicated
            # We require at least 2 generations of a process before we can calculate rate, so for all
            # current processes that were not running in the previous sample, compare to an empty sample
            if self._sampled_tree and last_stamp:
                new = new_samples.values()
                old = [self._sampled_tree.get(pid, ProcessSample.empty()) for pid in new_samples.keys()]
                new_user_sys = sum(map(attrgetter("user"), new)) + sum(map(attrgetter("system"), new))
                old_user_sys = sum(map(attrgetter("user"), old)) + sum(map(attrgetter("system"), old))
                self._rate = (new_user_sys - old_user_sys) / (self._stamp - last_stamp)
                log.debug("Calculated rate for pid=%s and children: %s" % (self._process.pid, self._rate))
            self._sampled_tree = new_samples
Developer ID: Empia, Project: incubator-aurora, Lines of code: 34, Source file: process_collector_psutil.py


Example 5: _complete_maintenance

  def _complete_maintenance(self, drained_hosts):
    """End the maintenance status for a given set of hosts."""
    check_and_log_response(self._client.end_maintenance(drained_hosts))
    resp = self._client.maintenance_status(drained_hosts)
    for host_status in resp.result.maintenanceStatusResult.statuses:
      if host_status.mode != MaintenanceMode.NONE:
        log.warning('%s is DRAINING or in DRAINED' % host_status.host)
Developer ID: Empia, Project: incubator-aurora, Lines of code: 7, Source file: mesos_maintenance.py


Example 6: _apply_states

    def _apply_states(self):
        """
      os.stat() the corresponding checkpoint stream of this task and determine if there are new ckpt
      records.  Attempt to read those records and update the high watermark for that stream.
      Returns True if new states were applied, False otherwise.
    """
        ckpt_offset = None
        try:
            ckpt_offset = os.stat(self._runner_ckpt).st_size

            updated = False
            if self._ckpt_head < ckpt_offset:
                with open(self._runner_ckpt, "r") as fp:
                    fp.seek(self._ckpt_head)
                    rr = ThriftRecordReader(fp, RunnerCkpt)
                    while True:
                        runner_update = rr.try_read()
                        if not runner_update:
                            break
                        try:
                            self._dispatcher.dispatch(self._runnerstate, runner_update)
                        except CheckpointDispatcher.InvalidSequenceNumber as e:
                            log.error("Checkpoint stream is corrupt: %s" % e)
                            break
                    new_ckpt_head = fp.tell()
                    updated = self._ckpt_head != new_ckpt_head
                    self._ckpt_head = new_ckpt_head
            return updated
        except OSError as e:
            if e.errno == errno.ENOENT:
                # The log doesn't yet exist, will retry later.
                log.warning("Could not read from checkpoint %s" % self._runner_ckpt)
                return False
            else:
                raise
Developer ID: rowoot, Project: aurora, Lines of code: 35, Source file: monitor.py


Example 7: disambiguate_args_or_die

  def disambiguate_args_or_die(cls, args, options, client_factory=AuroraClientAPI):
    """
    Returns a (AuroraClientAPI, AuroraJobKey, AuroraConfigFile:str) tuple
    if one can be found given the args, potentially querying the scheduler with the returned client.
    Calls die() with an appropriate error message otherwise.

    Arguments:
      args: args from app command invocation.
      options: options from app command invocation. must have env and cluster attributes.
      client_factory: a callable (cluster) -> AuroraClientAPI.
    """
    if not len(args) > 0:
      die('job path is required')
    try:
      job_key = AuroraJobKey.from_path(args[0])
      client = client_factory(job_key.cluster)
      config_file = args[1] if len(args) > 1 else None  # the config for hooks
      return client, job_key, config_file
    except AuroraJobKey.Error:
      log.warning("Failed to parse job path, falling back to compatibility mode")
      role = args[0] if len(args) > 0 else None
      name = args[1] if len(args) > 1 else None
      env = None
      config_file = None  # deprecated form does not support hooks functionality
      cluster = options.cluster
      if not cluster:
        die('cluster is required')
      client = client_factory(cluster)
      return client, cls._disambiguate_or_die(client, role, env, name), config_file
Developer ID: aalzabarah, Project: incubator-aurora, Lines of code: 29, Source file: disambiguator.py


Example 8: method_wrapper

    def method_wrapper(*args):
      with self._lock:
        start = time.time()
        # TODO(wfarner): The while loop causes failed unit tests to spin for the retry
        # period (currently 10 minutes).  Figure out a better approach.
        while not self._terminating.is_set() and (
            time.time() - start) < self.RPC_MAXIMUM_WAIT.as_(Time.SECONDS):

          # Only automatically append a SessionKey if this is not part of the read-only API.
          auth_args = () if hasattr(ReadOnlyScheduler.Iface, method_name) else (self.session_key(),)
          try:
            method = getattr(self.client(), method_name)
            if not callable(method):
              return method
            return method(*(args + auth_args))
          except (TTransport.TTransportException, self.TimeoutError) as e:
            if not self._terminating:
              log.warning('Connection error with scheduler: %s, reconnecting...' % e)
              self.invalidate()
              self._terminating.wait(self.RPC_RETRY_INTERVAL.as_(Time.SECONDS))
          except Exception as e:
            # Take any error that occurs during the RPC call, and transform it
            # into something clients can handle.
            if not self._terminating:
              raise self.ThriftInternalError("Error during thrift call %s to %s: %s" %
                                            (method_name, self.cluster.name, e))
        if not self._terminating:
          raise self.TimeoutError('Timed out attempting to issue %s to %s' % (
              method_name, self.cluster.name))
Developer ID: bhuvan, Project: incubator-aurora, Lines of code: 29, Source file: scheduler_client.py


Example 9: method_wrapper

    def method_wrapper(*args):
      with self._lock:
        start = time.time()
        while not self._terminating.is_set() and (
            time.time() - start) < self.RPC_MAXIMUM_WAIT.as_(Time.SECONDS):

          auth_args = () if method_name in self.UNAUTHENTICATED_RPCS else (self.session_key(),)
          try:
            method = getattr(self.client(), method_name)
            if not callable(method):
              return method
            return method(*(args + auth_args))
          except (TTransport.TTransportException, self.TimeoutError) as e:
            if not self._terminating:
              log.warning('Connection error with scheduler: %s, reconnecting...' % e)
              self.invalidate()
              self._terminating.wait(self.RPC_RETRY_INTERVAL.as_(Time.SECONDS))
          except Exception as e:
            # Take any error that occurs during the RPC call, and transform it
            # into something clients can handle.
            if not self._terminating:
              raise self.ThriftInternalError("Error during thrift call %s to %s: %s" %
                                            (method_name, self.cluster.name, e))
        if not self._terminating:
          raise self.TimeoutError('Timed out attempting to issue %s to %s' % (
              method_name, self.cluster.name))
Developer ID: kpfell, Project: incubator-aurora, Lines of code: 26, Source file: scheduler_client.py


Example 10: __call__

    def __call__(self, endpoint, use_post_method=False, expected_response=None, expected_response_code=None):
        """Returns a (boolean, string|None) tuple of (call success, failure reason)"""
        try:
            response, response_code = self.query(endpoint, "" if use_post_method else None)
            response = response.strip().lower()
            if expected_response and response != expected_response.lower():
                reason = 'Response differs from expected response (expected "%s", got "%s")'

                def shorten(string):
                    return (
                        string
                        if len(string) < self.FAILURE_REASON_LENGTH
                        else "%s..." % string[: self.FAILURE_REASON_LENGTH - 3]
                    )

                log.warning(reason % (expected_response, response))
                return (False, reason % (shorten(str(expected_response)), shorten(str(response))))
            elif expected_response_code and response_code != expected_response_code:
                reason = "Response code differs from expected response (expected %i, got %i)"
                log.warning(reason % (expected_response_code, response_code))
                return (False, reason % (expected_response_code, response_code))
            else:
                return (True, None)
        except self.QueryError as e:
            return (False, str(e))
Developer ID: rowoot, Project: aurora, Lines of code: 25, Source file: http_signaler.py


Example 11: acreate_completion

    def acreate_completion(result):
      try:
        # TODO(wickman) Kazoo has a bug:
        #    https://github.com/python-zk/kazoo/issues/106
        #    https://github.com/python-zk/kazoo/pull/107
        # Remove this once 1.3 is cut.
        path = self._zk.unchroot(result.get())
      except self.DISCONNECT_EXCEPTIONS:
        self._once(KazooState.CONNECTED, do_join)
        return
      except ke.KazooException as e:
        log.warning('Unexpected Kazoo result in join: (%s)%s' % (type(e), e))
        membership = Membership.error()
      else:
        created_id = self.znode_to_id(path)
        membership = Membership(created_id)
        with self._member_lock:
          result_future = self._members.get(membership, Future())
          result_future.set_result(blob)
          self._members[membership] = result_future
        if expire_callback:
          self._once(KazooState.CONNECTED, expire_notifier)
          do_exists(path)

      membership_capture.set(membership)
Developer ID: alfss, Project: commons, Lines of code: 25, Source file: kazoo_group.py


Example 12: method_wrapper

    def method_wrapper(*args):
      with self._lock:
        start = time.time()
        while not self._terminating.is_set() and (
            time.time() - start) < self.RPC_MAXIMUM_WAIT.as_(Time.SECONDS):

          try:
            method = getattr(self.client(), method_name)
            if not callable(method):
              return method

            resp = method(*args)
            if resp is not None and resp.responseCode == ResponseCode.ERROR_TRANSIENT:
              raise self.TransientError(", ".join(
                  [m.message for m in resp.details] if resp.details else []))
            return resp
          except TRequestsTransport.AuthError as e:
            log.error(self.scheduler_client().get_failed_auth_message())
            raise self.AuthError(e)
          except (TTransport.TTransportException, self.TimeoutError, self.TransientError) as e:
            if not self._terminating.is_set():
              log.warning('Connection error with scheduler: %s, reconnecting...' % e)
              self.invalidate()
              self._terminating.wait(self.RPC_RETRY_INTERVAL.as_(Time.SECONDS))
          except Exception as e:
            # Take any error that occurs during the RPC call, and transform it
            # into something clients can handle.
            if not self._terminating.is_set():
              raise self.ThriftInternalError("Error during thrift call %s to %s: %s" %
                                            (method_name, self.cluster.name, e))
        if not self._terminating.is_set():
          raise self.TimeoutError('Timed out attempting to issue %s to %s' % (
              method_name, self.cluster.name))
Developer ID: bmhatfield, Project: aurora, Lines of code: 33, Source file: scheduler_client.py


Example 13: method_wrapper

    def method_wrapper(*args):
      with self._lock:
        start = time.time()
        while not self._terminating.is_set() and (
            time.time() - start) < self.RPC_MAXIMUM_WAIT.as_(Time.SECONDS):

          # Only automatically append a SessionKey if this is not part of the read-only API.
          auth_args = () if hasattr(ReadOnlyScheduler.Iface, method_name) else (self.session_key(),)
          try:
            method = getattr(self.client(), method_name)
            if not callable(method):
              return method

            resp = method(*(args + auth_args))
            if resp is not None and resp.responseCode == ResponseCode.ERROR_TRANSIENT:
              raise self.TransientError(", ".join(
                  [m.message for m in resp.details] if resp.details else []))
            if resp.serverInfo.thriftAPIVersion != THRIFT_API_VERSION:
              raise self.APIVersionError("Client Version: %s, Server Version: %s" %
                  (THRIFT_API_VERSION, resp.serverInfo.thriftAPIVersion))
            return resp
          except (TTransport.TTransportException, self.TimeoutError, self.TransientError) as e:
            if not self._terminating.is_set():
              log.warning('Connection error with scheduler: %s, reconnecting...' % e)
              self.invalidate()
              self._terminating.wait(self.RPC_RETRY_INTERVAL.as_(Time.SECONDS))
          except Exception as e:
            # Take any error that occurs during the RPC call, and transform it
            # into something clients can handle.
            if not self._terminating.is_set():
              raise self.ThriftInternalError("Error during thrift call %s to %s: %s" %
                                            (method_name, self.cluster.name, e))
        if not self._terminating.is_set():
          raise self.TimeoutError('Timed out attempting to issue %s to %s' % (
              method_name, self.cluster.name))
Developer ID: KancerEzeroglu, Project: aurora, Lines of code: 35, Source file: scheduler_client.py


Example 14: get_completion

    def get_completion(result):
      try:
        children = result.get()
      except self.DISCONNECT_EXCEPTIONS:
        self._once(KazooState.CONNECTED, do_monitor)
        return
      except ke.NoNodeError:
        wait_exists()
        return
      except ke.KazooException as e:
        log.warning('Unexpected get_completion result: (%s)%s' % (type(e), e))
        return

      children = [child for child in children if self.znode_owned(child)]
      _, new = self._update_children(children)
      for child in new:
        def devnull(*args, **kw): pass
        self.info(child, callback=devnull)

      monitor_queue = self._monitor_queue[:]
      self._monitor_queue = []
      members = set(Membership(self.znode_to_id(child)) for child in children)
      for membership, capture in monitor_queue:
        if set(membership) != members:
          capture.set(members)
        else:
          self._monitor_queue.append((membership, capture))
Developer ID: CodeWarltz, Project: commons, Lines of code: 27, Source file: kazoo_group.py


Example 15: _drain_hosts

    def _drain_hosts(self, drainable_hosts):
        """"Drains tasks from the specified hosts.

    This will move active tasks on these hosts to the DRAINING state, causing them to be
    rescheduled elsewhere.

    :param drainable_hosts: Hosts that are in maintenance mode and ready to be drained
    :type drainable_hosts: gen.apache.aurora.ttypes.Hosts
    :rtype: set of host names failed to drain
    """
        check_and_log_response(self._client.drain_hosts(drainable_hosts))
        drainable_hostnames = [hostname for hostname in drainable_hosts.hostNames]

        total_wait = self.STATUS_POLL_INTERVAL
        not_drained_hostnames = set(drainable_hostnames)
        while not self._wait_event.is_set() and not_drained_hostnames:
            log.info("Waiting for hosts to be in DRAINED: %s" % not_drained_hostnames)
            self._wait_event.wait(self.STATUS_POLL_INTERVAL.as_(Time.SECONDS))

            statuses = self.check_status(list(not_drained_hostnames))
            not_drained_hostnames = set(h[0] for h in statuses if h[1] != "DRAINED")

            total_wait += self.STATUS_POLL_INTERVAL
            if not_drained_hostnames and total_wait > self.MAX_STATUS_WAIT:
                log.warning(
                    "Failed to move all hosts into DRAINED within %s:\n%s"
                    % (
                        self.MAX_STATUS_WAIT,
                        "\n".join("\tHost:%s\tStatus:%s" % h for h in sorted(statuses) if h[1] != "DRAINED"),
                    )
                )
                break

        return not_drained_hostnames
Developer ID: rosmo, Project: aurora, Lines of code: 34, Source file: host_maintenance.py


Example 16: setup_child_subreaping

def setup_child_subreaping():
  """
  This uses the prctl(2) syscall to set the `PR_SET_CHILD_SUBREAPER` flag. This
  means if any children processes need to be reparented, they will be reparented
  to this process.

  More documentation here: http://man7.org/linux/man-pages/man2/prctl.2.html
  and here: https://lwn.net/Articles/474787/

  Callers should reap terminal children to prevent zombies.
  """
  log.debug("Calling prctl(2) with PR_SET_CHILD_SUBREAPER")
  # This constant is taken from prctl.h
  PR_SET_CHILD_SUBREAPER = 36
  try:
    library_name = ctypes.util.find_library('c')
    if library_name is None:
      log.warning("libc is not found. Unable to call prctl!")
      log.warning("Children subreaping is disabled!")
      return
    libc = ctypes.CDLL(library_name, use_errno=True)
    # If we are on a system where prctl doesn't exist, this will throw an
    # attribute error.
    ret = libc.prctl(PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0)
    if ret != 0:
      errno = ctypes.get_errno()
      raise OSError(errno, os.strerror(errno))
  except Exception as e:
    log.error("Unable to call prctl %s" % e)
    log.error("Children subreaping is disabled!")
Developer ID: bmhatfield, Project: aurora, Lines of code: 30, Source file: process_util.py
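The docstring in Example 16 notes that callers should reap terminated children to avoid zombies once the process has become a subreaper. A minimal sketch of such a reaping loop is shown below; reap_children is a hypothetical helper, not part of the original process_util.py:

import errno
import os

from twitter.common import log

def reap_children():
  # Reap any terminated children without blocking; returns the (pid, status) pairs reaped.
  reaped = []
  while True:
    try:
      pid, status = os.waitpid(-1, os.WNOHANG)
    except OSError as e:
      if e.errno == errno.ECHILD:
        break  # no child processes remain
      raise
    if pid == 0:
      break  # children exist, but none have exited yet
    log.warning('Reaped orphaned child pid=%s with status %s' % (pid, status))
    reaped.append((pid, status))
  return reaped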


Example 17: on_finish

  def on_finish(service_instance):
    try:
      self._members[member_id] = ServiceInstance.unpack(service_instance)
    except Exception as e:
      log.warning('Failed to deserialize endpoint: %s' % e)
      return
    self._on_join(self._members[member_id])
Developer ID: BabyDuncan, Project: commons, Lines of code: 7, Source file: serverset.py


Example 18: _fast_forward_stream

  def _fast_forward_stream(self, process_name):
    log.debug('Fast forwarding %s stream to seq=%s' % (process_name,
      self._watermarks[process_name]))
    assert self._processes.get(process_name) is not None
    fp = self._processes[process_name]
    rr = ThriftRecordReader(fp, RunnerCkpt)
    current_watermark = -1
    records = 0
    while current_watermark < self._watermarks[process_name]:
      last_pos = fp.tell()
      record = rr.try_read()
      if record is None:
        break
      new_watermark = record.process_status.seq
      if new_watermark > self._watermarks[process_name]:
        log.debug('Over-seeked %s [watermark = %s, high watermark = %s], rewinding.' % (
          process_name, new_watermark, self._watermarks[process_name]))
        fp.seek(last_pos)
        break
      current_watermark = new_watermark
      records += 1

    if current_watermark < self._watermarks[process_name]:
      log.warning('Only able to fast forward to %s@seq=%s, high watermark is %s' % (
         process_name, current_watermark, self._watermarks[process_name]))

    if records:
      log.debug('Fast forwarded %s %s record(s) to seq=%s.' % (process_name, records,
        current_watermark))
Developer ID: sumanau7, Project: incubator-aurora, Lines of code: 29, Source file: muxer.py


Example 19: _drain_hosts

  def _drain_hosts(self, drainable_hosts, clock=time):
    """"Drains tasks from the specified hosts.

    This will move active tasks on these hosts to the DRAINING state, causing them to be
    rescheduled elsewhere.

    :param drainable_hosts: Hosts that are in maintenance mode and ready to be drained
    :type drainable_hosts: gen.apache.aurora.ttypes.Hosts
    :param clock: time module for testing
    :type clock: time
    """
    check_and_log_response(self._client.drain_hosts(drainable_hosts))
    not_ready_hostnames = [hostname for hostname in drainable_hosts.hostNames]
    while not_ready_hostnames:
      log.info("Sleeping for %s." % self.START_MAINTENANCE_DELAY)
      clock.sleep(self.START_MAINTENANCE_DELAY.as_(Time.SECONDS))
      resp = self._client.maintenance_status(Hosts(set(not_ready_hostnames)))
      if not resp.result.maintenanceStatusResult.statuses:
        not_ready_hostnames = None
      for host_status in resp.result.maintenanceStatusResult.statuses:
        if host_status.mode != MaintenanceMode.DRAINED:
          log.warning('%s is currently in status %s' %
              (host_status.host, MaintenanceMode._VALUES_TO_NAMES[host_status.mode]))
        else:
          not_ready_hostnames.remove(host_status.host)
Developer ID: josephglanville, Project: incubator-aurora, Lines of code: 25, Source file: host_maintenance.py


Example 20: run

  def run(self):
    """Thread entrypoint. Loop indefinitely, polling collectors at self._collection_interval and
    collating samples."""

    log.debug('Commencing resource monitoring for task "%s"' % self._task_id)
    next_process_collection = 0
    next_disk_collection = 0

    while not self._kill_signal.is_set():

      now = time.time()

      if now > next_process_collection:
        next_process_collection = now + self._process_collection_interval
        actives = set(self._get_active_processes())
        current = set(self._process_collectors)
        for process in current - actives:
          self._process_collectors.pop(process)
        for process in actives - current:
          self._process_collectors[process] = self._process_collector_factory(process.pid)
        for process, collector in self._process_collectors.items():
          collector.sample()

      if now > next_disk_collection:
        next_disk_collection = now + self._disk_collection_interval
        if not self._disk_collector:
          sandbox = self._task_monitor.get_sandbox()
          if sandbox:
            self._disk_collector = self._disk_collector_class(sandbox)
        if self._disk_collector:
          self._disk_collector.sample()
        else:
          log.debug('No sandbox detected yet for %s' % self._task_id)

      try:
        aggregated_procs = sum(map(attrgetter('procs'), self._process_collectors.values()))
        aggregated_sample = sum(map(attrgetter('value'), self._process_collectors.values()),
                                ProcessSample.empty())
        disk_value = self._disk_collector.value if self._disk_collector else 0
        self._history.add(now, self.ResourceResult(aggregated_procs, aggregated_sample, disk_value))
      except ValueError as err:
        log.warning("Error recording resource sample: %s" % err)

      # Sleep until any of the following conditions are met:
      # - it's time for the next disk collection
      # - it's time for the next process collection
      # - the result from the last disk collection is available via the DiskCollector
      # - the TaskResourceMonitor has been killed via self._kill_signal
      now = time.time()
      next_collection = min(next_process_collection - now, next_disk_collection - now)

      if self._disk_collector:
        waiter = EventMuxer(self._kill_signal, self._disk_collector.completed_event)
      else:
        waiter = self._kill_signal

      waiter.wait(timeout=max(0, next_collection))

    log.debug('Stopping resource monitoring for task "%s"' % self._task_id)
Developer ID: AltanAlpay, Project: aurora, Lines of code: 59, Source file: resource.py



Note: the twitter.common.log.warning examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.

