This article collects typical usage examples of the Python function backend.backendservice.backendservices. If you have been wondering what backendservices does, how to use it, and what real code that calls it looks like, the hand-picked examples below should help.
Twenty code examples of the backendservices function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
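Across the examples below the shape is always the same: import the class, construct it (usually with a UserData entity carrying AWS credentials), then call one of its task-management methods. The following is a minimal sketch of that shared pattern, inferred from the examples themselves rather than from any library documentation; user_data and job are placeholders for the objects the handlers pass around:

    from backend.backendservice import backendservices

    # Sketch only: user_data is the StochSS UserData entity seen in the
    # examples; whether the constructor requires it depends on the version.
    service = backendservices(user_data)

    if job.resource.lower() == 'local':
        service.stopTaskLocal([job.pid])        # local Celery task
    elif job.resource in backendservices.SUPPORTED_CLOUD_RESOURCES:
        service.stopTasks(job)                  # cloud (e.g. EC2) task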
Example 1: post

    def post(self):
        params = self.request.POST
        if 'delete' in params:
            # The jobs to delete are specified in the checkboxes
            jobs_to_delete = params.getall('select_job')
            service = backendservices(self.user_data)
            # Select the jobs to delete from the datastore
            result = {}
            for job_name in jobs_to_delete:
                try:
                    job = db.GqlQuery("SELECT * FROM StochKitJobWrapper WHERE user_id = :1 AND name = :2", self.user.user_id(), job_name).get()
                except Exception as e:
                    result = {'status': False, 'msg': "Could not retrieve the job " + job_name + " from the datastore."}
                job.delete()
            # Render the status page
            # AH: This is a hack to prevent the page from reloading before the datastore transactions
            # have taken place. I think it is only necessary for the SQLite backend stub.
            # TODO: We need a better way to check if the entities are gone from the datastore...
            time.sleep(0.5)
            context = self.getContext()
            self.render_response('status.html', **dict(result, **context))

Author: dnath, Project: stochss, Lines: 28, Source: status.py
Example 2: delete

    def delete(self, handler):
        self.stop(handler)
        service = backendservices(handler.user_data)
        # Delete the local output
        if self.zipFileName is not None and os.path.exists(self.zipFileName):
            os.remove(self.zipFileName)
        if self.preprocessedDir is not None and os.path.exists(str(self.preprocessedDir)):
            shutil.rmtree(str(self.preprocessedDir))
        if self.vtkFileName is not None and os.path.exists(self.vtkFileName):
            os.remove(self.vtkFileName)
        if self.outData is not None and os.path.exists(self.outData):
            shutil.rmtree(self.outData)
        # Delete on the cloud
        if self.resource is not None and self.resource in backendservices.SUPPORTED_CLOUD_RESOURCES:
            try:
                service.deleteTasks(self)
            except Exception as e:
                logging.error("Failed to delete cloud resources of job {0}".format(self.key().id()))
                logging.error(e)
        super(SpatialJobWrapper, self).delete()

Author: dnath, Project: stochss, Lines: 26, Source: spatial_job.py
Example 3: fetchCloudOutput

    def fetchCloudOutput(self, stochkit_job_wrapper):
        '''
        Fetch the output of a finished cloud job and unpack it to its local output location.
        '''
        try:
            result = {}
            stochkit_job = stochkit_job_wrapper.stochkit_job
            # Grab the remote files
            service = backendservices()
            service.fetchOutput(stochkit_job.pid, stochkit_job.output_url)
            # Unpack it to its local output location
            os.system('tar -xf ' + stochkit_job.uuid + '.tar')
            stochkit_job.output_location = os.path.abspath(os.path.dirname(__file__)) + '/../output/' + stochkit_job.uuid
            stochkit_job.output_location = os.path.abspath(stochkit_job.output_location)
            # Clean up
            os.remove(stochkit_job.uuid + '.tar')
            # Save the updated status
            stochkit_job_wrapper.put()
            result['status'] = True
            result['msg'] = "Successfully fetched the remote output files."
        except Exception as e:
            logging.info('************************************* {0}'.format(e))
            result['status'] = False
            result['msg'] = "Failed to fetch the remote files."

Author: JWW81, Project: stochss, Lines: 28, Source: status.py
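Several examples in this article shell out to tar via os.system to unpack fetched archives. Below is a minimal sketch of the same step using the standard library's tarfile module instead; unpack_job_archive is a hypothetical helper name, and the uuid/output-root layout follows the example above:

    import os
    import tarfile

    def unpack_job_archive(uuid, output_root):
        # Hypothetical helper: extract '<uuid>.tar' into output_root/<uuid>,
        # mirroring the os.system('tar -xf ...') calls in these examples.
        archive = uuid + '.tar'
        destination = os.path.join(output_root, uuid)
        with tarfile.open(archive) as tar:
            tar.extractall(destination)
        os.remove(archive)  # clean up, as the original code does
        return os.path.abspath(destination)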
Example 4: delete

    def delete(self, handle):
        self.stop(handle.user_data)
        if self.resource.lower() == 'molns':
            # TODO: call 'molns exec cleanup'
            pass
        else:
            # Call the backend to kill and delete the job and all associated files.
            service = backendservices(handle.user_data)
            if self.zipFileName is not None and os.path.exists(self.zipFileName):
                os.remove(self.zipFileName)
            # Delete the output results of the execution locally, if they exist.
            if self.outData is not None and os.path.exists(str(self.outData)):
                shutil.rmtree(self.outData)
            if self.resource is not None and self.resource in backendservices.SUPPORTED_CLOUD_RESOURCES:
                try:
                    service.deleteTasks(self)
                except Exception as e:
                    logging.error("Failed to delete cloud resources of job {0}".format(self.key().id()))
                    logging.error(e)
        super(StochKitJobWrapper, self).delete()

Author: StochSS, Project: stochss, Lines: 25, Source: stochkit_job.py
Example 5: delete

    def delete(self, handler):
        job = self
        stochkit_job = job.stochkit_job
        # TODO: Call the backend to kill and delete the job and all associated files.
        service = backendservices()
        if job.stochkit_job.zipFileName:
            if os.path.exists(job.stochkit_job.zipFileName):
                os.remove(job.stochkit_job.zipFileName)
        if stochkit_job.resource == "Local":
            service.deleteTaskLocal([stochkit_job.pid])
            time.sleep(0.25)
            status = service.checkTaskStatusLocal([stochkit_job.pid]).values()[0]
        else:
            db_credentials = handler.user_data.getCredentials()
            os.environ["AWS_ACCESS_KEY_ID"] = db_credentials["EC2_ACCESS_KEY"]
            os.environ["AWS_SECRET_ACCESS_KEY"] = db_credentials["EC2_SECRET_KEY"]
            service.deleteTasks([(stochkit_job.celery_pid, stochkit_job.pid)])
        if os.path.exists(stochkit_job.output_location):
            shutil.rmtree(stochkit_job.output_location)
        super(StochKitJobWrapper, self).delete()

Author: JWW81, Project: stochss, Lines: 27, Source: simulation.py
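Examples 5, 13, and 19 hand AWS credentials to the backend by exporting them as environment variables right before the call. A small sketch of the same idea as a context manager, so the keys do not linger in the environment after the call; aws_credentials is a hypothetical helper, and the key names are the ones used in these examples:

    import os
    from contextlib import contextmanager

    @contextmanager
    def aws_credentials(db_credentials):
        # Hypothetical helper: expose the stored EC2 keys via the
        # environment, as the examples above do inline.
        os.environ["AWS_ACCESS_KEY_ID"] = db_credentials['EC2_ACCESS_KEY']
        os.environ["AWS_SECRET_ACCESS_KEY"] = db_credentials['EC2_SECRET_KEY']
        try:
            yield
        finally:
            os.environ.pop("AWS_ACCESS_KEY_ID", None)
            os.environ.pop("AWS_SECRET_ACCESS_KEY", None)

    # Usage:
    # with aws_credentials(handler.user_data.getCredentials()):
    #     service.deleteTasks([(stochkit_job.celery_pid, stochkit_job.pid)])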
Example 6: addSensitivityJob

    def addSensitivityJob(self, job, globalOp=False):
        if job.status == "Finished":
            # Shared fields
            jsonJob = {"version": self.version,
                       "userId": job.userId,
                       "jobName": job.jobName,
                       "startTime": job.startTime,
                       "indata": json.loads(job.indata),
                       "status": job.status}
            if job.resource == "local":
                outputLocation = self.addFolder('sensitivityJobs/data/{0}'.format(job.jobName), job.outData)
                jsonJob["outData"] = outputLocation
            elif job.resource == "cloud":
                jsonJob["outputURL"] = job.outputURL
                # Only grab S3 data if the user wants us to
                if (job.jobName in self.sensitivityJobsToDownload) or globalOp:
                    if job.outData is None or (job.outData is not None and not os.path.exists(job.outData)):
                        # Grab the output from S3 if we need to
                        service = backendservices()
                        service.fetchOutput(job.cloudDatabaseID, job.outputURL)
                        # Unpack it to its local output location
                        os.system('tar -xf ' + job.cloudDatabaseID + '.tar')
                        job.outData = os.path.dirname(os.path.abspath(__file__)) + '/../output/' + job.cloudDatabaseID
                        job.outData = os.path.abspath(job.outData)
                        # Update the DB entry
                        job.put()
                        # Clean up
                        os.remove(job.cloudDatabaseID + '.tar')
                    outputLocation = self.addFolder('sensitivityJobs/data/{0}'.format(job.jobName), job.outData)
                    jsonJob["outData"] = outputLocation
            self.addBytes('sensitivityJobs/{0}.json'.format(job.jobName), json.dumps(jsonJob, sort_keys=True, indent=4, separators=(', ', ': ')))

Author: JWW81, Project: stochss, Lines: 31, Source: exportimport.py
Example 7: get

    def get(self):
        context = {
            'isAdminUser': self.user.is_admin_user()
        }
        # We can only pull results from S3 if we have valid AWS credentials
        if self.user_data.valid_credentials:
            credentials = self.user_data.getCredentials()
            # Get all the cloud jobs
            stochkit_jobs = db.GqlQuery("SELECT * FROM StochKitJobWrapper WHERE user_id = :1", self.user.user_id()).fetch(100000)
            stochkit_jobs = [job for job in stochkit_jobs if job.stochkit_job.resource == "Cloud" and job.stochkit_job.status == "Finished"]
            # Create the dictionary to pass to the backend to check for sizes
            output_results_to_check = {}
            for cloud_job in stochkit_jobs:
                s3_url_segments = cloud_job.stochkit_job.output_url.split('/')
                # S3 URLs are in the form https://s3.amazonaws.com/bucket_name/key/name
                bucket_name = s3_url_segments[3]
                # key_name is the concatenation of all segments after the bucket_name
                key_name = '/'.join(s3_url_segments[4:])
                if bucket_name in output_results_to_check.keys():
                    output_results_to_check[bucket_name] += [(key_name, cloud_job.name)]
                else:
                    output_results_to_check[bucket_name] = [(key_name, cloud_job.name)]
            # Sensitivity jobs
            sensi_jobs = db.GqlQuery("SELECT * FROM SensitivityJobWrapper WHERE userId = :1", self.user.user_id())
            sensi_jobs = [job for job in sensi_jobs if job.resource == "cloud" and job.status == "Finished"]
            for cloud_job in sensi_jobs:
                s3_url_segments = cloud_job.outputURL.split('/')
                # S3 URLs are in the form https://s3.amazonaws.com/bucket_name/key/name
                bucket_name = s3_url_segments[3]
                # key_name is the concatenation of all segments after the bucket_name
                key_name = '/'.join(s3_url_segments[4:])
                if bucket_name in output_results_to_check.keys():
                    output_results_to_check[bucket_name] += [(key_name, cloud_job.jobName)]
                else:
                    output_results_to_check[bucket_name] = [(key_name, cloud_job.jobName)]
            # Get all the job sizes from the backend
            service = backendservices()
            job_sizes = service.getSizeOfOutputResults(credentials['EC2_ACCESS_KEY'], credentials['EC2_SECRET_KEY'], output_results_to_check)
            # Add all of the relevant jobs to the context so they will be rendered on the page
            context["stochkit_jobs"] = []
            context["sensitivity_jobs"] = []
            for cloud_job in stochkit_jobs:
                job_name = cloud_job.name
                if job_name in job_sizes.keys():
                    # These are the relevant jobs
                    context["stochkit_jobs"].append({
                        'name': job_name,
                        'exec_type': cloud_job.stochkit_job.exec_type,
                        'size': '{0} KB'.format(round(float(job_sizes[job_name]) / 1024, 1))
                    })
            for cloud_job in sensi_jobs:
                job_name = cloud_job.jobName
                if job_name in job_sizes.keys():
                    context["sensitivity_jobs"].append({
                        'name': job_name,
                        'exec_type': 'sensitivity_jobs',
                        'size': '{0} KB'.format(round(float(job_sizes[job_name]) / 1024, 1))
                    })
        return self.render_response('exportimport.html', **context)

Author: JWW81, Project: stochss, Lines: 59, Source: exportimport.py
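The index-based URL splitting above (segments [3] and [4:]) silently assumes the path-style form https://s3.amazonaws.com/bucket_name/key/name. The following sketch extracts the same pieces with the standard library's URL parser, which makes that assumption explicit; split_s3_url is a hypothetical helper name, and the import is the Python 2 spelling used by this codebase:

    from urlparse import urlparse  # Python 2; in Python 3 use urllib.parse

    def split_s3_url(output_url):
        # Assumes a path-style S3 URL: https://s3.amazonaws.com/bucket_name/key/name
        parsed = urlparse(output_url)
        path_segments = parsed.path.lstrip('/').split('/')
        bucket_name = path_segments[0]
        key_name = '/'.join(path_segments[1:])
        return bucket_name, key_name

    # Usage: bucket, key = split_s3_url(cloud_job.stochkit_job.output_url)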
Example 8: addStochKitJob

    def addStochKitJob(self, job, globalOp=False):
        stochkit_job = job.stochkit_job
        # Only export finished jobs
        if stochkit_job.status == "Finished":
            # These are fields shared among all jobs
            jsonJob = {"version": self.version,
                       "name": job.name,
                       "user_id": job.user_id,
                       "stdout": job.stdout,
                       "stderr": job.stderr,
                       # These are things contained in the stochkit_job object
                       "type": stochkit_job.type,
                       "status": stochkit_job.status,
                       "modelName": job.modelName,
                       "final_time": stochkit_job.final_time,
                       "increment": stochkit_job.increment,
                       "units": job.stochkit_job.units,
                       "realizations": stochkit_job.realizations,
                       "exec_type": stochkit_job.exec_type,
                       "store_only_mean": stochkit_job.store_only_mean,
                       "label_column_names": stochkit_job.label_column_names,
                       "create_histogram_data": stochkit_job.create_histogram_data,
                       "epsilon": stochkit_job.epsilon,
                       "threshold": stochkit_job.threshold,
                       "pid": stochkit_job.pid,
                       "result": stochkit_job.result}
            # For cloud jobs, we need to include the output_url and possibly grab the results from S3
            if stochkit_job.resource == 'Cloud':
                jsonJob["output_url"] = job.stochkit_job.output_url
                # Only grab S3 data if the user wants us to
                if (job.name in self.stochKitJobsToDownload) or globalOp:
                    if stochkit_job.output_location is None or (stochkit_job.output_location is not None and not os.path.exists(stochkit_job.output_location)):
                        # Grab the output from S3 if we need to
                        service = backendservices()
                        service.fetchOutput(stochkit_job.pid, stochkit_job.output_url)
                        # Unpack it to its local output location
                        os.system('tar -xf ' + stochkit_job.uuid + '.tar')
                        stochkit_job.output_location = os.path.dirname(os.path.abspath(__file__)) + '/../output/' + stochkit_job.uuid
                        stochkit_job.output_location = os.path.abspath(stochkit_job.output_location)
                        # Update the DB entry
                        job.put()
                        # Clean up
                        os.remove(stochkit_job.uuid + '.tar')
                    # Add its data to the zip archive
                    outputLocation = self.addFolder('stochkitJobs/data/{0}'.format(job.name), stochkit_job.output_location)
                    jsonJob["output_location"] = outputLocation
            # For local jobs, we need to include the output location in the zip archive
            elif stochkit_job.resource == 'Local':
                outputLocation = self.addFolder('stochkitJobs/data/{0}'.format(job.name), stochkit_job.output_location)
                jsonJob["stdout"] = "{0}/stdout".format(outputLocation)
                jsonJob["stderr"] = "{0}/stderr".format(outputLocation)
                jsonJob["output_location"] = outputLocation
            # Also be sure to include any extra attributes of the job
            if job.attributes:
                jsonJob.update(job.attributes)
            # Add the JSON to the zip archive
            self.addBytes('stochkitJobs/{0}.json'.format(job.name), json.dumps(jsonJob, sort_keys=True, indent=4, separators=(', ', ': ')))

Author: JohnAbel, Project: stochss, Lines: 59, Source: exportimport.py
Example 9: stop
    def stop(self, user_data):
        # TODO: Call the backend to kill and delete the job and all associated files.
        service = backendservices(user_data)
        if self.resource.lower() == 'local':
            service.stopTaskLocal([self.pid])
        else:
            service.stopTasks(self)
Author: ahellander, Project: stochss, Lines: 8, Source: stochkit_job.py
Example 10: post

    def post(self):
        self.response.headers['Content-Type'] = 'application/json'
        req_type = self.request.get('req_type')
        job_type = self.request.get('job_type')
        id = self.request.get('id')
        instance_type = self.request.get('instance_type')
        if req_type == 'analyze':
            logging.info('Analyzing the cost...')
            logging.info('rerun cost analysis in ' + instance_type)
            credentials = self.user_data.getCredentials()
            access_key = credentials['EC2_ACCESS_KEY']
            secret_key = credentials['EC2_SECRET_KEY']
            backend_services = backendservice.backendservices(self.user_data)
            if not self.user_data.valid_credentials or not backend_services.isOneOrMoreComputeNodesRunning(instance_type):
                logging.info('You must have at least one active ' + instance_type + ' compute node to run in the cloud.')
                self.response.write(json.dumps({
                    'status': False,
                    'msg': 'You must have at least one active ' + instance_type + ' compute node to run in the cloud.'
                }))
                return
            result = {}
            try:
                uuid, _ = self.get_uuid_name(id, job_type)
                logging.info('start to rerun the job {0} for cost analysis'.format(str(uuid)))
                # Set up CloudTracker with the user credentials and the specified UUID to rerun the job
                ct = CloudTracker(access_key, secret_key, str(uuid), self.user_data.getBucketName())
                has_prov = not ct.if_tracking()
                # If there is no provenance data for this job, report an error to the user
                if not has_prov:
                    result = {'status': "fail", 'msg': "The job with this ID does not exist or cannot be reproduced."}
                    self.response.write(json.dumps(result))
                    return
                params = ct.get_input()
                params['cost_analysis_uuid'] = uuid
                cloud_result = backend_services.submit_cloud_task(params, agent_type=AgentTypes.EC2, instance_type=instance_type, cost_replay=True)
                if not cloud_result["success"]:
                    e = cloud_result["exception"]
                    result = {
                        'status': False,
                        'msg': 'Cloud execution failed: ' + str(e)
                    }
                    return result
                result = {'status': True, 'msg': 'Cost analysis submitted successfully.'}
            except Exception as e:
                result = {'status': False, 'msg': 'Cloud execution failed: ' + str(e)}

Author: StochSS, Project: stochss, Lines: 58, Source: cost_analysis.py
Example 11: runCloud

    def runCloud(self, data):
        self.user_data.set_selected(1)
        service = backendservices(self.user_data)
        if not service.isOneOrMoreComputeNodesRunning():
            raise Exception('No cloud computing resources found. (Have they been started?)')
        # If the seed is negative, choose a seed >= 0 randomly
        if int(data['seed']) < 0:
            random.seed()
            data['seed'] = random.randint(0, 2147483647)
        pymodel = self.construct_pyurdme_model(data)
        cloud_params = {
            "job_type": "spatial",
            "simulation_algorithm": data['algorithm'],
            "simulation_realizations": data['realizations'],
            "simulation_seed": data['seed'],
            # "bucketname": self.user_data.getBucketName(),  # implies EC2; should live in backendservices
            "paramstring": '',
        }
        logging.debug('cloud_params = {}'.format(pprint.pformat(cloud_params)))
        cloud_params['document'] = pickle.dumps(pymodel)
        # Send the task to the backend
        cloud_result = service.submit_cloud_task(params=cloud_params)
        if not cloud_result["success"]:
            e = cloud_result["exception"]
            raise Exception("Cloud execution failed: {0}".format(e))
        celery_task_id = cloud_result["celery_pid"]
        taskid = cloud_result["db_id"]
        job = SpatialJobWrapper()
        job.type = 'PyURDME Ensemble'
        job.user_id = self.user.user_id()
        job.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
        job.name = data["jobName"]
        job.indata = json.dumps(data)
        job.outData = None  # This is where the data lives locally; output fetched from the cloud is put here
        job.modelName = pymodel.name
        job.resource = cloud_result['resource']
        job.cloudDatabaseID = taskid
        job.celeryPID = celery_task_id
        job.status = "Running"
        job.output_stored = "True"
        job.put()
        return job

Author: StochSS, Project: stochss, Lines: 54, Source: spatial.py
Example 12: __init__

    def __init__(self, request, response):
        self.auth = auth.get_auth()
        # If not logged in, the dispatch() call will redirect to /login if needed
        if self.logged_in():
            # Make sure a handler has a reference to the current user
            user_dict = self.auth.get_user_by_session()
            self.user = self.auth.store.user_model.get_by_id(user_dict['user_id'])
            # Most pages will need the UserData, so for convenience we add it here.
            self.user_data = db.GqlQuery("SELECT * FROM UserData WHERE user_id = :1", self.user.user_id()).get()
            # If the user_data does not exist in the datastore, we instantiate it here
            if self.user_data is None:
                user_data = UserData()
                user_data.user_id = self.user.user_id()
                # Get optional app-instance configurations and add those to user_data
                credentials = {'EC2_SECRET_KEY': "",
                               'EC2_ACCESS_KEY': ""}
                try:
                    env_variables = app.config.get('env_variables')
                    user_data.env_variables = json.dumps(env_variables)
                    if 'AWS_ACCESS_KEY' in env_variables:
                        credentials['EC2_ACCESS_KEY'] = env_variables['AWS_ACCESS_KEY']
                    if 'AWS_SECRET_KEY' in env_variables:
                        credentials['EC2_SECRET_KEY'] = env_variables['AWS_SECRET_KEY']
                except:
                    raise
                user_data.setCredentials(credentials)
                # Check if the credentials are valid
                service = backendservices(user_data)
                params = {}
                params['credentials'] = credentials
                params["infrastructure"] = "ec2"
                if service.validateCredentials(params):
                    user_data.valid_credentials = True
                else:
                    user_data.valid_credentials = False
                # Create a unique bucket name for the user
                import uuid
                user_data.setBucketName('stochss-output-' + str(uuid.uuid4()))
                user_data.put()
                self.user_data = user_data
        webapp2.RequestHandler.__init__(self, request, response)

Author: dnath, Project: stochss, Lines: 53, Source: stochssapp.py
Example 13: post

    def post(self):
        params = self.request.POST
        if 'delete' in params:
            # The jobs to delete are specified in the checkboxes
            jobs_to_delete = params.getall('select_job')
            service = backendservices()
            # Select the jobs to delete from the datastore
            result = {}
            for job_name in jobs_to_delete:
                try:
                    job = db.GqlQuery("SELECT * FROM StochKitJobWrapper WHERE user_id = :1 AND name = :2", self.user.user_id(), job_name).get()
                    stochkit_job = job.stochkit_job
                except Exception as e:
                    result = {'status': False, 'msg': "Could not retrieve the job " + job_name + " from the datastore."}
                # TODO: Call the backend to kill and delete the job and all associated files.
                try:
                    if stochkit_job.resource == 'Local':
                        service.deleteTaskLocal([stochkit_job.pid])
                        time.sleep(0.25)
                        status = service.checkTaskStatusLocal([stochkit_job.pid]).values()[0]
                        if status:
                            raise Exception("")
                    else:
                        db_credentials = self.user_data.getCredentials()
                        os.environ["AWS_ACCESS_KEY_ID"] = db_credentials['EC2_ACCESS_KEY']
                        os.environ["AWS_SECRET_ACCESS_KEY"] = db_credentials['EC2_SECRET_KEY']
                        service.deleteTasks([(stochkit_job.celery_pid, stochkit_job.pid)])
                    isdeleted_backend = True
                except Exception as e:
                    isdeleted_backend = False
                    result['status'] = False
                    result['msg'] = "Failed to delete task with PID " + str(stochkit_job.celery_pid) + str(e)
                if isdeleted_backend:
                    # Delete all the local files and delete the job from the datastore
                    try:
                        # We remove the local entry of the job output directory
                        if os.path.exists(stochkit_job.output_location):
                            shutil.rmtree(stochkit_job.output_location)
                        db.delete(job)
                    except Exception as e:
                        result = {'status': False, 'msg': "Failed to delete job " + job_name + str(e)}

Author: JWW81, Project: stochss, Lines: 51, Source: status.py
Example 14: stop

    def stop(self, user_data):
        # Call the backend to kill the job and all associated tasks.
        if self.resource is None:
            return  # avoid an error on "NoneType.lower()"
        if self.resource.lower() == 'molns':
            # TODO: call 'molns exec cleanup'
            return
        service = backendservices(user_data)
        if self.resource.lower() == 'local':
            service.stopTaskLocal([self.pid])
        elif self.resource.lower() == 'qsub':
            return  # can't stop batch-processing tasks (at least not easily)
        else:
            service.stopTasks(self)

Author: StochSS, Project: stochss, Lines: 14, Source: stochkit_job.py
Example 15: get

    def get(self):
        """ """
        env_variables = self.user_data.env_variables
        if env_variables is None:
            context = {}
        else:
            context = json.loads(env_variables)
        logging.info(context)
        # Check if there is an internet connection available
        if internet_on():
            # Check if updates are available. Assume a divergent branch can be updated. This is
            # actually false, but we'll go with it. We need to change this so the user can make
            # local changes...
            h = subprocess.Popen("git remote update".split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = h.communicate()
            h = subprocess.Popen("git status -uno".split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = h.communicate()
            update_available = re.search("behind", stdout)
            if update_available:
                service = backendservices()
                all_stochkit_jobs = db.GqlQuery(
                    "SELECT * FROM StochKitJobWrapper WHERE user_id = :1", self.user.email_address
                )
                # Check to make sure no jobs are running
                context["nojobs"] = True
                if all_stochkit_jobs is not None:
                    for job in all_stochkit_jobs.run():
                        stochkit_job = job.stochkit_job
                        if not stochkit_job.status == "Finished":
                            res = service.checkTaskStatusLocal([stochkit_job.pid])
                            if res[stochkit_job.pid]:
                                context["nojobs"] = False
                context["update"] = True
            else:
                context["update"] = False
            context["nointernet"] = False
        else:
            context["nointernet"] = True
        self.render_response("updates.html", **context)

Author: bbbales2, Project: stochss, Lines: 47, Source: updates.py
Example 16: isOneOrMoreComputeNodesRunning

    def isOneOrMoreComputeNodesRunning(self, credentials):
        """
        Checks for the existence of running compute nodes. Only one running compute
        node is needed to be able to run a job in the cloud.
        """
        try:
            service = backendservices()
            params = {"infrastructure": "ec2", "credentials": credentials}
            all_vms = service.describeMachines(params)
            if all_vms is None:
                return False
            # Just need one running VM
            for vm in all_vms:
                if vm is not None and vm["state"] == "running":
                    return True
            return False
        except:
            return False

Author: JWW81, Project: stochss, Lines: 18, Source: simulation.py
Example 17: delete
    def delete(self, handler):
        self.stop(handler)
        if self.outData is not None and os.path.exists(self.outData):
            shutil.rmtree(self.outData)
        if self.zipFileName is not None and os.path.exists(self.zipFileName):
            os.remove(self.zipFileName)
        if self.resource is not None and self.resource in backendservices.SUPPORTED_CLOUD_RESOURCES:
            try:
                service = backendservices(handler.user_data)
                service.deleteTasks(self)
            except Exception as e:
                logging.error("Failed to delete cloud resources of job {0}".format(self.key().id()))
                logging.error(e)
        super(SensitivityJobWrapper, self).delete()
Author: StochSS, Project: stochss, Lines: 18, Source: sensitivity_job.py
Example 18: stop

    def stop(self, handler):
        if self.status == "Running" or self.status == "Pending":
            service = backendservices(handler.user_data)
            if self.resource is not None and self.resource.lower() == "local":
                service.stopTaskLocal([int(self.pid)])
            elif self.resource in backendservices.SUPPORTED_CLOUD_RESOURCES:
                # Write the finalized file
                if self.outData is None or not os.path.exists(self.outData):
                    self.outData = os.path.abspath(
                        os.path.dirname(os.path.abspath(__file__)) + '/../output/' + self.cloudDatabaseID
                    )
                    try:
                        logging.debug('stochoptim_job.stop() outData is None, making directory = {0}'.format(self.outData))
                        os.mkdir(self.outData)
                    except Exception as e:
                        logging.exception(e)
                else:
                    logging.debug('stochoptim_job.stop() outData is not None, = {0}'.format(self.outData))
                    try:
                        file_to_check = "{0}/return_code".format(self.outData)
                        if not os.path.exists(file_to_check):
                            with open(file_to_check, 'w+') as fd:
                                fd.write(str(1))
                    except Exception as e:
                        logging.exception(e)
                result = service.stopTasks(self)
                if result and result[self.cloudDatabaseID]:
                    final_cloud_result = result[self.cloudDatabaseID]
                    try:
                        self.outputURL = final_cloud_result['output']
                    except KeyError:
                        pass
                    self.status = "Finished"
                    self.put()
                    return True
                else:
                    # Something went wrong
                    logging.error(result)
                    return False
            else:
                raise Exception("Unknown job resource '{0}'".format(self.resource))

Author: StochSS, Project: stochss, Lines: 43, Source: stochoptim_job.py
Example 19: runCloud

    def runCloud(self, data):
        '''
        Submit a sensitivity job to the cloud and return its wrapper.
        '''
        job = SensitivityJobWrapper()
        job.resource = "cloud"
        job.userId = self.user.user_id()
        model = modeleditor.StochKitModelWrapper.get_by_id(data["id"])
        job.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
        job.jobName = data["jobName"]
        job.status = "Pending"
        job.modelName = model.model_name
        runtime = float(data["time"])
        dt = float(data["increment"])
        job.indata = json.dumps(data)
        parameters = []
        for parameter in data['selections']["pc"]:
            if data['selections']["pc"][parameter]:
                parameters.append(parameter)
        params = {
            "job_type": "sensitivity",
            "document": str(model.model.serialize()),
            "paramstring": "stochkit_ode.py --sensi --parameters {0} -t {1} -i {2}".format(" ".join(parameters), runtime, int(runtime / dt)),
            "bucketname": self.user_data.getBucketName()
        }
        service = backendservices()
        db_credentials = self.user_data.getCredentials()
        # Set the environment variables
        os.environ["AWS_ACCESS_KEY_ID"] = db_credentials['EC2_ACCESS_KEY']
        os.environ["AWS_SECRET_ACCESS_KEY"] = db_credentials['EC2_SECRET_KEY']
        # Send the task to the backend
        cloud_result = service.executeTask(params)
        # if not cloud_result["success"]:
        job.cloudDatabaseID = cloud_result["db_id"]
        job.celeryPID = cloud_result["celery_pid"]
        job.outData = None
        job.zipFileName = None
        job.put()
        return job

Author: JohnAbel, Project: stochss, Lines: 42, Source: sensitivity.py
Example 20: addStochOptimJob

    def addStochOptimJob(self, job, globalOp=False):
        jsonJob = {"version": self.version,
                   "userId": job.userId,
                   "pid": job.pid,
                   "startTime": job.startTime,
                   "jobName": job.jobName,
                   "modelName": job.modelName,
                   "indata": json.loads(job.indata),
                   "nameToIndex": json.loads(job.nameToIndex),
                   "outData": job.outData,
                   "status": job.status}
        # For cloud jobs, we need to include the output_url and possibly grab the results from S3
        if job.resource == 'cloud':
            jsonJob["output_url"] = job.outputURL
            # Only grab S3 data if the user wants us to
            if (job.jobName in self.stochKitJobsToDownload) or globalOp:
                # Do we need to download it?
                if job.outData is None or (job.outData is not None and not os.path.exists(job.outData)):
                    # Grab the output from S3
                    service = backendservices()
                    service.fetchOutput(job.pid, job.outputURL)
                    # Unpack it to its local output location
                    os.system("tar -xf {0}.tar".format(job.pid))
                    # And update the db entry
                    job.outData = os.path.abspath(os.path.join(
                        os.path.dirname(os.path.abspath(__file__)),
                        "../output/{0}".format(job.pid)
                    ))
                    job.put()
                    os.remove("{0}.tar".format(job.pid))
        # Only add the folder if it actually exists
        if job.outData is not None and os.path.exists(job.outData):
            outputLocation = self.addFolder('stochOptimJobs/data/{0}'.format(job.jobName), job.outData)
            jsonJob["outData"] = outputLocation
            jsonJob["stdout"] = "{0}/stdout".format(outputLocation)
            jsonJob["stderr"] = "{0}/stderr".format(outputLocation)
        self.addBytes('stochOptimJobs/{0}.json'.format(job.jobName), json.dumps(jsonJob, sort_keys=True, indent=4, separators=(', ', ': ')))

Author: JohnAbel, Project: stochss, Lines: 40, Source: exportimport.py
Note: The backend.backendservice.backendservices examples in this article were collected from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Follow each project's license when redistributing or using this code; do not republish without permission.