import logging
import os
import types
import UserDict

from .api import api, http_cache
from .collection import CollectionReader, CollectionWriter, ResumableCollectionWriter
from .arvfile import StreamFileReader
from .retry import RetryLoop
from . import config
from . import errors
from . import retry

# Set up Arvados logging based on the user's configuration.
# All Arvados code should log under the arvados hierarchy.
log_handler = logging.StreamHandler()
log_handler.setFormatter(logging.Formatter(
    '%(asctime)s %(name)s[%(process)d] %(levelname)s: %(message)s',
    '%Y-%m-%d %H:%M:%S'))
logger = logging.getLogger('arvados')
logger.addHandler(log_handler)
logger.setLevel(logging.DEBUG if config.get('ARVADOS_DEBUG')
                else logging.WARNING)
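
# A minimal sketch of raising verbosity from a client script, independent
# of the ARVADOS_DEBUG setting checked above:
#
#     logging.getLogger('arvados').setLevel(logging.DEBUG)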

def task_set_output(self, s, num_retries=5):
    for tries_left in RetryLoop(num_retries=num_retries, backoff_start=0):
        try:
            return api('v1').job_tasks().update(
                uuid=self['uuid'],
                body={'output': s, 'success': True, 'progress': 1.0}).execute()
        except errors.ApiError as error:
            if retry.check_http_response_success(error.resp.status) is None and tries_left > 0:
                logger.debug("task_set_output: job_tasks().update() raised {}, retrying with {} tries left".format(repr(error), tries_left))
            else:
                raise
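
# Example (sketch): crunch scripts normally reach this through the bound
# method attached in current_task() below, e.g.
#
#     arvados.current_task().set_output(out.manifest_text())
#
# where the argument is whatever should be recorded as the task's output.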

def current_task(num_retries=5):
    for tries_left in RetryLoop(num_retries=num_retries, backoff_start=2):
        try:
            task = api('v1').job_tasks().get(uuid=os.environ['TASK_UUID']).execute()
            task = UserDict.UserDict(task)
            task.set_output = types.MethodType(task_set_output, task)
            task.tmpdir = os.environ['TASK_WORK']
            return task
        except errors.ApiError as error:
            if retry.check_http_response_success(error.resp.status) is None and tries_left > 0:
                logger.debug("current_task: job_tasks().get() raised {}, retrying with {} tries left".format(repr(error), tries_left))
            else:
                raise
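
# current_task() and current_job() depend on environment variables
# (TASK_UUID, TASK_WORK, JOB_UUID, JOB_WORK) that crunch sets before
# running the script, so they only make sense inside a crunch job.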

def current_job(num_retries=5):
    for tries_left in RetryLoop(num_retries=num_retries, backoff_start=2):
        try:
            job = api('v1').jobs().get(uuid=os.environ['JOB_UUID']).execute()
            job = UserDict.UserDict(job)
            job.tmpdir = os.environ['JOB_WORK']
            return job
        except errors.ApiError as error:
            if retry.check_http_response_success(error.resp.status) is None and tries_left > 0:
                logger.debug("current_job: jobs().get() raised {}, retrying with {} tries left".format(repr(error), tries_left))
            else:
                raise
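
# Convenience accessors for script parameters.  The *_mount variants
# resolve a parameter to its path under the Keep mount that crunch
# provides at TASK_KEEPMOUNT.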

def getjobparam(*args):
    return current_job()['script_parameters'].get(*args)

def get_job_param_mount(*args):
    return os.path.join(os.environ['TASK_KEEPMOUNT'],
                        current_job()['script_parameters'].get(*args))

def get_task_param_mount(*args):
    return os.path.join(os.environ['TASK_KEEPMOUNT'],
                        current_task()['parameters'].get(*args))
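
# Example (sketch): for a job submitted with
# script_parameters={"input": "<collection locator>"}, a script might use
#
#     locator = getjobparam('input')
#     path = get_job_param_mount('input')    # file path under the Keep mount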

class JobTask(object):
    def __init__(self, parameters=dict(), runtime_constraints=dict()):
        logger.debug("init jobtask %s %s", parameters, runtime_constraints)

def one_task_per_input_file(if_sequence=0, and_end_task=True,
                            input_as_path=False, api_client=None):
    if if_sequence != current_task()['sequence']:
        return
    if not api_client:
        api_client = api('v1')
    job_input = current_job()['script_parameters']['input']
    cr = CollectionReader(job_input, api_client=api_client)
    for s in cr.all_streams():
        for f in s.all_files():
            if input_as_path:
                task_input = os.path.join(job_input, s.name(), f.name())
            else:
                task_input = f.as_manifest()
            new_task_attrs = {
                'job_uuid': current_job()['uuid'],
                'created_by_job_task_uuid': current_task()['uuid'],
                'sequence': if_sequence + 1,
                'parameters': {
                    'input': task_input
                    }
                }
            api_client.job_tasks().create(body=new_task_attrs).execute()
    if and_end_task:
        api_client.job_tasks().update(uuid=current_task()['uuid'],
                                      body={'success': True}).execute()
        exit(0)
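
# Example (sketch): the usual fan-out pattern in a crunch script; the
# first invocation (sequence 0) queues one task per input file and exits,
# and each queued task then runs with its own 'input' parameter:
#
#     one_task_per_input_file(if_sequence=0, and_end_task=True)
#     this_input = get_task_param_mount('input')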

def one_task_per_input_stream(if_sequence=0, and_end_task=True):
    if if_sequence != current_task()['sequence']:
        return
    job_input = current_job()['script_parameters']['input']
    cr = CollectionReader(job_input)
    for s in cr.all_streams():
        task_input = s.tokens()
        new_task_attrs = {
            'job_uuid': current_job()['uuid'],
            'created_by_job_task_uuid': current_task()['uuid'],
            'sequence': if_sequence + 1,
            'parameters': {
                'input': task_input
                }
            }
        api('v1').job_tasks().create(body=new_task_attrs).execute()
    if and_end_task:
        api('v1').job_tasks().update(uuid=current_task()['uuid'],
                                     body={'success': True}).execute()
        exit(0)
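
# one_task_per_input_stream is the stream-level analogue of
# one_task_per_input_file: each new task's 'input' parameter holds one
# stream's manifest tokens rather than a single file's manifest.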