import bz2
import hashlib
import httplib2
import json
import os
import re
import subprocess
import sys
import types
import UserDict
import zlib

from apiclient import errors
from apiclient.discovery import build

class CredentialsFromEnv:
    @staticmethod
    def http_request(self, uri, **kwargs):
        from httplib import BadStatusLine
        if 'headers' not in kwargs:
            kwargs['headers'] = {}
        kwargs['headers']['Authorization'] = ('OAuth2 %s' %
                                              os.environ['ARVADOS_API_TOKEN'])
        try:
            return self.orig_http_request(uri, **kwargs)
        except BadStatusLine:
            # This is how httplib tells us that it tried to reuse an
            # existing connection but it was already closed by the
            # server. In that case, yes, we would like to retry.
            # Unfortunately, we are not absolutely certain that the
            # previous call did not succeed, so this is slightly
            # risky.
            return self.orig_http_request(uri, **kwargs)
    def authorize(self, http):
        # Wrap the http object's request method with one that adds our
        # token, remembering the original so we can call through to it.
        http.orig_http_request = http.request
        http.request = types.MethodType(self.http_request, http)
        return http

url = ('https://%s/discovery/v1/apis/'
       '{api}/{apiVersion}/rest' % os.environ['ARVADOS_API_HOST'])
credentials = CredentialsFromEnv()
http = httplib2.Http()
http = credentials.authorize(http)
http.disable_ssl_certificate_validation = True
service = build("arvados", "v1", http=http, discoveryServiceUrl=url)
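
# ARVADOS_API_HOST and ARVADOS_API_TOKEN are read at import time, so they
# must be exported before this module is imported (crunch normally sets
# them). A hypothetical call against the resulting service object:
#
#   job = service.jobs().get(uuid='some-job-uuid').execute()
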
def task_set_output(self, s):
    service.job_tasks().update(
        uuid=self['uuid'],
        job_task=json.dumps({'output': s, 'success': True, 'progress': 1.0})
        ).execute()

def current_task():
    t = service.job_tasks().get(uuid=os.environ['TASK_UUID']).execute()
    t = UserDict.UserDict(t)
    t.set_output = types.MethodType(task_set_output, t)
    t.tmpdir = os.environ['TASK_TMPDIR']
    return t

def current_job():
    t = service.jobs().get(uuid=os.environ['JOB_UUID']).execute()
    t = UserDict.UserDict(t)
    t.tmpdir = os.environ['CRUNCH_WORK']
    return t

class JobTask:
    def __init__(self, parameters=dict(), resource_limits=dict()):
        print "init jobtask %s %s" % (parameters, resource_limits)

class job_setup:
    @staticmethod
    def one_task_per_input_file(if_sequence=0, and_end_task=True):
        if if_sequence != current_task()['sequence']:
            return
        job_input = current_job()['script_parameters']['input']
        cr = CollectionReader(job_input)
        for s in cr.all_streams():
            for f in s.all_files():
                task_input = f.as_manifest()
                new_task_attrs = {
                    'job_uuid': current_job()['uuid'],
                    'created_by_job_task_uuid': current_task()['uuid'],
                    'sequence': if_sequence + 1,
                    'parameters': {
                        'input': task_input
                        }
                    }
                service.job_tasks().create(job_task=json.dumps(new_task_attrs)).execute()
        if and_end_task:
            service.job_tasks().update(uuid=current_task()['uuid'],
                                       job_task=json.dumps({'success': True})
                                       ).execute()
            exit(0)
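
# Sketch of intended use from a crunch job script (names hypothetical):
#
#   import arvados
#   arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True)
#   this_task = arvados.current_task()
#   input_manifest = this_task['parameters']['input']
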
class util:
    @staticmethod
    def run_command(execargs, **kwargs):
        p = subprocess.Popen(execargs, close_fds=True, shell=False,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             **kwargs)
        stdoutdata, stderrdata = p.communicate(None)
        if p.returncode != 0:
            raise Exception("run_command %s exit %d:\n%s" %
                            (execargs, p.returncode, stderrdata))
        return stdoutdata, stderrdata
    @staticmethod
    def git_checkout(url, version, path):
        if not re.search('^/', path):
            path = os.path.join(current_job().tmpdir, path)
        if not os.path.exists(path):
            util.run_command(["git", "clone", url, path],
                             cwd=os.path.dirname(path))
        util.run_command(["git", "checkout", version],
                         cwd=path)
        return path

class DataReader:
    def __init__(self, data_locator):
        self.data_locator = data_locator
        self.p = subprocess.Popen(["whget", "-r", self.data_locator, "-"],
                                  stdout=subprocess.PIPE,
                                  stdin=None, stderr=subprocess.PIPE,
                                  shell=False, close_fds=True)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def read(self, size, **kwargs):
        return self.p.stdout.read(size, **kwargs)
    def close(self):
        self.p.stdout.close()
        if not self.p.stderr.closed:
            for err in self.p.stderr:
                print >> sys.stderr, err
            self.p.stderr.close()
        self.p.wait()
        if self.p.returncode != 0:
            raise Exception("whget subprocess exited %d" % self.p.returncode)
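
# Minimal usage sketch, assuming `whget` is on PATH and the locator is
# valid (hypothetical values):
#
#   reader = DataReader('acbd18db4cc2f85cedef654fccc4a4d8+3')
#   chunk = reader.read(2**20)
#   reader.close()
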
class StreamFileReader:
    def __init__(self, stream, pos, size, name):
        self._stream = stream
        self._pos = pos
        self._size = size
        self._name = name
        self._filepos = 0
    def name(self):
        return self._name
    def decompressed_name(self):
        return re.sub(r'\.(bz2|gz)$', '', self._name)
    def size(self):
        return self._size
    def stream_name(self):
        return self._stream.name()
    def read(self, size, **kwargs):
        self._stream.seek(self._pos + self._filepos)
        data = self._stream.read(min(size, self._size - self._filepos))
        self._filepos += len(data)
        return data
    def readall(self, size, **kwargs):
        while True:
            data = self.read(size, **kwargs)
            if data == '':
                break
            yield data
    def bunzip2(self, size):
        decompressor = bz2.BZ2Decompressor()
        for chunk in self.readall(size):
            data = decompressor.decompress(chunk)
            if data and data != '':
                yield data
    def gunzip(self, size):
        # 16+MAX_WBITS tells zlib to expect a gzip header.
        decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
        for chunk in self.readall(size):
            data = decompressor.decompress(decompressor.unconsumed_tail + chunk)
            if data and data != '':
                yield data
    def readlines(self, decompress=True):
        self._stream.seek(self._pos + self._filepos)
        if decompress and re.search(r'\.bz2$', self._name):
            datasource = self.bunzip2(2**10)
        elif decompress and re.search(r'\.gz$', self._name):
            datasource = self.gunzip(2**10)
        else:
            datasource = self.readall(2**10)
        data = ''
        for newdata in datasource:
            data += newdata
            sol = 0
            while True:
                eol = data.find("\n", sol)
                if eol < 0:
                    break
                yield data[sol:eol+1]
                sol = eol + 1
            data = data[sol:]
        if data != '':
            yield data
    def as_manifest(self):
        if self.size() == 0:
            return ("%s d41d8cd98f00b204e9800998ecf8427e+0 0:0:%s\n"
                    % (self._stream.name(), self.name()))
        return ' '.join(self._stream.tokens_for_range(self._pos, self._size)) + "\n"
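
# For reference, a manifest stream line (the input to StreamReader below,
# and the shape as_manifest() emits) looks like this, with a hypothetical
# block locator:
#
#   . acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt
#
# i.e. stream name, one or more data block locators, then pos:size:name
# file tokens.
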
class StreamReader:
    def __init__(self, tokens):
        self._tokens = tokens
        self._current_datablock_data = None
        self._current_datablock_pos = 0
        self._current_datablock_index = -1
        self._pos = 0

        self._stream_name = None
        self.data_locators = []
        self.files = []

        for tok in self._tokens:
            if self._stream_name is None:
                self._stream_name = tok
            elif re.search(r'^[0-9a-f]{32}(\+\S+)*$', tok):
                self.data_locators += [tok]
            elif re.search(r'^\d+:\d+:\S+', tok):
                pos, size, name = tok.split(':', 2)
                self.files += [[int(pos), int(size), name]]
            else:
                raise Exception("Invalid manifest format")
    def tokens_for_range(self, range_start, range_size):
        # Return the subset of stream tokens (stream name, block locators,
        # file tokens) needed to cover the given byte range, with file
        # offsets shifted to account for skipped leading blocks.
        resp = [self._stream_name]
        return_all_tokens = False
        block_start = 0
        token_bytes_skipped = 0
        for locator in self.data_locators:
            sizehint = re.search(r'\+(\d+)', locator)
            if not sizehint:
                # No size hint; give up on trimming and return everything.
                return_all_tokens = True
            if return_all_tokens:
                resp += [locator]
                continue
            blocksize = int(sizehint.group(1))
            if range_start + range_size <= block_start:
                break
            if range_start < block_start + blocksize:
                resp += [locator]
            else:
                token_bytes_skipped += blocksize
            block_start += blocksize
        for f in self.files:
            if ((f[0] < range_start + range_size)
                and
                (f[0] + f[1] > range_start)
                and
                f[1] > 0):
                resp += ["%d:%d:%s" % (f[0] - token_bytes_skipped, f[1], f[2])]
        return resp
    def name(self):
        return self._stream_name
    def all_files(self):
        for f in self.files:
            pos, size, name = f
            yield StreamFileReader(self, pos, size, name)
    def nextdatablock(self):
        if self._current_datablock_index < 0:
            self._current_datablock_pos = 0
            self._current_datablock_index = 0
        else:
            self._current_datablock_pos += self.current_datablock_size()
            self._current_datablock_index += 1
        self._current_datablock_data = None
    def current_datablock_data(self):
        if self._current_datablock_data is None:
            self._current_datablock_data = Keep.get(
                self.data_locators[self._current_datablock_index])
        return self._current_datablock_data
    def current_datablock_size(self):
        if self._current_datablock_index < 0:
            return 0
        sizehint = re.search(r'\+(\d+)', self.data_locators[self._current_datablock_index])
        if sizehint:
            return int(sizehint.group(1))
        return len(self.current_datablock_data())
303 """Set the position of the next read operation."""
305 def really_seek(self):
306 """Find and load the appropriate data block, so the byte at
309 if self._pos == self._current_datablock_pos:
311 if (self._current_datablock_pos != None and
312 self._pos >= self._current_datablock_pos and
313 self._pos <= self._current_datablock_pos + self.current_datablock_size()):
315 if self._pos < self._current_datablock_pos:
316 self._current_datablock_index = -1
318 while (self._pos > self._current_datablock_pos and
319 self._pos > self._current_datablock_pos + self.current_datablock_size()):
    def read(self, size):
        """Read no more than size bytes -- but at least one byte,
        unless _pos is already at the end of the stream.
        """
        if size == 0:
            return ''
        self.really_seek()
        while self._pos >= self._current_datablock_pos + self.current_datablock_size():
            self.nextdatablock()
            if self._current_datablock_index >= len(self.data_locators):
                return ''
        data = self.current_datablock_data()[self._pos - self._current_datablock_pos : self._pos - self._current_datablock_pos + size]
        self._pos += len(data)
        return data

class CollectionReader:
    def __init__(self, manifest_locator_or_text):
        if re.search(r'^\S+( [a-f0-9]{32,}(\+\S+)*)+( \d+:\d+:\S+)+\n', manifest_locator_or_text):
            self._manifest_text = manifest_locator_or_text
            self._manifest_locator = None
        else:
            self._manifest_locator = manifest_locator_or_text
            self._manifest_text = None
        self._streams = None
    def _populate(self):
        if self._streams is not None:
            return
        if not self._manifest_text:
            self._manifest_text = Keep.get(self._manifest_locator)
        self._streams = []
        for stream_line in self._manifest_text.split("\n"):
            if stream_line != '':
                stream_tokens = stream_line.split()
                self._streams += [stream_tokens]
    def all_streams(self):
        self._populate()
        resp = []
        for s in self._streams:
            resp += [StreamReader(s)]
        return resp
    def all_files(self):
        for s in self.all_streams():
            for f in s.all_files():
                yield f
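
# Usage sketch: a CollectionReader accepts either manifest text or a
# manifest locator (fetched from Keep on demand). Hypothetical manifest:
#
#   cr = CollectionReader(". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\n")
#   for f in cr.all_files():
#       for line in f.readlines():
#           pass  # process each line
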
class CollectionWriter:
    KEEP_BLOCK_SIZE = 2**26
    def __init__(self):
        self._data_buffer = []
        self._data_buffer_len = 0
        self._current_stream_files = []
        self._current_stream_length = 0
        self._current_stream_locators = []
        self._current_stream_name = '.'
        self._current_file_name = None
        self._current_file_pos = 0
        self._finished_streams = []
    def write(self, newdata):
        self._data_buffer += [newdata]
        self._data_buffer_len += len(newdata)
        self._current_stream_length += len(newdata)
        while self._data_buffer_len >= self.KEEP_BLOCK_SIZE:
            self.flush_data()
    def flush_data(self):
        data_buffer = ''.join(self._data_buffer)
        if data_buffer != '':
            self._current_stream_locators += [Keep.put(data_buffer[0:self.KEEP_BLOCK_SIZE])]
            self._data_buffer = [data_buffer[self.KEEP_BLOCK_SIZE:]]
            self._data_buffer_len = len(self._data_buffer[0])
    def start_new_file(self, newfilename=None):
        self.finish_current_file()
        self.set_current_file_name(newfilename)
    def set_current_file_name(self, newfilename):
        if re.search(r'[ \t\n]', newfilename):
            raise AssertionError("Manifest filenames cannot contain whitespace")
        self._current_file_name = newfilename
    def current_file_name(self):
        return self._current_file_name
    def finish_current_file(self):
        if self._current_file_name is None:
            if self._current_file_pos == self._current_stream_length:
                return
            raise Exception("Cannot finish an unnamed file (%d bytes at offset %d in '%s' stream)" %
                            (self._current_stream_length - self._current_file_pos,
                             self._current_file_pos, self._current_stream_name))
        self._current_stream_files += [[self._current_file_pos,
                                        self._current_stream_length - self._current_file_pos,
                                        self._current_file_name]]
        self._current_file_pos = self._current_stream_length
    def start_new_stream(self, newstreamname=None):
        self.finish_current_stream()
        self.set_current_stream_name(newstreamname)
    def set_current_stream_name(self, newstreamname):
        if re.search(r'[ \t\n]', newstreamname):
            raise AssertionError("Manifest stream names cannot contain whitespace")
        self._current_stream_name = newstreamname
    def current_stream_name(self):
        return self._current_stream_name
    def finish_current_stream(self):
        self.finish_current_file()
        self.flush_data()
        if len(self._current_stream_files) == 0:
            pass
        elif self._current_stream_name is None:
            raise Exception("Cannot finish an unnamed stream (%d bytes in %d files)" %
                            (self._current_stream_length, len(self._current_stream_files)))
        else:
            self._finished_streams += [[self._current_stream_name,
                                        self._current_stream_locators,
                                        self._current_stream_files]]
        self._current_stream_files = []
        self._current_stream_length = 0
        self._current_stream_locators = []
        self._current_stream_name = None
        self._current_file_pos = 0
        self._current_file_name = None
    def finish(self):
        return Keep.put(self.manifest_text())
    def manifest_text(self):
        self.finish_current_stream()
        manifest = ''
        for stream in self._finished_streams:
            manifest += stream[0]
            if len(stream[1]) == 0:
                manifest += " d41d8cd98f00b204e9800998ecf8427e+0"
            else:
                for locator in stream[1]:
                    manifest += " %s" % locator
            for sfile in stream[2]:
                manifest += " %d:%d:%s" % (sfile[0], sfile[1], sfile[2])
            manifest += "\n"
        return manifest
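
# Usage sketch: stream data into a collection and serialize its manifest.
#
#   cw = CollectionWriter()
#   cw.start_new_file('hello.txt')
#   cw.write('hello world\n')
#   print cw.manifest_text()   # or cw.finish() to store the manifest in Keep
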
class Keep:
    @staticmethod
    def put(data):
        if 'KEEP_LOCAL_STORE' in os.environ:
            return Keep.local_store_put(data)
        p = subprocess.Popen(["whput", "-"],
                             stdout=subprocess.PIPE,
                             stdin=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             shell=False, close_fds=True)
        stdoutdata, stderrdata = p.communicate(data)
        if p.returncode != 0:
            raise Exception("whput subprocess exited %d - stderr:\n%s" % (p.returncode, stderrdata))
        return stdoutdata.rstrip()
    @staticmethod
    def get(locator):
        if 'KEEP_LOCAL_STORE' in os.environ:
            return Keep.local_store_get(locator)
        p = subprocess.Popen(["whget", locator, "-"],
                             stdout=subprocess.PIPE,
                             stdin=None,
                             stderr=subprocess.PIPE,
                             shell=False, close_fds=True)
        stdoutdata, stderrdata = p.communicate(None)
        if p.returncode != 0:
            raise Exception("whget subprocess exited %d - stderr:\n%s" % (p.returncode, stderrdata))
        m = hashlib.new('md5')
        m.update(stdoutdata)
        try:
            if locator.index(m.hexdigest()) == 0:
                return stdoutdata
        except ValueError:
            pass
        raise Exception("md5 checksum mismatch: md5(get(%s)) == %s" % (locator, m.hexdigest()))
    @staticmethod
    def local_store_put(data):
        m = hashlib.new('md5')
        m.update(data)
        md5 = m.hexdigest()
        locator = '%s+%d' % (md5, len(data))
        with open(os.path.join(os.environ['KEEP_LOCAL_STORE'], md5 + '.tmp'), 'w') as f:
            f.write(data)
        os.rename(os.path.join(os.environ['KEEP_LOCAL_STORE'], md5 + '.tmp'),
                  os.path.join(os.environ['KEEP_LOCAL_STORE'], md5))
        return locator
    @staticmethod
    def local_store_get(locator):
        r = re.search('^([0-9a-f]{32,})', locator)
        if not r:
            raise Exception("Keep.get: invalid data locator '%s'" % locator)
        if r.group(0) == 'd41d8cd98f00b204e9800998ecf8427e':
            return ''
        with open(os.path.join(os.environ['KEEP_LOCAL_STORE'], r.group(0)), 'r') as f:
            return f.read()
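
# Usage sketch for the local-store fallback (assumes KEEP_LOCAL_STORE names
# an existing writable directory):
#
#   os.environ['KEEP_LOCAL_STORE'] = '/tmp/keep'   # hypothetical path
#   loc = Keep.put('foo')   # 'acbd18db4cc2f85cedef654fccc4a4d8+3'
#   assert Keep.get(loc) == 'foo'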