end
modal_path = choose_collections_path \
({ title: chooser_title,
- filters: [['tail_uuid', '=', object.owner_uuid]].to_json,
+ filters: [['owner_uuid', '=', object.owner_uuid]].to_json,
action_name: 'OK',
action_href: pipeline_instance_path(id: object.uuid),
action_method: 'patch',
<div class="modal-body">
<div class="input-group">
<% if params[:by_project].to_s != "false" %>
+ <% selected_project_name = 'All projects'
+ @filters.andand.each do |attr, op, val|
+ if attr == 'owner_uuid' and op == '='
+ if val == current_user.uuid
+ selected_project_name = "Home"
+ else
+ selected_project_name = Group.find(val).name rescue val
+ end
+ end
+ end
+ %>
<div class="input-group-btn" data-filterable-target=".modal.arv-choose .selectable-container">
<button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">
- All projects <span class="caret"></span>
+ <%= selected_project_name %> <span class="caret"></span>
</button>
<ul class="dropdown-menu" role="menu">
<li>
click
within('.modal-dialog') do
+ assert_selector 'button.dropdown-toggle', text: 'A Project'
first('span', text: 'foo_tag').click
find('button', text: 'OK').click
end
#!/usr/bin/env python
+# collection-merge
+#
+# Merge two or more collections together. Can also be used to extract specific
+# files from a collection to produce a new collection.
+#
+# input:
+# An array of collections or collection/file paths in script_parameter["input"]
+#
+# output:
+# A manifest with the collections merged. Duplicate file names will
+# have their contents concatenated in the order that they appear in the input
+# array.
+
import arvados
import md5
import subst
if fn in s.files():
merged += s.files()[fn].as_manifest()
-crm = arvados.CollectionReader(merged)
-
-combined = crm.manifest_text(strip=True)
-
-m = hashlib.new('md5')
-m.update(combined)
-
-uuid = "{}+{}".format(m.hexdigest(), len(combined))
-
-collection = arvados.api().collections().create(
- body={
- 'uuid': uuid,
- 'manifest_text': crm.manifest_text(),
- }).execute()
-
-for s in src:
- l = arvados.api().links().create(body={
- "link": {
- "tail_uuid": s,
- "head_uuid": uuid,
- "link_class": "provenance",
- "name": "provided"
- }}).execute()
-
-arvados.current_task().set_output(uuid)
+arvados.current_task().set_output(merged)
--- /dev/null
+#!/usr/bin/env python
+
+#
+# decompress-all.py
+#
+# Decompress all compressed files in the collection using the "dtrx" tool and
+# produce a new collection with the contents. Uncompressed files
+# are passed through.
+#
+# input:
+# A collection at script_parameters["input"]
+#
+# output:
+# A manifest of the uncompressed contents of the input collection.
+
+import arvados
+import re
+import subprocess
+import os
+import sys
+
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True,
+ input_as_path=True)
+
+task = arvados.current_task()
+
+input_file = task['parameters']['input']
+
+infile_parts = re.match(r"(^[a-f0-9]{32}\+\d+)(\+\S+)*(/.*)?(/[^/]+)$", input_file)
+
+outdir = os.path.join(task.tmpdir, "output")
+os.makedirs(outdir)
+os.chdir(outdir)
+
+if infile_parts == None:
+ print >>sys.stderr, "Failed to parse input filename '%s' as a Keep file\n" % input_file
+ sys.exit(1)
+
+cr = arvados.CollectionReader(infile_parts.group(1))
+streamname = infile_parts.group(3)[1:]
+filename = infile_parts.group(4)[1:]
+
+if streamname != None:
+ subprocess.call(["mkdir", "-p", streamname])
+ os.chdir(streamname)
+else:
+ streamname = '.'
+
+m = re.match(r'.*\.(gz|Z|bz2|tgz|tbz|zip|rar|7z|cab|deb|rpm|cpio|gem)$', arvados.get_task_param_mount('input'), re.IGNORECASE)
+
+if m != None:
+ rc = subprocess.call(["dtrx", "-r", "-n", "-q", arvados.get_task_param_mount('input')])
+ if rc == 0:
+ out = arvados.CollectionWriter()
+ out.write_directory_tree(outdir, max_manifest_depth=0)
+ task.set_output(out.finish())
+ else:
+ sys.exit(rc)
+else:
+ streamreader = filter(lambda s: s.name() == streamname, cr.all_streams())[0]
+ filereader = streamreader.files()[filename]
+ task.set_output(streamname + filereader.as_manifest()[1:])
import time
import arvados.commands.put as put
import signal
+import stat
+import copy
+import traceback
+import pprint
+import multiprocessing
+import logging
os.umask(0077)
+logging.basicConfig(format="run-command: %(message)s")
t = arvados.current_task().tmpdir
os.chdir("output")
+outdir = os.getcwd()
+
+taskp = None
+jobp = arvados.current_job()['script_parameters']
if len(arvados.current_task()['parameters']) > 0:
- p = arvados.current_task()['parameters']
-else:
- p = arvados.current_job()['script_parameters']
+ taskp = arvados.current_task()['parameters']
links = []
def sub_link(v):
- r = os.path.basename(v)
- os.symlink(os.path.join(os.environ['TASK_KEEPMOUNT'], v) , r)
+ r = os.path.join(outdir, os.path.basename(v))
+ os.symlink(v, r)
links.append(r)
return r
def sub_tmpdir(v):
return os.path.join(arvados.current_task().tmpdir, 'tmpdir')
+def sub_outdir(v):
+ return outdir
+
def sub_cores(v):
- return os.environ['CRUNCH_NODE_SLOTS']
+ return str(multiprocessing.cpu_count())
+
+def sub_jobid(v):
+ return os.environ['JOB_UUID']
+
+def sub_taskid(v):
+ return os.environ['TASK_UUID']
+
+def sub_jobsrc(v):
+ return os.environ['CRUNCH_SRC']
subst.default_subs["link "] = sub_link
-subst.default_subs["tmpdir"] = sub_tmpdir
+subst.default_subs["task.tmpdir"] = sub_tmpdir
+subst.default_subs["task.outdir"] = sub_outdir
+subst.default_subs["job.srcdir"] = sub_jobsrc
subst.default_subs["node.cores"] = sub_cores
-
-rcode = 1
+subst.default_subs["job.uuid"] = sub_jobid
+subst.default_subs["task.uuid"] = sub_taskid
def machine_progress(bytes_written, bytes_expected):
return "run-command: wrote {} total {}\n".format(
sp.send_signal(signum)
self.sig = signum
+def expand_item(p, c):
+ if isinstance(c, dict):
+ if "foreach" in c and "command" in c:
+ var = c["foreach"]
+ items = get_items(p, p[var])
+ r = []
+ for i in items:
+ params = copy.copy(p)
+ params[var] = i
+ r.extend(expand_list(params, c["command"]))
+ return r
+ elif isinstance(c, list):
+ return expand_list(p, c)
+ elif isinstance(c, str) or isinstance(c, unicode):
+ return [subst.do_substitution(p, c)]
+
+ return []
+
+def expand_list(p, l):
+ return [exp for arg in l for exp in expand_item(p, arg)]
+
+def get_items(p, value):
+ if isinstance(value, list):
+ return expand_list(p, value)
+
+ fn = subst.do_substitution(p, value)
+ mode = os.stat(fn).st_mode
+ prefix = fn[len(os.environ['TASK_KEEPMOUNT'])+1:]
+ if mode != None:
+ if stat.S_ISDIR(mode):
+ items = ["$(dir %s/%s/)" % (prefix, l) for l in os.listdir(fn)]
+ elif stat.S_ISREG(mode):
+ with open(fn) as f:
+ items = [line for line in f]
+ return items
+ else:
+ return None
+
+stdoutname = None
+stdoutfile = None
+rcode = 1
+
try:
- cmd = []
- for c in p["command"]:
- cmd.append(subst.do_substitution(p, c))
-
- stdoutname = None
- stdoutfile = None
- if "stdout" in p:
- stdoutname = subst.do_substitution(p, p["stdout"])
+ if "task.foreach" in jobp:
+ if arvados.current_task()['sequence'] == 0:
+ var = jobp["task.foreach"]
+ items = get_items(jobp, jobp[var])
+ logging.info("parallelizing on %s with items %s" % (var, items))
+ if items != None:
+ for i in items:
+ params = copy.copy(jobp)
+ params[var] = i
+ arvados.api().job_tasks().create(body={
+ 'job_uuid': arvados.current_job()['uuid'],
+ 'created_by_job_task_uuid': arvados.current_task()['uuid'],
+ 'sequence': 1,
+ 'parameters': params
+ }
+ ).execute()
+ arvados.current_task().set_output(None)
+ sys.exit(0)
+ else:
+ sys.exit(1)
+ else:
+ taskp = jobp
+
+ cmd = expand_list(taskp, taskp["command"])
+
+ if "save.stdout" in taskp:
+ stdoutname = subst.do_substitution(taskp, taskp["save.stdout"])
stdoutfile = open(stdoutname, "wb")
- print("run-command: {}{}".format(' '.join(cmd), (" > " + stdoutname) if stdoutname != None else ""))
+ logging.info("{}{}".format(' '.join(cmd), (" > " + stdoutname) if stdoutname != None else ""))
+
+except Exception as e:
+ logging.exception("caught exception")
+ logging.error("task parameters was:")
+ logging.error(pprint.pformat(taskp))
+ sys.exit(1)
+try:
sp = subprocess.Popen(cmd, shell=False, stdout=stdoutfile)
sig = SigHandler()
rcode = sp.wait()
if sig.sig != None:
- print("run-command: terminating on signal %s" % sig.sig)
+ logging.critical("terminating on signal %s" % sig.sig)
sys.exit(2)
else:
- print("run-command: completed with exit code %i (%s)" % (rcode, "success" if rcode == 0 else "failed"))
+ logging.info("completed with exit code %i (%s)" % (rcode, "success" if rcode == 0 else "failed"))
except Exception as e:
- print("run-command: caught exception: {}".format(e))
+ logging.exception("caught exception")
# restore default signal handlers.
signal.signal(signal.SIGINT, signal.SIG_DFL)
for l in links:
os.unlink(l)
-print("run-command: the follow output files will be saved to keep:")
+logging.info("the following output files will be saved to keep:")
-subprocess.call(["find", ".", "-type", "f", "-printf", "run-command: %12.12s %h/%f\\n"])
+subprocess.call(["find", ".", "-type", "f", "-printf", "run-command: %12.12s %h/%f\\n"], stdout=sys.stderr)
-print("run-command: start writing output to keep")
+logging.info("start writing output to keep")
done = False
resume_cache = put.ResumeCache(os.path.join(arvados.current_task().tmpdir, "upload-output-checkpoint"))
}).execute()
done = True
except KeyboardInterrupt:
- print("run-command: terminating on signal 2")
+ logging.critical("terminating on signal 2")
sys.exit(2)
except Exception as e:
- print("run-command: caught exception: {}".format(e))
+ logging.exception("caught exception:")
time.sleep(5)
sys.exit(rcode)
--- /dev/null
+#!/usr/bin/python
+
+import arvados
+import re
+import hashlib
+import string
+
+api = arvados.api('v1')
+
+piece = 0
+manifest_text = ""
+
+# Look for paired reads
+
+inp = arvados.CollectionReader(arvados.getjobparam('reads'))
+
+manifest_list = []
+
+chunking = False #arvados.getjobparam('chunking')
+
+def nextline(reader, start):
+ n = -1
+ while True:
+ r = reader.readfrom(start, 128)
+ if r == '':
+ break
+ n = string.find(r, "\n")
+ if n > -1:
+ break
+ else:
+ start += 128
+ return n
+
+# Chunk a fastq into approximately 64 MiB chunks. Requires that the input data
+# be decompressed ahead of time, such as using decompress-all.py. Generates a
+# new manifest, but doesn't actually move any data around. Handles paired
+# reads by ensuring that each chunk of a pair gets the same number of records.
+#
+# This works, but in practice is so slow that potential gains in alignment
+# performance are lost in the prep time, which is why it is currently disabled.
+#
+# A better algorithm would seek to a file position a bit less than the desired
+# chunk size and then scan ahead for the next record, making sure that record
+# was matched by the read pair.
+def splitfastq(p):
+ for i in xrange(0, len(p)):
+ p[i]["start"] = 0
+ p[i]["end"] = 0
+
+ count = 0
+ recordsize = [0, 0]
+
+ global piece
+ finish = False
+ while not finish:
+ for i in xrange(0, len(p)):
+ recordsize[i] = 0
+
+ # read next 4 lines
+ for i in xrange(0, len(p)):
+ for ln in xrange(0, 4):
+ r = nextline(p[i]["reader"], p[i]["end"]+recordsize[i])
+ if r == -1:
+ finish = True
+ break
+ recordsize[i] += (r+1)
+
+ splitnow = finish
+ for i in xrange(0, len(p)):
+ if ((p[i]["end"] - p[i]["start"]) + recordsize[i]) >= (64*1024*1024):
+ splitnow = True
+
+ if splitnow:
+ for i in xrange(0, len(p)):
+ global manifest_list
+ print >>sys.stderr, "Finish piece ./_%s/%s (%s %s)" % (piece, p[i]["reader"].name(), p[i]["start"], p[i]["end"])
+ manifest = []
+ manifest.extend(["./_" + str(piece)])
+ manifest.extend([d[arvados.LOCATOR] for d in p[i]["reader"]._stream._data_locators])
+ manifest.extend(["{}:{}:{}".format(seg[arvados.LOCATOR]+seg[arvados.OFFSET], seg[arvados.SEGMENTSIZE], p[i]["reader"].name().replace(' ', '\\040')) for seg in arvados.locators_and_ranges(p[i]["reader"].segments, p[i]["start"], p[i]["end"] - p[i]["start"])])
+ manifest_list.append(manifest)
+ p[i]["start"] = p[i]["end"]
+ piece += 1
+ else:
+ for i in xrange(0, len(p)):
+ p[i]["end"] += recordsize[i]
+ count += 1
+ if count % 10000 == 0:
+ print >>sys.stderr, "Record %s at %s" % (count, p[i]["end"])
+
+prog = re.compile(r'(.*?)(_[12])?\.fastq(\.gz)?$')
+
+# Look for fastq files
+for s in inp.all_streams():
+ for f in s.all_files():
+ name_pieces = prog.match(f.name())
+ if name_pieces != None:
+ if s.name() != ".":
+ # The downstream tool (run-command) only iterates over the top
+ # level of directories so if there are fastq files in
+ # directories in the input, the choice is either to forget
+ # there are directories (which might lead to name conflicts) or
+ # just fail.
+ print >>sys.stderr, "fastq must be at the root of the collection"
+ sys.exit(1)
+
+ p = None
+ if name_pieces.group(2) != None:
+ if name_pieces.group(2) == "_1":
+ p = [{}, {}]
+ p[0]["reader"] = s.files()[name_pieces.group(0)]
+ p[1]["reader"] = s.files()[name_pieces.group(1) + "_2.fastq" + (name_pieces.group(3) if name_pieces.group(3) else '')]
+ else:
+ p = [{}]
+ p[0]["reader"] = s.files()[name_pieces.group(0)]
+
+ if p != None:
+ if chunking:
+ splitfastq(p)
+ else:
+ for i in xrange(0, len(p)):
+ m = p[i]["reader"].as_manifest()[1:]
+ manifest_list.append(["./_" + str(piece), m[:-1]])
+ piece += 1
+
+manifest_text = "\n".join(" ".join(m) for m in manifest_list)
+
+arvados.current_task().set_output(manifest_text)
return os.path.splitext(os.path.basename(v))[0]
def sub_glob(v):
- return glob.glob(v)[0]
+ l = glob.glob(v)
+ if len(l) == 0:
+ raise Exception("$(glob): No match on '%s'" % v)
+ else:
+ return l[0]
default_subs = {"file ": sub_file,
"dir ": sub_dir,
- user/tutorials/intro-crunch.html.textile.liquid
- user/tutorials/running-external-program.html.textile.liquid
- user/tutorials/tutorial-firstscript.html.textile.liquid
- - user/topics/tutorial-job-debug.html.textile.liquid
+ - user/tutorials/tutorial-submit-job.html.textile.liquid
- user/topics/tutorial-parallel.html.textile.liquid
- user/examples/crunch-examples.html.textile.liquid
- Query the metadata database:
|limit|integer (default 100)|Maximum number of collections to return.|query||
|order|string|Order in which to return matching collections.|query||
|filters|array|Conditions for filtering collections.|query||
+|select|array|Data fields to return in the result list.|query|@["uuid", "manifest_text"]@|
+
+N.B.: Because adding access tokens to manifests can be computationally expensive, the @manifest_text@ field is not included in results by default. If you need it, pass a @select@ parameter that includes @manifest_text@.
h2. update
+++ /dev/null
----
-layout: default
-navsection: userguide
-title: "Debugging a Crunch script"
-...
-
-To test changes to a script by running a job, the change must be pushed to your hosted repository, and the job might have to wait in the queue before it runs. This cycle can be an inefficient way to develop and debug scripts. This tutorial demonstrates an alternative: using @arv-crunch-job@ to run your job in your local VM. This avoids the job queue and allows you to execute the script directly from your git working tree without committing or pushing.
-
-{% include 'tutorial_expectations' %}
-
-This tutorial uses @$USER@ to denote your username. Replace @$USER@ with your user name in all the following examples.
-
-h2. Create a new script
-
-Change to your Git working directory and create a new script in @crunch_scripts/@.
-
-<notextile>
-<pre><code>~$ <span class="userinput">cd $USER/crunch_scripts</span>
-~/$USER/crunch_scripts$ <span class="userinput">cat >hello-world.py <<EOF
-#!/usr/bin/env python
-
-print "hello world"
-print "this script will fail, and that is expected!"
-EOF</span>
-~/$USER/crunch_scripts$ <span class="userinput">chmod +x hello-world.py</span>
-</code></pre>
-</notextile>
-
-h2. Using arv-crunch-job to run the job in your VM
-
-Instead of a Git commit hash, we provide the path to the directory in the "script_version" parameter. The script specified in "script" is expected to be in the @crunch_scripts/@ subdirectory of the directory specified "script_version". Although we are running the script locally, the script still requires access to the Arvados API server and Keep storage service. The job will be recorded in the Arvados job history, and visible in Workbench.
-
-<notextile>
-<pre><code>~/$USER/crunch_scripts$ <span class="userinput">cat >~/the_job <<EOF
-{
- "repository":"",
- "script":"hello-world.py",
- "script_version":"$HOME/$USER",
- "script_parameters":{}
-}
-EOF</span>
-</code></pre>
-</notextile>
-
-Your shell should fill in values for @$HOME@ and @$USER@ so that the saved JSON points "script_version" at the directory with your checkout. Now you can run that job:
-
-<notextile>
-<pre><code>~/$USER/crunch_scripts</span>$ <span class="userinput">arv-crunch-job --job "$(cat ~/the_job)"</span>
-2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 check slurm allocation
-2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 node localhost - 1 slots
-2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 start
-2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 script hello-world.py
-2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 script_version /home/$USER/$USER
-2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 script_parameters {}
-2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 runtime_constraints {"max_tasks_per_node":0}
-2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 start level 0
-2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 status: 0 done, 0 running, 1 todo
-2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 0 job_task qr1hi-ot0gb-4zdajby8cjmlguh
-2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 0 child 29834 started on localhost.1
-2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 status: 0 done, 1 running, 0 todo
-2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 0 stderr hello world
-2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 0 stderr this script will fail, and that is expected!
-2013-12-12_21:36:43 qr1hi-8i9sb-okzukfzkpbrnhst 29827 0 child 29834 on localhost.1 exit 0 signal 0 success=
-2013-12-12_21:36:43 qr1hi-8i9sb-okzukfzkpbrnhst 29827 0 failure (#1, permanent) after 0 seconds
-2013-12-12_21:36:43 qr1hi-8i9sb-okzukfzkpbrnhst 29827 0 output
-2013-12-12_21:36:43 qr1hi-8i9sb-okzukfzkpbrnhst 29827 Every node has failed -- giving up on this round
-2013-12-12_21:36:43 qr1hi-8i9sb-okzukfzkpbrnhst 29827 wait for last 0 children to finish
-2013-12-12_21:36:43 qr1hi-8i9sb-okzukfzkpbrnhst 29827 status: 0 done, 0 running, 0 todo
-2013-12-12_21:36:43 qr1hi-8i9sb-okzukfzkpbrnhst 29827 Freeze not implemented
-2013-12-12_21:36:43 qr1hi-8i9sb-okzukfzkpbrnhst 29827 collate
-2013-12-12_21:36:43 qr1hi-8i9sb-okzukfzkpbrnhst 29827 output d41d8cd98f00b204e9800998ecf8427e+0
-2013-12-12_21:36:44 qr1hi-8i9sb-okzukfzkpbrnhst 29827 meta key is c00bfbd58e6f58ce3bebdd47f745a70f+1857
-</code></pre>
-</notextile>
-
-These are the lines of interest:
-
-bc. 2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 0 stderr hello world
-2013-12-12_21:36:42 qr1hi-8i9sb-okzukfzkpbrnhst 29827 0 stderr this script will fail, and that is expected!
-
-The script's output is captured in the log, which is useful for print statement debugging. However, although this script returned a status code of 0 (success), the job failed. Why? For a job to complete successfully scripts must explicitly add their output to Keep, and then tell Arvados about it. Here is a second try:
-
-<notextile>
-<pre><code>~/$USER/crunch_scripts$ <span class="userinput">cat >hello-world-fixed.py <<EOF
-#!/usr/bin/env python
-
-import arvados
-
-# Create a new collection
-out = arvados.CollectionWriter()
-
-# Set the name of the file in the collection to write to
-out.set_current_file_name('hello.txt')
-
-# Actually output our text
-out.write('hello world')
-
-# Commit the collection to Keep
-out_collection = out.finish()
-
-# Tell Arvados which Keep object is our output
-arvados.current_task().set_output(out_collection)
-
-# Done!
-EOF</span>
-~/$USER/crunch_scripts$ <span class="userinput">chmod +x hello-world-fixed.py</span>
-~/$USER/crunch_scripts$ <span class="userinput">cat >~/the_job <<EOF
-{
- "repository":"",
- "script":"hello-world-fixed.py",
- "script_version":"$HOME/$USER",
- "script_parameters":{}
-}
-EOF</span>
-~/$USER/crunch_scripts$ <span class="userinput">arv-crunch-job --job "$(cat ~/the_job)"</span>
-2013-12-12_21:56:59 qr1hi-8i9sb-79260ykfew5trzl 31578 check slurm allocation
-2013-12-12_21:56:59 qr1hi-8i9sb-79260ykfew5trzl 31578 node localhost - 1 slots
-2013-12-12_21:57:00 qr1hi-8i9sb-79260ykfew5trzl 31578 start
-2013-12-12_21:57:00 qr1hi-8i9sb-79260ykfew5trzl 31578 script hello-world-fixed.py
-2013-12-12_21:57:00 qr1hi-8i9sb-79260ykfew5trzl 31578 script_version /home/$USER/$USER
-2013-12-12_21:57:00 qr1hi-8i9sb-79260ykfew5trzl 31578 script_parameters {}
-2013-12-12_21:57:00 qr1hi-8i9sb-79260ykfew5trzl 31578 runtime_constraints {"max_tasks_per_node":0}
-2013-12-12_21:57:00 qr1hi-8i9sb-79260ykfew5trzl 31578 start level 0
-2013-12-12_21:57:00 qr1hi-8i9sb-79260ykfew5trzl 31578 status: 0 done, 0 running, 1 todo
-2013-12-12_21:57:00 qr1hi-8i9sb-79260ykfew5trzl 31578 0 job_task qr1hi-ot0gb-u8g594ct0wt7f3f
-2013-12-12_21:57:00 qr1hi-8i9sb-79260ykfew5trzl 31578 0 child 31585 started on localhost.1
-2013-12-12_21:57:00 qr1hi-8i9sb-79260ykfew5trzl 31578 status: 0 done, 1 running, 0 todo
-2013-12-12_21:57:02 qr1hi-8i9sb-79260ykfew5trzl 31578 0 child 31585 on localhost.1 exit 0 signal 0 success=true
-2013-12-12_21:57:02 qr1hi-8i9sb-79260ykfew5trzl 31578 0 success in 1 seconds
-2013-12-12_21:57:02 qr1hi-8i9sb-79260ykfew5trzl 31578 0 output 576c44d762ba241b0a674aa43152b52a+53
-2013-12-12_21:57:02 qr1hi-8i9sb-79260ykfew5trzl 31578 wait for last 0 children to finish
-2013-12-12_21:57:02 qr1hi-8i9sb-79260ykfew5trzl 31578 status: 1 done, 0 running, 0 todo
-2013-12-12_21:57:02 qr1hi-8i9sb-79260ykfew5trzl 31578 Freeze not implemented
-2013-12-12_21:57:02 qr1hi-8i9sb-79260ykfew5trzl 31578 collate
-2013-12-12_21:57:02 qr1hi-8i9sb-79260ykfew5trzl 31578 output 576c44d762ba241b0a674aa43152b52a+53
-WARNING:root:API lookup failed for collection 576c44d762ba241b0a674aa43152b52a+53 (<class 'apiclient.errors.HttpError'>: <HttpError 404 when requesting https://qr1hi.arvadosapi.com/arvados/v1/collections/576c44d762ba241b0a674aa43152b52a%2B53?alt=json returned "Not Found">)
-2013-12-12_21:57:03 qr1hi-8i9sb-79260ykfew5trzl 31578 finish
-</code></pre>
-</notextile>
-
-(The WARNING issued near the end of the script may be safely ignored here; it is the Arvados SDK letting you know that it could not find a collection named @576c44d762ba241b0a674aa43152b52a+53@ and that it is going to try looking up a block by that name instead.)
-
-The job succeeded, with output in Keep object @576c44d762ba241b0a674aa43152b52a+53@. Let's look at our output:
-
-<notextile>
-<pre><code>~/$USER/crunch_scripts$ <span class="userinput">arv keep get 576c44d762ba241b0a674aa43152b52a+53/hello.txt</span>
-hello world
-</code></pre>
-</notextile>
-
-h3. Location of temporary files
-
-Crunch job tasks are supplied with @TASK_WORK@ and @JOB_WORK@ environment variables, to be used as scratch space. When running in local development mode using @arv-crunch-job@, Crunch sets these variables to point to directory called @crunch-job-{USERID}@ in @TMPDIR@ (or @/tmp@ if @TMPDIR@ is not set).
-
-* Set @TMPDIR@ to @/scratch@ to make Crunch use a directory like @/scratch/crunch-job-{USERID}/@ for temporary space.
-
-* Set @CRUNCH_TMP@ to @/scratch/foo@ to make Crunch use @/scratch/foo/@ for temporary space (omitting the default @crunch-job-{USERID}@ leaf name)
-
-h3. Testing job scripts without SDKs and Keep access
-
-Read and write data to @/tmp/@ instead of Keep. This only works with the Python SDK.
-
-notextile. <pre><code>~$ <span class="userinput">export KEEP_LOCAL_STORE=/tmp</span></code></pre>
---
layout: default
navsection: userguide
-title: "Parallel Crunch tasks"
+title: "Concurrent Crunch tasks"
...
-In the previous tutorials, we used @arvados.job_setup.one_task_per_input_file()@ to automatically parallelize our jobs by creating a separate task per file. For some types of jobs, you may need to split the work up differently, for example creating tasks to process different segments of a single large file. In this this tutorial will demonstrate how to create Crunch tasks directly.
+In the previous tutorials, we used @arvados.job_setup.one_task_per_input_file()@ to automatically create concurrent jobs by creating a separate task per file. For some types of jobs, you may need to split the work up differently, for example creating tasks to process different segments of a single large file. This tutorial demonstrates how to create Crunch tasks directly.
Start by entering the @crunch_scripts@ directory of your Git repository:
</code></pre>
</notextile>
-Next, using @nano@ or your favorite Unix text editor, create a new file called @parallel-hash.py@ in the @crunch_scripts@ directory.
+Next, using @nano@ or your favorite Unix text editor, create a new file called @concurrent-hash.py@ in the @crunch_scripts@ directory.
-notextile. <pre>~/$USER/crunch_scripts$ <code class="userinput">nano parallel-hash.py</code></pre>
+notextile. <pre>~/$USER/crunch_scripts$ <code class="userinput">nano concurrent-hash.py</code></pre>
Add the following code to compute the MD5 hash of each file in a collection:
-<notextile> {% code 'parallel_hash_script_py' as python %} </notextile>
+<notextile> {% code 'concurrent_hash_script_py' as python %} </notextile>
Make the file executable:
-notextile. <pre><code>~/$USER/crunch_scripts$ <span class="userinput">chmod +x parallel-hash.py</span></code></pre>
+notextile. <pre><code>~/$USER/crunch_scripts$ <span class="userinput">chmod +x concurrent-hash.py</span></code></pre>
Add the file to the Git staging area, commit, and push:
<notextile>
-<pre><code>~/$USER/crunch_scripts$ <span class="userinput">git add parallel-hash.py</span>
-~/$USER/crunch_scripts$ <span class="userinput">git commit -m"parallel hash"</span>
+<pre><code>~/$USER/crunch_scripts$ <span class="userinput">git add concurrent-hash.py</span>
+~/$USER/crunch_scripts$ <span class="userinput">git commit -m"concurrent hash"</span>
~/$USER/crunch_scripts$ <span class="userinput">git push origin master</span>
</code></pre>
</notextile>
-You should now be able to run your new script using Crunch, with "script" referring to our new "parallel-hash.py" script. We will use a different input from our previous examples. We will use @887cd41e9c613463eab2f0d885c6dd96+83@ which consists of three files, "alice.txt", "bob.txt" and "carol.txt" (the example collection used previously in "fetching data from Arvados using Keep":{{site.baseurl}}/user/tutorials/tutorial-keep.html#dir).
+You should now be able to run your new script using Crunch, with "script" referring to our new "concurrent-hash.py" script. We will use a different input from our previous examples. We will use @887cd41e9c613463eab2f0d885c6dd96+83@ which consists of three files, "alice.txt", "bob.txt" and "carol.txt" (the example collection used previously in "fetching data from Arvados using Keep":{{site.baseurl}}/user/tutorials/tutorial-keep.html#dir).
<notextile>
<pre><code>~/$USER/crunch_scripts$ <span class="userinput">cat >~/the_job <<EOF
{
- "script": "parallel-hash.py",
+ "script": "concurrent-hash.py",
"repository": "$USER",
"script_version": "master",
"script_parameters":
(Your shell should automatically fill in @$USER@ with your login name. The job JSON that gets saved should have @"repository"@ pointed at your personal Git repository.)
-Because the job ran in parallel, each instance of parallel-hash creates a separate @md5sum.txt@ as output. Arvados automatically collates theses files into a single collection, which is the output of the job:
+Because the job ran concurrently, each instance of concurrent-hash creates a separate @md5sum.txt@ as output. Arvados automatically collates these files into a single collection, which is the output of the job:
<notextile>
<pre><code>~/$USER/crunch_scripts$ <span class="userinput">arv keep ls e2ccd204bca37c77c0ba59fc470cd0f7+162</span>
These PGP IDs let us find public profiles, for example:
-* "https://my.personalgenomes.org/profile/huE2E371":https://my.personalgenomes.org/profile/huE2E371
-* "https://my.personalgenomes.org/profile/huDF04CC":https://my.personalgenomes.org/profile/huDF04CC
+* "https://my.pgp-hms.org/profile/huE2E371":https://my.pgp-hms.org/profile/huE2E371
+* "https://my.pgp-hms.org/profile/huDF04CC":https://my.pgp-hms.org/profile/huDF04CC
* ...
h2. Find genomic data from specific humans
-Now we want to find collections in Keep that were provided by these humans. We search the "links" resource for "provenance" links that point to subjects in list of humans with the non-melanoma skin cancer trait:
+Now we want to find collections in Keep that were provided by these humans. We search the "links" resource for "provenance" links that point to entries in the list of humans with the non-melanoma skin cancer trait:
<notextile>
<pre><code>>>> <span class="userinput">provenance_links = arvados.api().links().list(limit=1000, filters=[
# print PGP public profile links with file locators
for c in collections['items']:
for f in c['files']:
- print "https://my.personalgenomes.org/profile/%s %s %s%s" % (pgpid[c['uuid']], c['uuid'], ('' if f[0] == '.' else f[0]+'/'), f[1])
+ print "https://my.pgp-hms.org/profile/%s %s %s%s" % (pgpid[c['uuid']], c['uuid'], ('' if f[0] == '.' else f[0]+'/'), f[1])
</span>
-https://my.personalgenomes.org/profile/hu43860C a58dca7609fa84c8c38a7e926a97b2fc var-GS00253-DNA_A01_200_37-ASM.tsv.bz2
-https://my.personalgenomes.org/profile/huB1FD55 ea30eb9e46eedf7f05ed6e348c2baf5d var-GS000010320-ASM.tsv.bz2
-https://my.personalgenomes.org/profile/huDF04CC 4ab0df8f22f595d1747a22c476c05873 var-GS000010427-ASM.tsv.bz2
-https://my.personalgenomes.org/profile/hu7A2F1D 756d0ada29b376140f64e7abfe6aa0e7 var-GS000014566-ASM.tsv.bz2
-https://my.personalgenomes.org/profile/hu553620 7ed4e425bb1c7cc18387cbd9388181df var-GS000015272-ASM.tsv.bz2
-https://my.personalgenomes.org/profile/huD09534 542112e210daff30dd3cfea4801a9f2f var-GS000016374-ASM.tsv.bz2
-https://my.personalgenomes.org/profile/hu599905 33a9f3842b01ea3fdf27cc582f5ea2af var-GS000016015-ASM.tsv.bz2
-https://my.personalgenomes.org/profile/hu43860C a58dca7609fa84c8c38a7e926a97b2fc+302 var-GS00253-DNA_A01_200_37-ASM.tsv.bz2
-https://my.personalgenomes.org/profile/huB1FD55 ea30eb9e46eedf7f05ed6e348c2baf5d+291 var-GS000010320-ASM.tsv.bz2
-https://my.personalgenomes.org/profile/huDF04CC 4ab0df8f22f595d1747a22c476c05873+242 var-GS000010427-ASM.tsv.bz2
-https://my.personalgenomes.org/profile/hu7A2F1D 756d0ada29b376140f64e7abfe6aa0e7+242 var-GS000014566-ASM.tsv.bz2
-https://my.personalgenomes.org/profile/hu553620 7ed4e425bb1c7cc18387cbd9388181df+242 var-GS000015272-ASM.tsv.bz2
-https://my.personalgenomes.org/profile/huD09534 542112e210daff30dd3cfea4801a9f2f+242 var-GS000016374-ASM.tsv.bz2
-https://my.personalgenomes.org/profile/hu599905 33a9f3842b01ea3fdf27cc582f5ea2af+242 var-GS000016015-ASM.tsv.bz2
-https://my.personalgenomes.org/profile/hu599905 d6e2e57cd60ba5979006d0b03e45e726+81 Witch_results.zip
-https://my.personalgenomes.org/profile/hu553620 ea4f2d325592a1272f989d141a917fdd+85 Devenwood_results.zip
-https://my.personalgenomes.org/profile/hu7A2F1D 4580f6620bb15b25b18373766e14e4a7+85 Innkeeper_results.zip
-https://my.personalgenomes.org/profile/huD09534 fee37be9440b912eb90f5e779f272416+82 Hallet_results.zip
+https://my.pgp-hms.org/profile/hu43860C a58dca7609fa84c8c38a7e926a97b2fc var-GS00253-DNA_A01_200_37-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/huB1FD55 ea30eb9e46eedf7f05ed6e348c2baf5d var-GS000010320-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/huDF04CC 4ab0df8f22f595d1747a22c476c05873 var-GS000010427-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu7A2F1D 756d0ada29b376140f64e7abfe6aa0e7 var-GS000014566-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu553620 7ed4e425bb1c7cc18387cbd9388181df var-GS000015272-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/huD09534 542112e210daff30dd3cfea4801a9f2f var-GS000016374-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu599905 33a9f3842b01ea3fdf27cc582f5ea2af var-GS000016015-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu43860C a58dca7609fa84c8c38a7e926a97b2fc+302 var-GS00253-DNA_A01_200_37-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/huB1FD55 ea30eb9e46eedf7f05ed6e348c2baf5d+291 var-GS000010320-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/huDF04CC 4ab0df8f22f595d1747a22c476c05873+242 var-GS000010427-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu7A2F1D 756d0ada29b376140f64e7abfe6aa0e7+242 var-GS000014566-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu553620 7ed4e425bb1c7cc18387cbd9388181df+242 var-GS000015272-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/huD09534 542112e210daff30dd3cfea4801a9f2f+242 var-GS000016374-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu599905 33a9f3842b01ea3fdf27cc582f5ea2af+242 var-GS000016015-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu599905 d6e2e57cd60ba5979006d0b03e45e726+81 Witch_results.zip
+https://my.pgp-hms.org/profile/hu553620 ea4f2d325592a1272f989d141a917fdd+85 Devenwood_results.zip
+https://my.pgp-hms.org/profile/hu7A2F1D 4580f6620bb15b25b18373766e14e4a7+85 Innkeeper_results.zip
+https://my.pgp-hms.org/profile/huD09534 fee37be9440b912eb90f5e779f272416+82 Hallet_results.zip
</code></pre>
</notextile>
title: "Writing a Crunch script"
...
-This tutorial demonstrates how to create a new Arvados pipeline using the Arvados Python SDK. The Arvados SDK supports access to advanced features not available using the @run-command@ wrapper, such as scheduling parallel tasks across nodes.
+This tutorial demonstrates how to write a script using the Arvados Python SDK. The Arvados SDK supports access to advanced features not available using the @run-command@ wrapper, such as scheduling concurrent tasks across nodes.
{% include 'tutorial_expectations' %}
This tutorial uses @$USER@ to denote your username. Replace @$USER@ with your user name in all the following examples.
-h2. Setting up Git
-
-All Crunch scripts are managed through the Git revision control system. Before you start using Git, you should do some basic configuration (you only need to do this the first time):
+Start by creating a directory called @tutorial@. Next, create a subdirectory called @crunch_scripts@ and change to that directory:
<notextile>
-<pre><code>~$ <span class="userinput">git config --global user.name "Your Name"</span>
-~$ <span class="userinput">git config --global user.email $USER@example.com</span></code></pre>
-</notextile>
-
-On the Arvados Workbench, navigate to "Code repositories":https://{{site.arvados_workbench_host}}/repositories. You should see a repository with your user name listed in the *name* column. Next to *name* is the column *push_url*. Copy the *push_url* value associated with your repository. This should look like <notextile><code>git@git.{{ site.arvados_api_host }}:$USER.git</code></notextile>.
-
-Next, on the Arvados virtual machine, clone your Git repository:
-
-<notextile>
-<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
-~$ <span class="userinput">git clone git@git.{{ site.arvados_api_host }}:$USER.git</span>
-Cloning into '$USER'...</code></pre>
-</notextile>
-
-This will create a Git repository in the directory called @$USER@ in your home directory. Say yes when prompted to continue with connection.
-Ignore any warning that you are cloning an empty repository.
-
-{% include 'notebox_begin' %}
-For more information about using Git, try
-
-notextile. <pre><code>$ <span class="userinput">man gittutorial</span></code></pre>
-
-or *"search Google for Git tutorials":http://google.com/#q=git+tutorial*.
-{% include 'notebox_end' %}
-
-h2. Creating a Crunch script
-
-Start by entering the @$USER@ directory created by @git clone@. Next create a subdirectory called @crunch_scripts@ and change to that directory:
-
-<notextile>
-<pre><code>~$ <span class="userinput">cd $USER</span>
-~/$USER$ <span class="userinput">mkdir crunch_scripts</span>
-~/$USER$ <span class="userinput">cd crunch_scripts</span></code></pre>
+<pre><code>~$ <span class="userinput">mkdir -p tutorial/crunch_scripts</span>
+~$ <span class="userinput">cd tutorial/crunch_scripts</span></code></pre>
</notextile>
Next, using @nano@ or your favorite Unix text editor, create a new file called @hash.py@ in the @crunch_scripts@ directory.
-notextile. <pre>~/$USER/crunch_scripts$ <code class="userinput">nano hash.py</code></pre>
+notextile. <pre>~/tutorial/crunch_scripts$ <code class="userinput">nano hash.py</code></pre>
Add the following code to compute the MD5 hash of each file in a collection:
Make the file executable:
-notextile. <pre><code>~/$USER/crunch_scripts$ <span class="userinput">chmod +x hash.py</span></code></pre>
-
-{% include 'notebox_begin' %}
-The steps below describe how to execute the script after committing changes to Git. To run a single script locally for testing (bypassing the job queue) please see "debugging a crunch script":{{site.baseurl}}/user/topics/tutorial-job-debug.html.
-
-{% include 'notebox_end' %}
+notextile. <pre><code>~/tutorial/crunch_scripts$ <span class="userinput">chmod +x hash.py</span></code></pre>
-Next, add the file to the staging area. This tells @git@ that the file should be included on the next commit.
-
-notextile. <pre><code>~/$USER/crunch_scripts$ <span class="userinput">git add hash.py</span></code></pre>
-
-Next, commit your changes. All staged changes are recorded into the local git repository:
-
-<notextile>
-<pre><code>~/$USER/crunch_scripts$ <span class="userinput">git commit -m"my first script"</span>
-[master (root-commit) 27fd88b] my first script
- 1 file changed, 45 insertions(+)
- create mode 100755 crunch_scripts/hash.py</code></pre>
-</notextile>
-
-Finally, upload your changes to the Arvados server:
-
-<notextile>
-<pre><code>~/$USER/crunch_scripts$ <span class="userinput">git push origin master</span>
-Counting objects: 4, done.
-Compressing objects: 100% (2/2), done.
-Writing objects: 100% (4/4), 682 bytes, done.
-Total 4 (delta 0), reused 0 (delta 0)
-To git@git.qr1hi.arvadosapi.com:$USER.git
- * [new branch] master -> master</code></pre>
-</notextile>
-
-h2. Create a pipeline template
-
-Next, create a file that contains the pipeline definition:
+Next, create a job submission record. This describes a specific invocation of your script:
<notextile>
-<pre><code>~/$USER/crunch_scripts$ <span class="userinput">cd ~</span>
-~$ <span class="userinput">cat >the_pipeline <<EOF
+<pre><code>~/tutorial/crunch_scripts$ <span class="userinput">cat >~/the_job <<EOF
{
- "name":"My md5 pipeline",
- "components":{
- "do_hash":{
- "script":"hash.py",
- "script_parameters":{
- "input":{
- "required": true,
- "dataclass": "Collection"
- }
- },
- "repository":"$USER",
- "script_version":"master",
- "output_is_persistent":true,
- "runtime_constraints":{
- "docker_image":"arvados/jobs-java-bwa-samtools"
- }
- }
- }
+ "repository":"",
+ "script":"hash.py",
+ "script_version":"$HOME/tutorial",
+ "script_parameters":{
+ "input":"c1bad4b39ca5a924e481008009d94e32+210"
+ }
}
-EOF
-</span></code></pre>
+EOF</span>
+</code></pre>
</notextile>
-* @"repository"@ is the name of a git repository to search for the script version. You can access a list of available git repositories on the Arvados Workbench under "Code repositories":https://{{site.arvados_workbench_host}}/repositories.
-* @"script_version"@ specifies the version of the script that you wish to run. This can be in the form of an explicit Git revision hash, a tag, or a branch (in which case it will use the HEAD of the specified branch). Arvados logs the script version that was used in the run, enabling you to go back and re-run any past job with the guarantee that the exact same code will be used as was used in the previous run.
-* @"script"@ specifies the filename of the script to run. Crunch expects to find this in the @crunch_scripts/@ subdirectory of the Git repository.
-
-Now, use @arv pipeline_template create@ to register your pipeline template in Arvados:
+You can now run your script on your local workstation or VM using @arv-crunch-job@:
<notextile>
-<pre><code>~$ <span class="userinput">arv pipeline_template create --pipeline-template "$(cat the_pipeline)"</span>
+<pre><code>~/tutorial/crunch_scripts$ <span class="userinput">arv-crunch-job --job "$(cat ~/the_job)"</span>
+2014-08-06_15:16:22 qr1hi-8i9sb-qyrat80ef927lam 14473 check slurm allocation
+2014-08-06_15:16:22 qr1hi-8i9sb-qyrat80ef927lam 14473 node localhost - 1 slots
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473 start
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473 script hash.py
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473 script_version /home/$USER/tutorial
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473 script_parameters {"input":"c1bad4b39ca5a924e481008009d94e32+210"}
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473 runtime_constraints {"max_tasks_per_node":0}
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473 start level 0
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473 status: 0 done, 0 running, 1 todo
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473 0 job_task qr1hi-ot0gb-lptn85mwkrn9pqo
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473 0 child 14478 started on localhost.1
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473 status: 0 done, 1 running, 0 todo
+2014-08-06_15:16:24 qr1hi-8i9sb-qyrat80ef927lam 14473 0 stderr crunchstat: Running [stdbuf --output=0 --error=0 /home/$USER/tutorial/crunch_scripts/hash.py]
+2014-08-06_15:16:24 qr1hi-8i9sb-qyrat80ef927lam 14473 0 child 14478 on localhost.1 exit 0 signal 0 success=true
+2014-08-06_15:16:24 qr1hi-8i9sb-qyrat80ef927lam 14473 0 success in 1 seconds
+2014-08-06_15:16:24 qr1hi-8i9sb-qyrat80ef927lam 14473 0 output
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473 wait for last 0 children to finish
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473 status: 1 done, 0 running, 1 todo
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473 start level 1
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473 status: 1 done, 0 running, 1 todo
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473 1 job_task qr1hi-ot0gb-e3obm0lv6k6p56a
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473 1 child 14504 started on localhost.1
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473 status: 1 done, 1 running, 0 todo
+2014-08-06_15:16:26 qr1hi-8i9sb-qyrat80ef927lam 14473 1 stderr crunchstat: Running [stdbuf --output=0 --error=0 /home/$USER/tutorial/crunch_scripts/hash.py]
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473 1 child 14504 on localhost.1 exit 0 signal 0 success=true
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473 1 success in 10 seconds
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473 1 output 50cafdb29cc21dd6eaec85ba9e0c6134+56+Aef0f991b80fa0b75f802e58e70b207aa184d24ff@53f4bbd3
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473 wait for last 0 children to finish
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473 status: 2 done, 0 running, 0 todo
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473 Freeze not implemented
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473 collate
+2014-08-06_15:16:36 qr1hi-8i9sb-qyrat80ef927lam 14473 output d6338df28d6b8e5d14929833b417e20e+107+Adf1ce81222b6992ce5d33d8bfb28a6b5a1497898@53f4bbd4
+2014-08-06_15:16:37 qr1hi-8i9sb-qyrat80ef927lam 14473 finish
+2014-08-06_15:16:38 qr1hi-8i9sb-qyrat80ef927lam 14473 log manifest is 7fe8cf1d45d438a3ca3ac4a184b7aff4+83
</code></pre>
</notextile>
-h2. Running your pipeline
+Although the job runs locally, the output of the job has been saved to Keep, the Arvados file store. The "output" line (third from the bottom) provides the "Keep locator":{{site.baseurl}}/user/tutorials/tutorial-keep-get.html to which the script's output has been saved. Copy the output identifier and use @arv-ls@ to list the contents of your output collection, and @arv-get@ to download it to the current directory:
-Your new pipeline template should appear at the top of the Workbench "pipeline templates":https://{{ site.arvados_workbench_host }}/pipeline_templates page. You can run your pipeline "using Workbench":tutorial-pipeline-workbench.html or the "command line.":{{site.baseurl}}/user/topics/running-pipeline-command-line.html
+<notextile>
+<pre><code>~/tutorial/crunch_scripts$ <span class="userinput">arv-ls d6338df28d6b8e5d14929833b417e20e+107+Adf1ce81222b6992ce5d33d8bfb28a6b5a1497898@53f4bbd4</span>
+./md5sum.txt
+~/tutorial/crunch_scripts$ <span class="userinput">arv-get d6338df28d6b8e5d14929833b417e20e+107+Adf1ce81222b6992ce5d33d8bfb28a6b5a1497898@53f4bbd4/ .</span>
+~/tutorial/crunch_scripts$ <span class="userinput">cat md5sum.txt</span>
+44b8ae3fde7a8a88d2f7ebd237625b4f c1bad4b39ca5a924e481008009d94e32+210/./var-GS000016015-ASM.tsv.bz2
+</code></pre>
+</notextile>
-For more information and examples for writing pipelines, see the "pipeline template reference":{{site.baseurl}}/api/schema/PipelineTemplate.html
+Running locally is convenient for development and debugging, as it permits a fast iterative development cycle. Your job run is also recorded by Arvados, and will show up in the "Recent jobs and pipelines" panel on the "Workbench dashboard":https://{{site.arvados_workbench_host}}. This provides limited provenance, by recording the input parameters, the execution log, and the output. However, running locally does not allow you to scale out to multiple nodes, and does not store the complete system snapshot required to achieve reproducibility; for that you need to "submit a job to the Arvados cluster":{{site.baseurl}}/user/tutorials/tutorial-submit-job.html.
title: "Uploading data"
...
-This tutorial describes how to to upload new Arvados data collections using the command line tool @arv-put@. This example uses a freely available TSV file containing variant annotations from "Personal Genome Project (PGP)":http://www.personalgenomes.org subject "hu599905.":https://my.personalgenomes.org/profile/hu599905
+This tutorial describes how to upload new Arvados data collections using the command line tool @arv-put@. This example uses a freely available TSV file containing variant annotations from "Personal Genome Project (PGP)":http://www.pgp-hms.org participant "hu599905.":https://my.pgp-hms.org/profile/hu599905
notextile. <div class="spaced-out">
# On system from which you will upload data, configure the environment with the Arvados instance host name and authentication token as decribed in "Getting an API token.":{{site.baseurl}}/user/reference/api-tokens.html (If you are logged into an Arvados VM, you can skip this step.)
# Download the following example file. (If you are uploading your own data, you can skip this step.)
<notextile>
-<pre><code>~$ <span class="userinput">curl -o var-GS000016015-ASM.tsv.bz2 'https://warehouse.personalgenomes.org/warehouse/f815ec01d5d2f11cb12874ab2ed50daa+234+K@ant/var-GS000016015-ASM.tsv.bz2'</span>
+<pre><code>~$ <span class="userinput">curl -o var-GS000016015-ASM.tsv.bz2 'https://warehouse.pgp-hms.org/warehouse/f815ec01d5d2f11cb12874ab2ed50daa+234+K@ant/var-GS000016015-ASM.tsv.bz2'</span>
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 216M 100 216M 0 0 10.0M 0 0:00:21 0:00:21 --:--:-- 9361k
--- /dev/null
+---
+layout: default
+navsection: userguide
+navmenu: Tutorials
+title: "Running on an Arvados cluster"
+...
+
+This tutorial demonstrates how to create a pipeline to run your crunch script on an Arvados cluster. Cluster jobs can scale out to multiple nodes, and use @git@ and @docker@ to store the complete system snapshot required to achieve reproducibility.
+
+{% include 'tutorial_expectations' %}
+
+This tutorial uses @$USER@ to denote your username. Replace @$USER@ with your user name in all the following examples.
+
+h2. Setting up Git
+
+All Crunch scripts are managed through the Git revision control system. Before you start using Git, you should do some basic configuration (you only need to do this the first time):
+
+<notextile>
+<pre><code>~$ <span class="userinput">git config --global user.name "Your Name"</span>
+~$ <span class="userinput">git config --global user.email $USER@example.com</span></code></pre>
+</notextile>
+
+On the Arvados Workbench, navigate to "Code repositories":https://{{site.arvados_workbench_host}}/repositories. You should see a repository with your user name listed in the *name* column. Next to *name* is the column *push_url*. Copy the *push_url* value associated with your repository. This should look like <notextile><code>git@git.{{ site.arvados_api_host }}:$USER.git</code></notextile>.
+
+Next, on the Arvados virtual machine, clone your Git repository:
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
+~$ <span class="userinput">git clone git@git.{{ site.arvados_api_host }}:$USER.git</span>
+Cloning into '$USER'...</code></pre>
+</notextile>
+
+This will create a Git repository in the directory called @$USER@ in your home directory. Say yes when prompted to continue with connection.
+Ignore any warning that you are cloning an empty repository.
+
+{% include 'notebox_begin' %}
+For more information about using Git, try
+
+notextile. <pre><code>$ <span class="userinput">man gittutorial</span></code></pre>
+
+or *"search Google for Git tutorials":http://google.com/#q=git+tutorial*.
+{% include 'notebox_end' %}
+
+h2. Creating a Crunch script
+
+Start by entering the @$USER@ directory created by @git clone@. Next create a subdirectory called @crunch_scripts@ and change to that directory:
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd $USER</span>
+~/$USER$ <span class="userinput">mkdir crunch_scripts</span>
+~/$USER$ <span class="userinput">cd crunch_scripts</span></code></pre>
+</notextile>
+
+Next, using @nano@ or your favorite Unix text editor, create a new file called @hash.py@ in the @crunch_scripts@ directory.
+
+notextile. <pre>~/$USER/crunch_scripts$ <code class="userinput">nano hash.py</code></pre>
+
+Add the following code to compute the MD5 hash of each file in a collection (if you already completed "Writing a Crunch script":tutorial-firstscript.html you can just copy the @hash.py@ file you created previously.)
+
+<notextile> {% code 'tutorial_hash_script_py' as python %} </notextile>
+
+Make the file executable:
+
+notextile. <pre><code>~/$USER/crunch_scripts$ <span class="userinput">chmod +x hash.py</span></code></pre>
+
+Next, add the file to the staging area. This tells @git@ that the file should be included on the next commit.
+
+notextile. <pre><code>~/$USER/crunch_scripts$ <span class="userinput">git add hash.py</span></code></pre>
+
+Next, commit your changes. All staged changes are recorded into the local git repository:
+
+<notextile>
+<pre><code>~/$USER/crunch_scripts$ <span class="userinput">git commit -m"my first script"</span>
+[master (root-commit) 27fd88b] my first script
+ 1 file changed, 45 insertions(+)
+ create mode 100755 crunch_scripts/hash.py</code></pre>
+</notextile>
+
+Finally, upload your changes to the Arvados server:
+
+<notextile>
+<pre><code>~/$USER/crunch_scripts$ <span class="userinput">git push origin master</span>
+Counting objects: 4, done.
+Compressing objects: 100% (2/2), done.
+Writing objects: 100% (4/4), 682 bytes, done.
+Total 4 (delta 0), reused 0 (delta 0)
+To git@git.qr1hi.arvadosapi.com:$USER.git
+ * [new branch] master -> master</code></pre>
+</notextile>
+
+h2. Create a pipeline template
+
+Next, create a file that contains the pipeline definition:
+
+<notextile>
+<pre><code>~/$USER/crunch_scripts$ <span class="userinput">cd ~</span>
+~$ <span class="userinput">cat >the_pipeline <<EOF
+{
+ "name":"My md5 pipeline",
+ "components":{
+ "do_hash":{
+ "script":"hash.py",
+ "script_parameters":{
+ "input":{
+ "required": true,
+ "dataclass": "Collection"
+ }
+ },
+ "repository":"$USER",
+ "script_version":"master",
+ "output_is_persistent":true,
+ "runtime_constraints":{
+ "docker_image":"arvados/jobs-java-bwa-samtools"
+ }
+ }
+ }
+}
+EOF
+</span></code></pre>
+</notextile>
+
+* @"repository"@ is the name of a git repository to search for the script version. You can access a list of available git repositories on the Arvados Workbench under "Code repositories":https://{{site.arvados_workbench_host}}/repositories.
+* @"script_version"@ specifies the version of the script that you wish to run. This can be in the form of an explicit Git revision hash, a tag, or a branch (in which case it will use the HEAD of the specified branch). Arvados logs the script version that was used in the run, enabling you to go back and re-run any past job with the guarantee that the exact same code will be used as was used in the previous run.
+* @"script"@ specifies the filename of the script to run. Crunch expects to find this in the @crunch_scripts/@ subdirectory of the Git repository.
+
+Now, use @arv pipeline_template create@ to register your pipeline template in Arvados:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv pipeline_template create --pipeline-template "$(cat the_pipeline)"</span>
+</code></pre>
+</notextile>
+
+h2. Running your pipeline
+
+Your new pipeline template should appear at the top of the Workbench "pipeline templates":https://{{ site.arvados_workbench_host }}/pipeline_templates page. You can run your pipeline "using Workbench":tutorial-pipeline-workbench.html or the "command line.":{{site.baseurl}}/user/topics/running-pipeline-command-line.html
+
+For more information and examples for writing pipelines, see the "pipeline template reference":{{site.baseurl}}/api/schema/PipelineTemplate.html
# Install dependencies and set up system.
# The FUSE packages help ensure that we can install the Python SDK (arv-mount).
RUN /usr/bin/apt-get install -q -y python-dev python-llfuse python-pip \
- libio-socket-ssl-perl libjson-perl liburi-perl libwww-perl \
+ libio-socket-ssl-perl libjson-perl liburi-perl libwww-perl dtrx \
fuse libattr1-dev libfuse-dev && \
/usr/sbin/adduser --disabled-password \
--gecos 'Crunch execution user' crunch && \
# Install Arvados packages.
RUN (find /usr/src/arvados/sdk -name '*.gem' -print0 | \
- xargs -0rn 1 gem install) && \
+ xargs -0rn 1 /usr/local/rvm/bin/rvm-exec default gem install) && \
cd /usr/src/arvados/services/fuse && \
python setup.py install && \
cd /usr/src/arvados/sdk/python && \
class Google::APIClient
def discovery_document(api, version)
api = api.to_s
- return @discovery_documents["#{api}:#{version}"] ||=
+ discovery_uri = self.discovery_uri(api, version)
+ discovery_uri_hash = Digest::MD5.hexdigest(discovery_uri)
+ return @discovery_documents[discovery_uri_hash] ||=
begin
# fetch new API discovery doc if stale
- cached_doc = File.expand_path '~/.cache/arvados/discovery_uri.json' rescue nil
+ cached_doc = File.expand_path "~/.cache/arvados/discovery-#{discovery_uri_hash}.json" rescue nil
if cached_doc.nil? or not File.exist?(cached_doc) or (Time.now - File.mtime(cached_doc)) > 86400
response = self.execute!(:http_method => :get,
- :uri => self.discovery_uri(api, version),
+ :uri => discovery_uri,
:authenticated => false)
begin
end
def self.create(attributes)
result = $client.execute(:api_method => $arvados.pipeline_instances.create,
- :body => {
- :pipeline_instance => attributes.to_json
+ :body_object => {
+ :pipeline_instance => attributes
},
:authenticated => false,
:headers => {
:parameters => {
:uuid => @pi[:uuid]
},
- :body => {
- :pipeline_instance => @attributes_to_update.to_json
+ :body_object => {
+ :pipeline_instance => @attributes_to_update
},
:authenticated => false,
:headers => {
body = {job: no_nil_values(job)}.merge(no_nil_values(create_params))
result = $client.execute(:api_method => $arvados.jobs.create,
- :body => body,
+ :body_object => body,
:authenticated => false,
:headers => {
authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
msg += "Job submission was: #{body.to_json}"
$client.execute(:api_method => $arvados.logs.create,
- :body => {
+ :body_object => {
:log => {
:object_uuid => pipeline[:uuid],
:event_type => 'stderr',
end
def setup_instance
- if $options[:submit]
- @instance ||= PipelineInstance.
- create(:components => @components,
- :pipeline_template_uuid => @template[:uuid],
- :state => 'New')
+ if @instance
+ @instance[:properties][:run_options] ||= {}
+ if @options[:no_reuse]
+ # override properties of existing instance
+ @instance[:properties][:run_options][:enable_job_reuse] = false
+ else
+ # Default to "enable reuse" if not specified. (This code path
+ # can go away when old clients go away.)
+ if @instance[:properties][:run_options][:enable_job_reuse].nil?
+ @instance[:properties][:run_options][:enable_job_reuse] = true
+ end
+ end
else
- @instance ||= PipelineInstance.
- create(:components => @components,
- :pipeline_template_uuid => @template[:uuid],
- :state => 'RunningOnClient')
+ @instance = PipelineInstance.
+ create(components: @components,
+ properties: {
+ run_options: {
+ enable_job_reuse: !@options[:no_reuse]
+ }
+ },
+ pipeline_template_uuid: @template[:uuid],
+ state: ($options[:submit] ? 'RunningOnServer' : 'RunningOnClient'))
end
self
end
# dealing with new API servers.
:minimum_script_version => c[:minimum_script_version],
:exclude_script_versions => c[:exclude_minimum_script_versions],
- :find_or_create => !(@options[:no_reuse] || c[:nondeterministic]),
+ :find_or_create => (@instance[:properties][:run_options].andand[:enable_job_reuse] &&
+ !c[:nondeterministic]),
:filters => c[:filters]
})
if job
use Fcntl ':flock';
use File::Path qw( make_path );
+use constant EX_TEMPFAIL => 75;
+
$ENV{"TMPDIR"} ||= "/tmp";
unless (defined $ENV{"CRUNCH_TMP"}) {
$ENV{"CRUNCH_TMP"} = $ENV{"TMPDIR"} . "/crunch-job";
{
$Job = $arv->{'jobs'}->{'get'}->execute('uuid' => $jobspec);
if (!$force_unlock) {
+ # If some other crunch-job process has grabbed this job (or we see
+ # other evidence that the job is already underway) we exit
+ # EX_TEMPFAIL so crunch-dispatch (our parent process) doesn't
+ # mark the job as failed.
if ($Job->{'is_locked_by_uuid'}) {
- croak("Job is locked: " . $Job->{'is_locked_by_uuid'});
+ Log(undef, "Job is locked by " . $Job->{'is_locked_by_uuid'});
+ exit EX_TEMPFAIL;
}
if ($Job->{'success'} ne undef) {
- croak("Job 'success' flag (" . $Job->{'success'} . ") is not null");
+ Log(undef, "Job 'success' flag (" . $Job->{'success'} . ") is not null");
+ exit EX_TEMPFAIL;
}
if ($Job->{'running'}) {
- croak("Job 'running' flag is already set");
+ Log(undef, "Job 'running' flag is already set");
+ exit EX_TEMPFAIL;
}
if ($Job->{'started_at'}) {
- croak("Job 'started_at' time is already set (" . $Job->{'started_at'} . ")");
+ Log(undef, "Job 'started_at' time is already set (" . $Job->{'started_at'} . ")");
+ exit EX_TEMPFAIL;
}
}
}
# Claim this job, and make sure nobody else does
unless ($Job->update_attributes('is_locked_by_uuid' => $User->{'uuid'}) &&
$Job->{'is_locked_by_uuid'} == $User->{'uuid'}) {
- croak("Error while updating / locking job");
+ Log(undef, "Error while updating / locking job, exiting ".EX_TEMPFAIL);
+ exit EX_TEMPFAIL;
}
$Job->update_attributes('started_at' => scalar gmtime,
'running' => 1,
my @execargs = ('bash', '-c', $command);
srun (\@srunargs, \@execargs, undef, $build_script_to_send);
- exit (111);
+ # exec() failed, we assume nothing happened.
+ Log(undef, "srun() failed on build script");
+ die;
}
close("writer");
if (!defined $childpid)
Log (undef, "finish");
save_meta();
-exit 0;
+exit ($Job->{'success'} ? 1 : 0);
class Google::APIClient
def discovery_document(api, version)
api = api.to_s
- return @discovery_documents["#{api}:#{version}"] ||=
+ discovery_uri = self.discovery_uri(api, version)
+ discovery_uri_hash = Digest::MD5.hexdigest(discovery_uri)
+ return @discovery_documents[discovery_uri_hash] ||=
begin
# fetch new API discovery doc if stale
- cached_doc = File.expand_path '~/.cache/arvados/discovery_uri.json'
- if not File.exist?(cached_doc) or (Time.now - File.mtime(cached_doc)) > 86400
+ cached_doc = File.expand_path "~/.cache/arvados/discovery-#{discovery_uri_hash}.json" rescue nil
+ if cached_doc.nil? or not File.exist?(cached_doc) or (Time.now - File.mtime(cached_doc)) > 86400
response = self.execute!(:http_method => :get,
- :uri => self.discovery_uri(api, version),
+ :uri => discovery_uri,
:authenticated => false)
- FileUtils.makedirs(File.dirname cached_doc)
- File.open(cached_doc, 'w') do |f|
- f.puts response.body
+ begin
+ FileUtils.makedirs(File.dirname cached_doc)
+ File.open(cached_doc, 'w') do |f|
+ f.puts response.body
+ end
+ rescue
+ return JSON.load response.body
end
end
end
end
- @objects = @objects.select(@select.map { |s| "#{table_name}.#{ActiveRecord::Base.connection.quote_column_name s.to_s}" }.join ", ") if @select
+ if @select
+ # Map attribute names in @select to real column names, resolve
+ # those to fully-qualified SQL column names, and pass the
+ # resulting string to the select method.
+ api_column_map = model_class.attributes_required_columns
+ columns_list = @select.
+ flat_map { |attr| api_column_map[attr] }.
+ uniq.
+ map { |s| "#{table_name}.#{ActiveRecord::Base.connection.quote_column_name s}" }
+ @objects = @objects.select(columns_list.join(", "))
+ end
@objects = @objects.order(@orders.join ", ") if @orders.any?
@objects = @objects.limit(@limit)
@objects = @objects.offset(@offset)
end
# Remove any permission signatures from the manifest.
- resource_attrs[:manifest_text]
- .gsub!(/ [[:xdigit:]]{32}(\+[[:digit:]]+)?(\+\S+)/) { |word|
- word.strip!
- loc = Locator.parse(word)
- if loc
- " " + loc.without_signature.to_s
- else
- " " + word
- end
- }
+ munge_manifest_locators(resource_attrs[:manifest_text]) do |loc|
+ loc.without_signature.to_s
+ end
# Save the collection with the stripped manifest.
act_as_system_user do
end
def show
- if current_api_client_authorization
- signing_opts = {
- key: Rails.configuration.blob_signing_key,
- api_token: current_api_client_authorization.api_token,
- ttl: Rails.configuration.blob_signing_ttl,
- }
- @object[:manifest_text]
- .gsub!(/ [[:xdigit:]]{32}(\+[[:digit:]]+)?(\+\S+)/) { |word|
- word.strip!
- loc = Locator.parse(word)
- if loc
- " " + Blob.sign_locator(word, signing_opts)
- else
- " " + word
- end
- }
- end
- render json: @object.as_api_response(:with_data)
+ sign_manifests(@object[:manifest_text])
+ super
+ end
+
+ def index
+ sign_manifests(*@objects.map { |c| c[:manifest_text] })
+ super
end
def collection_uuid(uuid)
logger.debug "visiting #{uuid}"
- if m
+ if m
# uuid is a collection
Collection.readable_by(current_user).where(uuid: uuid).each do |c|
visited[uuid] = c.as_api_response
Job.readable_by(current_user).where(log: uuid).each do |job|
generate_provenance_edges(visited, job.uuid)
end
-
+
else
# uuid is something else
rsc = ArvadosModel::resource_class_for_uuid uuid
logger.debug "visiting #{uuid}"
- if m
+ if m
# uuid is a collection
Collection.readable_by(current_user).where(uuid: uuid).each do |c|
visited[uuid] = c.as_api_response
Job.readable_by(current_user).where(["jobs.script_parameters like ?", "%#{uuid}%"]).each do |job|
generate_used_by_edges(visited, job.uuid)
end
-
+
else
# uuid is something else
rsc = ArvadosModel::resource_class_for_uuid uuid
render json: visited
end
+ def self.munge_manifest_locators(manifest)
+ # Given a manifest text and a block, yield each locator,
+ # and replace it with whatever the block returns.
+ manifest.andand.gsub!(/ [[:xdigit:]]{32}(\+[[:digit:]]+)?(\+\S+)/) do |word|
+ if loc = Locator.parse(word.strip)
+ " " + yield(loc)
+ else
+ " " + word
+ end
+ end
+ end
+
protected
+
+ def find_objects_for_index
+ # Omit manifest_text from index results unless expressly selected.
+ @select ||= model_class.api_accessible_attributes(:user).
+ map { |attr_spec| attr_spec.first.to_s } - ["manifest_text"]
+ super
+ end
+
def find_object_by_uuid
super
if !@object and !params[:uuid].match(/^[0-9a-f]+\+\d+$/)
end
end
end
+
+ def munge_manifest_locators(manifest, &block)
+ # Instance-level convenience wrapper; delegates to the class
+ # method of the same name.
+ self.class.munge_manifest_locators(manifest, &block)
+ end
+
+ def sign_manifests(*manifests)
+ # Replace each blob locator in the given manifest texts (in place)
+ # with a signed locator, using the configured blob signing key and
+ # TTL plus the caller's API token. Does nothing when there is no
+ # current_api_client_authorization (e.g. unauthenticated request).
+ if current_api_client_authorization
+ signing_opts = {
+ key: Rails.configuration.blob_signing_key,
+ api_token: current_api_client_authorization.api_token,
+ ttl: Rails.configuration.blob_signing_ttl,
+ }
+ manifests.each do |text|
+ munge_manifest_locators(text) do |loc|
+ Blob.sign_locator(loc.to_s, signing_opts)
+ end
+ end
+ end
+ end
end
index
end
+ def self._create_requires_parameters
+ # Discovery-document parameter descriptions for create. Extends
+ # the inherited parameters with find_or_create (opt-in reuse of an
+ # existing record) and the filters / script-version constraints
+ # that govern which existing records qualify.
+ (super rescue {}).
+ merge({
+ find_or_create: {
+ type: 'boolean', required: false, default: false
+ },
+ filters: {
+ type: 'array', required: false
+ },
+ minimum_script_version: {
+ type: 'string', required: false
+ },
+ exclude_script_versions: {
+ type: 'array', required: false
+ },
+ })
+ end
+
def self._queue_requires_parameters
+ # The queue endpoint accepts the same parameters as index.
self._index_requires_parameters
end
}.compact.first
if httpMethod and
route.defaults[:controller] == 'arvados/v1/' + k.to_s.underscore.pluralize and
- !d_methods[action.to_sym] and
- ctl_class.action_methods.include? action and
- ![:show, :index, :destroy].include?(action.to_sym)
- method = {
- id: "arvados.#{k.to_s.underscore.pluralize}.#{action}",
- path: route.path.spec.to_s.sub('/arvados/v1/','').sub('(.:format)','').sub(/:(uu)?id/,'{uuid}'),
- httpMethod: httpMethod,
- description: "#{route.defaults[:action]} #{k.to_s.underscore.pluralize}",
- parameters: {},
- response: {
- "$ref" => (action == 'index' ? "#{k.to_s}List" : k.to_s)
- },
- scopes: [
- "https://api.clinicalfuture.com/auth/arvados"
- ]
- }
- route.segment_keys.each do |key|
- if key != :format
- key = :uuid if key == :id
- method[:parameters][key] = {
- type: "string",
- description: "",
- required: true,
- location: "path"
- }
+ ctl_class.action_methods.include? action
+ if !d_methods[action.to_sym]
+ method = {
+ id: "arvados.#{k.to_s.underscore.pluralize}.#{action}",
+ path: route.path.spec.to_s.sub('/arvados/v1/','').sub('(.:format)','').sub(/:(uu)?id/,'{uuid}'),
+ httpMethod: httpMethod,
+ description: "#{action} #{k.to_s.underscore.pluralize}",
+ parameters: {},
+ response: {
+ "$ref" => (action == 'index' ? "#{k.to_s}List" : k.to_s)
+ },
+ scopes: [
+ "https://api.clinicalfuture.com/auth/arvados"
+ ]
+ }
+ route.segment_keys.each do |key|
+ if key != :format
+ key = :uuid if key == :id
+ method[:parameters][key] = {
+ type: "string",
+ description: "",
+ required: true,
+ location: "path"
+ }
+ end
end
+ else
+ # We already built a generic method description, but we
+ # might find some more required parameters through
+ # introspection.
+ method = d_methods[action.to_sym]
end
if ctl_class.respond_to? "_#{action}_requires_parameters".to_sym
ctl_class.send("_#{action}_requires_parameters".to_sym).each do |k, v|
end
end
end
- d_methods[route.defaults[:action].to_sym] = method
+ d_methods[action.to_sym] = method
end
end
end
def self._setup_requires_parameters
+ # Discovery-document parameter descriptions for the user setup
+ # endpoint. All parameters are optional; in particular,
+ # send_notification_email now defaults to false instead of being
+ # required.
{
- send_notification_email: { type: 'boolean', required: true },
+ user: {
+ type: 'object', required: false
+ },
+ openid_prefix: {
+ type: 'string', required: false
+ },
+ repo_name: {
+ type: 'string', required: false
+ },
+ vm_uuid: {
+ type: 'string', required: false
+ },
+ send_notification_email: {
+ type: 'boolean', required: false, default: false
+ },
}
end
self.columns.select { |col| col.name == attr.to_s }.first
end
+ def self.attributes_required_columns
+ # This method returns a hash. Each key is the name of an API attribute,
+ # and it's mapped to a list of database columns that must be fetched
+ # to generate that attribute.
+ # This implementation generates a simple map of attributes to
+ # matching column names. Subclasses can override this method
+ # to specify that method-backed API attributes need to fetch
+ # specific columns from the database.
+ all_columns = columns.map(&:name)
+ api_column_map = Hash.new { |hash, key| hash[key] = [] }
+ methods.grep(/^api_accessible_\w+$/).each do |method_name|
+ next if method_name == :api_accessible_attributes
+ send(method_name).each_pair do |api_attr_name, col_name|
+ col_name = col_name.to_s
+ if all_columns.include?(col_name)
+ api_column_map[api_attr_name.to_s] |= [col_name]
+ end
+ end
+ end
+ api_column_map
+ end
+
# Return nil if current user is not allowed to see the list of
# writers. Otherwise, return a list of user_ and group_uuids with
# write permission. (If not returning nil, current_user is always in
api_accessible :user, extend: :common do |t|
t.add :data_size
t.add :files
+ t.add :manifest_text
end
- api_accessible :with_data, extend: :user do |t|
- t.add :manifest_text
+ def self.attributes_required_columns
+ # data_size and files are computed from the manifest, so the
+ # manifest_text column must be fetched from the database in order
+ # to render either of them.
+ super.merge({ "data_size" => ["manifest_text"],
+ "files" => ["manifest_text"],
+ })
end
def redundancy_status
if self.cancelled_at and not self.cancelled_at_was
self.cancelled_at = Time.now
self.cancelled_by_user_uuid = current_user.uuid
- self.cancelled_by_client_uuid = current_api_client.uuid
+ self.cancelled_by_client_uuid = current_api_client.andand.uuid
@need_crunch_dispatch_trigger = true
else
self.cancelled_at = self.cancelled_at_was
$stderr.puts j_done[:stderr_buf] + "\n"
end
- # Wait the thread
- j_done[:wait_thr].value
+ # Wait for the thread to finish (returns a Process::Status)
+ exit_status = j_done[:wait_thr].value
jobrecord = Job.find_by_uuid(job_done.uuid)
- if jobrecord.started_at
+ if exit_status.to_i != 75 and jobrecord.started_at
# Clean up state fields in case crunch-job exited without
# putting the job in a suitable "finished" state.
jobrecord.running = false
# Don't fail the job if crunch-job didn't even get as far as
# starting it. If the job failed to run due to an infrastructure
# issue with crunch-job or slurm, we want the job to stay in the
- # queue.
+ # queue. If crunch-job exited after losing a race to another
+ # crunch-job process, it exits 75 and we should leave the job
+ # record alone so the winner of the race can do its thing.
+ #
+ # There is still an unhandled race condition: If our crunch-job
+ # process is about to lose a race with another crunch-job
+ # process, but crashes before getting to its "exit 75" (for
+ # example, "cannot fork" or "cannot reach API server") then we
+ # will incorrectly assume that the failure was our process's
+ # fault because jobrecord.started_at is non-nil, and mark the job as failed
+ # even though the winner of the race is probably still doing
+ # fine.
end
# Invalidate the per-job auth token
authorize_with :active
get :index
assert_response :success
- assert_not_nil assigns(:objects)
+ assert(assigns(:objects).andand.any?, "no Collections returned in index")
+ refute(json_response["items"].any? { |c| c.has_key?("manifest_text") },
+ "basic Collections index included manifest_text")
+ end
+
+ test "can get non-database fields via index select" do
+ # "files" has no database column of its own; selecting it should
+ # still work because the model maps it to manifest_text.
+ authorize_with :active
+ get(:index, filters: [["uuid", "=", collections(:foo_file).uuid]],
+ select: %w(uuid owner_uuid files))
+ assert_response :success
+ assert_equal(1, json_response["items"].andand.size,
+ "wrong number of items returned for index")
+ assert_equal([[".", "foo", 3]], json_response["items"].first["files"],
+ "wrong file list in index result")
+ end
+
+ test "can select only non-database fields for index" do
+ # Selecting exclusively method-backed fields must still succeed;
+ # sanity-check that data_size matches the sum of file sizes.
+ authorize_with :active
+ get(:index, select: %w(data_size files))
+ assert_response :success
+ assert(json_response["items"].andand.any?, "no items found in index")
+ json_response["items"].each do |coll|
+ assert_equal(coll["data_size"],
+ coll["files"].inject(0) { |size, fspec| size + fspec.last },
+ "mismatch between data size and file list")
+ end
+ end
+
+ test "index with manifest_text selected returns signed locators" do
+ # When manifest_text is expressly selected, every locator in each
+ # returned manifest should carry a +A<sig>@<timestamp> permission
+ # signature.
+ columns = %w(uuid owner_uuid data_size files manifest_text)
+ authorize_with :active
+ get :index, select: columns
+ assert_response :success
+ assert(assigns(:objects).andand.any?,
+ "no Collections returned for index with columns selected")
+ json_response["items"].each do |coll|
+ assert_equal(columns, columns & coll.keys,
+ "Collections index did not respect selected columns")
+ loc_regexp = / [[:xdigit:]]{32}\+\d+\S+/
+ pos = 0
+ while match = loc_regexp.match(coll["manifest_text"], pos)
+ assert_match(/\+A[[:xdigit:]]+@[[:xdigit:]]{8}\b/, match.to_s,
+ "Locator in manifest_text was not signed")
+ pos = match.end(0)
+ end
+ end
end
[0,1,2].each do |limit|