import arvados
import re
import string
import sys

api = arvados.api('v1')

piece = 0

# Look for paired reads
inp = arvados.CollectionReader(arvados.getjobparam('reads'))

manifest_list = []

# Chunking is currently disabled; see the comment on splitfastq() below.
chunking = False  # arvados.getjobparam('chunking')

def nextline(reader, start):
    # Scan forward from 'start' in 128-byte windows and return the offset,
    # relative to 'start', of the next newline, or -1 at end of file.
    ofs = 0
    while True:
        r = reader.readfrom(start + ofs, 128)
        if r == '':
            return -1
        n = string.find(r, "\n")
        if n > -1:
            return ofs + n
        ofs += len(r)

# Chunk a fastq into approximately 64 MiB chunks.  Requires that the input
# data be decompressed ahead of time, such as with decompress-all.py.
# Generates a new manifest, but doesn't actually move any data around.
# Handles paired reads by ensuring that each chunk of a pair gets the same
# number of records.
#
# This works, but in practice is so slow that the potential gains in
# alignment performance are lost in the prep time, which is why it is
# currently disabled (the 'chunking' flag above).
#
# A better algorithm would seek to a file position a bit less than the
# desired chunk size and then scan ahead for the next record, making sure
# that record was matched by the read pair.  A sketch of that approach
# appears after this function.
def splitfastq(p):
    global piece

    # Per-file cursors: 'start' is the beginning of the current chunk and
    # 'end' is how far we have scanned so far.
    for i in xrange(0, len(p)):
        p[i]["start"] = 0
        p[i]["end"] = 0

    count = 0
    recordsize = [0, 0]
    finish = False
    while not finish:
        for i in xrange(0, len(p)):
            recordsize[i] = 0
        # Measure the next record (4 lines: header, sequence, separator,
        # quality) in each file of the pair.
        for i in xrange(0, len(p)):
            for ln in xrange(0, 4):
                r = nextline(p[i]["reader"], p[i]["end"] + recordsize[i])
                if r == -1:
                    finish = True
                    break
                recordsize[i] += (r + 1)
        # Finish a piece when any file of the pair has accumulated ~64 MiB,
        # or when the input is exhausted.
        splitnow = finish
        for i in xrange(0, len(p)):
            if ((p[i]["end"] - p[i]["start"]) + recordsize[i]) >= (64*1024*1024):
                splitnow = True
        if splitnow:
            for i in xrange(0, len(p)):
                manifest = []
                print >>sys.stderr, "Finish piece ./_%s/%s (%s %s)" % (piece, p[i]["reader"].name(), p[i]["start"], p[i]["end"])
                # One manifest stream per piece: the stream name, the data
                # block locators, then "position:size:filename" segments
                # covering just this chunk's byte range.
                manifest.extend(["./_" + str(piece)])
                manifest.extend([d[arvados.LOCATOR] for d in p[i]["reader"]._stream._data_locators])
                manifest.extend(["{}:{}:{}".format(seg[arvados.LOCATOR]+seg[arvados.OFFSET], seg[arvados.SEGMENTSIZE], p[i]["reader"].name().replace(' ', '\\040'))
                                 for seg in arvados.locators_and_ranges(p[i]["reader"].segments, p[i]["start"], p[i]["end"] - p[i]["start"])])
                manifest_list.append(manifest)
                p[i]["start"] = p[i]["end"]
            piece += 1
        else:
            # Not time to split yet: extend the current chunk by one record.
            for i in xrange(0, len(p)):
                p[i]["end"] += recordsize[i]
            count += 1
            if count % 10000 == 0:
                print >>sys.stderr, "Record %s at %s" % (count, p[i]["end"])
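
# A sketch of the faster approach described above, for illustration only;
# nothing in this script calls it.  Instead of measuring every record, seek
# to just under the desired chunk size and scan forward for the next record
# boundary.  A fastq header line starts with "@", but "@" is also a valid
# quality-score character, so a candidate header is only accepted if the
# line two newlines later starts with "+" (the separator line).  The pairing
# step (splitting the mate file after the same record count) and boundary
# handling across scan windows are elided; 'reader' is assumed to support
# readfrom(offset, size) like the readers used above.
def next_record_start(reader, pos, window=4096):
    buf = reader.readfrom(pos, window)
    while buf != '':
        n = string.find(buf, "\n@")
        while n > -1:
            candidate = pos + n + 1
            # Look ahead for the newlines ending the header and sequence
            # lines; the character after the second one should be "+".
            ahead = reader.readfrom(candidate, window)
            first_nl = string.find(ahead, "\n")
            second_nl = string.find(ahead, "\n", first_nl + 1)
            if first_nl > -1 and second_nl > -1 and ahead[second_nl+1:second_nl+2] == "+":
                return candidate
            n = string.find(buf, "\n@", n + 1)
        pos += len(buf)
        buf = reader.readfrom(pos, window)
    return -1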

prog = re.compile(r'(.*?)(_[12])?\.fastq(\.gz)?$')
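# Illustrative matches (hypothetical filenames): "sample_1.fastq.gz" gives
# group(1)="sample", group(2)="_1", group(3)=".gz"; "reads.fastq" gives
# group(1)="reads" with group(2) and group(3) both None.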

# Look for fastq files
for s in inp.all_streams():
    for f in s.all_files():
        name_pieces = prog.match(f.name())
        if name_pieces is not None:
            if s.name() != ".":
                # The downstream tool (run-command) only iterates over the
                # top level of directories, so if there are fastq files in
                # subdirectories of the input, the choice is either to forget
                # there are directories (which might lead to name conflicts)
                # or just fail.
                print >>sys.stderr, "fastq must be at the root of the collection"
                sys.exit(1)

            p = None
            if name_pieces.group(2) is not None:
                if name_pieces.group(2) == "_1":
                    # This is the "_1" file of a pair; find its "_2" mate in
                    # the same stream.  "_2" files are skipped here and
                    # picked up via their mate.
                    p = [{}, {}]
                    p[0]["reader"] = s.files()[name_pieces.group(0)]
                    p[1]["reader"] = s.files()[name_pieces.group(1) + "_2.fastq" + (name_pieces.group(3) if name_pieces.group(3) else '')]
            else:
                # Unpaired fastq file.
                p = [{}]
                p[0]["reader"] = s.files()[name_pieces.group(0)]
            if p is not None:
                if chunking:
                    splitfastq(p)
                else:
                    # Pass each file through unchanged as its own piece,
                    # rewriting only the stream name in its manifest line.
                    for i in xrange(0, len(p)):
                        m = p[i]["reader"].as_manifest()[1:]
                        manifest_list.append(["./_" + str(piece), m[:-1]])
                    piece += 1

manifest_text = "\n".join(" ".join(m) for m in manifest_list)
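
# Each line of the output manifest names a stream ("./_<piece>"), its data
# block locators, and "position:size:filename" file segments, e.g. (with a
# hypothetical locator):
#   ./_0 0cc175b9c0f1b6a831c399e269772661+1024 0:512:sample_1.fastq 512:512:sample_2.fastq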
arvados.current_task().set_output(manifest_text)