+chunking = False  # arvados.getjobparam('chunking')
+
+def nextline(reader, start):
+    # Scan forward from 'start' in 128-byte probes; return the offset of the
+    # next newline relative to 'start', or -1 at end of file.
+    offset = 0
+    while True:
+        r = reader.readfrom(start + offset, 128)
+        if r == '':
+            return -1
+        n = r.find("\n")
+        if n > -1:
+            return offset + n
+        offset += 128
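+
+# Illustrative sketch, not wired into the pipeline: the seek-based splitting
+# strategy suggested in the comment on splitfastq below. Instead of reading
+# every record, jump to just under the target chunk size and scan forward for
+# the next record boundary. This assumes the reader also exposes size(); the
+# names seekchunks and recordstart are hypothetical, and a real version would
+# still have to verify that both files of a read pair end up with the same
+# number of records per chunk.
+def recordstart(reader, pos):
+    # Offset of the first fastq record beginning at or after 'pos', or -1.
+    # A record header is a line starting with '@', but '@' is also a valid
+    # quality character, so only accept a candidate line when the third line
+    # after it starts with '+'.
+    while True:
+        n = nextline(reader, pos)
+        if n == -1:
+            return -1
+        pos += n + 1
+        if reader.readfrom(pos, 1) != '@':
+            continue
+        l1 = nextline(reader, pos)
+        if l1 == -1:
+            return -1
+        l2 = nextline(reader, pos + l1 + 1)
+        if l2 == -1:
+            return -1
+        if reader.readfrom(pos + l1 + 1 + l2 + 1, 1) == '+':
+            return pos
+
+def seekchunks(reader, chunksize=64*1024*1024):
+    # Yield (start, end) byte ranges of roughly 'chunksize' bytes, each
+    # beginning on a record boundary.
+    start = 0
+    while start < reader.size():
+        boundary = recordstart(reader, start + chunksize)
+        if boundary == -1:
+            boundary = reader.size()
+        yield (start, boundary)
+        start = boundary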
+
+# Chunk a fastq into approximately 64 MiB chunks. Requires that the input
+# data be decompressed ahead of time, e.g. with decompress-all.py. Generates
+# a new manifest, but doesn't actually move any data around. Handles paired
+# reads by ensuring that each chunk of a pair gets the same number of
+# records.
+#
+# This works, but in practice reading every record is so slow that any gain
+# in alignment performance is lost to the prep time, which is why chunking
+# is currently disabled.
+#
+# A better algorithm would seek to a file position a bit less than the
+# desired chunk size, scan ahead for the start of the next record, and make
+# sure that record is matched by the corresponding record in the read pair
+# (see the seekchunks sketch above).
+def splitfastq(p):
+    for i in xrange(len(p)):
+        p[i]["start"] = 0
+        p[i]["end"] = 0
+
+    count = 0
+    recordsize = [0] * len(p)
+
+    global piece
+    finish = False
+    while not finish:
+        for i in xrange(len(p)):
+            recordsize[i] = 0
+
+        # Read the next record (4 lines) from each file of the pair,
+        # accumulating its size in recordsize[i].
+        for i in xrange(len(p)):
+            for ln in xrange(4):
+                r = nextline(p[i]["reader"], p[i]["end"]+recordsize[i])
+                if r == -1:
+                    finish = True
+                    break
+                recordsize[i] += (r+1)