"bufio"
"bytes"
"encoding/gob"
+ "encoding/json"
"errors"
"flag"
"fmt"
const annotationMaxTileSpan = 100
type sliceNumpy struct {
- filter filter
- threads int
- chi2Cases []bool
- chi2PValue float64
- minCoverage int
- includeVariant1 bool
- debugTag tagID
+ filter filter
+ threads int
+ chi2Cases []bool
+ chi2PValue float64
+ pvalueMinFrequency float64
+ maxFrequency float64
+ pcaComponents int
+ minCoverage int
+ minCoverageAll bool
+ includeVariant1 bool
+ debugTag tagID
cgnames []string
samples []sampleInfo
trainingSet []int // samples index => training set index, or -1 if not in training set
trainingSetSize int
+ pvalue func(onehot []bool) float64
+ pvalueCallCount int64
}
func (cmd *sliceNumpy) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
arvadosVCPUs := flags.Int("arvados-vcpus", 96, "number of VCPUs to request for arvados container")
projectUUID := flags.String("project", "", "project `UUID` for output data")
priority := flags.Int("priority", 500, "container request priority")
+ preemptible := flags.Bool("preemptible", true, "request preemptible instance")
inputDir := flags.String("input-dir", "./in", "input `directory`")
outputDir := flags.String("output-dir", "./out", "output `directory`")
ref := flags.String("ref", "", "reference name (if blank, choose last one that appears in input)")
onehotSingle := flags.Bool("single-onehot", false, "generate one-hot tile-based matrix")
onehotChunked := flags.Bool("chunked-onehot", false, "generate one-hot tile-based matrix per input chunk")
samplesFilename := flags.String("samples", "", "`samples.csv` file with training/validation and case/control groups (see 'lightning choose-samples')")
- onlyPCA := flags.Bool("pca", false, "generate pca matrix")
- pcaComponents := flags.Int("pca-components", 4, "number of PCA components")
+ caseControlOnly := flags.Bool("case-control-only", false, "drop samples that are not in case/control groups")
+ onlyPCA := flags.Bool("pca", false, "run principal component analysis, write components to pca.npy and samples.csv")
+ flags.IntVar(&cmd.pcaComponents, "pca-components", 4, "number of PCA components to compute / use in logistic regression")
maxPCATiles := flags.Int("max-pca-tiles", 0, "maximum tiles to use as PCA input (filter, then drop every 2nd column pair until below max)")
debugTag := flags.Int("debug-tag", -1, "log debugging details about specified tag")
+ flags.BoolVar(&cmd.minCoverageAll, "min-coverage-all", false, "apply -min-coverage filter based on all samples, not just training set")
flags.IntVar(&cmd.threads, "threads", 16, "number of memory-hungry assembly threads, and number of VCPUs to request for arvados container")
- flags.Float64Var(&cmd.chi2PValue, "chi2-p-value", 1, "do Χ² test and omit columns with p-value above this threshold")
+ flags.Float64Var(&cmd.chi2PValue, "chi2-p-value", 1, "do Χ² test (or logistic regression if -samples file has PCA components) and omit columns with p-value above this threshold")
+ flags.Float64Var(&cmd.pvalueMinFrequency, "pvalue-min-frequency", 0.01, "skip p-value calculation on tile variants below this frequency in the training set")
+ flags.Float64Var(&cmd.maxFrequency, "max-frequency", 1, "do not output variants above this frequency in the training set")
flags.BoolVar(&cmd.includeVariant1, "include-variant-1", false, "include most common variant when building one-hot matrix")
cmd.filter.Flags(flags)
err := flags.Parse(args)
return nil
} else if err != nil {
return err
+ } else if flags.NArg() > 0 {
+ return fmt.Errorf("errant command line arguments after parsed flags: %v", flags.Args())
}
if *pprof != "" {
Priority: *priority,
KeepCache: 2,
APIAccess: true,
+ Preemptible: *preemptible,
}
err = runner.TranslatePaths(inputDir, regionsFilename, samplesFilename)
if err != nil {
"-single-onehot=" + fmt.Sprintf("%v", *onehotSingle),
"-chunked-onehot=" + fmt.Sprintf("%v", *onehotChunked),
"-samples=" + *samplesFilename,
+ "-case-control-only=" + fmt.Sprintf("%v", *caseControlOnly),
+ "-min-coverage-all=" + fmt.Sprintf("%v", cmd.minCoverageAll),
"-pca=" + fmt.Sprintf("%v", *onlyPCA),
- "-pca-components=" + fmt.Sprintf("%d", *pcaComponents),
+ "-pca-components=" + fmt.Sprintf("%d", cmd.pcaComponents),
"-max-pca-tiles=" + fmt.Sprintf("%d", *maxPCATiles),
"-chi2-p-value=" + fmt.Sprintf("%f", cmd.chi2PValue),
+ "-pvalue-min-frequency=" + fmt.Sprintf("%f", cmd.pvalueMinFrequency),
+ "-max-frequency=" + fmt.Sprintf("%f", cmd.maxFrequency),
"-include-variant-1=" + fmt.Sprintf("%v", cmd.includeVariant1),
"-debug-tag=" + fmt.Sprintf("%d", cmd.debugTag),
}
}
if *samplesFilename != "" {
- cmd.samples, err = cmd.loadSampleInfo(*samplesFilename)
+ cmd.samples, err = loadSampleInfo(*samplesFilename)
if err != nil {
return err
}
+ } else if *caseControlOnly {
+ return fmt.Errorf("-case-control-only does not make sense without -samples")
}
cmd.cgnames = nil
} else if len(cmd.cgnames) != len(cmd.samples) {
return fmt.Errorf("mismatched sample list: %d samples in library, %d in %s", len(cmd.cgnames), len(cmd.samples), *samplesFilename)
} else {
- cmd.trainingSetSize = 0
for i, name := range cmd.cgnames {
if s := trimFilenameForLabel(name); s != cmd.samples[i].id {
return fmt.Errorf("mismatched sample list: sample %d is %q in library, %q in %s", i, s, cmd.samples[i].id, *samplesFilename)
}
+ }
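+ // Drop samples that are in neither the training nor the
+ // validation group, keeping cmd.samples and cmd.cgnames aligned.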
+ if *caseControlOnly {
+ for i := 0; i < len(cmd.samples); i++ {
+ if !cmd.samples[i].isTraining && !cmd.samples[i].isValidation {
+ if i+1 < len(cmd.samples) {
+ copy(cmd.samples[i:], cmd.samples[i+1:])
+ copy(cmd.cgnames[i:], cmd.cgnames[i+1:])
+ }
+ cmd.samples = cmd.samples[:len(cmd.samples)-1]
+ cmd.cgnames = cmd.cgnames[:len(cmd.cgnames)-1]
+ i--
+ }
+ }
+ }
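+ // Rebuild the training-set index and the case/control vector to
+ // match the (possibly reduced) sample list.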
+ cmd.chi2Cases = nil
+ cmd.trainingSetSize = 0
+ for i := range cmd.cgnames {
if cmd.samples[i].isTraining {
cmd.trainingSet[i] = cmd.trainingSetSize
cmd.trainingSetSize++
+ cmd.chi2Cases = append(cmd.chi2Cases, cmd.samples[i].isCase)
} else {
cmd.trainingSet[i] = -1
}
}
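+ // Default p-value function: χ² test against the training-set
+ // case/control groups. Replaced below with logistic regression
+ // when PCA components are available.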
+ if cmd.pvalue == nil {
+ cmd.pvalue = func(onehot []bool) float64 {
+ return pvalue(onehot, cmd.chi2Cases)
+ }
+ }
}
- if cmd.filter.MinCoverage == 1 {
- // In the generic formula below, floating point
- // arithmetic can effectively push the coverage
- // threshold above 1.0, which is impossible/useless.
- // 1.0 needs to mean exactly 100% coverage.
+
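+ // The coverage threshold counts all samples with -min-coverage-all,
+ // otherwise training samples only; a -min-coverage value below 1 is
+ // treated as a fraction of that count.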
+ if cmd.minCoverageAll {
cmd.minCoverage = len(cmd.cgnames)
} else {
- cmd.minCoverage = int(math.Ceil(cmd.filter.MinCoverage * float64(len(cmd.cgnames))))
+ cmd.minCoverage = cmd.trainingSetSize
+ }
+ if cmd.filter.MinCoverage < 1 {
+ cmd.minCoverage = int(math.Ceil(cmd.filter.MinCoverage * float64(cmd.minCoverage)))
+ }
+
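+ // If the samples file already includes PCA components, compute
+ // p-values with logistic regression on those components instead
+ // of the χ² test.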
+ if len(cmd.samples[0].pcaComponents) > 0 {
+ cmd.pvalue = glmPvalueFunc(cmd.samples, cmd.pcaComponents)
+ // Unfortunately, statsmodel/glm lib logs stuff to
+ // os.Stdout when it panics on an unsolvable
+ // problem. We recover() from the panic in glm.go, but
+ // we also need to commandeer os.Stdout to avoid
+ // producing large quantities of logs.
+ stdoutWas := os.Stdout
+ defer func() { os.Stdout = stdoutWas }()
+ os.Stdout, err = os.Open(os.DevNull)
+ if err != nil {
+ return err
+ }
+ }
+
+ // cgnamemap[name]==true for samples that we are including in
+ // output
+ cgnamemap := map[string]bool{}
+ for _, name := range cmd.cgnames {
+ cgnamemap[name] = true
+ }
+
+ err = writeSampleInfo(cmd.samples, *outputDir)
+ if err != nil {
+ return err
}
log.Info("indexing reference tiles")
return err
}
foundthistag := false
- taglib.FindAll(tiledata[:len(tiledata)-1], func(tagid tagID, offset, _ int) {
+ taglib.FindAll(bufio.NewReader(bytes.NewReader(tiledata[:len(tiledata)-1])), nil, func(tagid tagID, offset, _ int) {
if !foundthistag && tagid == libref.Tag {
foundthistag = true
return
if cmd.filter.MaxTag >= 0 && cg.StartTag > tagID(cmd.filter.MaxTag) {
return errSkip
}
- if !matchGenome.MatchString(cg.Name) {
+ if !cgnamemap[cg.Name] {
continue
}
// pad to full slice size
if err == errSkip {
return nil
} else if err != nil {
- return fmt.Errorf("%04d: DecodeLibrary(%s): err", infileIdx, infile)
+ return fmt.Errorf("%04d: DecodeLibrary(%s): %w", infileIdx, infile, err)
}
tagstart := cgs[cmd.cgnames[0]].StartTag
tagend := cgs[cmd.cgnames[0]].EndTag
count[blake2b.Sum256(rt.tiledata)] = 0
}
- for cgname, cg := range cgs {
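+ // Coverage is counted over training samples only, unless
+ // -min-coverage-all was given.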
+ for cgidx, cgname := range cmd.cgnames {
+ if !cmd.minCoverageAll && !cmd.samples[cgidx].isTraining {
+ continue
+ }
+ cg := cgs[cgname]
idx := int(tag-tagstart) * 2
for allele := 0; allele < 2; allele++ {
v := cg.Variants[idx+allele]
break
}
remap := variantRemap[tag-tagstart]
+ if remap == nil {
+ // remap was not assigned above because this
+ // tag's coverage was below the minCoverage threshold
+ outcol++
+ continue
+ }
maxv := tileVariantID(0)
for _, v := range remap {
if maxv < v {
if cmd.filter.MaxTag >= 0 && tag > tagID(cmd.filter.MaxTag) {
break
}
- if rt := reftile[tag]; rt == nil || rt.excluded {
+ if rt := reftile[tag]; mask != nil && (rt == nil || rt.excluded) {
continue
}
if v == 0 {
if err != nil {
return err
}
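+ // Record the number of p-value computations in stats.json.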
+ fnm = fmt.Sprintf("%s/stats.json", *outputDir)
+ j, err := json.Marshal(map[string]interface{}{
+ "pvalueCallCount": cmd.pvalueCallCount,
+ })
+ if err != nil {
+ return err
+ }
+ err = os.WriteFile(fnm, j, 0777)
+ if err != nil {
+ return err
+ }
}
if *onlyPCA {
cols := 0
cols = (cols + 1) / 2
stride = stride * 2
}
+ if cols%2 == 1 {
+ // we work with pairs of columns
+ cols++
+ }
log.Printf("creating full matrix (%d rows) and training matrix (%d rows) with %d cols, stride %d", len(cmd.cgnames), cmd.trainingSetSize, cols, stride)
mtxFull := mat.NewDense(len(cmd.cgnames), cols, nil)
mtxTrain := mat.NewDense(cmd.trainingSetSize, cols, nil)
}
}
log.Print("fitting")
- transformer := nlp.NewPCA(*pcaComponents)
+ transformer := nlp.NewPCA(cmd.pcaComponents)
transformer.Fit(mtxTrain.T())
log.Printf("transforming")
pca, err := transformer.Transform(mtxFull.T())
}
log.Print("done")
- samplesOutFilename := *outputDir + "/samples.csv"
- log.Infof("writing sample metadata to %s", samplesOutFilename)
- var f *os.File
- f, err = os.Create(samplesOutFilename)
- if err != nil {
- return err
- }
- defer f.Close()
- for i, si := range cmd.samples {
- var cc, tv string
- if si.isCase {
- cc = "1"
- } else if si.isControl {
- cc = "0"
- }
- if si.isTraining {
- tv = "1"
- } else {
- tv = "0"
- }
- var pcavals string
+ log.Print("copying pca components to sampleInfo")
+ for i := range cmd.samples {
+ cmd.samples[i].pcaComponents = make([]float64, outcols)
for c := 0; c < outcols; c++ {
- pcavals += fmt.Sprintf(",%f", pca.At(i, c))
- }
- _, err = fmt.Fprintf(f, "%d,%s,%s,%s%s\n", i, si.id, cc, tv, pcavals)
- if err != nil {
- err = fmt.Errorf("write %s: %w", samplesOutFilename, err)
- return err
+ cmd.samples[i].pcaComponents[c] = pca.At(i, c)
}
}
- err = f.Close()
+ log.Print("done")
+
+ err = writeSampleInfo(cmd.samples, *outputDir)
if err != nil {
- err = fmt.Errorf("close %s: %w", samplesOutFilename, err)
return err
}
- log.Print("done")
}
}
if !*mergeOutput && !*onehotChunked && !*onehotSingle && !*onlyPCA {
return nil
}
-// Read training set file(s) from path (may be dir or file) and set up
-// cmd.trainingSet.
-//
-// cmd.trainingSet[i] == n >= 0 if cmd.cgnames[i] is the nth training
-// set sample.
-//
-// cmd.trainingSet[i] == -1 if cmd.cgnames[i] is not in the training
-// set.
-func (cmd *sliceNumpy) loadTrainingSet(path string) error {
- cmd.trainingSet = make([]int, len(cmd.cgnames))
- if path == "" {
- cmd.trainingSetSize = len(cmd.cgnames)
- for i := range cmd.trainingSet {
- cmd.trainingSet[i] = i
- }
- return nil
- }
- for i := range cmd.trainingSet {
- cmd.trainingSet[i] = -1
- }
- infiles, err := allFiles(path, nil)
- if err != nil {
- return err
- }
- for _, infile := range infiles {
- f, err := open(infile)
- if err != nil {
- return err
- }
- buf, err := io.ReadAll(f)
- f.Close()
- if err != nil {
- return err
- }
- for _, tsv := range bytes.Split(buf, []byte{'\n'}) {
- if len(tsv) == 0 {
- continue
- }
- split := strings.Split(string(tsv), "\t")
- pattern := split[0]
- found := -1
- for i, name := range cmd.cgnames {
- if strings.Contains(name, pattern) {
- if found >= 0 {
- log.Warnf("pattern %q in %s already matched sample ID %q -- not using %q", pattern, infile, cmd.cgnames[found], name)
- } else {
- found = i
- cmd.trainingSet[found] = 1
- }
- }
- }
- if found < 0 {
- log.Warnf("pattern %q in %s does not match any genome IDs", pattern, infile)
- continue
- }
- }
- }
- tsi := 0
- for i, x := range cmd.trainingSet {
- if x == 1 {
- cmd.trainingSet[i] = tsi
- tsi++
- }
- }
- cmd.trainingSetSize = tsi + 1
- return nil
-}
-
type sampleInfo struct {
id string
isCase bool
// Read samples.csv file with case/control and training/validation
// flags.
-func (cmd *sliceNumpy) loadSampleInfo(samplesFilename string) ([]sampleInfo, error) {
+func loadSampleInfo(samplesFilename string) ([]sampleInfo, error) {
var si []sampleInfo
f, err := open(samplesFilename)
if err != nil {
return nil, err
}
lineNum := 0
- for csv := range bytes.Split(buf, []byte{'\n'}) {
+ for _, csv := range bytes.Split(buf, []byte{'\n'}) {
lineNum++
+ if len(csv) == 0 {
+ continue
+ }
split := strings.Split(string(csv), ",")
- if len(split) != 4 {
- return nil, fmt.Errorf("fields != 4 in %s line %d: %q", samplesFilename, lineNum, csv)
+ if len(split) < 4 {
+ return nil, fmt.Errorf("%d fields < 4 in %s line %d: %q", len(split), samplesFilename, lineNum, csv)
}
if split[0] == "Index" && split[1] == "SampleID" && split[2] == "CaseControl" && split[3] == "TrainingValidation" {
continue
if idx != len(si) {
return nil, fmt.Errorf("%s line %d: index %d out of order", samplesFilename, lineNum, idx)
}
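+ // Any columns after the first four (Index, SampleID, CaseControl,
+ // TrainingValidation) are PCA components.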
+ var pcaComponents []float64
+ if len(split) > 4 {
+ for _, s := range split[4:] {
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, fmt.Errorf("%s line %d: cannot parse float %q: %s", samplesFilename, lineNum, s, err)
+ }
+ pcaComponents = append(pcaComponents, f)
+ }
+ }
si = append(si, sampleInfo{
- id: split[1],
- isCase: split[2] == "1",
- isControl: split[2] == "0",
- isTraining: split[3] == "1",
- isValidation: split[3] == "0",
+ id: split[1],
+ isCase: split[2] == "1",
+ isControl: split[2] == "0",
+ isTraining: split[3] == "1",
+ isValidation: split[3] == "0" && len(split[2]) > 0, // fix errant 0s in input
+ pcaComponents: pcaComponents,
})
}
return si, nil
}
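+// writeSampleInfo writes outputDir/samples.csv in the format read by
+// loadSampleInfo: Index, SampleID, CaseControl, TrainingValidation,
+// plus one column per PCA component.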
+func writeSampleInfo(samples []sampleInfo, outputDir string) error {
+ fnm := outputDir + "/samples.csv"
+ log.Infof("writing sample metadata to %s", fnm)
+ f, err := os.Create(fnm)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ pcaLabels := ""
+ if len(samples) > 0 {
+ for i := range samples[0].pcaComponents {
+ pcaLabels += fmt.Sprintf(",PCA%d", i)
+ }
+ }
+ _, err = fmt.Fprintf(f, "Index,SampleID,CaseControl,TrainingValidation%s\n", pcaLabels)
+ if err != nil {
+ return err
+ }
+ for i, si := range samples {
+ var cc, tv string
+ if si.isCase {
+ cc = "1"
+ } else if si.isControl {
+ cc = "0"
+ }
+ if si.isTraining {
+ tv = "1"
+ } else if si.isValidation {
+ tv = "0"
+ }
+ var pcavals string
+ for _, pcaval := range si.pcaComponents {
+ pcavals += fmt.Sprintf(",%f", pcaval)
+ }
+ _, err = fmt.Fprintf(f, "%d,%s,%s,%s%s\n", i, si.id, cc, tv, pcavals)
+ if err != nil {
+ return fmt.Errorf("write %s: %w", fnm, err)
+ }
+ }
+ err = f.Close()
+ if err != nil {
+ return fmt.Errorf("close %s: %w", fnm, err)
+ }
+ log.Print("done")
+ return nil
+}
+
func (cmd *sliceNumpy) filterHGVScolpair(colpair [2][]int8) bool {
if cmd.chi2PValue >= 1 {
return true
variant tileVariantID
hom bool
pvalue float64
+ maf float64
}
const onehotXrefSize = unsafe.Sizeof(onehotXref{})
}
tagoffset := tag - chunkstarttag
coverage := 0
- for _, cg := range cgs {
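+ // As above, coverage is counted over training samples only unless
+ // -min-coverage-all was given.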
+ for cgidx, cgname := range cmd.cgnames {
+ if !cmd.minCoverageAll && !cmd.samples[cgidx].isTraining {
+ continue
+ }
+ cg := cgs[cgname]
alleles := 0
for _, v := range cg.Variants[tagoffset*2 : tagoffset*2+2] {
if v > 0 && int(v) < len(seq[tag]) && len(seq[tag][v].Sequence) > 0 {
if coverage < cmd.minCoverage {
return nil, nil
}
+ // "observed" array for p-value calculation (training set
+ // only)
obs := make([][]bool, (maxv+1)*2) // 2 slices (hom + het) for each variant#
+ // one-hot output (all samples)
+ outcols := make([][]int8, (maxv+1)*2)
for i := range obs {
- obs[i] = make([]bool, len(cmd.cgnames))
+ obs[i] = make([]bool, cmd.trainingSetSize)
+ outcols[i] = make([]int8, len(cmd.cgnames))
}
for cgid, name := range cmd.cgnames {
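+ // tsid is this sample's index in the training set, or -1 if it
+ // is not a training sample.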
+ tsid := cmd.trainingSet[cgid]
cgvars := cgs[name].Variants[tagoffset*2:]
tv0, tv1 := remap[cgvars[0]], remap[cgvars[1]]
for v := tileVariantID(1); v <= maxv; v++ {
if tv0 == v && tv1 == v {
- obs[v*2][cgid] = true
+ if tsid >= 0 {
+ obs[v*2][tsid] = true
+ }
+ outcols[v*2][cgid] = 1
} else if tv0 == v || tv1 == v {
- obs[v*2+1][cgid] = true
+ if tsid >= 0 {
+ obs[v*2+1][tsid] = true
+ }
+ outcols[v*2+1][cgid] = 1
}
}
}
var onehot [][]int8
var xref []onehotXref
+ var maf float64
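+ // maf is computed on each hom (even) column and reused for the
+ // matching het (odd) column.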
for col := 2; col < len(obs); col++ {
// col 0,1 correspond to tile variant 0, i.e.,
// no-call; col 2,3 correspond to the most common variant, and so on for less common variants.
if col < 4 && !cmd.includeVariant1 {
continue
}
- p := pvalue(obs[col], cmd.chi2Cases)
+ if col&1 == 0 {
+ maf = homhet2maf(obs[col : col+2])
+ if maf < cmd.pvalueMinFrequency {
+ // Skip both columns (hom and het) if
+ // allele frequency is below threshold
+ col++
+ continue
+ }
+ if maf > cmd.maxFrequency {
+ // Skip both columns if allele
+ // frequency is above threshold
+ col++
+ continue
+ }
+ }
+ atomic.AddInt64(&cmd.pvalueCallCount, 1)
+ p := cmd.pvalue(obs[col])
if cmd.chi2PValue < 1 && !(p < cmd.chi2PValue) {
continue
}
- onehot = append(onehot, bool2int8(obs[col]))
+ onehot = append(onehot, outcols[col])
xref = append(xref, onehotXref{
tag: tag,
variant: tileVariantID(col >> 1),
hom: col&1 == 0,
pvalue: p,
+ maf: maf,
})
}
return onehot, xref
}
-func bool2int8(in []bool) []int8 {
- out := make([]int8, len(in))
- for i, v := range in {
- if v {
- out[i] = 1
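+// homhet2maf returns the allele frequency of a tile variant in the
+// training set, given its hom and het "observed" rows.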
+func homhet2maf(onehot [][]bool) float64 {
+ if len(onehot[0]) == 0 {
+ return 0
+ }
+ n := 0
+ for i := range onehot[0] {
+ if onehot[0][i] {
+ // hom
+ n += 2
+ } else if onehot[1][i] {
+ // het
+ n += 1
}
}
- return out
+ return float64(n) / float64(len(onehot[0])*2)
}
// convert a []onehotXref with length N to a numpy-style []int32
// P-value row contains 1000000x actual p-value.
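+//
+// MAF row contains 1000000x allele frequency.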
func onehotXref2int32(xrefs []onehotXref) []int32 {
xcols := len(xrefs)
- xdata := make([]int32, 5*xcols)
+ xdata := make([]int32, 6*xcols)
for i, xref := range xrefs {
xdata[i] = int32(xref.tag)
xdata[xcols+i] = int32(xref.variant)
}
xdata[xcols*3+i] = int32(xref.pvalue * 1000000)
xdata[xcols*4+i] = int32(-math.Log10(xref.pvalue) * 1000000)
+ xdata[xcols*5+i] = int32(xref.maf * 1000000)
}
return xdata
}