# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
+from past.builtins import basestring
+from builtins import object
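These two added imports come from the future/past compatibility layer that futurize relies on: past.builtins.basestring keeps isinstance() checks that expect the old common string base type working on Python 3, and "from builtins import object" gives classes Python 3 style behaviour when the code still runs under Python 2. A minimal sketch of the idiom, with a made-up value argument:

    from past.builtins import basestring
    from builtins import object

    class Example(object):
        # With the builtins import this is a new-style, Python 3 flavoured
        # class even on Python 2.
        pass

    def looks_like_text(value):
        # On Python 2 this matches str and unicode; on Python 3 the past
        # package provides a stand-in so the same check keeps working.
        return isinstance(value, basestring)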
keep_client=self.arvrunner.keep_client,
num_retries=self.arvrunner.num_retries)
script_parameters["task.vwd"] = {}
- generatemapper = VwdPathMapper([self.generatefiles], "", "",
+ generatemapper = VwdPathMapper(self.generatefiles["listing"], "", "",
separateDirs=False)
with Perf(metrics, "createfiles %s" % self.name):
- for f, p in generatemapper.items():
+ for f, p in list(generatemapper.items()):
if p.type == "CreateFile":
with vwd.open(p.target, "w") as n:
n.write(p.resolved.encode("utf-8"))
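The path mapper change reflects that self.generatefiles is a CWL Directory literal whose entries sit under its "listing" key, so the mapper is handed those entries rather than a one-element list wrapping the whole dict. A rough illustration of the shape involved (the file entry below is made up):

    # Illustrative only: generatefiles as built for InitialWorkDirRequirement.
    generatefiles = {
        "class": "Directory",
        "basename": "",
        "listing": [
            {"class": "File", "basename": "settings.json", "contents": "{}"},
        ],
    }
    # The mapper now walks the entries themselves:
    entries = generatefiles["listing"]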
trash_at=info["trash_at"],
properties=info["properties"])
- for f, p in generatemapper.items():
+ for f, p in list(generatemapper.items()):
if p.type == "File":
script_parameters["task.vwd"][p.target] = p.resolved
if p.type == "CreateFile":
api_client=self.arvrunner.api,
keep_client=self.arvrunner.keep_client,
num_retries=self.arvrunner.num_retries)
- log = logc.open(logc.keys()[0])
+ log = logc.open(list(logc.keys())[0])
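Wrapping logc.keys() in list() is needed because dict-style keys() returns a view object on Python 3 that cannot be indexed, so keys()[0] raises TypeError; list() restores the Python 2 behaviour. A minimal illustration with a plain dict standing in for the log collection:

    logc = {"stderr.txt": "...", "crunch-run.txt": "..."}
    # Python 3: logc.keys()[0] -> TypeError: 'dict_keys' object is not subscriptable
    first_log = list(logc.keys())[0]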
dirs = {
"tmpdir": "/tmpdir",
"outdir": "/outdir",
a pipeline template or pipeline instance.
"""
- if self.tool.tool["id"].startswith("keep:"):
- self.job_order["cwl:tool"] = self.tool.tool["id"][5:]
+ if self.embedded_tool.tool["id"].startswith("keep:"):
+ self.job_order["cwl:tool"] = self.embedded_tool.tool["id"][5:]
else:
- packed = packed_workflow(self.arvrunner, self.tool, self.merged_map)
+ packed = packed_workflow(self.arvrunner, self.embedded_tool, self.merged_map)
wf_pdh = upload_workflow_collection(self.arvrunner, self.name, packed)
self.job_order["cwl:tool"] = "%s/workflow.cwl#main" % wf_pdh
find_or_create=self.enable_reuse
).execute(num_retries=self.arvrunner.num_retries)
- for k,v in job_spec["script_parameters"].items():
+ for k,v in list(job_spec["script_parameters"].items()):
if v is False or v is None or isinstance(v, dict):
job_spec["script_parameters"][k] = {"value": v}
}
def __init__(self, runner, tool, job_order, enable_reuse, uuid,
- submit_runner_ram=0, name=None, merged_map=None):
+ submit_runner_ram=0, name=None, merged_map=None,
+ loadingContext=None):
self.runner = runner
- self.tool = tool
+ self.embedded_tool = tool
self.job = RunnerJob(
runner=runner,
tool=tool,
- job_order=job_order,
enable_reuse=enable_reuse,
output_name=None,
output_tags=None,
submit_runner_ram=submit_runner_ram,
name=name,
- merged_map=merged_map)
+ merged_map=merged_map,
+ loadingContext=loadingContext)
+ self.job.job_order = job_order
self.uuid = uuid
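Because job_order is no longer passed to the RunnerJob constructor in this branch, RunnerTemplate builds the job first and then attaches the order to it, while loadingContext is simply threaded through. A hypothetical call site, with every argument value a placeholder rather than a real Arvados object:

    # Sketch only; arvrunner, embedded_tool and loading_context are assumed to
    # exist and are not defined in this diff.
    template = RunnerTemplate(arvrunner, embedded_tool,
                              job_order={"message": "hello world"},
                              enable_reuse=True,
                              uuid=None,
                              submit_runner_ram=1024,
                              name="example-workflow",
                              merged_map={},
                              loadingContext=loading_context)
    # Equivalent effect: the RunnerJob is constructed without job_order, then
    # template.job.job_order is set to the inputs dict afterwards.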
def pipeline_component_spec(self):
job_params = spec['script_parameters']
spec['script_parameters'] = {}
- for param in self.tool.tool['inputs']:
+ for param in self.embedded_tool.tool['inputs']:
param = copy.deepcopy(param)
# Data type and "required" flag...