LICENSE_STRING=`grep license $WORKSPACE/$PKG_DIR/setup.py|cut -f2 -d=|sed -e "s/[',\\"]//g"`
COMMAND_ARR+=('--license' "$LICENSE_STRING")
+ if [[ "$FORMAT" == "rpm" ]]; then
+ # Make sure to conflict with the old rh-python36 packages we used to publish
+ COMMAND_ARR+=('--conflicts' "rh-python36-python-$PKG")
+ fi
+
if [[ "$DEBUG" != "0" ]]; then
COMMAND_ARR+=('--verbose' '--log' 'info')
fi
fi
# the python3-arvados-cwl-runner package comes with cwltool, expose that version
- if [[ -e "$WORKSPACE/$PKG_DIR/dist/build/usr/share/$python/dist/python-arvados-cwl-runner/bin/cwltool" ]]; then
- COMMAND_ARR+=("usr/share/$python/dist/python-arvados-cwl-runner/bin/cwltool=/usr/bin/")
+ if [[ -e "$WORKSPACE/$PKG_DIR/dist/build/usr/share/$python/dist/$PYTHON_PKG/bin/cwltool" ]]; then
+ COMMAND_ARR+=("usr/share/$python/dist/$PYTHON_PKG/bin/cwltool=/usr/bin/")
fi
COMMAND_ARR+=(".")
- Containers API (lsf):
- install/crunch2-lsf/install-dispatch.html.textile.liquid
- Additional configuration:
+ - install/singularity.html.textile.liquid
- install/container-shell-access.html.textile.liquid
- External dependencies:
- install/install-postgresql.html.textile.liquid
The @crunch-dispatch-local@ dispatcher now reads the API host and token from the system-wide @/etc/arvados/config.yml@. It will fail to start if that file is not found or not readable.
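A minimal sketch of the kind of configuration the dispatcher relies on (the cluster id, token, and URL here are placeholders; consult the configuration reference for the authoritative layout):

<notextile>
<pre><code>Clusters:
  zzzzz:
    SystemRootToken: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    Services:
      Controller:
        ExternalURL: "https://zzzzz.example.com"
</code></pre>
</notextile>
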
-h2(#v2_2_0). v2.2.0 (2021-06-03)
-
-"Upgrading from 2.1.0":#v2_1_0
-
h3. Multi-file docker image collections
Typically a docker image collection contains a single @.tar@ file at the top level. Handling of atypical cases has changed. If a docker image collection contains files with extensions other than @.tar@, they will be ignored (previously they could cause errors). If a docker image collection contains multiple @.tar@ files, it will cause an error at runtime, "cannot choose from multiple tar files in image collection" (previously one of the @.tar@ files was selected). Subdirectories are ignored. The @arv keep docker@ command always creates a collection with a single @.tar@ file, and never uses subdirectories, so this change will not affect most users.
+h2(#v2_2_0). v2.2.0 (2021-06-03)
+
+"Upgrading from 2.1.0":#v2_1_0
+
h3. New spelling of S3 credential configs
If you use the S3 driver for Keep volumes and specify credentials in your configuration file (as opposed to using an IAM role), you should change the spelling of the @AccessKey@ and @SecretKey@ config keys to @AccessKeyID@ and @SecretAccessKey@. If you don't update them, the previous spellings will still be accepted, but warnings will be logged at server startup.
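For example, a Keep volume using the S3 driver would now be configured along these lines (a sketch; the volume id and credential values are placeholders):

<notextile>
<pre><code>Clusters:
  zzzzz:
    Volumes:
      zzzzz-nyw5e-000000000000000:
        Driver: S3
        DriverParameters:
          AccessKeyID: xxxxxxxxxxxxxxxxxxxx
          SecretAccessKey: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
</code></pre>
</notextile>
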
--- /dev/null
+---
+layout: default
+navsection: installguide
+title: Singularity container runtime
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Arvados can be configured to use "Singularity":https://sylabs.io/singularity/ instead of Docker to execute containers on cloud nodes or a SLURM/LSF cluster. Singularity may be preferable due to its simpler installation and because it does not require a long-running daemon process or special system users/groups.
+
+Please note:
+* *Singularity support is currently considered experimental.*
+* Even when using the Singularity runtime, users' container images are expected to be saved in Docker format using @arv keep docker@. Arvados converts the Docker image to Singularity format (@.sif@) at runtime as needed. Specifying a @.sif@ file as an image when submitting a container request is not yet supported.
+* Singularity does not limit the amount of memory available in a container. Each container will have access to all memory on the host where it runs, unless memory use is restricted by SLURM/LSF.
+* Programs running in containers may behave differently due to differences between Singularity and Docker.
+** The root (image) filesystem is read-only in a Singularity container. Programs that attempt to write outside a designated output or temporary directory are likely to fail.
+** The Docker ENTRYPOINT instruction is ignored.
+* Arvados is currently tested with Singularity version 3.5.2.
+
+To use Singularity, first make sure "Singularity is installed":https://sylabs.io/guides/3.5/user-guide/quick_start.html on your cloud worker image or SLURM/LSF compute nodes as applicable. Note that @squashfs-tools@ is required.
+
+<notextile>
+<pre><code>$ <span class="userinput">singularity version</span>
+3.5.2
+$ <span class="userinput">mksquashfs -version</span>
+mksquashfs version 4.3-git (2014/06/09)
+[...]
+</code></pre>
+</notextile>
+
+Then update @Containers.RuntimeEngine@ in your cluster configuration:
+
+<notextile>
+<pre><code> # Container runtime: "docker" (default) or "singularity" (experimental)
+ RuntimeEngine: singularity
+</code></pre>
+</notextile>
+
+Restart your dispatcher (@crunch-dispatch-slurm@, @arvados-dispatch-cloud@, or @arvados-dispatch-lsf@) after updating your configuration file.
{% codeblock as yaml %}
hints:
arv:RunInSingleContainer: {}
+
arv:RuntimeConstraints:
keep_cache: 123456
outputDirType: keep_output_dir
+
arv:PartitionRequirement:
partition: dev_partition
+
arv:APIRequirement: {}
- cwltool:LoadListingRequirement:
- loadListing: shallow_listing
+
arv:IntermediateOutput:
outputTTL: 3600
- arv:ReuseRequirement:
- enableReuse: false
+
cwltool:Secrets:
secrets: [input1, input2]
- cwltool:TimeLimit:
- timelimit: 14400
+
arv:WorkflowRunnerResources:
ramMin: 2048
coresMin: 2
keep_cache: 512
+
arv:ClusterTarget:
cluster_id: clsr1
project_uuid: clsr1-j7d0g-qxc4jcji7n4lafx
+
+ arv:OutputStorageClass:
+ intermediateStorageClass: fast_storage
+ finalStorageClass: robust_storage
{% endcodeblock %}
h2(#RunInSingleContainer). arv:RunInSingleContainer
|cluster_id|string|The five-character alphanumeric cluster id (uuid prefix) where a container or subworkflow will execute. May be an expression.|
|project_uuid|string|The uuid of the project which will own the container request and output of the container. May be an expression.|
+h2(#OutputStorageClass). arv:OutputStorageClass
+
+Specify the "storage class":{{site.baseurl}}/user/topics/storage-classes.html to use for intermediate and final outputs.
+
+table(table table-bordered table-condensed).
+|_. Field |_. Type |_. Description |
+|intermediateStorageClass|string or array of strings|The storage class for output of intermediate steps. For example, faster "hot" storage.|
+|finalStorageClass|string or array of strings|The storage class for the final output.|
+
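+For example, a workflow could keep intermediate outputs on faster storage while sending the final output to more durable volumes (a sketch; the class names are illustrative and must match storage classes defined in your cluster configuration):
+
+{% codeblock as yaml %}
+hints:
+  arv:OutputStorageClass:
+    intermediateStorageClass: fast_storage
+    finalStorageClass: [robust_storage]
+{% endcodeblock %}
+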
h2. arv:dockerCollectionPDH
This is an optional extension field appearing on the standard @DockerRequirement@. It specifies the portable data hash of the Arvados collection containing the Docker image. If present, it takes precedence over @dockerPull@ or @dockerImageId@.
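For example (a sketch; the image name and portable data hash are placeholders):

{% codeblock as yaml %}
requirements:
  DockerRequirement:
    dockerPull: "debian:10"
    arv:dockerCollectionPDH: "0123456789abcdef0123456789abcdef+123"
{% endcodeblock %}
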
The following extensions are deprecated because equivalent features are part of the CWL v1.1 standard.
+{% codeblock as yaml %}
+hints:
+ cwltool:LoadListingRequirement:
+ loadListing: shallow_listing
+ arv:ReuseRequirement:
+ enableReuse: false
+ cwltool:TimeLimit:
+ timelimit: 14400
+{% endcodeblock %}
+
h2. cwltool:LoadListingRequirement
For CWL v1.1 scripts, this is deprecated in favor of "loadListing":https://www.commonwl.org/v1.1/CommandLineTool.html#CommandInputParameter or "LoadListingRequirement":https://www.commonwl.org/v1.1/CommandLineTool.html#LoadListingRequirement
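For example, the equivalent CWL v1.1 standard syntax is (a minimal sketch):

{% codeblock as yaml %}
requirements:
  LoadListingRequirement:
    loadListing: shallow_listing
{% endcodeblock %}
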
|==--no-wait==| Submit workflow runner and exit.|
|==--log-timestamps==| Prefix logging lines with timestamp|
|==--no-log-timestamps==| No timestamp on logging lines|
-|==--api== {containers}|Select work submission API. Only supports 'containers'|
|==--compute-checksum==| Compute checksum of contents while collecting outputs|
|==--submit-runner-ram== SUBMIT_RUNNER_RAM|RAM (in MiB) required for the workflow runner (default 1024)|
|==--submit-runner-image== SUBMIT_RUNNER_IMAGE|Docker image for workflow runner|
|==--always-submit-runner==|When invoked with --submit --wait, always submit a runner to manage the workflow, even when only running a single CommandLineTool|
-|==--submit-request-uuid== UUID|Update and commit to supplied container request instead of creating a new one (containers API only).|
-|==--submit-runner-cluster== CLUSTER_ID|Submit workflow runner to a remote cluster (containers API only)|
+|==--submit-request-uuid== UUID|Update and commit to supplied container request instead of creating a new one.|
+|==--submit-runner-cluster== CLUSTER_ID|Submit workflow runner to a remote cluster|
|==--name NAME==|Name to use for workflow execution instance.|
|==--on-error== {stop,continue}|Desired workflow behavior when a step fails. One of 'stop' (do not submit any more steps) or 'continue' (may submit other steps that are not downstream from the error). Default is 'continue'.|
|==--enable-dev==|Enable loading and running development versions of CWL spec.|
-|==--storage-classes== STORAGE_CLASSES|Specify comma separated list of storage classes to be used when saving workflow output to Keep.|
+|==--storage-classes== STORAGE_CLASSES|Specify comma separated list of storage classes to be used when saving the final workflow output to Keep.|
+|==--intermediate-storage-classes== STORAGE_CLASSES|Specify comma separated list of storage classes to be used when saving intermediate workflow output to Keep.|
|==--intermediate-output-ttl== N|If N > 0, intermediate output collections will be trashed N seconds after creation. Default is 0 (don't trash).|
-|==--priority== PRIORITY|Workflow priority (range 1..1000, higher has precedence over lower, containers api only)|
+|==--priority== PRIORITY|Workflow priority (range 1..1000, higher has precedence over lower)|
|==--thread-count== THREAD_COUNT|Number of threads to use for container submit and output collection.|
|==--http-timeout== HTTP_TIMEOUT|API request timeout in seconds. Default is 300 seconds (5 minutes).|
|==--trash-intermediate==|Immediately trash intermediate outputs on workflow success.|
---
layout: default
navsection: userguide
-title: "Working with Docker images"
+title: "Working with container images"
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-This page describes how to set up the runtime environment (e.g., the programs, libraries, and other dependencies needed to run a job) that a workflow step will be run in using "Docker.":https://www.docker.com/ Docker is a tool for building and running containers that isolate applications from other applications running on the same node. For detailed information about Docker, see the "Docker User Guide.":https://docs.docker.com/userguide/
+This page describes how to set up the runtime environment (e.g., the programs, libraries, and other dependencies needed to run a job) that a workflow step will be run in using "Docker":https://www.docker.com/ or "Singularity":https://sylabs.io/singularity/. Docker and Singularity are tools for building and running containers that isolate applications from other applications running on the same node. For detailed information, see the "Docker User Guide":https://docs.docker.com/userguide/ and the "Introduction to Singularity":https://sylabs.io/guides/3.5/user-guide/introduction.html.
+
+Note that Arvados always works with Docker images, even when it is configured to use Singularity to run containers. There are some differences between the two runtimes that can affect your containers. See the "Singularity container runtime":{{site.baseurl}}/install/singularity.html page for details.
This page describes:
{% include 'tutorial_expectations_workstation' %}
-You also need ensure that "Docker is installed,":https://docs.docker.com/installation/ the Docker daemon is running, and you have permission to access Docker. You can test this by running @docker version@. If you receive a permission denied error, your user account may need to be added to the @docker@ group. If you have root access, you can add yourself to the @docker@ group using @$ sudo addgroup $USER docker@ then log out and log back in again; otherwise consult your local sysadmin.
+You also need to ensure that "Docker is installed,":https://docs.docker.com/installation/ the Docker daemon is running, and you have permission to access Docker. You can test this by running @docker version@. If you receive a permission denied error, your user account may need to be added to the @docker@ group. If you have root access, you can add yourself to the @docker@ group using @$ sudo addgroup $USER docker@ then log out and log back in again; otherwise consult your local sysadmin.
h2(#create). Create a custom image using a Dockerfile
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-Storage classes (alternately known as "storage tiers") allow you to control which volumes should be used to store particular collection data blocks. This can be used to implement data storage policies such as moving data to archival storage.
+Storage classes (sometimes called "storage tiers") allow you to control which back-end storage volumes should be used to store the data blocks of a particular collection. This can be used to implement data storage policies such as assigning data collections to "fast", "robust" or "archival" storage.
-Names of storage classes are internal to the cluster and decided by the administrator. Aside from "default", Arvados currently does not define any standard storage class names.
+Names of storage classes are internal to the cluster and decided by the administrator. Aside from "default", Arvados currently does not define any standard storage class names. Consult your cluster administrator for guidance on what storage classes are available to use on your specific Arvados instance.
+
+Note that changing the storage class of an existing collection does not take effect immediately: the data blocks are asynchronously copied to the new storage class and removed from the old one. The collection field "storage_classes_confirmed" is updated to reflect when data blocks have been successfully copied.
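+
+For example, shortly after requesting a move to an @archival@ class, a collection record might look like this (a sketch with illustrative values):
+
+<pre>
+storage_classes_desired: ["archival"]
+storage_classes_confirmed: []
+storage_classes_confirmed_at: null
+</pre>
+
+Once the blocks have been copied, "storage_classes_confirmed" becomes @["archival"]@ and "storage_classes_confirmed_at" is set.
+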
h3. arv-put
h3. arvados-cwl-runner
-You may also specify the desired storage class for the final output collection produced by @arvados-cwl-runner@:
+You may specify the desired storage class for the intermediate and final output collections produced by @arvados-cwl-runner@ on the command line or using the "arv:OutputStorageClass hint":{{site.baseurl}}/user/cwl/cwl-extensions.html#OutputStorageClass .
<pre>
-$ arvados-cwl-runner --storage-classes=hot myworkflow.cwl myinput.yml
+$ arvados-cwl-runner --intermediate-storage-classes=hot_storage --storage-classes=robust_storage myworkflow.cwl myinput.yml
</pre>
-(Note: intermediate collections produced by a workflow run will use the cluster's default storage class(es).)
-
h3. arv command line
You may set the storage class on an existing collection by setting the "storage_classes_desired" field of a Collection. For example, at the command line:
import cwltool.workflow
import cwltool.process
import cwltool.argparser
+from cwltool.errors import WorkflowException
from cwltool.process import shortname, UnsupportedRequirement, use_custom_schema
from cwltool.utils import adjustFileObjs, adjustDirObjs, get_listing
help="Enable loading and running development versions "
"of the CWL standards.", default=False)
parser.add_argument('--storage-classes', default="default",
- help="Specify comma separated list of storage classes to be used when saving workflow output to Keep.")
+ help="Specify comma separated list of storage classes to be used when saving final workflow output to Keep.")
+ parser.add_argument('--intermediate-storage-classes', default="default",
+ help="Specify comma separated list of storage classes to be used when saving intermediate workflow output to Keep.")
parser.add_argument("--intermediate-output-ttl", type=int, metavar="N",
help="If N > 0, intermediate output collections will be trashed N seconds after creation. Default is 0 (don't trash).",
"http://commonwl.org/cwltool#LoadListingRequirement",
"http://arvados.org/cwl#IntermediateOutput",
"http://arvados.org/cwl#ReuseRequirement",
- "http://arvados.org/cwl#ClusterTarget"
+ "http://arvados.org/cwl#ClusterTarget",
+ "http://arvados.org/cwl#OutputStorageClass"
])
def exit_signal_handler(sigcode, frame):
job_order_object = None
arvargs = parser.parse_args(args)
- if len(arvargs.storage_classes.strip().split(',')) > 1:
- logger.error(str(u"Multiple storage classes are not supported currently."))
- return 1
-
arvargs.use_container = True
arvargs.relax_path_checks = True
arvargs.print_supported_versions = False
if keep_client is None:
keep_client = arvados.keep.KeepClient(api_client=api_client, num_retries=4)
executor = ArvCwlExecutor(api_client, arvargs, keep_client=keep_client, num_retries=4)
+ except WorkflowException as e:
+ logger.error(e, exc_info=(sys.exc_info()[1] if arvargs.debug else False))
+ return 1
except Exception:
logger.exception("Error creating the Arvados CWL Executor")
return 1
project_uuid:
type: string?
doc: The project that will own the container requests and intermediate collections
+
+
+- name: OutputStorageClass
+ type: record
+ extends: cwl:ProcessRequirement
+ inVocab: false
+ doc: |
+ Specify the storage class to be used for intermediate and final output
+ fields:
+ class:
+ type: string
+ doc: "Always 'arv:StorageClassHint"
+ jsonldPredicate:
+ _id: "@type"
+ _type: "@vocab"
+ intermediateStorageClass:
+ type:
+ - "null"
+ - string
+ - type: array
+ items: string
+ doc: One or more storage classes
+ finalStorageClass:
+ type:
+ - "null"
+ - string
+ - type: array
+ items: string
+ doc: One or more storage classes
project_uuid:
type: string?
doc: The project that will own the container requests and intermediate collections
+
+- name: OutputStorageClass
+ type: record
+ extends: cwl:ProcessRequirement
+ inVocab: false
+ doc: |
+ Specify the storage class to be used for intermediate and final output
+ fields:
+ class:
+ type: string
+ doc: "Always 'arv:StorageClassHint"
+ jsonldPredicate:
+ _id: "@type"
+ _type: "@vocab"
+ intermediateStorageClass:
+ type:
+ - "null"
+ - string
+ - type: array
+ items: string
+ doc: One or more storage classes
+ finalStorageClass:
+ type:
+ - "null"
+ - string
+ - type: array
+ items: string
+ doc: One or more storage classes
project_uuid:
type: string?
doc: The project that will own the container requests and intermediate collections
+
+
+- name: OutputStorageClass
+ type: record
+ extends: cwl:ProcessRequirement
+ inVocab: false
+ doc: |
+ Specify the storage class to be used for intermediate and final output
+ fields:
+ class:
+ type: string
+ doc: "Always 'arv:StorageClassHint"
+ jsonldPredicate:
+ _id: "@type"
+ _type: "@vocab"
+ intermediateStorageClass:
+ type:
+ - "null"
+ - string
+ - type: array
+ items: string
+ doc: One or more storage classes
+ finalStorageClass:
+ type:
+ - "null"
+ - string
+ - type: array
+ items: string
+ doc: One or more storage classes
if self.output_ttl < 0:
raise WorkflowException("Invalid value %d for output_ttl, cannot be less than zero" % container_request["output_ttl"])
+ storage_class_req, _ = self.get_requirement("http://arvados.org/cwl#OutputStorageClass")
+ if storage_class_req and storage_class_req.get("intermediateStorageClass"):
+ container_request["output_storage_classes"] = aslist(storage_class_req["intermediateStorageClass"])
+ else:
+ container_request["output_storage_classes"] = runtimeContext.intermediate_storage_classes.strip().split(",")
+
if self.timelimit is not None and self.timelimit > 0:
scheduling_parameters["max_run_time"] = self.timelimit
if runtimeContext.storage_classes != "default":
command.append("--storage-classes=" + runtimeContext.storage_classes)
+ if runtimeContext.intermediate_storage_classes != "default":
+ command.append("--intermediate-storage-classes=" + runtimeContext.intermediate_storage_classes)
+
if self.on_error:
command.append("--on-error=" + self.on_error)
if runtimeContext.project_uuid:
cluster_target = runtimeContext.submit_runner_cluster or arvrunner.api._rootDesc["uuidPrefix"]
if not runtimeContext.project_uuid.startswith(cluster_target):
- raise WorkflowException("Project uuid '%s' must be for target cluster '%s'" % (runtimeContext.project_uuid, cluster_target))
+ raise WorkflowException("Project uuid '%s' should start with id of target cluster '%s'" % (runtimeContext.project_uuid, cluster_target))
+
try:
- arvrunner.api.groups().get(uuid=runtimeContext.project_uuid).execute()
+ if runtimeContext.project_uuid[5:12] == '-tpzed-':
+ arvrunner.api.users().get(uuid=runtimeContext.project_uuid).execute()
+ else:
+ proj = arvrunner.api.groups().get(uuid=runtimeContext.project_uuid).execute()
+ if proj["group_class"] != "project":
+ raise Exception("not a project, group_class is '%s'" % (proj["group_class"]))
except Exception as e:
raise WorkflowException("Invalid project uuid '%s': %s" % (runtimeContext.project_uuid, e))
self.wait = True
self.cwl_runner_job = None
self.storage_classes = "default"
+ self.intermediate_storage_classes = "default"
self.current_container = None
self.http_timeout = 300
self.submit_runner_cluster = None
from ._version import __version__
from cwltool.process import shortname, UnsupportedRequirement, use_custom_schema
-from cwltool.utils import adjustFileObjs, adjustDirObjs, get_listing, visit_class
+from cwltool.utils import adjustFileObjs, adjustDirObjs, get_listing, visit_class, aslist
from cwltool.command_line_tool import compute_checksums
from cwltool.load_tool import load_tool
if runtimeContext.submit_request_uuid and self.work_api != "containers":
raise Exception("--submit-request-uuid requires containers API, but using '{}' api".format(self.work_api))
+ default_storage_classes = ",".join([k for k,v in self.api.config()["StorageClasses"].items() if v.get("Default") is True])
+ if runtimeContext.storage_classes == "default":
+ runtimeContext.storage_classes = default_storage_classes
+ if runtimeContext.intermediate_storage_classes == "default":
+ runtimeContext.intermediate_storage_classes = default_storage_classes
+
if not runtimeContext.name:
runtimeContext.name = self.name = updated_tool.tool.get("label") or updated_tool.metadata.get("label") or os.path.basename(updated_tool.tool["id"])
if self.output_tags is None:
self.output_tags = ""
- storage_classes = runtimeContext.storage_classes.strip().split(",")
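+ # Use the finalStorageClass from the arv:OutputStorageClass hint when present;
+ # otherwise fall back to the --storage-classes value.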
+ storage_classes = ""
+ storage_class_req, _ = tool.get_requirement("http://arvados.org/cwl#OutputStorageClass")
+ if storage_class_req and storage_class_req.get("finalStorageClass"):
+ storage_classes = aslist(storage_class_req["finalStorageClass"])
+ else:
+ storage_classes = runtimeContext.storage_classes.strip().split(",")
+
self.final_output, self.final_output_collection = self.make_output_collection(self.output_name, storage_classes, self.output_tags, self.final_output)
self.set_crunch_output()
'cwd': '/var/spool/cwl',
'scheduling_parameters': {},
'properties': {},
- 'secret_mounts': {}
+ 'secret_mounts': {},
+ 'output_storage_classes': ["default"]
}))
# The test passes some fields in builder.resources
'partitions': ['blurb']
},
'properties': {},
- 'secret_mounts': {}
+ 'secret_mounts': {},
+ 'output_storage_classes': ["default"]
}
call_body = call_kwargs.get('body', None)
'scheduling_parameters': {
},
'properties': {},
- 'secret_mounts': {}
+ 'secret_mounts': {},
+ 'output_storage_classes': ["default"]
}
call_body = call_kwargs.get('body', None)
'cwd': '/var/spool/cwl',
'scheduling_parameters': {},
'properties': {},
- 'secret_mounts': {}
+ 'secret_mounts': {},
+ 'output_storage_classes': ["default"]
}))
@mock.patch("arvados.collection.Collection")
'cwd': '/var/spool/cwl',
'scheduling_parameters': {},
'properties': {},
- 'secret_mounts': {}
+ 'secret_mounts': {},
+ 'output_storage_classes': ["default"]
}))
# The test passes no builder.resources
"content": "username: user\npassword: blorp\n",
"kind": "text"
}
- }
+ },
+ 'output_storage_classes': ["default"]
}))
# The test passes no builder.resources
self.assertEqual(42, kwargs['body']['scheduling_parameters'].get('max_run_time'))
+ # The test passes no builder.resources
+ # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
+ @mock.patch("arvados.commands.keepdocker.list_images_in_arv")
+ def test_setting_storage_class(self, keepdocker):
+ arv_docker_clear_cache()
+
+ runner = mock.MagicMock()
+ runner.ignore_docker_for_reuse = False
+ runner.intermediate_output_ttl = 0
+ runner.secret_store = cwltool.secrets.SecretStore()
+
+ keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
+ runner.api.collections().get().execute.return_value = {
+ "portable_data_hash": "99999999999999999999999999999993+99"}
+
+ tool = cmap({
+ "inputs": [],
+ "outputs": [],
+ "baseCommand": "ls",
+ "arguments": [{"valueFrom": "$(runtime.outdir)"}],
+ "id": "#",
+ "class": "CommandLineTool",
+ "hints": [
+ {
+ "class": "http://arvados.org/cwl#OutputStorageClass",
+ "finalStorageClass": ["baz_sc", "qux_sc"],
+ "intermediateStorageClass": ["foo_sc", "bar_sc"]
+ }
+ ]
+ })
+
+ loadingContext, runtimeContext = self.helper(runner, True)
+
+ arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)
+ arvtool.formatgraph = None
+
+ for j in arvtool.job({}, mock.MagicMock(), runtimeContext):
+ j.run(runtimeContext)
+ runner.api.container_requests().create.assert_called_with(
+ body=JsonDiffMatcher({
+ 'environment': {
+ 'HOME': '/var/spool/cwl',
+ 'TMPDIR': '/tmp'
+ },
+ 'name': 'test_run_True',
+ 'runtime_constraints': {
+ 'vcpus': 1,
+ 'ram': 1073741824
+ },
+ 'use_existing': True,
+ 'priority': 500,
+ 'mounts': {
+ '/tmp': {'kind': 'tmp',
+ "capacity": 1073741824
+ },
+ '/var/spool/cwl': {'kind': 'tmp',
+ "capacity": 1073741824 }
+ },
+ 'state': 'Committed',
+ 'output_name': 'Output for step test_run_True',
+ 'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',
+ 'output_path': '/var/spool/cwl',
+ 'output_ttl': 0,
+ 'container_image': '99999999999999999999999999999993+99',
+ 'command': ['ls', '/var/spool/cwl'],
+ 'cwd': '/var/spool/cwl',
+ 'scheduling_parameters': {},
+ 'properties': {},
+ 'secret_mounts': {},
+ 'output_storage_classes': ["foo_sc", "bar_sc"]
+ }))
+
+
class TestWorkflow(unittest.TestCase):
def setUp(self):
cwltool.process._names = set()
"scheduling_parameters": {},
"secret_mounts": {},
"state": "Committed",
- "use_existing": True
+ "use_existing": True,
+ 'output_storage_classes': ["default"]
}))
mockc.open().__enter__().write.assert_has_calls([mock.call(subwf)])
mockc.open().__enter__().write.assert_has_calls([mock.call(
],
'use_existing': True,
'output_name': u'Output for step echo-subwf',
- 'cwd': '/var/spool/cwl'
+ 'cwd': '/var/spool/cwl',
+ 'output_storage_classes': ["default"]
}))
def test_default_work_api(self):
stubs.api.containers().current().execute.return_value = {
"uuid": stubs.fake_container_uuid,
}
+ stubs.api.config()["StorageClasses"].items.return_value = {
+ "default": {
+ "Default": True
+ }
+ }.items()
class CollectionExecute(object):
def __init__(self, exe):
cwltool.process._names = set()
arvados_cwl.arvdocker.arv_docker_clear_cache()
- @stubs
- def test_error_when_multiple_storage_classes_specified(self, stubs):
- storage_classes = "foo,bar"
- exited = arvados_cwl.main(
- ["--debug", "--storage-classes", storage_classes,
- "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
- sys.stdin, sys.stderr, api_client=stubs.api)
- self.assertEqual(exited, 1)
@mock.patch("time.sleep")
@stubs
stubs.expect_container_request_uuid + '\n')
self.assertEqual(exited, 0)
+ @stubs
+ def test_submit_multiple_storage_classes(self, stubs):
+ exited = arvados_cwl.main(
+ ["--debug", "--submit", "--no-wait", "--api=containers", "--storage-classes=foo,bar", "--intermediate-storage-classes=baz",
+ "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+ stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+ expect_container = copy.deepcopy(stubs.expect_container_spec)
+ expect_container["command"] = ['arvados-cwl-runner', '--local', '--api=containers',
+ '--no-log-timestamps', '--disable-validate', '--disable-color',
+ '--eval-timeout=20', '--thread-count=0',
+ '--enable-reuse', "--collection-cache-size=256", "--debug",
+ "--storage-classes=foo,bar", "--intermediate-storage-classes=baz", '--on-error=continue',
+ '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']
+
+ stubs.api.container_requests().create.assert_called_with(
+ body=JsonDiffMatcher(expect_container))
+ self.assertEqual(stubs.capture_stdout.getvalue(),
+ stubs.expect_container_request_uuid + '\n')
+ self.assertEqual(exited, 0)
+
@mock.patch("cwltool.task_queue.TaskQueue")
@mock.patch("arvados_cwl.arvworkflow.ArvadosWorkflow.job")
@mock.patch("arvados_cwl.executor.ArvCwlExecutor.make_output_collection")
make_output.assert_called_with(u'Output of submit_wf.cwl', ['default'], '', 'zzzzz-4zz18-zzzzzzzzzzzzzzzz')
self.assertEqual(exited, 0)
+ @mock.patch("cwltool.task_queue.TaskQueue")
+ @mock.patch("arvados_cwl.arvworkflow.ArvadosWorkflow.job")
+ @mock.patch("arvados_cwl.executor.ArvCwlExecutor.make_output_collection")
+ @stubs
+ def test_storage_class_hint_to_make_output_collection(self, stubs, make_output, job, tq):
+ final_output_c = arvados.collection.Collection()
+ make_output.return_value = ({},final_output_c)
+
+ def set_final_output(job_order, output_callback, runtimeContext):
+ output_callback("zzzzz-4zz18-zzzzzzzzzzzzzzzz", "success")
+ return []
+ job.side_effect = set_final_output
+
+ exited = arvados_cwl.main(
+ ["--debug", "--local",
+ "tests/wf/submit_storage_class_wf.cwl", "tests/submit_test_job.json"],
+ stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+
+ make_output.assert_called_with(u'Output of submit_storage_class_wf.cwl', ['foo', 'bar'], '', 'zzzzz-4zz18-zzzzzzzzzzzzzzzz')
+ self.assertEqual(exited, 0)
+
@stubs
def test_submit_container_output_ttl(self, stubs):
exited = arvados_cwl.main(
@stubs
def test_submit_container_project(self, stubs):
project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+ stubs.api.groups().get().execute.return_value = {"group_class": "project"}
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug", "--project-uuid="+project_uuid,
"tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
@stubs
def test_submit_validate_project_uuid(self, stubs):
+ # Fails with bad cluster prefix
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug", "--project-uuid=zzzzb-j7d0g-zzzzzzzzzzzzzzz",
"tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
self.assertEqual(exited, 1)
+ # Project lookup fails
stubs.api.groups().get().execute.side_effect = Exception("Bad project")
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug", "--project-uuid=zzzzz-j7d0g-zzzzzzzzzzzzzzx",
stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
self.assertEqual(exited, 1)
+ # It should work this time because it is looking up a user (and only group is stubbed out to fail)
+ exited = arvados_cwl.main(
+ ["--submit", "--no-wait", "--api=containers", "--debug", "--project-uuid=zzzzz-tpzed-zzzzzzzzzzzzzzx",
+ "tests/wf/submit_wf.cwl", "tests/submit_test_job.json"],
+ stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)
+ self.assertEqual(exited, 0)
+
+
@mock.patch("arvados.collection.CollectionReader")
@stubs
def test_submit_uuid_inputs(self, stubs, collectionReader):
@stubs
def test_create(self, stubs):
project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+ stubs.api.groups().get().execute.return_value = {"group_class": "project"}
exited = arvados_cwl.main(
["--create-workflow", "--debug",
@stubs
def test_create_name(self, stubs):
project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+ stubs.api.groups().get().execute.return_value = {"group_class": "project"}
exited = arvados_cwl.main(
["--create-workflow", "--debug",
@stubs
def test_create_collection_per_tool(self, stubs):
project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+ stubs.api.groups().get().execute.return_value = {"group_class": "project"}
exited = arvados_cwl.main(
["--create-workflow", "--debug",
@stubs
def test_create_with_imports(self, stubs):
project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+ stubs.api.groups().get().execute.return_value = {"group_class": "project"}
exited = arvados_cwl.main(
["--create-workflow", "--debug",
@stubs
def test_create_with_no_input(self, stubs):
project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+ stubs.api.groups().get().execute.return_value = {"group_class": "project"}
exited = arvados_cwl.main(
["--create-workflow", "--debug",
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+# Test case for arvados-cwl-runner
+#
+# Used to test whether scanning a workflow file for dependencies
+# (e.g. submit_tool.cwl) and uploading to Keep works as intended.
+
+class: Workflow
+cwlVersion: v1.0
+$namespaces:
+ arv: "http://arvados.org/cwl#"
+hints:
+ arv:OutputStorageClass:
+ finalStorageClass: [foo, bar]
+inputs:
+ - id: x
+ type: File
+ - id: y
+ type: Directory
+ - id: z
+ type: Directory
+outputs: []
+steps:
+ - id: step1
+ in:
+ - { id: x, source: "#x" }
+ out: []
+ run: ../tool/submit_tool.cwl
arguments = [i for i in arguments if i not in (args.image, args.tag, image_repo_tag)]
put_args = keepdocker_parser.parse_known_args(arguments)[1]
+ # Don't fail when the cached manifest is invalid; just ignore the cache.
+ put_args += ['--batch']
+
if args.name is None:
put_args += ['--name', collection_name]
still be displayed.)
""")
+run_opts.add_argument('--batch', action='store_true', default=False,
+ help="""
+Retries with '--no-resume --no-cache' if cached state contains invalid/expired
+block signatures.
+""")
+
_group = run_opts.add_mutually_exclusive_group()
_group.add_argument('--resume', action='store_true', default=True,
help="""
}
def __init__(self, paths, resume=True, use_cache=True, reporter=None,
- name=None, owner_uuid=None, api_client=None,
+ name=None, owner_uuid=None, api_client=None, batch_mode=False,
ensure_unique_name=False, num_retries=None,
put_threads=None, replication_desired=None, filename=None,
update_time=60.0, update_collection=None, storage_classes=None,
self.paths = paths
self.resume = resume
self.use_cache = use_cache
+ self.batch_mode = batch_mode
self.update = False
self.reporter = reporter
# This will set to 0 before start counting, if no special files are going
# No cache file, set empty state
self._state = copy.deepcopy(self.EMPTY_STATE)
if not self._cached_manifest_valid():
- raise ResumeCacheInvalidError()
+ if not self.batch_mode:
+ raise ResumeCacheInvalidError()
+ else:
+ self.logger.info("Invalid signatures on cache file '{}' while being run in 'batch mode' -- continuing anyways.".format(self._cache_file.name))
+ self.use_cache = False # Don't overwrite preexisting cache file.
+ self._state = copy.deepcopy(self.EMPTY_STATE)
# Load the previous manifest so we can check if files were modified remotely.
self._local_collection = arvados.collection.Collection(
self._state['manifest'],
writer = ArvPutUploadJob(paths = args.paths,
resume = args.resume,
use_cache = args.use_cache,
+ batch_mode= args.batch,
filename = args.filename,
reporter = reporter,
api_client = api_client,
" or been created with another Arvados user's credentials.",
" Switch user or use one of the following options to restart upload:",
" --no-resume to start a new resume cache.",
- " --no-cache to disable resume cache."]))
+ " --no-cache to disable resume cache.",
+ " --batch to ignore the resume cache if invalid."]))
sys.exit(1)
except (CollectionUpdateError, PathDoesNotExistError) as error:
logger.error("\n".join([
r'INFO: Cache expired, starting from scratch.*')
self.assertEqual(p.returncode, 0)
- def test_invalid_signature_invalidates_cache(self):
- self.authorize_with('active')
- tmpdir = self.make_tmpdir()
- with open(os.path.join(tmpdir, 'somefile.txt'), 'w') as f:
- f.write('foo')
- # Upload a directory and get the cache file name
- p = subprocess.Popen([sys.executable, arv_put.__file__, tmpdir],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env=self.ENVIRON)
- (_, err) = p.communicate()
- self.assertRegex(err.decode(), r'INFO: Creating new cache file at ')
- self.assertEqual(p.returncode, 0)
- cache_filepath = re.search(r'INFO: Creating new cache file at (.*)',
- err.decode()).groups()[0]
- self.assertTrue(os.path.isfile(cache_filepath))
- # Load the cache file contents and modify the manifest to simulate
- # an invalid access token
- with open(cache_filepath, 'r') as c:
- cache = json.load(c)
- self.assertRegex(cache['manifest'], r'\+A\S+\@')
- cache['manifest'] = re.sub(
- r'\+A.*\@',
- "+Aabcdef0123456789abcdef0123456789abcdef01@",
- cache['manifest'])
- with open(cache_filepath, 'w') as c:
- c.write(json.dumps(cache))
- # Re-run the upload and expect to get an invalid cache message
- p = subprocess.Popen([sys.executable, arv_put.__file__, tmpdir],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env=self.ENVIRON)
- (_, err) = p.communicate()
- self.assertRegex(
- err.decode(),
- r'ERROR: arv-put: Resume cache contains invalid signature.*')
- self.assertEqual(p.returncode, 1)
+ def test_invalid_signature_in_cache(self):
+ for batch_mode in [False, True]:
+ self.authorize_with('active')
+ tmpdir = self.make_tmpdir()
+ with open(os.path.join(tmpdir, 'somefile.txt'), 'w') as f:
+ f.write('foo')
+ # Upload a directory and get the cache file name
+ arv_put_args = [tmpdir]
+ if batch_mode:
+ arv_put_args = ['--batch'] + arv_put_args
+ p = subprocess.Popen([sys.executable, arv_put.__file__] + arv_put_args,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=self.ENVIRON)
+ (_, err) = p.communicate()
+ self.assertRegex(err.decode(), r'INFO: Creating new cache file at ')
+ self.assertEqual(p.returncode, 0)
+ cache_filepath = re.search(r'INFO: Creating new cache file at (.*)',
+ err.decode()).groups()[0]
+ self.assertTrue(os.path.isfile(cache_filepath))
+ # Load the cache file contents and modify the manifest to simulate
+ # an invalid access token
+ with open(cache_filepath, 'r') as c:
+ cache = json.load(c)
+ self.assertRegex(cache['manifest'], r'\+A\S+\@')
+ cache['manifest'] = re.sub(
+ r'\+A.*\@',
+ "+Aabcdef0123456789abcdef0123456789abcdef01@",
+ cache['manifest'])
+ with open(cache_filepath, 'w') as c:
+ c.write(json.dumps(cache))
+ # Re-run the upload and expect to get an invalid cache message
+ p = subprocess.Popen([sys.executable, arv_put.__file__] + arv_put_args,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=self.ENVIRON)
+ (_, err) = p.communicate()
+ if not batch_mode:
+ self.assertRegex(
+ err.decode(),
+ r'ERROR: arv-put: Resume cache contains invalid signature.*')
+ self.assertEqual(p.returncode, 1)
+ else:
+ self.assertRegex(
+ err.decode(),
+ r'Invalid signatures on cache file \'.*\' while being run in \'batch mode\' -- continuing anyway.*')
+ self.assertEqual(p.returncode, 0)
def test_single_expired_signature_reuploads_file(self):
self.authorize_with('active')
def strip_signatures_and_update_replication_confirmed
if self.manifest_text_changed?
in_old_manifest = {}
- if not self.replication_confirmed.nil?
+ # manifest_text_was could be nil when dealing with a freshly created snapshot,
+ # so we skip this case because there was no real manifest change. (Bug #18005)
+ if (not self.replication_confirmed.nil?) and (not self.manifest_text_was.nil?)
self.class.each_manifest_locator(manifest_text_was) do |match|
in_old_manifest[match[1]] = true
end
namespace :db do
desc "Apply expiration policy on long lived tokens"
task fix_long_lived_tokens: :environment do
- if Rails.configuration.Login.TokenLifetime == 0
- puts("No expiration policy set on Login.TokenLifetime.")
- else
- exp_date = Time.now + Rails.configuration.Login.TokenLifetime
- puts("Setting token expiration to: #{exp_date}")
- token_count = 0
- ll_tokens.each do |auth|
- if (auth.user.uuid =~ /-tpzed-000000000000000/).nil?
- CurrentApiClientHelper.act_as_system_user do
- auth.update_attributes!(expires_at: exp_date)
- end
- token_count += 1
+ lifetime = Rails.configuration.API.MaxTokenLifetime
+ if lifetime.nil? or lifetime == 0
+ lifetime = Rails.configuration.Login.TokenLifetime
+ end
+ if lifetime.nil? or lifetime == 0
+ puts("No expiration policy set (API.MaxTokenLifetime nor Login.TokenLifetime is set), nothing to do.")
+ # abort the rake task
+ next
+ end
+ exp_date = Time.now + lifetime
+ puts("Setting token expiration to: #{exp_date}")
+ token_count = 0
+ ll_tokens(lifetime).each do |auth|
+ if auth.user.nil?
+ printf("*** WARNING, found ApiClientAuthorization with invalid user: auth id: %d, user id: %d\n", auth.id, auth.user_id)
+ # skip this token
+ next
+ end
+ if (auth.user.uuid =~ /-tpzed-000000000000000/).nil?
+ CurrentApiClientHelper.act_as_system_user do
+ auth.update_attributes!(expires_at: exp_date)
end
+ token_count += 1
end
- puts("#{token_count} tokens updated.")
end
+ puts("#{token_count} tokens updated.")
end
desc "Show users with long lived tokens"
task check_long_lived_tokens: :environment do
+ lifetime = Rails.configuration.API.MaxTokenLifetime
+ if lifetime.nil? or lifetime == 0
+ lifetime = Rails.configuration.Login.TokenLifetime
+ end
+ if lifetime.nil? or lifetime == 0
+ puts("No expiration policy set (API.MaxTokenLifetime nor Login.TokenLifetime is set), nothing to do.")
+ # abort the rake task
+ next
+ end
user_ids = Set.new()
token_count = 0
- ll_tokens.each do |auth|
- if (auth.user.uuid =~ /-tpzed-000000000000000/).nil?
+ ll_tokens(lifetime).each do |auth|
+ if auth.user.nil?
+ printf("*** WARNING, found ApiClientAuthorization with invalid user: auth id: %d, user id: %d\n", auth.id, auth.user_id)
+ # skip this token
+ next
+ end
+ if (auth.user.uuid =~ /-tpzed-000000000000000/).nil?
user_ids.add(auth.user_id)
token_count += 1
end
end
end
- def ll_tokens
+ def ll_tokens(lifetime)
query = ApiClientAuthorization.where(expires_at: nil)
- if Rails.configuration.Login.TokenLifetime > 0
- query = query.or(ApiClientAuthorization.where("expires_at > ?", Time.now + Rails.configuration.Login.TokenLifetime))
- end
+ query = query.or(ApiClientAuthorization.where("expires_at > ?", Time.now + lifetime))
query
end
end
c.reload
assert_equal 'foobar', c.name
assert_equal 2, c.version
+ # Simulate a keep-balance run and trigger a new versionable update
+ # This tests bug #18005
+ assert_nil c.replication_confirmed
+ assert_nil c.replication_confirmed_at
+ # Updates without validations/callbacks
+ c.update_column('modified_at', fifteen_min_ago)
+ c.update_column('replication_confirmed_at', Time.now)
+ c.update_column('replication_confirmed', 2)
+ c.reload
+ assert_equal fifteen_min_ago.to_i, c.modified_at.to_i
+ assert_not_nil c.replication_confirmed_at
+ assert_not_nil c.replication_confirmed
+ # Make the versionable update
+ c.update_attributes!({'name' => 'foobarbaz'})
+ c.reload
+ assert_equal 'foobarbaz', c.name
+ assert_equal 3, c.version
end
end