(
set -e
cd "$WORKSPACE/doc"
- ARVADOS_API_HOST=qr1hi.arvadosapi.com
+ ARVADOS_API_HOST=pirca.arvadosapi.com
# Make sure python-epydoc is installed or the next line won't
# do much good!
PYTHONPATH=$WORKSPACE/sdk/python/ "$bundle" exec rake linkchecker baseurl=file://$WORKSPACE/doc/.site/ arvados_workbench_host=https://workbench.$ARVADOS_API_HOST arvados_api_host=$ARVADOS_API_HOST
+++ /dev/null
-#!/usr/bin/env python
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-# Import the Arvados sdk module
-import arvados
-
-# Get information about the task from the environment
-this_task = arvados.current_task()
-
-this_task_input = arvados.current_job()['script_parameters']['input']
-
-# Create the object access to the collection referred to in the input
-collection = arvados.CollectionReader(this_task_input)
-
-# Create an object to write a new collection as output
-out = arvados.CollectionWriter()
-
-# Create a new file in the output collection
-with out.open('0-filter.txt') as out_file:
- # Iterate over every input file in the input collection
- for input_file in collection.all_files():
- # Output every line in the file that starts with '0'
- out_file.writelines(line for line in input_file if line.startswith('0'))
-
-# Commit the output to Keep.
-output_locator = out.finish()
-
-# Use the resulting locator as the output for this task.
-this_task.set_output(output_locator)
-
-# Done!
+++ /dev/null
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-<div class="alert alert-block alert-info">
- <button type="button" class="close" data-dismiss="alert">×</button>
- <h4>Hi!</h4>
- <P>This section is incomplete. Please be patient with us as we fill in the blanks — or <A href="https://dev.arvados.org/projects/arvados/wiki/Documentation#Contributing">contribute to the documentation project.</A></P>
-</div>
+++ /dev/null
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-<div class="alert alert-block alert-info">
- <button type="button" class="close" data-dismiss="alert">×</button>
- <h4>Hi!</h4>
- <p>This section is incomplete. Please be patient with us as we fill in the blanks — or <A href="https://dev.arvados.org/projects/arvados/wiki/Documentation#Contributing">contribute to the documentation project.</A></p>
-</div>
+++ /dev/null
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{% include 'notebox_begin' %}
-As stated above, arv-copy is recursive by default and requires a working git repository in the destination cluster. If you do not have a repository created, you can follow the "Adding a new repository":{{site.baseurl}}/user/tutorials/add-new-repository.html page. We will use the *tutorial* repository created in that page as the example.
-
-<br/>In addition, arv-copy requires git when copying to a git repository. Please make sure that git is installed and available.
-
-{% include 'notebox_end' %}
+++ /dev/null
-#!/usr/bin/env python
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-import hashlib
-import os
-import arvados
-
-# Jobs consist of one or more tasks. A task is a single invocation of
-# a crunch script.
-
-# Get the current task
-this_task = arvados.current_task()
-
-# Tasks have a sequence number for ordering. All tasks
-# with the current sequence number must finish successfully
-# before tasks in the next sequence are started.
-# The first task has sequence number 0
-if this_task['sequence'] == 0:
- # Get the "input" field from "script_parameters" on the task object
- job_input = arvados.current_job()['script_parameters']['input']
-
- # Create a collection reader to read the input
- cr = arvados.CollectionReader(job_input)
-
- # Loop over each stream in the collection (a stream is a subset of
- # files that logically represents a directory)
- for s in cr.all_streams():
-
- # Loop over each file in the stream
- for f in s.all_files():
-
- # Synthesize a manifest for just this file
- task_input = f.as_manifest()
-
- # Set attributes for a new task:
- # 'job_uuid' the job that this task is part of
- # 'created_by_job_task_uuid' this task that is creating the new task
- # 'sequence' the sequence number of the new task
- # 'parameters' the parameters to be passed to the new task
- new_task_attrs = {
- 'job_uuid': arvados.current_job()['uuid'],
- 'created_by_job_task_uuid': arvados.current_task()['uuid'],
- 'sequence': 1,
- 'parameters': {
- 'input':task_input
- }
- }
-
- # Ask the Arvados API server to create a new task, running the same
- # script as the parent task specified in 'created_by_job_task_uuid'
- arvados.api().job_tasks().create(body=new_task_attrs).execute()
-
- # Now tell the Arvados API server that this task executed successfully,
- # even though it doesn't have any output.
- this_task.set_output(None)
-else:
- # The task sequence was not 0, so it must be a parallel worker task
- # created by the first task
-
- # Instead of getting "input" from the "script_parameters" field of
- # the job object, we get it from the "parameters" field of the
- # task object
- this_task_input = this_task['parameters']['input']
-
- collection = arvados.CollectionReader(this_task_input)
-
- # There should only be one file in the collection, so get the
- # first one from the all files iterator.
- input_file = next(collection.all_files())
- output_path = os.path.normpath(os.path.join(input_file.stream_name(),
- input_file.name))
-
- # Everything after this is the same as the first tutorial.
- digestor = hashlib.new('md5')
- for buf in input_file.readall():
- digestor.update(buf)
-
- out = arvados.CollectionWriter()
- with out.open('md5sum.txt') as out_file:
- out_file.write("{} {}\n".format(digestor.hexdigest(), output_path))
-
- this_task.set_output(out.finish())
-
-# Done!
+++ /dev/null
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{% include 'notebox_begin_warning' %}
-This section assumes the legacy Jobs API is available. Some newer installations have already disabled the Jobs API in favor of the Containers API.
+++ /dev/null
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{% include 'notebox_end' %}
+++ /dev/null
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{
- "name": "Example using R in a custom Docker image",
- "components": {
- "Rscript": {
- "script": "run-command",
- "script_version": "master",
- "repository": "arvados",
- "script_parameters": {
- "command": [
- "Rscript",
- "$(glob $(file $(myscript))/*.r)",
- "$(glob $(dir $(mydata))/*.csv)"
- ],
- "myscript": {
- "required": true,
- "dataclass": "Collection"
- },
- "mydata": {
- "required": true,
- "dataclass": "Collection"
- }
- },
- "runtime_constraints": {
- "docker_image": "arvados/jobs-with-r"
- }
- }
- }
-}
h2(#cgroups). Configure Linux cgroups accounting
-Linux can report what compute resources are used by processes in a specific cgroup or Docker container. Crunch can use these reports to share that information with users running compute work. This can help pipeline authors debug and optimize their workflows.
+Linux can report what compute resources are used by processes in a specific cgroup or Docker container. Crunch can use these reports to share that information with users running compute work. This can help workflow authors debug and optimize their workflows.
To enable cgroups accounting, you must boot Linux with the command line parameters @cgroup_enable=memory swapaccount=1@.
+++ /dev/null
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{% include 'notebox_begin' %}
-The Arvados API and Git servers require Git 1.7.10 or later.
-{% include 'notebox_end' %}
+++ /dev/null
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-Now that all your configuration is in place, rerun the {{railspkg}} package configuration to install necessary Ruby Gems and other server dependencies. On Debian-based systems:
-
-<notextile><pre><code>~$ <span class="userinput">sudo dpkg-reconfigure {{railspkg}}</span>
-</code></pre></notextile>
-
-On Red Hat-based systems:
-
-<notextile><pre><code>~$ <span class="userinput">sudo yum reinstall {{railspkg}}</span>
-</code></pre></notextile>
-
-You only need to do this manual step once, after initial configuration. When you make configuration changes in the future, you just need to restart Nginx for them to take effect.
\ No newline at end of file
+++ /dev/null
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-Ruby 2.3 is recommended; Ruby 2.1 is also known to work.
-
-h4(#rvm). *Option 1: Install with RVM*
-
-<notextile>
-<pre><code><span class="userinput">sudo gpg --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB
-\curl -sSL https://get.rvm.io | sudo bash -s stable --ruby=2.3
-</span></code></pre></notextile>
-
-Either log out and log back in to activate RVM, or explicitly load it in all open shells like this:
-
-<notextile>
-<pre><code><span class="userinput">source /usr/local/rvm/scripts/rvm
-</span></code></pre></notextile>
-
-Once RVM is activated in your shell, install Bundler:
-
-<notextile>
-<pre><code>~$ <span class="userinput">gem install bundler</span>
-</code></pre></notextile>
-
-h4(#fromsource). *Option 2: Install from source*
-
-Install prerequisites for Debian 8:
-
-<notextile>
-<pre><code><span class="userinput">sudo apt-get install \
- bison build-essential gettext libcurl3 libcurl3-gnutls \
- libcurl4-openssl-dev libpcre3-dev libreadline-dev \
- libssl-dev libxslt1.1 zlib1g-dev
-</span></code></pre></notextile>
-
-Install prerequisites for CentOS 7:
-
-<notextile>
-<pre><code><span class="userinput">sudo yum install \
- libyaml-devel glibc-headers autoconf gcc-c++ glibc-devel \
- patch readline-devel zlib-devel libffi-devel openssl-devel \
- make automake libtool bison sqlite-devel tar
-</span></code></pre></notextile>
-
-Install prerequisites for Ubuntu 12.04 or 14.04:
-
-<notextile>
-<pre><code><span class="userinput">sudo apt-get install \
- gawk g++ gcc make libc6-dev libreadline6-dev zlib1g-dev libssl-dev \
- libyaml-dev libsqlite3-dev sqlite3 autoconf libgdbm-dev \
- libncurses5-dev automake libtool bison pkg-config libffi-dev curl
-</span></code></pre></notextile>
-
-Build and install Ruby:
-
-<notextile>
-<pre><code><span class="userinput">mkdir -p ~/src
-cd ~/src
-curl -f http://cache.ruby-lang.org/pub/ruby/2.3/ruby-2.3.3.tar.gz | tar xz
-cd ruby-2.3.3
-./configure --disable-install-rdoc
-make
-sudo make install
-
-sudo -i gem install bundler</span>
-</code></pre></notextile>
+++ /dev/null
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-On Debian-based systems:
-
-<notextile>
-<pre><code>~$ <span class="userinput">sudo apt-get install runit</span>
-</code></pre>
-</notextile>
-
-On Red Hat-based systems:
-
-<notextile>
-<pre><code>~$ <span class="userinput">sudo yum install runit</span>
-</code></pre>
-</notextile>
+++ /dev/null
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{% include 'notebox_begin_warning' %}
-Arvados pipeline templates are deprecated. The recommended way to develop new workflows for Arvados is using the "Common Workflow Language":{{site.baseurl}}/user/cwl/cwl-runner.html.
-{% include 'notebox_end' %}
+++ /dev/null
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{
- "name":"run-command example pipeline",
- "components":{
- "bwa-mem": {
- "script": "run-command",
- "script_version": "master",
- "repository": "arvados",
- "script_parameters": {
- "command": [
- "bwa",
- "mem",
- "-t",
- "$(node.cores)",
- "$(glob $(dir $(reference_collection))/*.fasta)",
- {
- "foreach": "read_pair",
- "command": "$(read_pair)"
- }
- ],
- "task.stdout": "$(basename $(glob $(dir $(sample))/*_1.fastq)).sam",
- "task.foreach": ["sample_subdir", "read_pair"],
- "reference_collection": {
- "required": true,
- "dataclass": "Collection"
- },
- "sample": {
- "required": true,
- "dataclass": "Collection"
- },
- "sample_subdir": "$(dir $(sample))",
- "read_pair": {
- "value": {
- "group": "sample_subdir",
- "regex": "(.*)_[12]\\.fastq(\\.gz)?$"
- }
- }
- }
- }
- }
-}
+++ /dev/null
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{
- "name":"run-command example pipeline",
- "components":{
- "bwa-mem": {
- "script": "run-command",
- "script_version": "master",
- "repository": "arvados",
- "script_parameters": {
- "command": [
- "$(dir $(bwa_collection))/bwa",
- "mem",
- "-t",
- "$(node.cores)",
- "-R",
- "@RG\\\tID:group_id\\\tPL:illumina\\\tSM:sample_id",
- "$(glob $(dir $(reference_collection))/*.fasta)",
- "$(glob $(dir $(sample))/*_1.fastq)",
- "$(glob $(dir $(sample))/*_2.fastq)"
- ],
- "reference_collection": {
- "required": true,
- "dataclass": "Collection"
- },
- "bwa_collection": {
- "required": true,
- "dataclass": "Collection",
- "default": "39c6f22d40001074f4200a72559ae7eb+5745"
- },
- "sample": {
- "required": true,
- "dataclass": "Collection"
- },
- "task.stdout": "$(basename $(glob $(dir $(sample))/*_1.fastq)).sam"
- }
- }
- }
-}
+++ /dev/null
-#!/usr/bin/env python
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-import arvados
-
-# Automatically parallelize this job by running one task per file.
-arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True,
- input_as_path=True)
-
-# Get the input file for the task
-input_file = arvados.get_task_param_mount('input')
-
-# Run the external 'md5sum' program on the input file
-stdoutdata, stderrdata = arvados.util.run_command(['md5sum', input_file])
-
-# Save the standard output (stdoutdata) to "md5sum.txt" in the output collection
-out = arvados.CollectionWriter()
-with out.open('md5sum.txt') as out_file:
- out_file.write(stdoutdata)
-arvados.current_task().set_output(out.finish())
+++ /dev/null
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{
- "name": "Tutorial align using bwa mem and SortSam",
- "components": {
- "bwa-mem": {
- "script": "run-command",
- "script_version": "master",
- "repository": "arvados",
- "script_parameters": {
- "command": [
- "$(dir $(bwa_collection))/bwa",
- "mem",
- "-t",
- "$(node.cores)",
- "-R",
- "@RG\\\tID:group_id\\\tPL:illumina\\\tSM:sample_id",
- "$(glob $(dir $(reference_collection))/*.fasta)",
- "$(glob $(dir $(sample))/*_1.fastq)",
- "$(glob $(dir $(sample))/*_2.fastq)"
- ],
- "reference_collection": {
- "required": true,
- "dataclass": "Collection"
- },
- "bwa_collection": {
- "required": true,
- "dataclass": "Collection",
- "default": "39c6f22d40001074f4200a72559ae7eb+5745"
- },
- "sample": {
- "required": true,
- "dataclass": "Collection"
- },
- "task.stdout": "$(basename $(glob $(dir $(sample))/*_1.fastq)).sam"
- },
- "runtime_constraints": {
- "docker_image": "bcosc/arv-base-java",
- "arvados_sdk_version": "master"
- }
- },
- "SortSam": {
- "script": "run-command",
- "script_version": "847459b3c257aba65df3e0cbf6777f7148542af2",
- "repository": "arvados",
- "script_parameters": {
- "command": [
- "java",
- "-Xmx4g",
- "-Djava.io.tmpdir=$(tmpdir)",
- "-jar",
- "$(dir $(picard))/SortSam.jar",
- "CREATE_INDEX=True",
- "SORT_ORDER=coordinate",
- "VALIDATION_STRINGENCY=LENIENT",
- "INPUT=$(glob $(dir $(input))/*.sam)",
- "OUTPUT=$(basename $(glob $(dir $(input))/*.sam)).sort.bam"
- ],
- "input": {
- "output_of": "bwa-mem"
- },
- "picard": {
- "required": true,
- "dataclass": "Collection",
- "default": "88447c464574ad7f79e551070043f9a9+1970"
- }
- },
- "runtime_constraints": {
- "docker_image": "bcosc/arv-base-java",
- "arvados_sdk_version": "master"
- }
- }
- }
-}
+++ /dev/null
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{% include 'notebox_begin' %}
-This tutorial assumes you are using the playground Arvados instance, @pirca@. If you are using a different instance, replace @pirca@ with your instance. See "Accessing Arvados Workbench":{{site.baseurl}}/user/getting_started/workbench.html for more details.
-{% include 'notebox_end' %}
+++ /dev/null
-#!/usr/bin/env python
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-import hashlib # Import the hashlib module to compute MD5.
-import os # Import the os module for basic path manipulation
-import arvados # Import the Arvados sdk module
-
-# Automatically parallelize this job by running one task per file.
-# This means that if the input consists of many files, each file will
-# be processed in parallel on different nodes enabling the job to
-# be completed quicker.
-arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True,
- input_as_path=True)
-
-# Get object representing the current task
-this_task = arvados.current_task()
-
-# Create the message digest object that will compute the MD5 hash
-digestor = hashlib.new('md5')
-
-# Get the input file for the task
-input_id, input_path = this_task['parameters']['input'].split('/', 1)
-
-# Open the input collection
-input_collection = arvados.CollectionReader(input_id)
-
-# Open the input file for reading
-with input_collection.open(input_path) as input_file:
- for buf in input_file.readall(): # Iterate the file's data blocks
- digestor.update(buf) # Update the MD5 hash object
-
-# Write a new collection as output
-out = arvados.CollectionWriter()
-
-# Write an output file with one line: the MD5 value and input path
-with out.open('md5sum.txt') as out_file:
- out_file.write("{} {}/{}\n".format(digestor.hexdigest(), input_id,
- os.path.normpath(input_path)))
-
-# Commit the output to Keep.
-output_locator = out.finish()
-
-# Use the resulting locator as the output for this task.
-this_task.set_output(output_locator)
-
-# Done!
+++ /dev/null
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-{
- "name":"My md5 pipeline",
- "components":{
- "do_hash":{
- "repository":"$USER/$USER",
- "script":"hash.py",
- "script_version":"master",
- "runtime_constraints":{
- "docker_image":"arvados/jobs"
- },
- "script_parameters":{
- "input":{
- "required": true,
- "dataclass": "Collection"
- }
- }
- }
- }
-}
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-This document is for accessing an Arvados VM using SSH keys in Unix environments (Linux, OS X, Cygwin). If you would like to access VM through your browser, please visit the "Accessing an Arvados VM with Webshell":vm-login-with-webshell.html page. If you are using a Windows environment, please visit the "Accessing an Arvados VM with SSH - Windows Environments":ssh-access-windows.html page.
+This document is for accessing an Arvados VM using SSH keys in Unix-like environments (Linux, macOS, Cygwin, Windows Subsystem for Linux). If you would like to access the VM through your browser, please visit the "Accessing an Arvados VM with Webshell":vm-login-with-webshell.html page. If you are using a Windows environment, please visit the "Accessing an Arvados VM with SSH - Windows Environments":ssh-access-windows.html page.
{% include 'ssh_intro' %}
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-This document is for accessing an Arvados VM using SSH keys in Windows environments using PuTTY. If you would like to use to access VM through your browser, please visit the "Accessing an Arvados VM with Webshell":vm-login-with-webshell.html page. If you are using a Unix environment (Linux, OS X, Cygwin, or Windows Subsystem for Linux), please visit the "Accessing an Arvados VM with SSH - Unix Environments":ssh-access-unix.html page.
+This document is for accessing an Arvados VM using SSH keys in Windows environments using PuTTY. If you would like to access the VM through your browser, please visit the "Accessing an Arvados VM with Webshell":vm-login-with-webshell.html page. If you are using a Unix-like environment (Linux, macOS, Cygwin, or Windows Subsystem for Linux), please visit the "Accessing an Arvados VM with SSH - Unix Environments":ssh-access-unix.html page.
{% include 'ssh_intro' %}
You will be asked to log in. Arvados uses only your name and email address for identification, and will never access any personal information. If you are accessing Arvados for the first time, the Workbench may indicate your account status is *New / inactive*. If this is the case, contact the administrator of the Arvados instance to request activation of your account.
-Once your account is active, logging in to the Workbench will present you with the Dashboard. This gives a summary of your projects and recent activity in the Arvados instance. You are now ready to "upload data":{{ site.baseurl }}/user/tutorials/tutorial-keep.html or "run your first pipeline.":{{ site.baseurl }}/user/tutorials/tutorial-workflow-workbench.html
+Once your account is active, logging in to the Workbench will present you with the Dashboard. This gives a summary of your projects and recent activity in the Arvados instance. You are now ready to "upload data":{{ site.baseurl }}/user/tutorials/tutorial-keep.html or "run your first workflow.":{{ site.baseurl }}/user/tutorials/tutorial-workflow-workbench.html
!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/workbench-dashboard.png!
206M / 206M 100.0% 2020-06-29 13:48:21 arvados.arv_put[769] INFO:
2020-06-29 13:48:21 arvados.arv_put[769] INFO: Collection saved as 'Docker image docker-example-r-base:latest sha256:edd10'
-x20vs-4zz18-0tayximqcyb6uf8
+zzzzz-4zz18-0tayximqcyb6uf8
$ <span class="userinput">arv-keepdocker images</span>
REPOSITORY TAG IMAGE ID COLLECTION CREATED
-docker-example-r-base latest sha256:edd10 x20vs-4zz18-0tayximqcyb6uf8 Mon Jun 29 17:46:16 2020
+docker-example-r-base latest sha256:edd10 zzzzz-4zz18-0tayximqcyb6uf8 Mon Jun 29 17:46:16 2020
</code></pre>
</notextile>
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-Arvados supports managing git repositories. You can acess these repositories using your Arvados credentials and share them with other Arvados users.
+Arvados supports managing git repositories. You can access these repositories using your Arvados credentials and share them with other Arvados users.
{% include 'tutorial_expectations' %}
---
layout: default
navsection: userguide
-title: "Access Keep from OS X Finder"
+title: "Access Keep from macOS Finder"
...
{% comment %}
Copyright (C) The Arvados Authors. All rights reserved.
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-OS X users can browse Keep read-only via WebDAV. Specific collections can also be accessed read-write via WebDAV.
+Users of macOS can browse Keep read-only via WebDAV. Specific collections can also be accessed read-write via WebDAV.
h3. Browsing Keep in Finder (read-only)
# a link to the multi-site search page on a "home" Workbench site.
#
# Example:
- # https://workbench.qr1hi.arvadosapi.com/collections/multisite
+ # https://workbench.zzzzz.arvadosapi.com/collections/multisite
MultiSiteSearch: ""
# Should workbench allow management of local git repositories? Set to false if
# a link to the multi-site search page on a "home" Workbench site.
#
# Example:
- # https://workbench.qr1hi.arvadosapi.com/collections/multisite
+ # https://workbench.zzzzz.arvadosapi.com/collections/multisite
MultiSiteSearch: ""
# Should workbench allow management of local git repositories? Set to false if