$ git clone https://github.com/arvados/arvados.git
$ cd arvados/tools/arvbox/bin
$ ./arvbox start localdemo
+$ ./arvbox adduser demouser demo@example.com
</pre>
+You can now log in as @demouser@ using the password you selected.
+
h2. Requirements
* Linux 3.x+ and Docker 1.9+
build <config> build arvbox Docker image
reboot <config> stop, build arvbox Docker image, run
rebuild <config> build arvbox Docker image, no layer cache
+checkpoint create database backup
+restore restore checkpoint
+hotreset reset database and restart API without restarting container
reset delete arvbox arvados data (be careful!)
destroy delete all arvbox code and data (be careful!)
log <service> tail log of specified service
sv <start|stop|restart> <service>
change state of service inside arvbox
clone <from> <to> clone dev arvbox
+adduser <username> <email>
+ add a user login
+removeuser <username>
+ remove user login
+listusers list user logins
</pre>
h2. Install root certificate
--- /dev/null
+---
+layout: default
+navsection: sdk
+navmenu: Python
+title: Arvados CWL Runner
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Arvados CWL runner is a Python utility that allows you to run Common Workflow Language (CWL) workflows on Arvados. It requires the Arvados Python SDK to be installed in order to access Arvados services.
+
+h2. Installation
+
+If you are logged in to a managed Arvados VM, the @arvados-cwl-runner@ utility should already be installed.
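+
+A quick way to confirm is to ask for its version, which prints the installed version and exits:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arvados-cwl-runner --version</span>
+</code></pre>
+</notextile>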
+
+To use the CWL runner elsewhere, you can install it from a distribution package or from PyPI.
+
+h2. Option 1: Install from distribution packages
+
+First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/packages.html
+
+{% assign arvados_component = 'python3-arvados-cwl-runner' %}
+
+{% include 'install_packages' %}
+
+h2. Option 2: Install with pip
+
+Run @pip install arvados-cwl-runner@ in an appropriate installation environment, such as a virtualenv.
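+
+For example, a minimal sketch using a fresh virtualenv (the directory name is illustrative):
+
+<notextile>
+<pre><code>~$ <span class="userinput">python3 -m venv cwl-runner-env</span>
+~$ <span class="userinput">cwl-runner-env/bin/pip install arvados-cwl-runner</span>
+</code></pre>
+</notextile>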
+
+Note:
+
+The SDK uses @pycurl@, which depends on the @libcurl@ C library. To build the module you may need to install additional development packages first. On Debian 9 this is:
+
+<pre>
+$ apt-get install git build-essential python-dev libcurl4-openssl-dev libssl1.0-dev
+</pre>
+
+For Python 3 this is:
+
+<pre>
+$ apt-get install git build-essential python3-dev libcurl4-openssl-dev libssl1.0-dev
+</pre>
+
+h3. Check Docker access
+
+In order to pull and upload Docker images, @arvados-cwl-runner@ requires access to Docker. You do not need Docker if the Docker images you intend to use are already available in Arvados.
+
+You can determine if you have access to Docker by running @docker version@:
+
+<notextile>
+<pre><code>~$ <span class="userinput">docker version</span>
+Client:
+ Version: 1.9.1
+ API version: 1.21
+ Go version: go1.4.2
+ Git commit: a34a1d5
+ Built: Fri Nov 20 12:59:02 UTC 2015
+ OS/Arch: linux/amd64
+
+Server:
+ Version: 1.9.1
+ API version: 1.21
+ Go version: go1.4.2
+ Git commit: a34a1d5
+ Built: Fri Nov 20 12:59:02 UTC 2015
+ OS/Arch: linux/amd64
+</code></pre>
+</notextile>
+
+If this returns an error, contact the sysadmin of your cluster for assistance.
+
+h3. Usage
+
+Please refer to the "Starting a Workflow at the Command Line":{{site.baseurl}}/user/cwl/cwl-runner.html tutorial for more information.
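+
+As a minimal example, you can submit a workflow to the cluster identified by @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ in your environment (the file names here are hypothetical placeholders):
+
+<notextile>
+<pre><code>~$ <span class="userinput">arvados-cwl-runner my-workflow.cwl my-inputs.yml</span>
+</code></pre>
+</notextile>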
func GetInstanceSet() (cloud.InstanceSet, cloud.ImageID, arvados.Cluster, error) {
cluster := arvados.Cluster{
InstanceTypes: arvados.InstanceTypeMap(map[string]arvados.InstanceType{
- "tiny": arvados.InstanceType{
+ "tiny": {
Name: "tiny",
ProviderType: "Standard_D1_v2",
VCPUs: 1,
DetailedError: autorest.DetailedError{
Response: &http.Response{
StatusCode: 429,
- Header: map[string][]string{"Retry-After": []string{"123"}},
+ Header: map[string][]string{"Retry-After": {"123"}},
},
},
ServiceError: &azure.ServiceError{},
var ok bool
if keyname, ok = instanceSet.keys[md5keyFingerprint]; !ok {
keyout, err := instanceSet.client.DescribeKeyPairs(&ec2.DescribeKeyPairsInput{
- Filters: []*ec2.Filter{&ec2.Filter{
+ Filters: []*ec2.Filter{{
Name: aws.String("fingerprint"),
Values: []*string{&md5keyFingerprint, &sha1keyFingerprint},
}},
KeyName: &keyname,
NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{
- &ec2.InstanceNetworkInterfaceSpecification{
+ {
AssociatePublicIpAddress: aws.Bool(false),
DeleteOnTermination: aws.Bool(true),
DeviceIndex: aws.Int64(0),
DisableApiTermination: aws.Bool(false),
InstanceInitiatedShutdownBehavior: aws.String("terminate"),
TagSpecifications: []*ec2.TagSpecification{
- &ec2.TagSpecification{
+ {
ResourceType: aws.String("instance"),
Tags: ec2tags,
}},
}
if instanceType.AddedScratch > 0 {
- rii.BlockDeviceMappings = []*ec2.BlockDeviceMapping{&ec2.BlockDeviceMapping{
+ rii.BlockDeviceMappings = []*ec2.BlockDeviceMapping{{
DeviceName: aws.String("/dev/xvdt"),
Ebs: &ec2.EbsBlockDevice{
DeleteOnTermination: aws.Bool(true),
}
func (e *ec2stub) RunInstances(input *ec2.RunInstancesInput) (*ec2.Reservation, error) {
- return &ec2.Reservation{Instances: []*ec2.Instance{&ec2.Instance{
+ return &ec2.Reservation{Instances: []*ec2.Instance{{
InstanceId: aws.String("i-123"),
Tags: input.TagSpecifications[0].Tags,
}}}, nil
func GetInstanceSet() (cloud.InstanceSet, cloud.ImageID, arvados.Cluster, error) {
cluster := arvados.Cluster{
InstanceTypes: arvados.InstanceTypeMap(map[string]arvados.InstanceType{
- "tiny": arvados.InstanceType{
+ "tiny": {
Name: "tiny",
ProviderType: "t2.micro",
VCPUs: 1,
Price: .02,
Preemptible: false,
},
- "tiny-with-extra-scratch": arvados.InstanceType{
+ "tiny-with-extra-scratch": {
Name: "tiny",
ProviderType: "t2.micro",
VCPUs: 1,
Preemptible: false,
AddedScratch: 20000000000,
},
- "tiny-preemptible": arvados.InstanceType{
+ "tiny-preemptible": {
Name: "tiny",
ProviderType: "t2.micro",
VCPUs: 1,
"api_clients/" + arvadostest.TrustedWorkbenchAPIClientUUID: nil,
"api_client_authorizations/" + arvadostest.AdminTokenUUID: nil,
"authorized_keys/" + arvadostest.AdminAuthorizedKeysUUID: nil,
- "collections/" + arvadostest.CollectionWithUniqueWordsUUID: map[string]bool{"href": true},
+ "collections/" + arvadostest.CollectionWithUniqueWordsUUID: {"href": true},
"containers/" + arvadostest.RunningContainerUUID: nil,
"container_requests/" + arvadostest.QueuedContainerRequestUUID: nil,
"groups/" + arvadostest.AProjectUUID: nil,
"logs/" + arvadostest.CrunchstatForRunningJobLogUUID: nil,
"nodes/" + arvadostest.IdleNodeUUID: nil,
"repositories/" + arvadostest.ArvadosRepoUUID: nil,
- "users/" + arvadostest.ActiveUserUUID: map[string]bool{"href": true},
+ "users/" + arvadostest.ActiveUserUUID: {"href": true},
"virtual_machines/" + arvadostest.TestVMUUID: nil,
"workflows/" + arvadostest.WorkflowWithDefinitionYAMLUUID: nil,
}
}),
})
if (!resp.ok) {
- document.getElementById('error').innerHTML = 'authentication failed (default accounts are user/user, admin/admin)'
+          document.getElementById('error').innerHTML = '<p>Authentication failed.</p><p>The "test login" users are defined in the Clusters.[ClusterID].Login.Test.Users section of config.yml.</p><p>If you are using arvbox, use "arvbox adduser" to add users.</p>'
return
}
var redir = document.getElementById('return_to').value
test.InstanceType(2): 0,
},
running: map[string]time.Time{
- test.ContainerUUID(2): time.Time{},
+ test.ContainerUUID(2): {},
},
}
queue := test.Queue{
s.cluster.Collections.BlobSigningKey = knownKey
s.cluster.SystemRootToken = arvadostest.SystemRootToken
s.cluster.RemoteClusters = map[string]arvados.RemoteCluster{
- s.remoteClusterID: arvados.RemoteCluster{
+ s.remoteClusterID: {
Host: strings.Split(s.remoteAPI.URL, "//")[1],
Proxy: true,
Scheme: "http",
cluster.TLS.Insecure = client.Insecure
cluster.PostgreSQL.Connection = testDBConfig()
cluster.PostgreSQL.ConnectionPool = 12
- cluster.Services.Websocket.InternalURLs = map[arvados.URL]arvados.ServiceInstance{arvados.URL{Host: ":"}: arvados.ServiceInstance{}}
+ cluster.Services.Websocket.InternalURLs = map[arvados.URL]arvados.ServiceInstance{{Host: ":"}: {}}
cluster.ManagementToken = arvadostest.ManagementToken
return cluster, nil
}
git -C "$COMPOSER_ROOT" pull
fi
if ! test -d "$WORKBENCH2_ROOT" ; then
- git clone https://github.com/arvados/arvados-workbench2.git "$WORKBENCH2_ROOT"
+ git clone https://git.arvados.org/arvados-workbench2.git "$WORKBENCH2_ROOT"
fi
if [[ "$CONFIG" = test ]] ; then
EOF
;;
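+    # The adduser/removeuser/listusers commands manage the Login.Test.Users
+    # section of the cluster config via edit_users.py; the controller is
+    # restarted afterwards so the config change takes effect.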
+ adduser)
+        docker exec -ti $ARVBOX_CONTAINER /usr/local/lib/arvbox/edit_users.py /var/lib/arvados/cluster_config.yml.override $(getclusterid) add "$@"
+ docker exec $ARVBOX_CONTAINER sv restart controller
+ ;;
+
+ removeuser)
+        docker exec -ti $ARVBOX_CONTAINER /usr/local/lib/arvbox/edit_users.py /var/lib/arvados/cluster_config.yml.override $(getclusterid) remove "$@"
+ docker exec $ARVBOX_CONTAINER sv restart controller
+ ;;
+
+ listusers)
+ exec docker exec -ti $ARVBOX_CONTAINER /usr/local/lib/arvbox/edit_users.py /var/lib/arvados/cluster_config.yml $(getclusterid) list
+ ;;
+
*)
echo "Arvados-in-a-box https://doc.arvados.org/install/arvbox.html"
echo
echo "sv <start|stop|restart> <service> "
echo " change state of service inside arvbox"
echo "clone <from> <to> clone dev arvbox"
+ echo "adduser <username> <email>"
+ echo " add a user login"
+ echo "removeuser <username>"
+ echo " remove user login"
+ echo "listusers list user logins"
;;
esac
keep-setup.sh common.sh createusers.sh \
logger runsu.sh waitforpostgres.sh \
yml_override.py api-setup.sh \
- go-setup.sh devenv.sh cluster-config.sh \
+ go-setup.sh devenv.sh cluster-config.sh edit_users.py \
/usr/local/lib/arvbox/
ADD runit /etc/runit
ARG workbench2_version=master
RUN cd /usr/src && \
- git clone --no-checkout https://github.com/arvados/arvados.git && \
+ git clone --no-checkout https://git.arvados.org/arvados.git && \
git -C arvados checkout ${arvados_version} && \
git -C arvados pull && \
git clone --no-checkout https://github.com/arvados/composer.git && \
git -C composer checkout ${composer_version} && \
git -C composer pull && \
- git clone --no-checkout https://github.com/arvados/arvados-workbench2.git workbench2 && \
+ git clone --no-checkout https://git.arvados.org/arvados-workbench2.git workbench2 && \
git -C workbench2 checkout ${workbench2_version} && \
git -C workbench2 pull && \
chown -R 1000:1000 /usr/src
InternalURLs:
"http://localhost:${services[keep-web]}/": {}
ExternalURL: "https://$localip:${services[keep-web-ssl]}/"
- InternalURLs:
- "http://localhost:${services[keep-web]}/": {}
Composer:
ExternalURL: "https://$localip:${services[composer]}"
Controller:
Login:
Test:
Enable: true
- Users:
- admin:
- Email: admin@example.com
- Password: admin
- user:
- Email: user@example.com
- Password: user
Users:
NewUsersAreActive: true
- AutoAdminUserWithEmail: admin@example.com
+ AutoAdminFirstUser: true
AutoSetupNewUsers: true
AutoSetupNewUsersWithVmUUID: $vm_uuid
AutoSetupNewUsersWithRepository: true
cp /var/lib/arvados/cluster_config.yml /etc/arvados/config.yml
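+
+# Restrict access to files containing passwords, keys, and tokens so they
+# are not readable or writable by group or other users.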
+chmod og-rw \
+ /var/lib/arvados/cluster_config.yml.override \
+ /var/lib/arvados/cluster_config.yml \
+ /etc/arvados/config.yml \
+ /var/lib/arvados/api_secret_token \
+ /var/lib/arvados/blob_signing_key \
+ /var/lib/arvados/management_token \
+ /var/lib/arvados/system_root_token \
+ /var/lib/arvados/api_database_pw \
+ /var/lib/arvados/workbench_secret_token \
+  /var/lib/arvados/superuser_token
+
mkdir -p /var/lib/arvados/run_tests
cat >/var/lib/arvados/run_tests/config.yml <<EOF
Clusters:
--- /dev/null
+#!/usr/bin/env python3
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
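+# Add, remove, or list user logins in the Login.Test.Users section of an
+# Arvados cluster config file. Uses ruamel.yaml round-trip mode so existing
+# formatting and comments in the file are preserved.
+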
+import ruamel.yaml
+import sys
+import getpass
+import os
+
+def print_help():
+ print("%s <path/to/config.yaml> <clusterid> add <username> <email> [pass]" % (sys.argv[0]))
+ print("%s <path/to/config.yaml> <clusterid> remove <username>" % (" " * len(sys.argv[0])))
+ print("%s <path/to/config.yaml> <clusterid> list" % (" " * len(sys.argv[0])))
+    sys.exit(1)
+
+if len(sys.argv) < 4:
+ print_help()
+
+fn = sys.argv[1]
+cl = sys.argv[2]
+op = sys.argv[3]
+
+if op == "remove" and len(sys.argv) < 5:
+ print_help()
+if op == "add" and len(sys.argv) < 6:
+ print_help()
+
+if op in ("add", "remove"):
+ user = sys.argv[4]
+
+if not os.path.exists(fn):
+ open(fn, "w").close()
+
+with open(fn, "r") as f:
+ conf = ruamel.yaml.round_trip_load(f)
+
+if not conf:
+ conf = {}
+
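+# Build out the nested Clusters.<clusterid>.Login.Test.Users structure,
+# creating any missing levels without disturbing existing entries.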
+conf["Clusters"] = conf.get("Clusters", {})
+conf["Clusters"][cl] = conf["Clusters"].get(cl, {})
+conf["Clusters"][cl]["Login"] = conf["Clusters"][cl].get("Login", {})
+conf["Clusters"][cl]["Login"]["Test"] = conf["Clusters"][cl]["Login"].get("Test", {})
+conf["Clusters"][cl]["Login"]["Test"]["Users"] = conf["Clusters"][cl]["Login"]["Test"].get("Users", {})
+
+users_obj = conf["Clusters"][cl]["Login"]["Test"]["Users"]
+
+if op == "add":
+ email = sys.argv[5]
+ if len(sys.argv) == 7:
+ p = sys.argv[6]
+ else:
+ p = getpass.getpass("Password for %s: " % user)
+
+ users_obj[user] = {
+ "Email": email,
+ "Password": p
+ }
+ print("Added %s" % user)
+elif op == "remove":
+    if user in users_obj:
+        del users_obj[user]
+        print("Removed %s" % user)
+    else:
+        print("User %s not found" % user)
+elif op == "list":
+ print(ruamel.yaml.round_trip_dump(users_obj))
+else:
+    print("Operations are 'add', 'remove' and 'list'")
+    sys.exit(1)
+
+with open(fn, "w") as f:
+ f.write(ruamel.yaml.round_trip_dump(conf))