sdk/cwl/tests/wf/feddemo
go.mod
go.sum
+sdk/python/tests/fed-migrate/CWLFile
+sdk/python/tests/fed-migrate/*.cwl
+sdk/python/tests/fed-migrate/*.cwlex
true
end
+ def self.copies_to_projects?
+ false
+ end
+
def work_unit(label=nil, child_objects=nil)
ContainerWorkUnit.new(self, label, self.uuid, child_objects=child_objects)
end
centos7/generated: common-generated-all
test -d centos7/generated || mkdir centos7/generated
- cp -rlt centos7/generated common-generated/*
+ cp -f -rlt centos7/generated common-generated/*
debian9/generated: common-generated-all
test -d debian9/generated || mkdir debian9/generated
- cp -rlt debian9/generated common-generated/*
+ cp -f -rlt debian9/generated common-generated/*
debian10/generated: common-generated-all
test -d debian10/generated || mkdir debian10/generated
- cp -rlt debian10/generated common-generated/*
+ cp -f -rlt debian10/generated common-generated/*
ubuntu1604/generated: common-generated-all
test -d ubuntu1604/generated || mkdir ubuntu1604/generated
- cp -rlt ubuntu1604/generated common-generated/*
+ cp -f -rlt ubuntu1604/generated common-generated/*
ubuntu1804/generated: common-generated-all
test -d ubuntu1804/generated || mkdir ubuntu1804/generated
- cp -rlt ubuntu1804/generated common-generated/*
+ cp -f -rlt ubuntu1804/generated common-generated/*
GOTARBALL=go1.13.4.linux-amd64.tar.gz
NODETARBALL=node-v6.11.2-linux-x64.tar.xz
centos7/generated: common-generated-all
test -d centos7/generated || mkdir centos7/generated
- cp -rlt centos7/generated common-generated/*
+ cp -f -rlt centos7/generated common-generated/*
debian9/generated: common-generated-all
test -d debian9/generated || mkdir debian9/generated
- cp -rlt debian9/generated common-generated/*
+ cp -f -rlt debian9/generated common-generated/*
debian10/generated: common-generated-all
test -d debian10/generated || mkdir debian10/generated
- cp -rlt debian10/generated common-generated/*
+ cp -f -rlt debian10/generated common-generated/*
ubuntu1604/generated: common-generated-all
test -d ubuntu1604/generated || mkdir ubuntu1604/generated
- cp -rlt ubuntu1604/generated common-generated/*
+ cp -f -rlt ubuntu1604/generated common-generated/*
ubuntu1804/generated: common-generated-all
test -d ubuntu1804/generated || mkdir ubuntu1804/generated
- cp -rlt ubuntu1804/generated common-generated/*
+ cp -f -rlt ubuntu1804/generated common-generated/*
RVMKEY1=mpapis.asc
RVMKEY2=pkuczynski.asc
python_sdk_version="${ARVADOS_BUILDING_VERSION}-${ARVADOS_BUILDING_ITERATION}"
fi
-cwl_runner_version_orig=$cwl_runner_version
+# What we use to tag the Docker image. For release candidate
+# packages, the OS package has a "~rc" suffix, but Python requires
+# just an "rc" suffix. Arvados-cwl-runner will be expecting the
+# Python-compatible version string when it tries to pull the Docker
+# image, but --build-arg is expecting the OS package version.
+cwl_runner_version_tag=$(echo -n $cwl_runner_version | sed s/~rc/rc/g)
if [[ "${cwl_runner_version}" != "${ARVADOS_BUILDING_VERSION}" ]]; then
cwl_runner_version="${cwl_runner_version}-1"
--build-arg python_sdk_version=${python_sdk_version} \
--build-arg cwl_runner_version=${cwl_runner_version} \
--build-arg repo_version=${REPO} \
- -t arvados/jobs:$cwl_runner_version_orig .
+ -t arvados/jobs:$cwl_runner_version_tag .
ECODE=$?
FORCE=-f
fi
-if ! [[ -z "$version_tag" ]]; then
- docker tag $FORCE arvados/jobs:$cwl_runner_version_orig arvados/jobs:"$version_tag"
- ECODE=$?
-
- if [[ "$ECODE" != "0" ]]; then
- EXITCODE=$(($EXITCODE + $ECODE))
- fi
-
- checkexit $ECODE "docker tag"
- title "docker tag complete (`timer`)"
-fi
-
title "uploading images"
timer_reset
## 20150526 nico -- *sometimes* dockerhub needs re-login
## even though credentials are already in .dockercfg
docker login -u arvados
- if ! [[ -z "$version_tag" ]]; then
- docker_push arvados/jobs:"$version_tag"
- else
- docker_push arvados/jobs:$cwl_runner_version_orig
- fi
+ docker_push arvados/jobs:$cwl_runner_version_tag
title "upload arvados images finished (`timer`)"
else
title "upload arvados images SKIPPED because no --upload option set (`timer`)"
fi
# Determine the package version from the generated sdist archive
- PYTHON_VERSION=${ARVADOS_BUILDING_VERSION:-$(awk '($1 == "Version:"){print $2}' *.egg-info/PKG-INFO)}
+ if [[ -n "$ARVADOS_BUILDING_VERSION" ]] ; then
+ UNFILTERED_PYTHON_VERSION=$(echo -n $ARVADOS_BUILDING_VERSION)
+ PYTHON_VERSION=$(echo -n $ARVADOS_BUILDING_VERSION | sed s/~rc/rc/g)
+ else
+ UNFILTERED_PYTHON_VERSION=$(awk '($1 == "Version:"){print $2}' *.egg-info/PKG-INFO)
+ PYTHON_VERSION=$(awk '($1 == "Version:"){print $2}' *.egg-info/PKG-INFO)
+ fi
# See if we actually need to build this package; does it exist already?
# We can't do this earlier than here, because we need PYTHON_VERSION...
# This isn't so bad; the sdist call above is pretty quick compared to
# the invocation of virtualenv and fpm, below.
- if ! test_package_presence "$PYTHON_PKG" $PYTHON_VERSION $PACKAGE_TYPE $ARVADOS_BUILDING_ITERATION; then
+ if ! test_package_presence "$PYTHON_PKG" $UNFILTERED_PYTHON_VERSION $PACKAGE_TYPE $ARVADOS_BUILDING_ITERATION; then
return 0
fi
COMMAND_ARR+=('--verbose' '--log' 'info')
fi
- COMMAND_ARR+=('-v' "$PYTHON_VERSION")
+ COMMAND_ARR+=('-v' $(echo "$PYTHON_VERSION" | sed s/rc/~rc/g))
COMMAND_ARR+=('--iteration' "$ARVADOS_BUILDING_ITERATION")
COMMAND_ARR+=('-n' "$PYTHON_PKG")
COMMAND_ARR+=('-C' "build")
source 'https://rubygems.org'
gem 'zenweb'
-gem 'liquid'
+gem 'liquid', '~>4.0.0'
gem 'RedCloth'
gem 'colorize'
DEPENDENCIES
RedCloth
colorize
- liquid
+ liquid (~> 4.0.0)
zenweb
BUNDLED WITH
+++ /dev/null
-#!/usr/bin/env ruby
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-require 'rubygems'
-
-require 'cgi'
-require 'fileutils'
-require 'json'
-require 'net/https'
-require 'socket'
-require 'syslog'
-
-class ComputeNodePing
- @@NODEDATA_DIR = "/var/tmp/arv-node-data"
- @@PUPPET_CONFFILE = "/etc/puppet/puppet.conf"
- @@HOST_STATEFILE = "/var/run/arvados-compute-ping-hoststate.json"
-
- def initialize(args, stdout, stderr)
- @stdout = stdout
- @stderr = stderr
- @stderr_loglevel = ((args.first == "quiet") ?
- Syslog::LOG_ERR : Syslog::LOG_DEBUG)
- @puppet_disabled = false
- @syslog = Syslog.open("arvados-compute-ping",
- Syslog::LOG_CONS | Syslog::LOG_PID,
- Syslog::LOG_DAEMON)
- @puppetless = File.exist?('/compute-node.puppetless')
-
- begin
- prepare_ping
- load_puppet_conf unless @puppetless
- begin
- @host_state = JSON.parse(IO.read(@@HOST_STATEFILE))
- rescue Errno::ENOENT
- @host_state = nil
- end
- rescue
- @syslog.close
- raise
- end
- end
-
- def send
- pong = send_raw_ping
-
- if pong["hostname"] and pong["domain"] and pong["first_ping_at"]
- if @host_state.nil?
- @host_state = {
- "fqdn" => (Socket.gethostbyname(Socket.gethostname).first rescue nil),
- "resumed_slurm" =>
- ["busy", "idle"].include?(pong["crunch_worker_state"]),
- }
- update_host_state({})
- end
-
- if hostname_changed?(pong)
- disable_puppet unless @puppetless
- rename_host(pong)
- update_host_state("fqdn" => fqdn_from_pong(pong),
- "resumed_slurm" => false)
- end
-
- unless @host_state["resumed_slurm"]
- run_puppet_agent unless @puppetless
- resume_slurm_node(pong["hostname"])
- update_host_state("resumed_slurm" => true)
- end
- end
-
- log("Last ping at #{pong['last_ping_at']}")
- end
-
- def cleanup
- enable_puppet if @puppet_disabled and not @puppetless
- @syslog.close
- end
-
- private
-
- def log(message, level=Syslog::LOG_INFO)
- @syslog.log(level, message)
- if level <= @stderr_loglevel
- @stderr.write("#{Time.now.strftime("%Y-%m-%d %H:%M:%S")} #{message}\n")
- end
- end
-
- def abort(message, code=1)
- log(message, Syslog::LOG_ERR)
- exit(code)
- end
-
- def run_and_check(cmd_a, accept_codes, io_opts, &block)
- result = IO.popen(cmd_a, "r", io_opts, &block)
- unless accept_codes.include?($?.exitstatus)
- abort("#{cmd_a} exited #{$?.exitstatus}")
- end
- result
- end
-
- DEFAULT_ACCEPT_CODES=[0]
- def check_output(cmd_a, accept_codes=DEFAULT_ACCEPT_CODES, io_opts={})
- # Run a command, check the exit status, and return its stdout as a string.
- run_and_check(cmd_a, accept_codes, io_opts) do |pipe|
- pipe.read
- end
- end
-
- def check_command(cmd_a, accept_codes=DEFAULT_ACCEPT_CODES, io_opts={})
- # Run a command, send stdout to syslog, and check the exit status.
- run_and_check(cmd_a, accept_codes, io_opts) do |pipe|
- pipe.each_line do |line|
- line.chomp!
- log("#{cmd_a.first}: #{line}") unless line.empty?
- end
- end
- end
-
- def replace_file(path, body)
- open(path, "w") { |f| f.write(body) }
- end
-
- def update_host_state(updates_h)
- @host_state.merge!(updates_h)
- replace_file(@@HOST_STATEFILE, @host_state.to_json)
- end
-
- def disable_puppet
- check_command(["puppet", "agent", "--disable"])
- @puppet_disabled = true
- loop do
- # Wait for any running puppet agents to finish.
- check_output(["pgrep", "puppet"], 0..1)
- break if $?.exitstatus == 1
- sleep(1)
- end
- end
-
- def enable_puppet
- check_command(["puppet", "agent", "--enable"])
- @puppet_disabled = false
- end
-
- def prepare_ping
- begin
- ping_uri_s = File.read(File.join(@@NODEDATA_DIR, "arv-ping-url"))
- rescue Errno::ENOENT
- abort("ping URL file is not present yet, skipping run")
- end
-
- ping_uri = URI.parse(ping_uri_s)
- payload_h = CGI.parse(ping_uri.query)
-
- # Collect all extra data to be sent
- dirname = File.join(@@NODEDATA_DIR, "meta-data")
- Dir.open(dirname).each do |basename|
- filename = File.join(dirname, basename)
- if File.file?(filename)
- payload_h[basename.gsub('-', '_')] = File.read(filename).chomp
- end
- end
-
- ping_uri.query = nil
- @ping_req = Net::HTTP::Post.new(ping_uri.to_s)
- @ping_req.set_form_data(payload_h)
- @ping_client = Net::HTTP.new(ping_uri.host, ping_uri.port)
- @ping_client.use_ssl = ping_uri.scheme == 'https'
- end
-
- def send_raw_ping
- begin
- response = @ping_client.start do |http|
- http.request(@ping_req)
- end
- if response.is_a? Net::HTTPSuccess
- pong = JSON.parse(response.body)
- else
- raise "response was a #{response}"
- end
- rescue JSON::ParserError => error
- abort("Error sending ping: could not parse JSON response: #{error}")
- rescue => error
- abort("Error sending ping: #{error}")
- end
-
- replace_file(File.join(@@NODEDATA_DIR, "pong.json"), response.body)
- if pong["errors"] then
- log(pong["errors"].join("; "), Syslog::LOG_ERR)
- if pong["errors"].grep(/Incorrect ping_secret/).any?
- system("halt")
- end
- exit(1)
- end
- pong
- end
-
- def load_puppet_conf
- # Parse Puppet configuration suitable for rewriting.
- # Save certnames in @puppet_certnames.
- # Save other functional configuration lines in @puppet_conf.
- @puppet_conf = []
- @puppet_certnames = []
- open(@@PUPPET_CONFFILE, "r") do |conffile|
- conffile.each_line do |line|
- key, value = line.strip.split(/\s*=\s*/, 2)
- if key == "certname"
- @puppet_certnames << value
- elsif not (key.nil? or key.empty? or key.start_with?("#"))
- @puppet_conf << line
- end
- end
- end
- end
-
- def fqdn_from_pong(pong)
- "#{pong['hostname']}.#{pong['domain']}"
- end
-
- def certname_from_pong(pong)
- fqdn = fqdn_from_pong(pong).sub(".", ".compute.")
- "#{pong['first_ping_at'].gsub(':', '-').downcase}.#{fqdn}"
- end
-
- def hostname_changed?(pong)
- if @puppetless
- (@host_state["fqdn"] != fqdn_from_pong(pong))
- else
- (@host_state["fqdn"] != fqdn_from_pong(pong)) or
- (@puppet_certnames != [certname_from_pong(pong)])
- end
- end
-
- def rename_host(pong)
- new_fqdn = fqdn_from_pong(pong)
- log("Renaming host from #{@host_state["fqdn"]} to #{new_fqdn}")
-
- replace_file("/etc/hostname", "#{new_fqdn.split('.', 2).first}\n")
- check_output(["hostname", new_fqdn])
-
- ip_address = check_output(["facter", "ipaddress"]).chomp
- esc_address = Regexp.escape(ip_address)
- check_command(["sed", "-i", "/etc/hosts",
- "-e", "s/^#{esc_address}.*$/#{ip_address}\t#{new_fqdn}/"])
-
- unless @puppetless
- new_conflines = @puppet_conf + ["\n[agent]\n",
- "certname=#{certname_from_pong(pong)}\n"]
- replace_file(@@PUPPET_CONFFILE, new_conflines.join(""))
- FileUtils.remove_entry_secure("/var/lib/puppet/ssl")
- end
- end
-
- def run_puppet_agent
- log("Running puppet agent")
- enable_puppet
- check_command(["puppet", "agent", "--onetime", "--no-daemonize",
- "--no-splay", "--detailed-exitcodes",
- "--ignorecache", "--no-usecacheonfailure"],
- [0, 2], {err: [:child, :out]})
- end
-
- def resume_slurm_node(node_name)
- current_state = check_output(["sinfo", "--noheader", "-o", "%t",
- "-n", node_name]).chomp
- if %w(down drain drng).include?(current_state)
- log("Resuming node in SLURM")
- check_command(["scontrol", "update", "NodeName=#{node_name}",
- "State=RESUME"], [0], {err: [:child, :out]})
- end
- end
-end
-
-LOCK_DIRNAME = "/var/lock/arvados-compute-node.lock"
-begin
- Dir.mkdir(LOCK_DIRNAME)
-rescue Errno::EEXIST
- exit(0)
-end
-
-ping_sender = nil
-begin
- ping_sender = ComputeNodePing.new(ARGV, $stdout, $stderr)
- ping_sender.send
-ensure
- Dir.rmdir(LOCK_DIRNAME)
- ping_sender.cleanup unless ping_sender.nil?
-end
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: CC-BY-SA-3.0
package main
#!/usr/bin/env cwl-runner
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: CC-BY-SA-3.0
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
cwlVersion: v1.0
class: CommandLineTool
inputs: []
+++ /dev/null
----
-layout: default
-navsection: installguide
-title: Sample compute node ping script
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-When a new elastic compute node is booted, it needs to contact Arvados to register itself. Here is an example ping script to run on boot.
-
-<notextile> {% code 'compute_ping_rb' as ruby %} </notextile>
---
layout: default
navsection: sdk
-navmenu: Python
+navmenu: Go
title: Examples
...
{% comment %}
You can save this source as a .go file and run it:
-<notextile>{% code 'example_sdk_go' as go %}</notextile>
+<notextile>{% code example_sdk_go as go %}</notextile>
A few more usage examples can be found in the "services/keepproxy":https://dev.arvados.org/projects/arvados/repository/revisions/master/show/services/keepproxy and "sdk/go/keepclient":https://dev.arvados.org/projects/arvados/repository/revisions/master/show/sdk/go/keepclient directories in the arvados source tree.
notextile. <pre>~/tutorials$ <code class="userinput">nano hello.cwl</code></pre>
-<notextile> {% code 'tutorial_hello_cwl' as yaml %} </notextile>
+<notextile> {% code tutorial_hello_cwl as yaml %} </notextile>
Next, add the file to the git repository. This tells @git@ that the file should be included on the next commit.
Liquid::Tag.instance_method(:initialize).bind(self).call(tag_name, markup, tokens)
if markup =~ Syntax
- @template_name = $1
+ @template_name_expr = $1
@language = $3
@attributes = {}
else
def render(context)
require 'coderay'
- partial = load_cached_partial(context)
+ partial = load_cached_partial(@template_name_expr, context)
html = ''
+ # be explicit about errors
+ context.exception_renderer = lambda do |exc|
+ exc.is_a?(Liquid::InternalError) ? "Liquid error: #{exc.cause.message}" : exc
+ end
+
context.stack do
html = CodeRay.scan(partial.root.nodelist.join, @language).div
end
partial = partial[1..-1]
end
+ # be explicit about errors
+ context.exception_renderer = lambda do |exc|
+ exc.is_a?(Liquid::InternalError) ? "Liquid error: #{exc.cause.message}" : exc
+ end
+
context.stack do
html = CodeRay.scan(partial, @language).div
end
//
// SPDX-License-Identifier: Apache-2.0
-// package cmd helps define reusable functions that can be exposed as
+// Package cmd helps define reusable functions that can be exposed as
// [subcommands of] command line programs.
package cmd
redir += '?'
}
const respj = await resp.json()
- document.location = redir + "api_token=" + respj.api_token
+ document.location = redir + "api_token=v2/" + respj.uuid + "/" + respj.api_token
}
</script>
</head>
func (cq *Queue) Get(uuid string) (arvados.Container, bool) {
cq.mtx.Lock()
defer cq.mtx.Unlock()
- if ctr, ok := cq.current[uuid]; !ok {
+ ctr, ok := cq.current[uuid]
+ if !ok {
return arvados.Container{}, false
- } else {
- return ctr.Container, true
}
+ return ctr.Container, true
}
// Entries returns all cache entries, keyed by container UUID.
"postgresql",
"postgresql-contrib",
"python3-dev",
+ "python3-venv",
+ "python3-virtualenv",
"r-base",
"r-cran-testthat",
"r-cran-devtools",
"r-cran-roxygen2",
"r-cran-xml",
"sudo",
- "python3-virtualenv",
- "python3-venv",
"wget",
"xvfb",
"zlib1g-dev",
env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
if env_version:
+ env_version = env_version.replace("~rc", "rc")
save_version(setup_dir, module, env_version)
else:
try:
export ARVADOS_API_HOST=localhost:8000
export ARVADOS_API_HOST_INSECURE=1
-export ARVADOS_API_TOKEN=\$(cat /var/lib/arvados/superuser_token)
+export ARVADOS_API_TOKEN=\$(cat /var/lib/arvados-arvbox/superuser_token)
if test -n "$build" ; then
/usr/src/arvados/build/build-dev-docker-jobs-image.sh
arguments:
- shellQuote: false
valueFrom: |
- docker cp cluster_config.yml.override $(inputs.container_name):/var/lib/arvados
+ docker cp cluster_config.yml.override $(inputs.container_name):/var/lib/arvados-arvbox
docker cp application.yml.override $(inputs.container_name):/usr/src/arvados/services/api/config
$(inputs.arvbox_bin.path) sv restart api
$(inputs.arvbox_bin.path) sv restart controller
$(inputs.arvbox_bin.path) restart $(inputs.arvbox_mode)
fi
$(inputs.arvbox_bin.path) status > status.txt
- $(inputs.arvbox_bin.path) cat /var/lib/arvados/superuser_token > superuser_token.txt
+ $(inputs.arvbox_bin.path) cat /var/lib/arvados-arvbox/superuser_token > superuser_token.txt
}
}
}
- if cc, ok := sc.Clusters[clusterID]; !ok {
+ cc, ok := sc.Clusters[clusterID]
+ if !ok {
return nil, fmt.Errorf("cluster %q is not configured", clusterID)
- } else {
- cc.ClusterID = clusterID
- return &cc, nil
}
+ cc.ClusterID = clusterID
+ return &cc, nil
}
type WebDAVCacheConfig struct {
// it fails, we'll try again next time.
close(done)
return nil
- } else {
- // In sync mode, we proceed regardless of
- // whether another flush is in progress: It
- // can't finish before we do, because we hold
- // fn's lock until we finish our own writes.
}
+ // In sync mode, we proceed regardless of
+ // whether another flush is in progress: It
+ // can't finish before we do, because we hold
+ // fn's lock until we finish our own writes.
seg.flushing = done
offsets = append(offsets, len(block))
if len(refs) == 1 {
a.Tokens = append(a.Tokens, string(token))
}
-// LoadTokensFromHTTPRequestBody() loads credentials from the request
+// LoadTokensFromHTTPRequestBody loads credentials from the request
// body.
//
// This is separate from LoadTokensFromHTTPRequest() because it's not
var LocatorPattern = regexp.MustCompile(
"^[0-9a-fA-F]{32}\\+[0-9]+(\\+[A-Z][A-Za-z0-9@_-]*)*$")
-// Stores a Block Locator Digest compactly, up to 128 bits.
-// Can be used as a map key.
+// BlockDigest stores a Block Locator Digest compactly, up to 128 bits. Can be
+// used as a map key.
type BlockDigest struct {
H uint64
L uint64
return fmt.Sprintf("%s+%d", w.Digest.String(), w.Size)
}
-// Will create a new BlockDigest unless an error is encountered.
+// FromString creates a new BlockDigest unless an error is encountered.
func FromString(s string) (dig BlockDigest, err error) {
if len(s) != 32 {
err = fmt.Errorf("Block digest should be exactly 32 characters but this one is %d: %s", len(s), s)
package blockdigest
-// Just used for testing when we need some distinct BlockDigests
+// MakeTestBlockDigest is used for testing with distinct BlockDigests
func MakeTestBlockDigest(i int) BlockDigest {
return BlockDigest{L: uint64(i)}
}
//
// If the block hash and data size are known, PutHR is more efficient.
func (kc *KeepClient) PutR(r io.Reader) (locator string, replicas int, err error) {
- if buffer, err := ioutil.ReadAll(r); err != nil {
+ buffer, err := ioutil.ReadAll(r)
+ if err != nil {
return "", 0, err
- } else {
- return kc.PutB(buffer)
}
+ return kc.PutB(buffer)
}
func (kc *KeepClient) getOrHead(method string, locator string, header http.Header) (io.ReadCloser, int64, string, http.Header, error) {
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
)
-// Function used to emit debug messages. The easiest way to enable
+// DebugPrintf emits debug messages. The easiest way to enable
// keepclient debug messages in your application is to assign
// log.Printf to DebugPrintf.
var DebugPrintf = func(string, ...interface{}) {}
env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
if env_version:
+ env_version = env_version.replace("~rc", "rc")
save_version(setup_dir, module, env_version)
else:
try:
- arguments:
- arvbox
- cat
- - /var/lib/arvados/superuser_token
+ - /var/lib/arvados-arvbox/superuser_token
class: CommandLineTool
cwlVersion: v1.0
id: '#superuser_tok'
ARVADOS_VIRTUAL_MACHINE_UUID=\$($(inputs.arvbox_bin.path)
- cat /var/lib/arvados/vm-uuid)
+ cat /var/lib/arvados-arvbox/vm-uuid)
ARVADOS_API_TOKEN=\$($(inputs.arvbox_bin.path) cat
- /var/lib/arvados/superuser_token)
+ /var/lib/arvados-arvbox/superuser_token)
while ! curl --fail --insecure --silent -H
"Authorization: Bearer $ARVADOS_API_TOKEN"
while ! curl --fail --insecure --silent https://$(inputs.host)/discovery/v1/apis/arvados/v1/rest >/dev/null ; do sleep 3 ; done
-ARVADOS_VIRTUAL_MACHINE_UUID=\$($(inputs.arvbox_bin.path) cat /var/lib/arvados/vm-uuid)
-ARVADOS_API_TOKEN=\$($(inputs.arvbox_bin.path) cat /var/lib/arvados/superuser_token)
+ARVADOS_VIRTUAL_MACHINE_UUID=\$($(inputs.arvbox_bin.path) cat /var/lib/arvados-arvbox/vm-uuid)
+ARVADOS_API_TOKEN=\$($(inputs.arvbox_bin.path) cat /var/lib/arvados-arvbox/superuser_token)
while ! curl --fail --insecure --silent -H "Authorization: Bearer $ARVADOS_API_TOKEN" https://$(inputs.host)/arvados/v1/virtual_machines/$ARVADOS_VIRTUAL_MACHINE_UUID >/dev/null ; do sleep 3 ; done
>>>
report = run_test(arvados_api_hosts, superuser_tokens=supertok, fed_migrate)
return supertok, report
-}
\ No newline at end of file
+}
envDef:
ARVBOX_CONTAINER: "$(inputs.container)"
InlineJavascriptRequirement: {}
-arguments: [arvbox, cat, /var/lib/arvados/superuser_token]
+arguments: [arvbox, cat, /var/lib/arvados-arvbox/superuser_token]
if params[pname].is_a?(Boolean)
return params[pname]
else
- logger.warn "Warning: received non-boolean parameter '#{pname}' on #{self.class.inspect}."
+ logger.warn "Warning: received non-boolean value #{params[pname].inspect} for boolean parameter #{pname} on #{self.class.inspect}, treating as false."
end
end
false
# Make sure params[key] is either true or false -- not a
# string, not nil, etc.
if not params.include?(key)
- params[key] = info[:default]
+ params[key] = info[:default] || false
elsif [false, 'false', '0', 0].include? params[key]
params[key] = false
elsif [true, 'true', '1', 1].include? params[key]
val.is_a?(String) && (attr == 'uuid' || attr == 'api_token')
}
end
- @objects = model_class.where('user_id=?', current_user.id)
+ if current_api_client_authorization.andand.api_token != Rails.configuration.SystemRootToken
+ @objects = model_class.where('user_id=?', current_user.id)
+ end
if wanted_scopes.compact.any?
# We can't filter on scopes effectively using AR/postgres.
# Instead we get the entire result set, do our own filtering on
def find_object_by_uuid
uuid_param = params[:uuid] || params[:id]
- if (uuid_param != current_api_client_authorization.andand.uuid and
- not Thread.current[:api_client].andand.is_trusted)
+ if (uuid_param != current_api_client_authorization.andand.uuid &&
+ !Thread.current[:api_client].andand.is_trusted)
return forbidden
end
@limit = 1
(super rescue {}).
merge({
include_trash: {
- type: 'boolean', required: false, description: "Include collections whose is_trashed attribute is true."
+ type: 'boolean', required: false, default: false, description: "Include collections whose is_trashed attribute is true.",
},
include_old_versions: {
- type: 'boolean', required: false, description: "Include past collection versions."
+ type: 'boolean', required: false, default: false, description: "Include past collection versions.",
},
})
end
(super rescue {}).
merge({
include_trash: {
- type: 'boolean', required: false, description: "Show collection even if its is_trashed attribute is true."
+ type: 'boolean', required: false, default: false, description: "Show collection even if its is_trashed attribute is true.",
},
include_old_versions: {
- type: 'boolean', required: false, description: "Include past collection versions."
+ type: 'boolean', required: false, default: true, description: "Include past collection versions.",
},
})
end
end
def find_objects_for_index
- opts = {}
- if params[:include_trash] || ['destroy', 'trash', 'untrash'].include?(action_name)
- opts.update({include_trash: true})
- end
- if params[:include_old_versions] || @include_old_versions
- opts.update({include_old_versions: true})
- end
+ opts = {
+ include_trash: params[:include_trash] || ['destroy', 'trash', 'untrash'].include?(action_name),
+ include_old_versions: params[:include_old_versions] || false,
+ }
@objects = Collection.readable_by(*@read_users, opts) if !opts.empty?
super
end
def find_object_by_uuid
- if params[:include_old_versions].nil?
- @include_old_versions = true
- else
- @include_old_versions = params[:include_old_versions]
- end
-
if loc = Keep::Locator.parse(params[:id])
loc.strip_hints!
- opts = {}
- opts.update({include_trash: true}) if params[:include_trash]
- opts.update({include_old_versions: @include_old_versions})
+ opts = {
+ include_trash: params[:include_trash],
+ include_old_versions: params[:include_old_versions],
+ }
# It matters which Collection object we pick because we use it to get signed_manifest_text,
# the value of which is affected by the value of trash_at.
(super rescue {}).
merge({
include_trash: {
- type: 'boolean', required: false, description: "Include container requests whose owner project is trashed."
+ type: 'boolean', required: false, default: false, description: "Include container requests whose owner project is trashed.",
},
})
end
(super rescue {}).
merge({
include_trash: {
- type: 'boolean', required: false, description: "Show container request even if its owner project is trashed."
+ type: 'boolean', required: false, default: false, description: "Show container request even if its owner project is trashed.",
},
})
end
(super rescue {}).
merge({
include_trash: {
- type: 'boolean', required: false, description: "Include items whose is_trashed attribute is true."
+ type: 'boolean', required: false, default: false, description: "Include items whose is_trashed attribute is true.",
},
})
end
(super rescue {}).
merge({
include_trash: {
- type: 'boolean', required: false, description: "Show group/project even if its is_trashed attribute is true."
+ type: 'boolean', required: false, default: false, description: "Show group/project even if its is_trashed attribute is true.",
},
})
end
params = _index_requires_parameters.
merge({
uuid: {
- type: 'string', required: false, default: nil
+ type: 'string', required: false, default: nil,
},
recursive: {
- type: 'boolean', required: false, description: 'Include contents from child groups recursively.'
+ type: 'boolean', required: false, default: false, description: 'Include contents from child groups recursively.',
},
include: {
- type: 'string', required: false, description: 'Include objects referred to by listed field in "included" (only owner_uuid).'
+ type: 'string', required: false, description: 'Include objects referred to by listed field in "included" (only owner_uuid).',
},
include_old_versions: {
- type: 'boolean', required: false, description: 'Include past collection versions.'
+ type: 'boolean', required: false, default: false, description: 'Include past collection versions.',
}
})
params.delete(:select)
type: 'boolean',
location: 'query',
default: false,
- description: 'defer permissions update'
+ description: 'defer permissions update',
}
}
)
type: 'boolean',
location: 'query',
default: false,
- description: 'defer permissions update'
+ description: 'defer permissions update',
}
}
)
(super rescue {}).
merge({
find_or_create: {
- type: 'boolean', required: false, default: false
+ type: 'boolean', required: false, default: false,
},
filters: {
- type: 'array', required: false
+ type: 'array', required: false,
},
minimum_script_version: {
- type: 'string', required: false
+ type: 'string', required: false,
},
exclude_script_versions: {
- type: 'array', required: false
+ type: 'array', required: false,
},
})
end
end
@response = @object.setup(repo_name: full_repo_name,
- vm_uuid: params[:vm_uuid])
-
- # setup succeeded. send email to user
- if params[:send_notification_email] && !Rails.configuration.Users.UserSetupMailText.empty?
- begin
- UserNotifier.account_is_setup(@object).deliver_now
- rescue => e
- logger.warn "Failed to send email to #{@object.email}: #{e}"
- end
- end
+ vm_uuid: params[:vm_uuid],
+ send_notification_email: params[:send_notification_email])
send_json kind: "arvados#HashList", items: @response.as_api_response(nil)
end
type: 'string', required: false,
},
redirect_to_new_user: {
- type: 'boolean', required: false,
+ type: 'boolean', required: false, default: false,
},
old_user_uuid: {
type: 'string', required: false,
def self._setup_requires_parameters
{
uuid: {
- type: 'string', required: false
+ type: 'string', required: false,
},
user: {
- type: 'object', required: false
+ type: 'object', required: false,
},
repo_name: {
- type: 'string', required: false
+ type: 'string', required: false,
},
vm_uuid: {
- type: 'string', required: false
+ type: 'string', required: false,
},
send_notification_email: {
- type: 'boolean', required: false, default: false
+ type: 'boolean', required: false, default: false,
},
}
end
+ # Declares the accepted parameters for user update; bypass_federation
+ # now has an explicit default of false.
def self._update_requires_parameters
super.merge({
bypass_federation: {
- type: 'boolean', required: false,
+ type: 'boolean', required: false, default: false,
},
})
end
end
# Sync user record.
- if remote_user_prefix == Rails.configuration.Login.LoginCluster
- # Remote cluster controls our user database, set is_active if
- # remote is active. If remote is not active, user will be
- # unsetup (see below).
- user.is_active = true if remote_user['is_active']
- user.is_admin = remote_user['is_admin']
- else
- if Rails.configuration.Users.NewUsersAreActive ||
- Rails.configuration.RemoteClusters[remote_user_prefix].andand["ActivateUsers"]
- # Default policy is to activate users
- user.is_active = true if remote_user['is_active']
+ act_as_system_user do
+ %w[first_name last_name email prefs].each do |attr|
+ user.send(attr+'=', remote_user[attr])
end
- end
- %w[first_name last_name email prefs].each do |attr|
- user.send(attr+'=', remote_user[attr])
- end
+ if remote_user['uuid'][-22..-1] == '-tpzed-000000000000000'
+ user.first_name = "root"
+ user.last_name = "from cluster #{remote_user_prefix}"
+ end
- if remote_user['uuid'][-22..-1] == '-tpzed-000000000000000'
- user.first_name = "root"
- user.last_name = "from cluster #{remote_user_prefix}"
- end
+ user.save!
- act_as_system_user do
- if (user.is_active && !remote_user['is_active']) or (user.is_invited && !remote_user['is_invited'])
- # Synchronize the user's "active/invited" state state. This
- # also saves the record.
+ if user.is_invited && !remote_user['is_invited']
+ # Remote user is not "invited" state, they should be unsetup, which
+ # also makes them inactive.
user.unsetup
else
- user.save!
+ if !user.is_invited && remote_user['is_invited'] and
+ (remote_user_prefix == Rails.configuration.Login.LoginCluster or
+ Rails.configuration.Users.AutoSetupNewUsers or
+ Rails.configuration.Users.NewUsersAreActive or
+ Rails.configuration.RemoteClusters[remote_user_prefix].andand["ActivateUsers"])
+ user.setup
+ end
+
+ if !user.is_active && remote_user['is_active'] && user.is_invited and
+ (remote_user_prefix == Rails.configuration.Login.LoginCluster or
+ Rails.configuration.Users.NewUsersAreActive or
+ Rails.configuration.RemoteClusters[remote_user_prefix].andand["ActivateUsers"])
+ user.update_attributes!(is_active: true)
+ elsif user.is_active && !remote_user['is_active']
+ user.update_attributes!(is_active: false)
+ end
+
+ if remote_user_prefix == Rails.configuration.Login.LoginCluster and
+ user.is_active and
+ user.is_admin != remote_user['is_admin']
+ # Remote cluster controls our user database, including the
+ # admin flag.
+ user.update_attributes!(is_admin: remote_user['is_admin'])
+ end
end
# We will accept this token (and avoid reloading the user
end
# create links
- def setup(repo_name: nil, vm_uuid: nil)
- repo_perm = create_user_repo_link repo_name
- vm_login_perm = create_vm_login_permission_link(vm_uuid, username) if vm_uuid
+ # Set up the user: create the "all users" read link (which makes the
+ # user "invited"), auto-create git repo and VM login links where
+ # configured, and send the welcome email — but only when this is the
+ # first-time setup (newly_invited) and email sending is enabled.
+ # Returns [repo_perm, vm_login_perm, group_perm, self] with nils removed.
+ def setup(repo_name: nil, vm_uuid: nil, send_notification_email: nil)
+ newly_invited = Link.where(tail_uuid: self.uuid,
+ head_uuid: all_users_group_uuid,
+ link_class: 'permission',
+ name: 'can_read').empty?
+
+ # Add can_read link from this user to "all users" which makes this
+ # user "invited"
group_perm = create_user_group_link
+ # Add git repo
+ if repo_name.nil? && username && Rails.configuration.Users.AutoSetupNewUsersWithRepository
+ repo_name = "#{username}/#{username}"
+ end
+
+ repo_perm = if repo_name
+ create_user_repo_link repo_name
+ end
+
+ # Add virtual machine
+ if vm_uuid.nil? and !Rails.configuration.Users.AutoSetupNewUsersWithVmUUID.empty?
+ vm_uuid = Rails.configuration.Users.AutoSetupNewUsersWithVmUUID
+ end
+
+ vm_login_perm = if vm_uuid && username
+ create_vm_login_permission_link(vm_uuid, username)
+ end
+
+ # Send welcome email
+ if send_notification_email.nil?
+ send_notification_email = Rails.configuration.Mail.SendUserSetupNotificationEmail
+ end
+
+ if newly_invited and send_notification_email and !Rails.configuration.Users.UserSetupMailText.empty?
+ begin
+ UserNotifier.account_is_setup(self).deliver_now
+ rescue => e
+ logger.warn "Failed to send email to #{self.email}: #{e}"
+ end
+ end
+
return [repo_perm, vm_login_perm, group_perm, self].compact
end
self.prefs = {}
# mark the user as inactive
+ self.is_admin = false # can't be admin and inactive
self.is_active = false
self.save!
end
# Automatically setup new user during creation
def auto_setup_new_user
setup
- if username
- create_vm_login_permission_link(Rails.configuration.Users.AutoSetupNewUsersWithVmUUID,
- username)
- repo_name = "#{username}/#{username}"
- if Rails.configuration.Users.AutoSetupNewUsersWithRepository and
- Repository.where(name: repo_name).first.nil?
- repo = Repository.create!(name: repo_name, owner_uuid: uuid)
- Link.create!(tail_uuid: uuid, head_uuid: repo.uuid,
- link_class: "permission", name: "can_manage")
- end
- end
end
# Send notification if the user saved profile for the first time
assert_equal 'barney', json_response['username']
end
- test 'get inactive user from Login cluster when AutoSetupNewUsers is set' do
- Rails.configuration.Login.LoginCluster = 'zbbbb'
- Rails.configuration.Users.AutoSetupNewUsers = true
- @stub_content = {
- uuid: 'zbbbb-tpzed-000000000000001',
- email: 'foo@example.com',
- username: 'barney',
- is_admin: false,
- is_active: false,
- is_invited: false,
- }
- get '/arvados/v1/users/current',
- params: {format: 'json'},
- headers: auth(remote: 'zbbbb')
- assert_response :success
- assert_equal 'zbbbb-tpzed-000000000000001', json_response['uuid']
- assert_equal false, json_response['is_admin']
- assert_equal false, json_response['is_active']
- assert_equal false, json_response['is_invited']
- assert_equal 'foo@example.com', json_response['email']
- assert_equal 'barney', json_response['username']
+ # Exhaustive matrix: check the synced is_admin/is_invited/is_active
+ # flags for every combination of ActivateUsers (trusted), Login
+ # cluster vs. peer cluster, and the remote user's admin/active/
+ # autosetup/invited state.
+ [true, false].each do |trusted|
+ [true, false].each do |logincluster|
+ [true, false].each do |admin|
+ [true, false].each do |active|
+ [true, false].each do |autosetup|
+ [true, false].each do |invited|
+ test "get invited=#{invited}, active=#{active}, admin=#{admin} user from #{if logincluster then "Login" else "peer" end} cluster when AutoSetupNewUsers=#{autosetup} ActivateUsers=#{trusted}" do
+ Rails.configuration.Login.LoginCluster = 'zbbbb' if logincluster
+ Rails.configuration.RemoteClusters['zbbbb'].ActivateUsers = trusted
+ Rails.configuration.Users.AutoSetupNewUsers = autosetup
+ @stub_content = {
+ uuid: 'zbbbb-tpzed-000000000000001',
+ email: 'foo@example.com',
+ username: 'barney',
+ is_admin: admin,
+ is_active: active,
+ is_invited: invited,
+ }
+ get '/arvados/v1/users/current',
+ params: {format: 'json'},
+ headers: auth(remote: 'zbbbb')
+ assert_response :success
+ assert_equal 'zbbbb-tpzed-000000000000001', json_response['uuid']
+ assert_equal (logincluster && admin && invited && active), json_response['is_admin']
+ assert_equal (invited and (logincluster || trusted || autosetup)), json_response['is_invited']
+ assert_equal (invited and (logincluster || trusted) and active), json_response['is_active']
+ assert_equal 'foo@example.com', json_response['email']
+ assert_equal 'barney', json_response['username']
+ end
+ end
+ end
+ end
+ end
+ end
end
- test 'get active user from Login cluster when AutoSetupNewUsers is set' do
+ test 'get active user from Login cluster when AutoSetupNewUsers is set' do
Rails.configuration.Login.LoginCluster = 'zbbbb'
Rails.configuration.Users.AutoSetupNewUsers = true
@stub_content = {
[false, active_notify_list, inactive_notify_list, "&4a_d9.@example.com", true, true, "ad9"],
[false, active_notify_list, inactive_notify_list, "&4a_d9.@example.com", false, false, "ad9"],
].each do |active, new_user_recipients, inactive_recipients, email, auto_setup_vm, auto_setup_repo, expect_username|
- test "create new user with auto setup #{active} #{email} #{auto_setup_vm} #{auto_setup_repo}" do
+ test "create new user with auto setup active=#{active} email=#{email} vm=#{auto_setup_vm} repo=#{auto_setup_repo}" do
set_user_from_auth :admin
Rails.configuration.Users.AutoSetupNewUsers = true
Rails.configuration.Users.AutoSetupNewUsersWithRepository),
named_repo.uuid, user.uuid, "permission", "can_manage")
end
+
# Check for VM login.
if (auto_vm_uuid = Rails.configuration.Users.AutoSetupNewUsersWithVmUUID) != ""
verify_link_exists(can_setup, auto_vm_uuid, user.uuid,
tail_uuid: tail_uuid,
link_class: link_class,
name: link_name)
- assert_equal link_exists, all_links.any?, "Link #{'not' if link_exists} found for #{link_name} #{link_class} #{property_value}"
+ assert_equal link_exists, all_links.any?, "Link#{' not' if link_exists} found for #{link_name} #{link_class} #{property_value}"
if link_exists && property_name && property_value
all_links.each do |link|
assert_equal true, all_links.first.properties[property_name].start_with?(property_value), 'Property not found in link'
env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
if env_version:
+ env_version = env_version.replace("~rc", "rc")
save_version(setup_dir, module, env_version)
else:
try:
env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
if env_version:
+ env_version = env_version.replace("~rc", "rc")
save_version(setup_dir, module, env_version)
else:
try:
func (c *cache) Update(client *arvados.Client, coll arvados.Collection, fs arvados.CollectionFileSystem) error {
c.setupOnce.Do(c.setup)
- if m, err := fs.MarshalManifest("."); err != nil || m == coll.ManifestText {
+ m, err := fs.MarshalManifest(".")
+ if err != nil || m == coll.ManifestText {
return err
- } else {
- coll.ManifestText = m
}
+ coll.ManifestText = m
var updated arvados.Collection
defer c.pdhs.Remove(coll.UUID)
- err := client.RequestAndDecode(&updated, "PATCH", "arvados/v1/collections/"+coll.UUID, nil, map[string]interface{}{
+ err = client.RequestAndDecode(&updated, "PATCH", "arvados/v1/collections/"+coll.UUID, nil, map[string]interface{}{
"collection": map[string]string{
"manifest_text": coll.ManifestText,
},
package main
import (
+ "crypto/hmac"
+ "crypto/sha256"
"encoding/xml"
"errors"
"fmt"
+ "hash"
"io"
"net/http"
+ "net/url"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
+ "time"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"github.com/AdRoll/goamz/s3"
)
-const s3MaxKeys = 1000
+const (
+ s3MaxKeys = 1000
+ s3SignAlgorithm = "AWS4-HMAC-SHA256"
+ s3MaxClockSkew = 5 * time.Minute
+)
+
+// hmacstring returns the HMAC-SHA256 of msg keyed with key; used as
+// one link in the AWS V4 signing-key derivation chain.
+func hmacstring(msg string, key []byte) []byte {
+ h := hmac.New(sha256.New, key)
+ io.WriteString(h, msg)
+ return h.Sum(nil)
+}
+
+// hashdigest writes payload to h and returns the resulting digest as
+// a lowercase hex string.
+func hashdigest(h hash.Hash, payload string) string {
+ io.WriteString(h, payload)
+ return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+// s3signatureKey derives the AWS V4 signing key for the given secret
+// key and request attributes, i.e.
+// HMAC(HMAC(HMAC(HMAC("AWS4"+key, datestamp), region), service), "aws4_request").
+func s3signatureKey(key, datestamp, regionName, serviceName string) []byte {
+ return hmacstring("aws4_request",
+ hmacstring(serviceName,
+ hmacstring(regionName,
+ hmacstring(datestamp, []byte("AWS4"+key)))))
+}
+
+// Canonical query string for S3 V4 signature: sorted keys, spaces
+// escaped as %20 instead of +, keyvalues joined with &.
+func s3querystring(u *url.URL) string {
+ keys := make([]string, 0, len(u.Query()))
+ values := make(map[string]string, len(u.Query()))
+ for k, vs := range u.Query() {
+ k = strings.Replace(url.QueryEscape(k), "+", "%20", -1)
+ keys = append(keys, k)
+ for _, v := range vs {
+ v = strings.Replace(url.QueryEscape(v), "+", "%20", -1)
+ if values[k] != "" {
+ values[k] += "&"
+ }
+ values[k] += k + "=" + v
+ }
+ }
+ sort.Strings(keys)
+ // Reuse keys[] to hold the encoded "k=v[&k=v...]" clauses in
+ // sorted-key order before joining.
+ for i, k := range keys {
+ keys[i] = values[k]
+ }
+ return strings.Join(keys, "&")
+}
+
+// s3stringToSign builds the canonical request for r and returns the
+// AWS V4 "string to sign". It rejects requests whose timestamp
+// (X-Amz-Date, falling back to Date) is missing, unparseable, or more
+// than s3MaxClockSkew away from the current time.
+func s3stringToSign(alg, scope, signedHeaders string, r *http.Request) (string, error) {
+ timefmt, timestr := "20060102T150405Z", r.Header.Get("X-Amz-Date")
+ if timestr == "" {
+ timefmt, timestr = time.RFC1123, r.Header.Get("Date")
+ }
+ t, err := time.Parse(timefmt, timestr)
+ if err != nil {
+ return "", fmt.Errorf("invalid timestamp %q: %s", timestr, err)
+ }
+ if skew := time.Now().Sub(t); skew < -s3MaxClockSkew || skew > s3MaxClockSkew {
+ return "", errors.New("exceeded max clock skew")
+ }
+
+ var canonicalHeaders string
+ for _, h := range strings.Split(signedHeaders, ";") {
+ if h == "host" {
+ canonicalHeaders += h + ":" + r.Host + "\n"
+ } else {
+ canonicalHeaders += h + ":" + r.Header.Get(h) + "\n"
+ }
+ }
+
+ canonicalRequest := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", r.Method, r.URL.EscapedPath(), s3querystring(r.URL), canonicalHeaders, signedHeaders, r.Header.Get("X-Amz-Content-Sha256"))
+ ctxlog.FromContext(r.Context()).Debugf("s3stringToSign: canonicalRequest %s", canonicalRequest)
+ // NOTE(review): the returned string always embeds the X-Amz-Date
+ // header value, even when the timestamp above was taken from the
+ // Date header — confirm this is intended for Date-only requests.
+ return fmt.Sprintf("%s\n%s\n%s\n%s", alg, r.Header.Get("X-Amz-Date"), scope, hashdigest(sha256.New(), canonicalRequest)), nil
+}
+
+// s3signature computes the expected V4 signature for stringToSign,
+// deriving the signing key from secretKey and the request scope.
+// (signedHeaders is accepted for interface symmetry but not used here.)
+func s3signature(secretKey, scope, signedHeaders, stringToSign string) (string, error) {
+ // scope is {datestamp}/{region}/{service}/aws4_request
+ drs := strings.Split(scope, "/")
+ if len(drs) != 4 {
+ return "", fmt.Errorf("invalid scope %q", scope)
+ }
+ key := s3signatureKey(secretKey, drs[0], drs[1], drs[2])
+ return hashdigest(hmac.New(sha256.New, key), stringToSign), nil
+}
+
+// checks3signature verifies the given S3 V4 signature and returns the
+// Arvados token that corresponds to the given accessKey. An error is
+// returned if accessKey is not a valid token UUID or the signature
+// does not match.
+func (h *handler) checks3signature(r *http.Request) (string, error) {
+ // Pull the Credential, SignedHeaders, and Signature elements out
+ // of the "AWS4-HMAC-SHA256 ..." Authorization header.
+ var key, scope, signedHeaders, signature string
+ authstring := strings.TrimPrefix(r.Header.Get("Authorization"), s3SignAlgorithm+" ")
+ for _, cmpt := range strings.Split(authstring, ",") {
+ cmpt = strings.TrimSpace(cmpt)
+ split := strings.SplitN(cmpt, "=", 2)
+ switch {
+ case len(split) != 2:
+ // Malformed element: ignore.
+ case split[0] == "Credential":
+ keyandscope := strings.SplitN(split[1], "/", 2)
+ if len(keyandscope) == 2 {
+ key, scope = keyandscope[0], keyandscope[1]
+ }
+ case split[0] == "SignedHeaders":
+ signedHeaders = split[1]
+ case split[0] == "Signature":
+ signature = split[1]
+ }
+ }
+
+ client := (&arvados.Client{
+ APIHost: h.Config.cluster.Services.Controller.ExternalURL.Host,
+ Insecure: h.Config.cluster.TLS.Insecure,
+ }).WithRequestID(r.Header.Get("X-Request-Id"))
+ var aca arvados.APIClientAuthorization
+ var secret string
+ var err error
+ if len(key) == 27 && key[5:12] == "-gj3su-" {
+ // Access key is the UUID of an Arvados token, secret
+ // key is the secret part.
+ ctx := arvados.ContextWithAuthorization(r.Context(), "Bearer "+h.Config.cluster.SystemRootToken)
+ err = client.RequestAndDecodeContext(ctx, &aca, "GET", "arvados/v1/api_client_authorizations/"+key, nil, nil)
+ secret = aca.APIToken
+ } else {
+ // Access key and secret key are both an entire
+ // Arvados token or OIDC access token.
+ ctx := arvados.ContextWithAuthorization(r.Context(), "Bearer "+key)
+ err = client.RequestAndDecodeContext(ctx, &aca, "GET", "arvados/v1/api_client_authorizations/current", nil, nil)
+ secret = key
+ }
+ if err != nil {
+ ctxlog.FromContext(r.Context()).WithError(err).WithField("UUID", key).Info("token lookup failed")
+ return "", errors.New("invalid access key")
+ }
+ stringToSign, err := s3stringToSign(s3SignAlgorithm, scope, signedHeaders, r)
+ if err != nil {
+ return "", err
+ }
+ expect, err := s3signature(secret, scope, signedHeaders, stringToSign)
+ if err != nil {
+ return "", err
+ } else if expect != signature {
+ return "", fmt.Errorf("signature does not match (scope %q signedHeaders %q stringToSign %q)", scope, signedHeaders, stringToSign)
+ }
+ return secret, nil
+}
// serveS3 handles r and returns true if r is a request from an S3
// client, otherwise it returns false.
if auth := r.Header.Get("Authorization"); strings.HasPrefix(auth, "AWS ") {
split := strings.SplitN(auth[4:], ":", 2)
if len(split) < 2 {
- w.WriteHeader(http.StatusUnauthorized)
+ http.Error(w, "malformed Authorization header", http.StatusUnauthorized)
return true
}
token = split[0]
- } else if strings.HasPrefix(auth, "AWS4-HMAC-SHA256 ") {
- for _, cmpt := range strings.Split(auth[17:], ",") {
- cmpt = strings.TrimSpace(cmpt)
- split := strings.SplitN(cmpt, "=", 2)
- if len(split) == 2 && split[0] == "Credential" {
- keyandscope := strings.Split(split[1], "/")
- if len(keyandscope[0]) > 0 {
- token = keyandscope[0]
- break
- }
- }
- }
- if token == "" {
- w.WriteHeader(http.StatusBadRequest)
- fmt.Println(w, "invalid V4 signature")
+ } else if strings.HasPrefix(auth, s3SignAlgorithm+" ") {
+ t, err := h.checks3signature(r)
+ if err != nil {
+ http.Error(w, "signature verification failed: "+err.Error(), http.StatusForbidden)
return true
}
+ token = t
} else {
return false
}
"io/ioutil"
"net/http"
"os"
+ "os/exec"
"strings"
"sync"
"time"
err = arv.RequestAndDecode(&coll, "GET", "arvados/v1/collections/"+coll.UUID, nil, nil)
c.Assert(err, check.IsNil)
- auth := aws.NewAuth(arvadostest.ActiveTokenV2, arvadostest.ActiveTokenV2, "", time.Now().Add(time.Hour))
+ auth := aws.NewAuth(arvadostest.ActiveTokenUUID, arvadostest.ActiveToken, "", time.Now().Add(time.Hour))
region := aws.Region{
Name: s.testServer.Addr,
S3Endpoint: "http://" + s.testServer.Addr,
}
client := s3.New(*auth, region)
+ client.Signature = aws.V4Signature
return s3stage{
arv: arv,
ac: ac,
}
}
+// TestS3Signatures checks that V2 and V4 request signatures are
+// accepted or rejected for various access-key/secret-key combinations.
+func (s *IntegrationSuite) TestS3Signatures(c *check.C) {
+ stage := s.s3setup(c)
+ defer stage.teardown(c)
+
+ bucket := stage.collbucket
+ for _, trial := range []struct {
+ success bool
+ signature int
+ accesskey string
+ secretkey string
+ }{
+ {true, aws.V2Signature, arvadostest.ActiveToken, "none"},
+ {false, aws.V2Signature, "none", "none"},
+ {false, aws.V2Signature, "none", arvadostest.ActiveToken},
+
+ {true, aws.V4Signature, arvadostest.ActiveTokenUUID, arvadostest.ActiveToken},
+ {true, aws.V4Signature, arvadostest.ActiveToken, arvadostest.ActiveToken},
+ {false, aws.V4Signature, arvadostest.ActiveToken, ""},
+ {false, aws.V4Signature, arvadostest.ActiveToken, "none"},
+ {false, aws.V4Signature, "none", arvadostest.ActiveToken},
+ {false, aws.V4Signature, "none", "none"},
+ } {
+ c.Logf("%#v", trial)
+ bucket.S3.Auth = *(aws.NewAuth(trial.accesskey, trial.secretkey, "", time.Now().Add(time.Hour)))
+ bucket.S3.Signature = trial.signature
+ _, err := bucket.GetReader("emptyfile")
+ if trial.success {
+ c.Check(err, check.IsNil)
+ } else {
+ c.Check(err, check.NotNil)
+ }
+ }
+}
+
func (s *IntegrationSuite) TestS3HeadBucket(c *check.C) {
stage := s.s3setup(c)
defer stage.teardown(c)
}
func (s *IntegrationSuite) testS3PutObjectFailure(c *check.C, bucket *s3.Bucket, prefix string) {
s.testServer.Config.cluster.Collections.S3FolderObjects = false
+
+ // Can't use V4 signature for these tests, because
+ // double-slash is incorrectly cleaned by the aws.V4Signature,
+ // resulting in a "bad signature" error. (Cleaning the path is
+ // appropriate for other services, but not in S3 where object
+ // names "foo//bar" and "foo/bar" are semantically different.)
+ bucket.S3.Auth = *(aws.NewAuth(arvadostest.ActiveToken, "none", "", time.Now().Add(time.Hour)))
+ bucket.S3.Signature = aws.V2Signature
+
var wg sync.WaitGroup
for _, trial := range []struct {
path string
c.Logf("=== trial %+v keys %q prefixes %q nextMarker %q", trial, gotKeys, gotPrefixes, resp.NextMarker)
}
}
+
+// TestS3cmd checks compatibility with the s3cmd command line tool, if
+// it's installed. As of Debian buster, s3cmd is only in backports, so
+// `arvados-server install` doesn't install it, and this test skips if
+// it's not installed.
+func (s *IntegrationSuite) TestS3cmd(c *check.C) {
+ if _, err := exec.LookPath("s3cmd"); err != nil {
+ c.Skip("s3cmd not found")
+ return
+ }
+
+ stage := s.s3setup(c)
+ defer stage.teardown(c)
+
+ cmd := exec.Command("s3cmd", "--no-ssl", "--host="+s.testServer.Addr, "--host-bucket="+s.testServer.Addr, "--access_key="+arvadostest.ActiveTokenUUID, "--secret_key="+arvadostest.ActiveToken, "ls", "s3://"+arvadostest.FooCollection)
+ buf, err := cmd.CombinedOutput()
+ c.Check(err, check.IsNil)
+ c.Check(string(buf), check.Matches, `.* 3 +s3://`+arvadostest.FooCollection+`/foo\n`)
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package main
+
+import (
+ "bytes"
+ "context"
+ "io/ioutil"
+
+ "git.arvados.org/arvados.git/sdk/go/arvadostest"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/defaults"
+ "github.com/aws/aws-sdk-go-v2/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds"
+ "github.com/aws/aws-sdk-go-v2/aws/endpoints"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ check "gopkg.in/check.v1"
+)
+
+// TestS3AWSSDK exercises list, put, and get against the test server
+// through the official AWS SDK for Go v2 client.
+func (s *IntegrationSuite) TestS3AWSSDK(c *check.C) {
+ stage := s.s3setup(c)
+ defer stage.teardown(c)
+
+ cfg := defaults.Config()
+ cfg.Credentials = aws.NewChainProvider([]aws.CredentialsProvider{
+ aws.NewStaticCredentialsProvider(arvadostest.ActiveTokenUUID, arvadostest.ActiveToken, ""),
+ ec2rolecreds.New(ec2metadata.New(cfg)),
+ })
+ // Route S3 requests to the local test server; everything else
+ // resolves normally.
+ cfg.EndpointResolver = aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
+ if service == "s3" {
+ return aws.Endpoint{
+ URL: "http://" + s.testServer.Addr,
+ SigningRegion: "custom-signing-region",
+ }, nil
+ } else {
+ return endpoints.NewDefaultResolver().ResolveEndpoint(service, region)
+ }
+ })
+ client := s3.New(cfg)
+ client.ForcePathStyle = true
+ listreq := client.ListObjectsV2Request(&s3.ListObjectsV2Input{
+ Bucket: aws.String(arvadostest.FooCollection),
+ MaxKeys: aws.Int64(100),
+ Prefix: aws.String(""),
+ ContinuationToken: nil,
+ })
+ resp, err := listreq.Send(context.Background())
+ c.Assert(err, check.IsNil)
+ c.Check(resp.Contents, check.HasLen, 1)
+ for _, key := range resp.Contents {
+ c.Check(*key.Key, check.Equals, "foo")
+ }
+
+ // Round-trip a ~100 MB object through PutObject/GetObject and
+ // verify the data comes back intact.
+ p := make([]byte, 100000000)
+ for i := range p {
+ p[i] = byte('a')
+ }
+ putreq := client.PutObjectRequest(&s3.PutObjectInput{
+ Body: bytes.NewReader(p),
+ Bucket: aws.String(stage.collbucket.Name),
+ ContentType: aws.String("application/octet-stream"),
+ Key: aws.String("aaaa"),
+ })
+ _, err = putreq.Send(context.Background())
+ c.Assert(err, check.IsNil)
+
+ getreq := client.GetObjectRequest(&s3.GetObjectInput{
+ Bucket: aws.String(stage.collbucket.Name),
+ Key: aws.String("aaaa"),
+ })
+ getresp, err := getreq.Send(context.Background())
+ c.Assert(err, check.IsNil)
+ getdata, err := ioutil.ReadAll(getresp.Body)
+ c.Assert(err, check.IsNil)
+ c.Check(bytes.Equal(getdata, p), check.Equals, true)
+}
cfg.cluster.Services.WebDAV.InternalURLs[arvados.URL{Host: listen}] = arvados.ServiceInstance{}
cfg.cluster.Services.WebDAVDownload.InternalURLs[arvados.URL{Host: listen}] = arvados.ServiceInstance{}
cfg.cluster.ManagementToken = arvadostest.ManagementToken
+ cfg.cluster.SystemRootToken = arvadostest.SystemRootToken
cfg.cluster.Users.AnonymousUserToken = arvadostest.AnonymousToken
s.testServer = &server{Config: cfg}
err = s.testServer.Start(ctxlog.TestLogger(c))
//
// SPDX-License-Identifier: AGPL-3.0
-// Arvados-ws exposes Arvados APIs (currently just one, the
+// Package ws exposes Arvados APIs (currently just one, the
// cache-invalidation event feed at "ws://.../websocket") to
// websocket clients.
//
cd /usr/src/arvados/services/api
export DISABLE_DATABASE_ENVIRONMENT_CHECK=1
export RAILS_ENV=development
-bundle exec rake db:drop
+flock $GEM_HOME/gems.lock bundle exec rake db:drop
rm $ARVADOS_CONTAINER_PATH/api_database_setup
rm $ARVADOS_CONTAINER_PATH/superuser_token
-rm $ARVADOS_CONTAINER_PATH/keep0-uuid
-rm $ARVADOS_CONTAINER_PATH/keep1-uuid
-rm $ARVADOS_CONTAINER_PATH/keepproxy-uuid
sv start api
sv start controller
sv start websockets
# put everything (/var/lib/arvados)
ENV ARVADOS_CONTAINER_PATH /var/lib/arvados-arvbox
+RUN /bin/ln -s /var/lib/arvados/bin/ruby /usr/local/bin/
+
# Start the supervisor.
ENV SVDIR /etc/service
STOPSIGNAL SIGINT
RUN echo "production" > $ARVADOS_CONTAINER_PATH/api_rails_env
RUN echo "production" > $ARVADOS_CONTAINER_PATH/workbench_rails_env
+# For the federation tests, the dev server watches a lot of files,
+# and we run three instances of the docker container. Bump up the
+# inotify limit from 8192, to avoid errors like
+# events.js:183
+# throw er; // Unhandled 'error' event
+# ^
+#
+# Error: watch /usr/src/workbench2/public ENOSPC
+# cf. https://github.com/facebook/jest/issues/3254
+RUN echo fs.inotify.max_user_watches=524288 >> /etc/sysctl.conf
+
RUN /usr/local/lib/arvbox/createusers.sh
RUN sudo -u arvbox /var/lib/arvbox/service/api/run-service --only-deps
fi
if ! test -f $ARVADOS_CONTAINER_PATH/api_database_setup ; then
- bundle exec rake db:setup
+ flock $GEM_HOME/gems.lock bundle exec rake db:setup
touch $ARVADOS_CONTAINER_PATH/api_database_setup
fi
if ! test -s $ARVADOS_CONTAINER_PATH/superuser_token ; then
- superuser_tok=$(bundle exec ./script/create_superuser_token.rb)
+ superuser_tok=$(flock $GEM_HOME/gems.lock bundle exec ./script/create_superuser_token.rb)
echo "$superuser_tok" > $ARVADOS_CONTAINER_PATH/superuser_token
fi
rm -rf tmp
mkdir -p tmp/cache
-bundle exec rake db:migrate
+flock $GEM_HOME/gems.lock bundle exec rake db:migrate
cd /usr/src/arvados
if [[ $UID = 0 ]] ; then
- /usr/local/lib/arvbox/runsu.sh flock /var/lib/gopath/gopath.lock go mod download
- if [[ ! -f /usr/local/bin/arvados-server ]]; then
- /usr/local/lib/arvbox/runsu.sh flock /var/lib/gopath/gopath.lock go install git.arvados.org/arvados.git/cmd/arvados-server
- fi
-else
- flock /var/lib/gopath/gopath.lock go mod download
- if [[ ! -f /usr/local/bin/arvados-server ]]; then
- flock /var/lib/gopath/gopath.lock go install git.arvados.org/arvados.git/cmd/arvados-server
- fi
+ RUNSU="/usr/local/lib/arvbox/runsu.sh"
+fi
+
+if [[ ! -f /usr/local/bin/arvados-server ]]; then
+ $RUNSU flock /var/lib/gopath/gopath.lock go mod download
+ $RUNSU flock /var/lib/gopath/gopath.lock go install git.arvados.org/arvados.git/cmd/arvados-server
+ $RUNSU flock /var/lib/gopath/gopath.lock install $GOPATH/bin/arvados-server /usr/local/bin
fi
-install $GOPATH/bin/arvados-server /usr/local/bin
mkdir -p $ARVADOS_CONTAINER_PATH/$1
-export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
-export ARVADOS_API_HOST_INSECURE=1
-export ARVADOS_API_TOKEN=$(cat $ARVADOS_CONTAINER_PATH/superuser_token)
-
-set +e
-read -rd $'\000' keepservice <<EOF
-{
- "service_host":"localhost",
- "service_port":$2,
- "service_ssl_flag":false,
- "service_type":"disk"
-}
-EOF
-set -e
-
-if test -s $ARVADOS_CONTAINER_PATH/$1-uuid ; then
- keep_uuid=$(cat $ARVADOS_CONTAINER_PATH/$1-uuid)
- arv keep_service update --uuid $keep_uuid --keep-service "$keepservice"
-else
- UUID=$(arv --format=uuid keep_service create --keep-service "$keepservice")
- echo $UUID > $ARVADOS_CONTAINER_PATH/$1-uuid
-fi
-
-management_token=$(cat $ARVADOS_CONTAINER_PATH/management_token)
-
-set +e
-sv hup /var/lib/arvbox/service/keepproxy
-
-cat >$ARVADOS_CONTAINER_PATH/$1.yml <<EOF
-Listen: "localhost:$2"
-BlobSigningKeyFile: $ARVADOS_CONTAINER_PATH/blob_signing_key
-SystemAuthTokenFile: $ARVADOS_CONTAINER_PATH/superuser_token
-ManagementToken: $management_token
-MaxBuffers: 20
-EOF
-
-exec /usr/local/bin/keepstore -config=$ARVADOS_CONTAINER_PATH/$1.yml
+exec /usr/local/bin/keepstore
chown arvbox /dev/stderr
+# Load our custom sysctl.conf entries
+/sbin/sysctl -p >/dev/null
+
if test -z "$1" ; then
exec chpst -u arvbox:arvbox:docker $0-service
else
fi
run_bundler --without=development
-bundle exec passenger-config build-native-support
-bundle exec passenger-config install-standalone-runtime
+flock $GEM_HOME/gems.lock bundle exec passenger-config build-native-support
+flock $GEM_HOME/gems.lock bundle exec passenger-config install-standalone-runtime
if test "$1" = "--only-deps" ; then
exit
exit
fi
+touch $ARVADOS_CONTAINER_PATH/api.ready
+
exec bundle exec passenger start --port=${services[api]}
. /usr/local/lib/arvbox/common.sh
+if test "$1" != "--only-deps" ; then
+ while [ ! -f $ARVADOS_CONTAINER_PATH/api.ready ]; do
+ sleep 1
+ done
+fi
cd /usr/src/arvados/doc
run_bundler --without=development
fi
cd /usr/src/arvados/doc
-bundle exec rake generate baseurl=http://$localip:${services[doc]} arvados_api_host=$localip:${services[controller-ssl]} arvados_workbench_host=http://$localip
+flock $GEM_HOME/gems.lock bundle exec rake generate baseurl=http://$localip:${services[doc]} arvados_api_host=$localip:${services[controller-ssl]} arvados_workbench_host=http://$localip
. /usr/local/lib/arvbox/common.sh
+if test "$1" != "--only-deps" ; then
+ while [ ! -f $ARVADOS_CONTAINER_PATH/api.ready ]; do
+ sleep 1
+ done
+fi
+
mkdir -p $ARVADOS_CONTAINER_PATH/git
export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
EOF
while true ; do
- bundle exec script/arvados-git-sync.rb $RAILS_ENV
+ flock $GEM_HOME/gems.lock bundle exec script/arvados-git-sync.rb $RAILS_ENV
sleep 120
done
exit
fi
-export ARVADOS_API_HOST=$localip:${services[controller-ssl]}
-export ARVADOS_API_HOST_INSECURE=1
-export ARVADOS_API_TOKEN=$(cat $ARVADOS_CONTAINER_PATH/superuser_token)
-
-set +e
-read -rd $'\000' keepservice <<EOF
-{
- "service_host":"$localip",
- "service_port":${services[keepproxy-ssl]},
- "service_ssl_flag":true,
- "service_type":"proxy"
-}
-EOF
-set -e
-
-if test -s $ARVADOS_CONTAINER_PATH/keepproxy-uuid ; then
- keep_uuid=$(cat $ARVADOS_CONTAINER_PATH/keepproxy-uuid)
- arv keep_service update --uuid $keep_uuid --keep-service "$keepservice"
-else
- UUID=$(arv --format=uuid keep_service create --keep-service "$keepservice")
- echo $UUID > $ARVADOS_CONTAINER_PATH/keepproxy-uuid
-fi
-
exec /usr/local/bin/keepproxy
. /usr/local/lib/arvbox/common.sh
+if test "$1" != "--only-deps" ; then
+ while [ ! -f $ARVADOS_CONTAINER_PATH/api.ready ]; do
+ sleep 1
+ done
+fi
+
cd /usr/src/arvados/services/login-sync
run_bundler --binstubs=$PWD/binstubs
ln -sf /usr/src/arvados/services/login-sync/binstubs/arvados-login-sync /usr/local/bin/arvados-login-sync
. /usr/local/lib/arvbox/common.sh
+if test "$1" != "--only-deps" ; then
+ while [ ! -f $ARVADOS_CONTAINER_PATH/api.ready ]; do
+ sleep 1
+ done
+fi
+
cd /usr/src/arvados/apps/workbench
if test -s $ARVADOS_CONTAINER_PATH/workbench_rails_env ; then
fi
run_bundler --without=development
-bundle exec passenger-config build-native-support
-bundle exec passenger-config install-standalone-runtime
+flock $GEM_HOME/gems.lock bundle exec passenger-config build-native-support
+flock $GEM_HOME/gems.lock bundle exec passenger-config install-standalone-runtime
mkdir -p /usr/src/arvados/apps/workbench/tmp
if test "$1" = "--only-deps" ; then
$RAILS_ENV:
keep_web_url: https://example.com/c=%{uuid_or_pdh}
EOF
- RAILS_GROUPS=assets bundle exec rake npm:install
+ RAILS_GROUPS=assets flock $GEM_HOME/gems.lock bundle exec rake npm:install
rm config/application.yml
exit
fi
(cd config && /usr/local/lib/arvbox/yml_override.py application.yml)
fi
-RAILS_GROUPS=assets bundle exec rake npm:install
-bundle exec rake assets:precompile
+RAILS_GROUPS=assets flock $GEM_HOME/gems.lock bundle exec rake npm:install
+flock $GEM_HOME/gems.lock bundle exec rake assets:precompile
. /usr/local/lib/arvbox/common.sh
+if test "$1" != "--only-deps" ; then
+ while [ ! -f $ARVADOS_CONTAINER_PATH/api.ready ]; do
+ sleep 1
+ done
+fi
+
cd /usr/src/workbench2
npm -d install --prefix /usr/local --global yarn@1.17.3
env_version = os.environ.get("ARVADOS_BUILDING_VERSION")
if env_version:
+ env_version = env_version.replace("~rc", "rc")
save_version(setup_dir, module, env_version)
else:
try:
if !u.IsActive || !u.IsAdmin {
return config, fmt.Errorf("current user (%s) is not an active admin user", u.UUID)
}
- config.SysUserUUID = u.UUID[:12] + "000000000000000"
+
+ var ac struct{ ClusterID string }
+ err = config.Client.RequestAndDecode(&ac, "GET", "arvados/v1/config", nil, nil)
+ if err != nil {
+ return config, fmt.Errorf("error getting the exported config: %s", err)
+ }
+ config.SysUserUUID = ac.ClusterID + "-tpzed-000000000000000"
// Set up remote groups' parent
if err = SetParentGroup(&config); err != nil {
users map[string]arvados.User
}
-func (s *TestSuite) SetUpSuite(c *C) {
- arvadostest.StartAPI()
-}
-
-func (s *TestSuite) TearDownSuite(c *C) {
- arvadostest.StopAPI()
-}
-
func (s *TestSuite) SetUpTest(c *C) {
ac := arvados.NewClientFromEnv()
u, err := ac.CurrentUser()