SHELL ["/bin/bash", "-c"]
# Install dependencies.
-RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python3 python3-pip libcurl4-gnutls-dev libgnutls28-dev curl git libattr1-dev libfuse-dev libpq-dev unzip tzdata python3-venv python3-dev libpam-dev equivs
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python3.8 python3-pip libcurl4-gnutls-dev libgnutls28-dev curl git libattr1-dev libfuse-dev libpq-dev unzip tzdata python3.8-venv python3.8-dev libpam-dev equivs
# Install virtualenv
RUN /usr/bin/pip3 install 'virtualenv<20'
declare -a PYTHON3_BACKPORTS
-PYTHON3_VERSION=$(python3 -c 'import sys; print("{v.major}.{v.minor}".format(v=sys.version_info))')
+PYTHON3_EXECUTABLE=python3
+PYTHON3_VERSION=$($PYTHON3_EXECUTABLE -c 'import sys; print("{v.major}.{v.minor}".format(v=sys.version_info))')
## These defaults are suitable for any Debian-based distribution.
# You can customize them as needed in distro sections below.
debian*)
FORMAT=deb
;;
+ ubuntu1804)
+ FORMAT=deb
+ PYTHON3_EXECUTABLE=python3.8
+ PYTHON3_VERSION=$($PYTHON3_EXECUTABLE -c 'import sys; print("{v.major}.{v.minor}".format(v=sys.version_info))')
+ PYTHON3_PACKAGE=python$PYTHON3_VERSION
+ PYTHON3_INSTALL_LIB=lib/python$PYTHON3_VERSION/dist-packages
+ ;;
ubuntu*)
FORMAT=deb
;;
ARVADOS_BUILDING_ITERATION=1
fi
- local python=python3
+ local python=$PYTHON3_EXECUTABLE
pip=pip3
PACKAGE_PREFIX=$PYTHON3_PKG_PREFIX
set -ex
cd "$WORKSPACE/services/api"
export RAILS_ENV=test
- if "$bundle" exec rails db:environment:set ; then
- "$bundle" exec rake db:drop
+ if bin/rails db:environment:set ; then
+ bin/rake db:drop
fi
- "$bundle" exec rake db:setup
- "$bundle" exec rake db:fixtures:load
+ bin/rake db:setup
+ bin/rake db:fixtures:load
) || return 1
}
<div class="releasenotes">
</notextile>
-h2(#main). development main (as of 2021-11-10)
+h2(#main). development main (as of 2022-02-10)
"previous: Upgrading from 2.3.0":#v2_3_0
+h3. Anonymous token changes
+
+The anonymous token configured in @Users.AnonymousUserToken@ must now be 32 characters or longer. This was already the suggestion in the documentation, now it is enforced. The @script/get_anonymous_user_token.rb@ script that was needed to register the anonymous user token in the database has been removed. Registration of the anonymous token is no longer necessary.
+
h3. Preemptible instance types are used automatically, if any are configured
The default behavior for selecting "preemptible instances":{{site.baseurl}}/admin/spot-instances.html has changed. If your configuration lists any instance types with @Preemptible: true@, all child (non-top-level) containers will automatically be scheduled on preemptible instances. To avoid using preemptible instances except when explicitly requested by clients, add @AlwaysUsePreemptibleInstances: false@ in the @Containers@ config section. (Previously, preemptible instance types were never used unless the configuration specified @UsePreemptibleInstances: true@. That flag has been removed.)
# "Introduction":#introduction
# "Configure DNS":#introduction
-# "Configure anonymous user token.yml":#update-config
+# "Configure anonymous user token":#update-config
# "Update nginx configuration":#update-nginx
# "Install keep-web package":#install-packages
# "Start the service":#start-service
h2(#update-config). Configure anonymous user token
-{% assign railscmd = "bin/bundle exec ./script/get_anonymous_user_token.rb --get" %}
-{% assign railsout = "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz" %}
If you intend to use Keep-web to serve public data to anonymous clients, configure it with an anonymous token.
-# First, generate a long random string and put it in the @config.yml@ file, in the @AnonymousUserToken@ field.
-# Then, use the following command on the <strong>API server</strong> to register the anonymous user token in the database. {% include 'install_rails_command' %}
+Generate a random string (>= 32 characters long) and put it in the @config.yml@ file, in the @AnonymousUserToken@ field.
<notextile>
<pre><code> Users:
- AnonymousUserToken: <span class="userinput">"{{railsout}}"</span>
+ AnonymousUserToken: <span class="userinput">"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"</span>
</code></pre>
</notextile>
if err != nil {
return err
}
- err = super.RunProgram(ctx, "services/api", runOptions{env: railsEnv}, "bundle", "exec", "./script/get_anonymous_user_token.rb")
- if err != nil {
- return err
- }
return nil
}
NewInactiveUserNotificationRecipients: {}
# Set AnonymousUserToken to enable anonymous user access. Populate this
- # field with a long random string. Then run "bundle exec
- # ./script/get_anonymous_user_token.rb" in the directory where your API
- # server is running to record the token in the database.
+ # field with a random string at least 50 characters long
+ # (the enforced minimum length is 32 characters).
AnonymousUserToken: ""
# If a new user has an alternate email address (local@domain)
for _, err = range []error{
ldr.checkClusterID(fmt.Sprintf("Clusters.%s", id), id, false),
ldr.checkClusterID(fmt.Sprintf("Clusters.%s.Login.LoginCluster", id), cc.Login.LoginCluster, true),
- ldr.checkToken(fmt.Sprintf("Clusters.%s.ManagementToken", id), cc.ManagementToken),
- ldr.checkToken(fmt.Sprintf("Clusters.%s.SystemRootToken", id), cc.SystemRootToken),
- ldr.checkToken(fmt.Sprintf("Clusters.%s.Collections.BlobSigningKey", id), cc.Collections.BlobSigningKey),
+ ldr.checkToken(fmt.Sprintf("Clusters.%s.ManagementToken", id), cc.ManagementToken, true),
+ ldr.checkToken(fmt.Sprintf("Clusters.%s.SystemRootToken", id), cc.SystemRootToken, true),
+ ldr.checkToken(fmt.Sprintf("Clusters.%s.Users.AnonymousUserToken", id), cc.Users.AnonymousUserToken, false),
+ ldr.checkToken(fmt.Sprintf("Clusters.%s.Collections.BlobSigningKey", id), cc.Collections.BlobSigningKey, true),
checkKeyConflict(fmt.Sprintf("Clusters.%s.PostgreSQL.Connection", id), cc.PostgreSQL.Connection),
ldr.checkEnum("Containers.LocalKeepLogsToContainerLog", cc.Containers.LocalKeepLogsToContainerLog, "none", "all", "errors"),
ldr.checkEmptyKeepstores(cc),
var acceptableTokenRe = regexp.MustCompile(`^[a-zA-Z0-9]+$`)
var acceptableTokenLength = 32
-func (ldr *Loader) checkToken(label, token string) error {
- if token == "" {
- if ldr.Logger != nil {
- ldr.Logger.Warnf("%s: secret token is not set (use %d+ random characters from a-z, A-Z, 0-9)", label, acceptableTokenLength)
+func (ldr *Loader) checkToken(label, token string, mandatory bool) error {
+ if len(token) == 0 {
+ if !mandatory {
+ // when a token is not mandatory, the acceptable length and content is only checked if its length is non-zero
+ return nil
+ } else {
+ if ldr.Logger != nil {
+ ldr.Logger.Warnf("%s: secret token is not set (use %d+ random characters from a-z, A-Z, 0-9)", label, acceptableTokenLength)
+ }
}
} else if !acceptableTokenRe.MatchString(token) {
return fmt.Errorf("%s: unacceptable characters in token (only a-z, A-Z, 0-9 are acceptable)", label)
sort.Strings(binds)
for _, bind := range binds {
- mnt, ok := runner.Container.Mounts[bind]
- if !ok {
+ mnt, notSecret := runner.Container.Mounts[bind]
+ if !notSecret {
mnt = runner.SecretMounts[bind]
}
if bind == "stdout" || bind == "stderr" {
}
} else {
src = fmt.Sprintf("%s/tmp%d", runner.ArvMountPoint, tmpcount)
- arvMountCmd = append(arvMountCmd, "--mount-tmp")
- arvMountCmd = append(arvMountCmd, fmt.Sprintf("tmp%d", tmpcount))
+ arvMountCmd = append(arvMountCmd, "--mount-tmp", fmt.Sprintf("tmp%d", tmpcount))
tmpcount++
}
if mnt.Writable {
if err != nil {
return nil, fmt.Errorf("writing temp file: %v", err)
}
- if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
+ if strings.HasPrefix(bind, runner.Container.OutputPath+"/") && (notSecret || runner.Container.Mounts[runner.Container.OutputPath].Kind != "collection") {
+ // In most cases, if the container
+ // specifies a literal file inside the
+ // output path, we copy it into the
+ // output directory (either a mounted
+ // collection or a staging area on the
+ // host fs). If it's a secret, it will
+ // be skipped when copying output from
+ // staging to Keep later.
copyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
} else {
+ // If a secret is outside OutputPath,
+ // we bind mount the secret file
+ // directly just like other mounts. We
+ // also use this strategy when a
+ // secret is inside OutputPath but
+ // OutputPath is a live collection, to
+ // avoid writing the secret to
+ // Keep. Attempting to remove a
+ // bind-mounted secret file from
+ // inside the container will return a
+ // "Device or resource busy" error
+ // that might not be handled well by
+ // the container, which is why we
+ // don't use this strategy when
+ // OutputPath is a staging directory.
bindmounts[bind] = bindmount{HostPath: tmpfn, ReadOnly: true}
}
runner *ContainerRunner
executor *stubExecutor
keepmount string
+ keepmountTmp []string
testDispatcherKeepClient KeepTestClient
testContainerKeepClient KeepTestClient
}
}
s.runner.RunArvMount = func(cmd []string, tok string) (*exec.Cmd, error) {
s.runner.ArvMountPoint = s.keepmount
+ for i, opt := range cmd {
+ if opt == "--mount-tmp" {
+ err := os.Mkdir(s.keepmount+"/"+cmd[i+1], 0700)
+ if err != nil {
+ return nil, err
+ }
+ s.keepmountTmp = append(s.keepmountTmp, cmd[i+1])
+ }
+ }
return nil, nil
}
s.keepmount = c.MkDir()
err = os.Mkdir(s.keepmount+"/by_id", 0755)
+ s.keepmountTmp = nil
c.Assert(err, IsNil)
err = os.Mkdir(s.keepmount+"/by_id/"+arvadostest.DockerImage112PDH, 0755)
c.Assert(err, IsNil)
s.runner.statInterval = 100 * time.Millisecond
s.runner.containerWatchdogInterval = time.Second
- am := &ArvMountCmdLine{}
- s.runner.RunArvMount = am.ArvMountTest
realTemp := c.MkDir()
tempcount := 0
c.Check(s.api.CalledWith("container.state", "Complete"), NotNil)
c.Check(s.runner.ContainerArvClient.(*ArvTestClient).CalledWith("collection.manifest_text", ". 34819d7beeabb9260a5c854bc85b3e44+10 0:10:secret.conf\n"), IsNil)
c.Check(s.runner.ContainerArvClient.(*ArvTestClient).CalledWith("collection.manifest_text", ""), NotNil)
+
+ // under secret mounts, output dir is a collection, not captured in output
+ helperRecord = `{
+ "command": ["true"],
+ "container_image": "` + arvadostest.DockerImage112PDH + `",
+ "cwd": "/bin",
+ "mounts": {
+ "/tmp": {"kind": "collection", "writable": true}
+ },
+ "secret_mounts": {
+ "/tmp/secret.conf": {"kind": "text", "content": "mypassword"}
+ },
+ "output_path": "/tmp",
+ "priority": 1,
+ "runtime_constraints": {},
+ "state": "Locked"
+ }`
+
+ s.SetUpTest(c)
+ _, _, realtemp := s.fullRunHelper(c, helperRecord, nil, 0, func() {
+ // secret.conf should be provisioned as a separate
+ // bind mount, i.e., it should not appear in the
+ // (fake) fuse filesystem as viewed from the host.
+ content, err := ioutil.ReadFile(s.runner.HostOutputDir + "/secret.conf")
+ if !c.Check(errors.Is(err, os.ErrNotExist), Equals, true) {
+ c.Logf("secret.conf: content %q, err %#v", content, err)
+ }
+ err = ioutil.WriteFile(s.runner.HostOutputDir+"/.arvados#collection", []byte(`{"manifest_text":". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\n"}`), 0700)
+ c.Check(err, IsNil)
+ })
+
+ content, err := ioutil.ReadFile(realtemp + "/text1/mountdata.text")
+ c.Check(err, IsNil)
+ c.Check(string(content), Equals, "mypassword")
+ c.Check(s.executor.created.BindMounts["/tmp/secret.conf"], DeepEquals, bindmount{realtemp + "/text1/mountdata.text", true})
+ c.Check(s.api.CalledWith("container.exit_code", 0), NotNil)
+ c.Check(s.api.CalledWith("container.state", "Complete"), NotNil)
+ c.Check(s.runner.ContainerArvClient.(*ArvTestClient).CalledWith("collection.manifest_text", ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\n"), NotNil)
}
type FakeProcess struct {
} else {
err = inst.runBash(`
PJS=phantomjs-`+pjsversion+`-linux-x86_64
-wget --progress=dot:giga -O- https://bitbucket.org/ariya/phantomjs/downloads/$PJS.tar.bz2 | tar -C /var/lib/arvados -xjf -
+wget --progress=dot:giga -O- https://cache.arvados.org/$PJS.tar.bz2 | tar -C /var/lib/arvados -xjf -
ln -sf /var/lib/arvados/$PJS/bin/phantomjs /usr/local/bin/
`, stdout, stderr)
if err != nil {
logger.error(str(u"Caught signal {}, exiting.").format(sigcode))
sys.exit(-sigcode)
-def main(args, stdout, stderr, api_client=None, keep_client=None,
+def main(args=sys.argv[1:],
+ stdout=sys.stdout,
+ stderr=sys.stderr,
+ api_client=None,
+ keep_client=None,
install_sig_handlers=True):
parser = arg_parser()
metadata = scanobj
sc_result = scandeps(uri, scanobj,
- loadref_fields,
- set(("$include", "$schemas", "location")),
- loadref, urljoin=document_loader.fetcher.urljoin)
+ loadref_fields,
+ set(("$include", "$schemas", "location")),
+ loadref, urljoin=document_loader.fetcher.urljoin,
+ nestdirs=False)
sc = []
uuids = {}
license='Apache 2.0',
packages=find_packages(),
package_data={'arvados_cwl': ['arv-cwl-schema-v1.0.yml', 'arv-cwl-schema-v1.1.yml', 'arv-cwl-schema-v1.2.yml']},
- scripts=[
- 'bin/cwl-runner',
- 'bin/arvados-cwl-runner',
- ],
+ entry_points={"console_scripts": ["cwl-runner=arvados_cwl:main", "arvados-cwl-runner=arvados_cwl:main"]},
# Note that arvados/build/run-build-packages.sh looks at this
# file to determine what version of cwltool and schema-salad to
# build.
install_requires=[
- 'cwltool==3.1.20211107152837',
+ 'cwltool==3.1.20220210171524',
'schema-salad==8.2.20211116214159',
'arvados-python-client{}'.format(pysdk_dep),
'setuptools',
"outstr": "foo woble bar"
tool: 17879-ignore-sbg-fields.cwl
doc: "Test issue 17879 - ignores sbg fields"
+
+- job: chipseq/chip-seq-single.json
+ output: {}
+ tool: chipseq/cwl-packed.json
+ doc: "Test issue 18723 - correctly upload two directories with the same basename"
RUN apt-get update -q && apt-get install -qy --no-install-recommends \
git ${pythoncmd}-pip ${pythoncmd}-virtualenv ${pythoncmd}-dev libcurl4-gnutls-dev \
- libgnutls28-dev nodejs ${pythoncmd}-pyasn1-modules build-essential
-
-RUN $pipcmd install -U setuptools six requests
+ libgnutls28-dev nodejs ${pythoncmd}-pyasn1-modules build-essential ${pythoncmd}-setuptools
ARG sdk
ARG runner
RUN cd /tmp/arvados-python-client-* && $pipcmd install .
RUN if test -d /tmp/schema-salad-* ; then cd /tmp/schema-salad-* && $pipcmd install . ; fi
-RUN if test -d /tmp/cwltool-* ; then cd /tmp/cwltool-* && $pipcmd install networkx==2.2 && $pipcmd install . ; fi
+RUN if test -d /tmp/cwltool-* ; then cd /tmp/cwltool-* && $pipcmd install . ; fi
RUN cd /tmp/arvados-cwl-runner-* && $pipcmd install .
# Install dependencies and set up system.
return newFileCall(request);
}
+ /**
+  * Issue an HTTP DELETE request for a single file within a collection,
+  * using the same URL builder as the other file operations.
+  *
+  * @param collectionUuid UUID of the collection containing the file
+  * @param filePathName   path of the file inside the collection
+  * @return the response body returned by newCall for the DELETE request
+  */
+ public String delete(String collectionUuid, String filePathName) {
+ Request request = getRequestBuilder()
+ .url(getUrlBuilder(collectionUuid, filePathName).build())
+ .delete()
+ .build();
+
+ return newCall(request);
+ }
+
private HttpUrl.Builder getUrlBuilder(String collectionUuid, String filePathName) {
return new HttpUrl.Builder()
.scheme(config.getApiProtocol())
import java.util.List;
@JsonInclude(JsonInclude.Include.NON_NULL)
-@JsonPropertyOrder({ "limit", "offset", "filters", "order", "select", "distinct", "count" })
+@JsonPropertyOrder({ "limit", "offset", "filters", "order", "select", "distinct", "count", "exclude_home_project" })
public class ListArgument extends Argument {
@JsonProperty("limit")
@JsonProperty("count")
private Count count;
+ @JsonProperty("exclude_home_project")
+ private Boolean excludeHomeProject;
- ListArgument(Integer limit, Integer offset, List<Filter> filters, List<String> order, List<String> select, Boolean distinct, Count count) {
+ ListArgument(Integer limit, Integer offset, List<Filter> filters, List<String> order, List<String> select, Boolean distinct, Count count, Boolean excludeHomeProject) {
this.limit = limit;
this.offset = offset;
this.filters = filters;
this.select = select;
this.distinct = distinct;
this.count = count;
+ this.excludeHomeProject = excludeHomeProject;
}
public static ListArgumentBuilder builder() {
private List<String> select;
private Boolean distinct;
private Count count;
+ private Boolean excludeHomeProject;
ListArgumentBuilder() {
}
return this;
}
+ public ListArgument.ListArgumentBuilder excludeHomeProject(Boolean excludeHomeProject) {
+ this.excludeHomeProject = excludeHomeProject;
+ return this;
+ }
+
public ListArgument build() {
- return new ListArgument(limit, offset, filters, order, select, distinct, count);
+ return new ListArgument(limit, offset, filters, order, select, distinct, count, excludeHomeProject);
}
public String toString() {
return "ListArgument.ListArgumentBuilder(limit=" + this.limit +
", offset=" + this.offset + ", filters=" + this.filters +
", order=" + this.order + ", select=" + this.select +
- ", distinct=" + this.distinct + ", count=" + this.count + ")";
+ ", distinct=" + this.distinct + ", count=" + this.count +
+ ", excludeHomeProject=" + this.excludeHomeProject + ")";
}
}
}
self._delete_bufferblock(locator)
def _delete_bufferblock(self, locator):
+        # Clear and untrack the buffer block only if it is still present;
+        # an unknown locator is now a no-op, so a repeated or racing delete
+        # no longer raises KeyError.
- bb = self._bufferblocks[locator]
- bb.clear()
- del self._bufferblocks[locator]
+ if locator in self._bufferblocks:
+ bb = self._bufferblocks[locator]
+ bb.clear()
+ del self._bufferblocks[locator]
def get_block_contents(self, locator, num_retries, cache_only=False):
"""Fetch a block.
# our tokens.
return
else:
- self._past_versions.add((response.get("modified_at"), response.get("portable_data_hash")))
+ self._remember_api_response(response)
other = CollectionReader(response["manifest_text"])
baseline = CollectionReader(self._manifest_text)
self.apply(baseline.diff(other))
clnt
end
+ def self.check_anonymous_user_token token
+ case token[0..2]
+ when 'v2/'
+ _, token_uuid, secret, optional = token.split('/')
+ unless token_uuid.andand.length == 27 && secret.andand.length.andand > 0 &&
+ token_uuid == Rails.configuration.ClusterID+"-gj3su-anonymouspublic"
+ # invalid v2 token, or v2 token for another user
+ return nil
+ end
+ else
+ # v1 token
+ secret = token
+ end
+
+ # The anonymous token content and minimum length is verified in lib/config
+ if secret.length >= 0 && secret == Rails.configuration.Users.AnonymousUserToken
+ return ApiClientAuthorization.new(user: User.find_by_uuid(anonymous_user_uuid),
+ uuid: Rails.configuration.ClusterID+"-gj3su-anonymouspublic",
+ api_token: token,
+ api_client: anonymous_user_token_api_client)
+ else
+ return nil
+ end
+ end
+
def self.check_system_root_token token
if token == Rails.configuration.SystemRootToken
return ApiClientAuthorization.new(user: User.find_by_uuid(system_user_uuid),
return nil if token.nil? or token.empty?
remote ||= Rails.configuration.ClusterID
+ auth = self.check_anonymous_user_token(token)
+ if !auth.nil?
+ return auth
+ end
+
auth = self.check_system_root_token(token)
if !auth.nil?
return auth
anonymous_group
anonymous_group_read_permission
anonymous_user
+ anonymous_user_token_api_client
system_root_token_api_client
public_project_group
public_project_read_permission
end
end
+ # Fixture helper: returns (creating on first use, memoized in a global
+ # like the sibling helpers) the ApiClient record attached to anonymous
+ # token authorizations. Runs as the system user inside a transaction.
+ def anonymous_user_token_api_client
+ $anonymous_user_token_api_client = check_cache $anonymous_user_token_api_client do
+ act_as_system_user do
+ ActiveRecord::Base.transaction do
+ ApiClient.find_or_create_by!(is_trusted: false, url_prefix: "", name: "AnonymousUserToken")
+ end
+ end
+ end
+ end
+
def system_root_token_api_client
$system_root_token_api_client = check_cache $system_root_token_api_client do
act_as_system_user do
+++ /dev/null
-#!/usr/bin/env ruby
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-# Get or Create an anonymous user token.
-# If get option is used, an existing anonymous user token is returned. If none exist, one is created.
-# If the get option is omitted, a new token is created and returned.
-
-require 'optimist'
-
-opts = Optimist::options do
- banner ''
- banner "Usage: get_anonymous_user_token "
- banner ''
- opt :get, <<-eos
-Get an existing anonymous user token. If no such token exists \
-or if this option is omitted, a new token is created and returned.
- eos
- opt :token, "token to create (optional)", :type => :string
-end
-
-get_existing = opts[:get]
-supplied_token = opts[:token]
-
-require File.dirname(__FILE__) + '/../config/environment'
-
-include ApplicationHelper
-act_as_system_user
-
-def create_api_client_auth(supplied_token=nil)
- supplied_token = Rails.configuration.Users["AnonymousUserToken"]
-
- if supplied_token.nil? or supplied_token.empty?
- puts "Users.AnonymousUserToken is empty. Destroying tokens that belong to anonymous."
- # Token is empty. Destroy any anonymous tokens.
- ApiClientAuthorization.where(user: anonymous_user).destroy_all
- return nil
- end
-
- attr = {user: anonymous_user,
- api_client_id: 0,
- scopes: ['GET /']}
-
- secret = supplied_token
-
- if supplied_token[0..2] == 'v2/'
- _, token_uuid, secret, optional = supplied_token.split('/')
- if token_uuid[0..4] != Rails.configuration.ClusterID
- # Belongs to a different cluster.
- puts supplied_token
- return nil
- end
- attr[:uuid] = token_uuid
- end
-
- attr[:api_token] = secret
-
- api_client_auth = ApiClientAuthorization.where(attr).first
- if !api_client_auth
- # The anonymous user token should never expire but we are not allowed to
- # set :expires_at to nil, so we set it to 1000 years in the future.
- attr[:expires_at] = Time.now + 1000.years
- api_client_auth = ApiClientAuthorization.create!(attr)
- end
- api_client_auth
-end
-
-if get_existing
- api_client_auth = ApiClientAuthorization.
- where('user_id=?', anonymous_user.id.to_i).
- where('expires_at>?', Time.now).
- select { |auth| auth.scopes == ['GET /'] }.
- first
-end
-
-# either not a get or no api_client_auth was found
-if !api_client_auth
- api_client_auth = create_api_client_auth(supplied_token)
-end
-
-# print it to the console
-if api_client_auth
- puts "v2/#{api_client_auth.uuid}/#{api_client_auth.api_token}"
-end
for item in self.inodes.inode_cache.find_by_uuid(ev["object_uuid"]):
item.invalidate()
- if ev.get("object_kind") == "arvados#collection":
- pdh = new_attrs.get("portable_data_hash")
- # new_attributes.modified_at currently lacks
- # subsecond precision (see #6347) so use event_at
- # which should always be the same.
- stamp = ev.get("event_at")
- if (stamp and pdh and item.writable() and
- item.collection is not None and
- item.collection.modified() and
- new_attrs.get("is_trashed") is not True):
- item.update(to_record_version=(stamp, pdh))
oldowner = old_attrs.get("owner_uuid")
newowner = ev.get("object_owner_uuid")
for parent in (
self.inodes.inode_cache.find_by_uuid(oldowner) +
self.inodes.inode_cache.find_by_uuid(newowner)):
- parent.child_event(ev)
+ parent.invalidate()
@getattr_time.time()
@catch_exceptions
"""
- def __init__(self, parent_inode, inodes, apiconfig, enable_write, collection):
+ def __init__(self, parent_inode, inodes, apiconfig, enable_write, collection, collection_root):
super(CollectionDirectoryBase, self).__init__(parent_inode, inodes, apiconfig, enable_write)
self.apiconfig = apiconfig
self.collection = collection
+ self.collection_root = collection_root
+ self.collection_record_file = None
def new_entry(self, name, item, mtime):
name = self.sanitize_filename(name)
item.fuse_entry.dead = False
self._entries[name] = item.fuse_entry
elif isinstance(item, arvados.collection.RichCollectionBase):
- self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(self.inode, self.inodes, self.apiconfig, self._enable_write, item))
+ self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(self.inode, self.inodes, self.apiconfig, self._enable_write, item, self.collection_root))
self._entries[name].populate(mtime)
else:
self._entries[name] = self.inodes.add_entry(FuseArvadosFile(self.inode, item, mtime, self._enable_write))
item.fuse_entry = self._entries[name]
def on_event(self, event, collection, name, item):
+ # These are events from the Collection object (ADD/DEL/MOD)
+ # emitted by operations on the Collection object (like
+ # "mkdirs" or "remove"), and by "update", which we need to
+ # synchronize with our FUSE objects that are assigned inodes.
if collection == self.collection:
name = self.sanitize_filename(name)
self.inodes.invalidate_inode(item.fuse_entry)
elif name in self._entries:
self.inodes.invalidate_inode(self._entries[name])
+
+ if self.collection_record_file is not None:
+ self.collection_record_file.invalidate()
+ self.inodes.invalidate_inode(self.collection_record_file)
finally:
while lockcount > 0:
self.collection.lock.acquire()
@use_counter
def flush(self):
- if not self.writable():
- return
- with llfuse.lock_released:
- self.collection.root_collection().save()
+ self.collection_root.flush()
@use_counter
@check_update
"""Represents the root of a directory tree representing a collection."""
def __init__(self, parent_inode, inodes, api, num_retries, enable_write, collection_record=None, explicit_collection=None):
- super(CollectionDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, None)
+ super(CollectionDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, None, self)
self.api = api
self.num_retries = num_retries
- self.collection_record_file = None
- self.collection_record = None
self._poll = True
try:
self._poll_time = (api._rootDesc.get('blobSignatureTtl', 60*60*2) // 2)
def writable(self):
return self._enable_write and (self.collection.writable() if self.collection is not None else self._writable)
+ @use_counter
+ def flush(self):
+ if not self.writable():
+ return
+ with llfuse.lock_released:
+ with self._updating_lock:
+ if self.collection.committed():
+ self.collection.update()
+ else:
+ self.collection.save()
+ self.new_collection_record(self.collection.api_response())
+
def want_event_subscribe(self):
return (uuid_pattern.match(self.collection_locator) is not None)
- # Used by arv-web.py to switch the contents of the CollectionDirectory
- def change_collection(self, new_locator):
- """Switch the contents of the CollectionDirectory.
-
- Must be called with llfuse.lock held.
- """
-
- self.collection_locator = new_locator
- self.collection_record = None
- self.update()
-
def new_collection(self, new_collection_record, coll_reader):
if self.inode:
self.clear()
-
- self.collection_record = new_collection_record
-
- if self.collection_record:
- self._mtime = convertTime(self.collection_record.get('modified_at'))
- self.collection_locator = self.collection_record["uuid"]
- if self.collection_record_file is not None:
- self.collection_record_file.update(self.collection_record)
-
self.collection = coll_reader
+ self.new_collection_record(new_collection_record)
self.populate(self.mtime())
+ def new_collection_record(self, new_collection_record):
+ if not new_collection_record:
+ raise Exception("invalid new_collection_record")
+ self._mtime = convertTime(new_collection_record.get('modified_at'))
+ self._manifest_size = len(new_collection_record["manifest_text"])
+ self.collection_locator = new_collection_record["uuid"]
+ if self.collection_record_file is not None:
+ self.collection_record_file.invalidate()
+ self.inodes.invalidate_inode(self.collection_record_file)
+ _logger.debug("%s invalidated collection record file", self)
+ self.fresh()
+
def uuid(self):
return self.collection_locator
@use_counter
- def update(self, to_record_version=None):
+ def update(self):
try:
- if self.collection_record is not None and portable_data_hash_pattern.match(self.collection_locator):
+ if self.collection is not None and portable_data_hash_pattern.match(self.collection_locator):
+ # It's immutable, nothing to update
return True
if self.collection_locator is None:
+ # No collection locator to retrieve from
self.fresh()
return True
+ new_collection_record = None
try:
with llfuse.lock_released:
self._updating_lock.acquire()
if not self.stale():
- return
+ return True
- _logger.debug("Updating collection %s inode %s to record version %s", self.collection_locator, self.inode, to_record_version)
- new_collection_record = None
+ _logger.debug("Updating collection %s inode %s", self.collection_locator, self.inode)
+ coll_reader = None
if self.collection is not None:
- if self.collection.known_past_version(to_record_version):
- _logger.debug("%s already processed %s", self.collection_locator, to_record_version)
- else:
- self.collection.update()
+ # Already have a collection object
+ self.collection.update()
+ new_collection_record = self.collection.api_response()
else:
+ # Create a new collection object
if uuid_pattern.match(self.collection_locator):
coll_reader = arvados.collection.Collection(
self.collection_locator, self.api, self.api.keep,
new_collection_record['storage_classes_desired'] = coll_reader.storage_classes_desired()
# end with llfuse.lock_released, re-acquire lock
- if (new_collection_record is not None and
- (self.collection_record is None or
- self.collection_record["portable_data_hash"] != new_collection_record.get("portable_data_hash"))):
- self.new_collection(new_collection_record, coll_reader)
- self._manifest_size = len(coll_reader.manifest_text())
- _logger.debug("%s manifest_size %i", self, self._manifest_size)
- self.fresh()
+ if new_collection_record is not None:
+ if coll_reader is not None:
+ self.new_collection(new_collection_record, coll_reader)
+ else:
+ self.new_collection_record(new_collection_record)
+
return True
finally:
self._updating_lock.release()
_logger.error("Error fetching collection '%s': %s", self.collection_locator, e)
except arvados.errors.ArgumentError as detail:
_logger.warning("arv-mount %s: error %s", self.collection_locator, detail)
- if self.collection_record is not None and "manifest_text" in self.collection_record:
- _logger.warning("arv-mount manifest_text is: %s", self.collection_record["manifest_text"])
+ if new_collection_record is not None and "manifest_text" in new_collection_record:
+ _logger.warning("arv-mount manifest_text is: %s", new_collection_record["manifest_text"])
except Exception:
_logger.exception("arv-mount %s: error", self.collection_locator)
- if self.collection_record is not None and "manifest_text" in self.collection_record:
- _logger.error("arv-mount manifest_text is: %s", self.collection_record["manifest_text"])
+ if new_collection_record is not None and "manifest_text" in new_collection_record:
+ _logger.error("arv-mount manifest_text is: %s", new_collection_record["manifest_text"])
self.invalidate()
return False
+ @use_counter
+ def collection_record(self):
+ self.flush()
+ return self.collection.api_response()
+
@use_counter
@check_update
def __getitem__(self, item):
if item == '.arvados#collection':
if self.collection_record_file is None:
- self.collection_record_file = ObjectFile(self.inode, self.collection_record)
+ self.collection_record_file = FuncToJSONFile(
+ self.inode, self.collection_record)
self.inodes.add_entry(self.collection_record_file)
+ self.invalidate() # use lookup as a signal to force update
return self.collection_record_file
else:
return super(CollectionDirectory, self).__getitem__(item)
return super(CollectionDirectory, self).__contains__(k)
def invalidate(self):
- self.collection_record = None
- self.collection_record_file = None
+ if self.collection_record_file is not None:
+ self.collection_record_file.invalidate()
+ self.inodes.invalidate_inode(self.collection_record_file)
super(CollectionDirectory, self).invalidate()
def persisted(self):
# This is always enable_write=True because it never tries to
# save to the backend
super(TmpCollectionDirectory, self).__init__(
- parent_inode, inodes, api_client.config, True, collection)
- self.collection_record_file = None
+ parent_inode, inodes, api_client.config, True, collection, self)
self.populate(self.mtime())
def on_event(self, *args, **kwargs):
super(TmpCollectionDirectory, self).on_event(*args, **kwargs)
- if self.collection_record_file:
+ if self.collection_record_file is None:
+ return
- # See discussion in CollectionDirectoryBase.on_event
- lockcount = 0
- try:
- while True:
- self.collection.lock.release()
- lockcount += 1
- except RuntimeError:
- pass
+ # See discussion in CollectionDirectoryBase.on_event
+ lockcount = 0
+ try:
+ while True:
+ self.collection.lock.release()
+ lockcount += 1
+ except RuntimeError:
+ pass
- try:
- with llfuse.lock:
- with self.collection.lock:
- self.collection_record_file.invalidate()
- self.inodes.invalidate_inode(self.collection_record_file)
- _logger.debug("%s invalidated collection record", self)
- finally:
- while lockcount > 0:
- self.collection.lock.acquire()
- lockcount -= 1
+ try:
+ with llfuse.lock:
+ with self.collection.lock:
+ self.collection_record_file.invalidate()
+ self.inodes.invalidate_inode(self.collection_record_file)
+ _logger.debug("%s invalidated collection record", self)
+ finally:
+ while lockcount > 0:
+ self.collection.lock.acquire()
+ lockcount -= 1
def collection_record(self):
with llfuse.lock_released:
def writable(self):
return True
+ def flush(self):
+ pass
+
def want_event_subscribe(self):
return False
uuid=self.project_uuid,
filters=[["uuid", "is_a", "arvados#group"],
["groups.group_class", "in", ["project","filter"]]]))
- contents.extend(arvados.util.keyset_list_all(self.api.groups().contents,
+ contents.extend(filter(lambda i: i["current_version_uuid"] == i["uuid"],
+ arvados.util.keyset_list_all(self.api.groups().contents,
order_key="uuid",
num_retries=self.num_retries,
uuid=self.project_uuid,
- filters=[["uuid", "is_a", "arvados#collection"]]))
+ filters=[["uuid", "is_a", "arvados#collection"]])))
+
# end with llfuse.lock_released, re-acquire lock
GOSTUFF="$ARVBOX_DATA/gopath"
RLIBS="$ARVBOX_DATA/Rlibs"
ARVADOS_CONTAINER_PATH="/var/lib/arvados-arvbox"
-GEM_HOME="/var/lib/arvados/lib/ruby/gems/2.7.0"
getip() {
docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $ARVBOX_CONTAINER
"--volume=$PG_DATA:/var/lib/postgresql:rw" \
"--volume=$VAR_DATA:$ARVADOS_CONTAINER_PATH:rw" \
"--volume=$PASSENGER:/var/lib/passenger:rw" \
- "--volume=$GEMS:$GEM_HOME:rw" \
+ "--volume=$GEMS:/var/lib/arvados/lib/ruby/gems:rw" \
"--volume=$PIPCACHE:/var/lib/pip:rw" \
"--volume=$NPMCACHE:/var/lib/npm:rw" \
"--volume=$GOSTUFF:/var/lib/gopath:rw" \
-e COLUMNS=$(tput cols) \
-e TERM=$TERM \
-e WORKSPACE=/usr/src/arvados \
- -e GEM_HOME=$GEM_HOME \
-e CONFIGSRC=$ARVADOS_CONTAINER_PATH/run_tests \
$ARVBOX_CONTAINER \
/usr/local/lib/arvbox/runsu.sh \
-e LINES=$(tput lines) \
-e COLUMNS=$(tput cols) \
-e TERM=$TERM \
- -e GEM_HOME=$GEM_HOME \
$ARVBOX_CONTAINER /bin/bash
;;
-e LINES=$(tput lines) \
-e COLUMNS=$(tput cols) \
-e TERM=$TERM \
- -e GEM_HOME=$GEM_HOME \
-u arvbox \
-w /usr/src/arvados \
$ARVBOX_CONTAINER /bin/bash --login
;;
pipe)
- exec docker exec -i $ARVBOX_CONTAINER /usr/bin/env GEM_HOME=$GEM_HOME /bin/bash -
+ exec docker exec -i $ARVBOX_CONTAINER /usr/bin/env /bin/bash -
;;
stop)
;;
hotreset)
- exec docker exec -i $ARVBOX_CONTAINER /usr/bin/env GEM_HOME=$GEM_HOME /bin/bash - <<EOF
+ exec docker exec -i $ARVBOX_CONTAINER /usr/bin/env /bin/bash - <<EOF
sv stop api
sv stop controller
sv stop websockets
cd /usr/src/arvados/services/api
export DISABLE_DATABASE_ENVIRONMENT_CHECK=1
export RAILS_ENV=development
-flock $GEM_HOME/gems.lock bin/bundle exec rake db:drop
+bin/bundle exec rake db:drop
rm $ARVADOS_CONTAINER_PATH/api_database_setup
rm $ARVADOS_CONTAINER_PATH/superuser_token
sv start api
su postgres -c 'createuser -s arvbox' && \
/etc/init.d/postgresql stop
-ENV GEM_HOME /var/lib/arvados/lib/ruby/gems/2.5.0
-ENV PATH $PATH:$GEM_HOME/bin
-
VOLUME /var/lib/docker
VOLUME /var/log/nginx
VOLUME /etc/ssl/private
fi
if ! test -f $ARVADOS_CONTAINER_PATH/api_database_setup ; then
- flock $GEM_HOME/gems.lock bin/bundle exec rake db:setup
+ flock $GEMLOCK bin/bundle exec rake db:setup
touch $ARVADOS_CONTAINER_PATH/api_database_setup
fi
if ! test -s $ARVADOS_CONTAINER_PATH/superuser_token ; then
- superuser_tok=$(flock $GEM_HOME/gems.lock bin/bundle exec ./script/create_superuser_token.rb)
+ superuser_tok=$(flock $GEMLOCK bin/bundle exec ./script/create_superuser_token.rb)
echo "$superuser_tok" > $ARVADOS_CONTAINER_PATH/superuser_token
fi
rm -rf tmp
mkdir -p tmp/cache
-flock $GEM_HOME/gems.lock bin/bundle exec rake db:migrate
+flock $GEMLOCK bin/bundle exec rake db:migrate
#
# SPDX-License-Identifier: AGPL-3.0
+export RUBY_VERSION=2.7.0
+export BUNDLER_VERSION=2.2.19
+
export DEBIAN_FRONTEND=noninteractive
-export GEM_HOME=/var/lib/arvados/lib/ruby/gems/2.5.0
-export PATH=${PATH}:/usr/local/go/bin:$GEM_HOME/bin:/var/lib/arvados/bin
+export PATH=${PATH}:/usr/local/go/bin:/var/lib/arvados/bin:/usr/src/arvados/sdk/cli/binstubs
export npm_config_cache=/var/lib/npm
export npm_config_cache_min=Infinity
export R_LIBS=/var/lib/Rlibs
export HOME=$(getent passwd arvbox | cut -d: -f6)
export ARVADOS_CONTAINER_PATH=/var/lib/arvados-arvbox
+GEMLOCK=/var/lib/arvados/lib/ruby/gems/gems.lock
defaultdev=$(/sbin/ip route|awk '/default/ { print $5 }')
dockerip=$(/sbin/ip route | grep default | awk '{ print $3 }')
fi
run_bundler() {
+ flock $GEMLOCK /var/lib/arvados/bin/gem install --no-document bundler:$BUNDLER_VERSION
if test -f Gemfile.lock ; then
frozen=--frozen
else
frozen=""
fi
- BUNDLER=bundler
- if test -x $PWD/bin/bundler ; then
+ BUNDLER=bundle
+ if test -x $PWD/bin/bundle ; then
# If present, use the one associated with rails workbench or API
- BUNDLER=$PWD/bin/bundler
+ BUNDLER=$PWD/bin/bundle
fi
- if ! flock $GEM_HOME/gems.lock $BUNDLER install --verbose --local --no-deployment $frozen "$@" ; then
- flock $GEM_HOME/gems.lock $BUNDLER install --verbose --no-deployment $frozen "$@"
+ if ! flock $GEMLOCK $BUNDLER install --verbose --local --no-deployment $frozen "$@" ; then
+ flock $GEMLOCK $BUNDLER install --verbose --no-deployment $frozen "$@"
fi
}
set -e -o pipefail
-export GEM_HOME=/var/lib/arvados/lib/ruby/gems/2.5.0
export ARVADOS_CONTAINER_PATH=/var/lib/arvados-arvbox
if ! grep "^arvbox:" /etc/passwd >/dev/null 2>/dev/null ; then
HOSTUID=$(ls -nd /usr/src/arvados | sed 's/ */ /' | cut -d' ' -f4)
HOSTGID=$(ls -nd /usr/src/arvados | sed 's/ */ /' | cut -d' ' -f5)
- mkdir -p $ARVADOS_CONTAINER_PATH/git $GEM_HOME \
+ mkdir -p $ARVADOS_CONTAINER_PATH/git \
/var/lib/passenger /var/lib/gopath \
/var/lib/pip /var/lib/npm
useradd --groups docker crunch
if [[ "$1" != --no-chown ]] ; then
- chown arvbox:arvbox -R /usr/local $ARVADOS_CONTAINER_PATH $GEM_HOME \
+ chown arvbox:arvbox -R /usr/local $ARVADOS_CONTAINER_PATH \
/var/lib/passenger /var/lib/postgresql \
/var/lib/nginx /var/log/nginx /etc/ssl/private \
/var/lib/gopath /var/lib/pip /var/lib/npm \
echo "arvbox ALL=(crunch) NOPASSWD: ALL" >> /etc/sudoers
cat <<EOF > /etc/profile.d/paths.sh
-export PATH=/var/lib/arvados/bin:/usr/local/bin:/usr/bin:/bin
-export GEM_HOME=/var/lib/arvados/lib/ruby/gems/2.5.0
+export PATH=/var/lib/arvados/bin:/usr/local/bin:/usr/bin:/bin:/usr/src/arvados/sdk/cli/binstubs
export npm_config_cache=/var/lib/npm
export npm_config_cache_min=Infinity
export R_LIBS=/var/lib/Rlibs
export RAILS_ENV=development
fi
-run_bundler --without=development
-flock $GEM_HOME/gems.lock bin/bundle exec passenger-config build-native-support
-flock $GEM_HOME/gems.lock bin/bundle exec passenger-config install-standalone-runtime
+run_bundler --without=development --binstubs=binstubs
+binstubs/passenger-config build-native-support
+binstubs/passenger-config install-standalone-runtime
if test "$1" = "--only-deps" ; then
exit
touch $ARVADOS_CONTAINER_PATH/api.ready
-exec bin/bundle exec passenger start --port=${services[api]}
+exec binstubs/passenger start --port=${services[api]}
fi
cd /usr/src/arvados/doc
-flock $GEM_HOME/gems.lock bundle exec rake generate baseurl=http://$localip:${services[doc]} arvados_api_host=$localip:${services[controller-ssl]} arvados_workbench_host=http://$localip
+flock $GEMLOCK bundle exec rake generate baseurl=http://$localip:${services[doc]} arvados_api_host=$localip:${services[controller-ssl]} arvados_workbench_host=http://$localip
EOF
while true ; do
- flock $GEM_HOME/gems.lock bundle exec script/arvados-git-sync.rb $RAILS_ENV
+ flock $GEMLOCK bundle exec script/arvados-git-sync.rb $RAILS_ENV
sleep 120
done
if ! [[ -z "$waiting" ]] ; then
if ps x | grep -v grep | grep "bundle install" > /dev/null; then
- gemcount=$(ls $GEM_HOME/gems 2>/dev/null | wc -l)
+ gemcount=$(ls /var/lib/arvados/lib/ruby/gems/*/gems 2>/dev/null | wc -l)
gemlockcount=0
for l in /usr/src/arvados/services/api/Gemfile.lock \
EOF
cd /usr/src/arvados/sdk/cli
-run_bundler --binstubs=$PWD/binstubs
-ln -sf /usr/src/arvados/sdk/cli/binstubs/arv /usr/local/bin/arv
+run_bundler --binstubs=binstubs
export PYCMD=python3
export ARVADOS_VIRTUAL_MACHINE_UUID=$(cat $ARVADOS_CONTAINER_PATH/vm-uuid)
while true ; do
- arvados-login-sync
+ /usr/src/arvados/services/login-sync/binstubs/arvados-login-sync
sleep 120
done
fi
cd /usr/src/arvados/services/login-sync
-run_bundler --binstubs=$PWD/binstubs
-ln -sf /usr/src/arvados/services/login-sync/binstubs/arvados-login-sync /usr/local/bin/arvados-login-sync
+run_bundler --binstubs=binstubs
if test "$1" = "--only-deps" ; then
exit
if test "$1" != "--only-deps" ; then
openssl verify -CAfile $root_cert $server_cert
- exec bin/bundle exec passenger start --port=${services[workbench]} \
+ exec binstubs/passenger start --port=${services[workbench]} \
--ssl --ssl-certificate=$ARVADOS_CONTAINER_PATH/server-cert-${localip}.pem \
--ssl-certificate-key=$ARVADOS_CONTAINER_PATH/server-cert-${localip}.key \
--user arvbox
export RAILS_ENV=development
fi
-run_bundler --without=development
-flock $GEM_HOME/gems.lock bin/bundle exec passenger-config build-native-support
-flock $GEM_HOME/gems.lock bin/bundle exec passenger-config install-standalone-runtime
+run_bundler --without=development --binstubs=binstubs
+binstubs/passenger-config build-native-support
+binstubs/passenger-config install-standalone-runtime
mkdir -p /usr/src/arvados/apps/workbench/tmp
if test "$1" = "--only-deps" ; then
$RAILS_ENV:
keep_web_url: https://example.com/c=%{uuid_or_pdh}
EOF
- RAILS_GROUPS=assets flock $GEM_HOME/gems.lock bin/bundle exec rake npm:install
+ RAILS_GROUPS=assets flock $GEMLOCK bin/bundle exec rake npm:install
rm config/application.yml
exit
fi
secret_token=$(cat $ARVADOS_CONTAINER_PATH/workbench_secret_token)
-RAILS_GROUPS=assets flock $GEM_HOME/gems.lock bin/bundle exec rake npm:install
-flock $GEM_HOME/gems.lock bin/bundle exec rake assets:precompile
+RAILS_GROUPS=assets flock $GEMLOCK bin/bundle exec rake npm:install
+flock $GEMLOCK bin/bundle exec rake assets:precompile