From: Javier Bértoli Date: Thu, 17 Mar 2022 18:19:06 +0000 (-0300) Subject: Merge branch '18681-install-passenger-only-when-required' X-Git-Tag: 2.4.0~41 X-Git-Url: https://git.arvados.org/arvados.git/commitdiff_plain/9a11b502e406202db58fb6751f3448aa30288062?hp=4060fcafe05a811394d217917c184ea4b590b413 Merge branch '18681-install-passenger-only-when-required' closes #18681 Arvados-DCO-1.1-Signed-off-by: Javier Bértoli --- diff --git a/build/package-build-dockerfiles/build-all-build-containers.sh b/build/package-build-dockerfiles/build-all-build-containers.sh index 5ed33dc9f3..5f8817f20a 100755 --- a/build/package-build-dockerfiles/build-all-build-containers.sh +++ b/build/package-build-dockerfiles/build-all-build-containers.sh @@ -12,7 +12,7 @@ for target in `find -maxdepth 1 -type d |grep -v generated`; do target=${target#./} echo $target cd $target - docker build --tag arvados/build:$target --build-arg HOSTTYPE=$HOSTTYPE . + docker build --tag arvados/build:$target --build-arg HOSTTYPE=$HOSTTYPE --build-arg BRANCH=$(git rev-parse --abbrev-ref HEAD) . cd .. done diff --git a/build/package-build-dockerfiles/centos7/Dockerfile b/build/package-build-dockerfiles/centos7/Dockerfile index 01662d6ac5..e44d231edf 100644 --- a/build/package-build-dockerfiles/centos7/Dockerfile +++ b/build/package-build-dockerfiles/centos7/Dockerfile @@ -3,6 +3,7 @@ # SPDX-License-Identifier: AGPL-3.0 ARG HOSTTYPE +ARG BRANCH FROM centos:7 as build_x86_64 # Install go @@ -64,7 +65,12 @@ RUN /usr/local/rvm/bin/rvm-exec default bundle config --global jobs $(let a=$(gr # Cf. https://build.betterup.com/one-weird-trick-that-will-speed-up-your-bundle-install/ ENV MAKE "make --jobs $(grep -c processor /proc/cpuinfo)" +# Preseed the go module cache and the ruby gems, using the currently checked +# out branch of the source tree. This avoids potential compatibility issues +# between the version of Ruby and certain gems. RUN git clone --depth 1 git://git.arvados.org/arvados.git /tmp/arvados && \ + cd /tmp/arvados && \ + if [[ -n "${BRANCH}" ]]; then git checkout ${BRANCH}; fi && \ cd /tmp/arvados/services/api && \ /usr/local/rvm/bin/rvm-exec default bundle install && \ cd /tmp/arvados/apps/workbench && \ diff --git a/build/package-build-dockerfiles/debian10/Dockerfile b/build/package-build-dockerfiles/debian10/Dockerfile index edd47cf69c..ed0a0cdc1f 100644 --- a/build/package-build-dockerfiles/debian10/Dockerfile +++ b/build/package-build-dockerfiles/debian10/Dockerfile @@ -3,6 +3,7 @@ # SPDX-License-Identifier: AGPL-3.0 ARG HOSTTYPE +ARG BRANCH ## dont use debian:10 here since the word 'buster' is used for rvm precompiled binaries FROM debian:buster as build_x86_64 @@ -51,7 +52,12 @@ RUN /usr/local/rvm/bin/rvm-exec default bundle config --global jobs $(let a=$(gr # Cf. https://build.betterup.com/one-weird-trick-that-will-speed-up-your-bundle-install/ ENV MAKE "make --jobs $(grep -c processor /proc/cpuinfo)" +# Preseed the go module cache and the ruby gems, using the currently checked +# out branch of the source tree. This avoids potential compatibility issues +# between the version of Ruby and certain gems. 
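+# BRANCH is passed in with --build-arg by the build scripts (see
+# build-all-build-containers.sh above); when it is empty, the default
+# branch of the fresh clone is used as-is.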
RUN git clone --depth 1 git://git.arvados.org/arvados.git /tmp/arvados && \ + cd /tmp/arvados && \ + if [[ -n "${BRANCH}" ]]; then git checkout ${BRANCH}; fi && \ cd /tmp/arvados/services/api && \ /usr/local/rvm/bin/rvm-exec default bundle install && \ cd /tmp/arvados/apps/workbench && \ diff --git a/build/package-build-dockerfiles/debian11/Dockerfile b/build/package-build-dockerfiles/debian11/Dockerfile index cb4c695c4c..cfeaf2463a 100644 --- a/build/package-build-dockerfiles/debian11/Dockerfile +++ b/build/package-build-dockerfiles/debian11/Dockerfile @@ -3,6 +3,7 @@ # SPDX-License-Identifier: AGPL-3.0 ARG HOSTTYPE +ARG BRANCH ## dont use debian:11 here since the word 'bullseye' is used for rvm precompiled binaries FROM debian:bullseye as build_x86_64 @@ -56,7 +57,12 @@ RUN /usr/local/rvm/bin/rvm-exec default bundle config --global jobs $(let a=$(gr # Cf. https://build.betterup.com/one-weird-trick-that-will-speed-up-your-bundle-install/ ENV MAKE "make --jobs $(grep -c processor /proc/cpuinfo)" +# Preseed the go module cache and the ruby gems, using the currently checked +# out branch of the source tree. This avoids potential compatibility issues +# between the version of Ruby and certain gems. RUN git clone --depth 1 git://git.arvados.org/arvados.git /tmp/arvados && \ + cd /tmp/arvados && \ + if [[ -n "${BRANCH}" ]]; then git checkout ${BRANCH}; fi && \ cd /tmp/arvados/services/api && \ /usr/local/rvm/bin/rvm-exec default bundle install && \ cd /tmp/arvados/apps/workbench && \ diff --git a/build/package-build-dockerfiles/ubuntu1804/Dockerfile b/build/package-build-dockerfiles/ubuntu1804/Dockerfile index b026fa2a88..9b20b41a4e 100644 --- a/build/package-build-dockerfiles/ubuntu1804/Dockerfile +++ b/build/package-build-dockerfiles/ubuntu1804/Dockerfile @@ -3,6 +3,7 @@ # SPDX-License-Identifier: AGPL-3.0 ARG HOSTTYPE +ARG BRANCH FROM ubuntu:bionic as build_x86_64 # Install go @@ -50,7 +51,12 @@ RUN /usr/local/rvm/bin/rvm-exec default bundle config --global jobs $(let a=$(gr # Cf. https://build.betterup.com/one-weird-trick-that-will-speed-up-your-bundle-install/ ENV MAKE "make --jobs $(grep -c processor /proc/cpuinfo)" +# Preseed the go module cache and the ruby gems, using the currently checked +# out branch of the source tree. This avoids potential compatibility issues +# between the version of Ruby and certain gems. RUN git clone --depth 1 git://git.arvados.org/arvados.git /tmp/arvados && \ + cd /tmp/arvados && \ + if [[ -n "${BRANCH}" ]]; then git checkout ${BRANCH}; fi && \ cd /tmp/arvados/services/api && \ /usr/local/rvm/bin/rvm-exec default bundle install && \ cd /tmp/arvados/apps/workbench && \ diff --git a/build/package-build-dockerfiles/ubuntu2004/Dockerfile b/build/package-build-dockerfiles/ubuntu2004/Dockerfile index 1457670ce6..f28e6fef1d 100644 --- a/build/package-build-dockerfiles/ubuntu2004/Dockerfile +++ b/build/package-build-dockerfiles/ubuntu2004/Dockerfile @@ -3,6 +3,7 @@ # SPDX-License-Identifier: AGPL-3.0 ARG HOSTTYPE +ARG BRANCH FROM ubuntu:focal as build_x86_64 # Install go @@ -61,7 +62,12 @@ RUN /usr/local/rvm/bin/rvm-exec default bundle config --global jobs $(let a=$(gr # Cf. https://build.betterup.com/one-weird-trick-that-will-speed-up-your-bundle-install/ ENV MAKE "make --jobs $(grep -c processor /proc/cpuinfo)" +# Preseed the go module cache and the ruby gems, using the currently checked +# out branch of the source tree. This avoids potential compatibility issues +# between the version of Ruby and certain gems. 
RUN git clone --depth 1 git://git.arvados.org/arvados.git /tmp/arvados && \ + cd /tmp/arvados && \ + if [[ -n "${BRANCH}" ]]; then git checkout ${BRANCH}; fi && \ cd /tmp/arvados/services/api && \ /usr/local/rvm/bin/rvm-exec default bundle install && \ cd /tmp/arvados/apps/workbench && \ diff --git a/build/run-build-packages-one-target.sh b/build/run-build-packages-one-target.sh index e06a732979..c1cc2e5877 100755 --- a/build/run-build-packages-one-target.sh +++ b/build/run-build-packages-one-target.sh @@ -195,7 +195,7 @@ fi echo $TARGET cd $TARGET -time docker build --tag "$IMAGE" --build-arg HOSTTYPE=$HOSTTYPE . +time docker build --tag "$IMAGE" --build-arg HOSTTYPE=$HOSTTYPE --build-arg BRANCH=$(git rev-parse --abbrev-ref HEAD) . popd if test -z "$packages" ; then @@ -307,16 +307,24 @@ else set +e mv -f ${WORKSPACE}/packages/${TARGET}/* ${WORKSPACE}/packages/${TARGET}/processed/ 2>/dev/null set -e + # give bundle (almost) all the cores. See also the MAKE env var that is passed into the + # docker run command below. + # Cf. https://build.betterup.com/one-weird-trick-that-will-speed-up-your-bundle-install/ + tmpfile=$(mktemp /tmp/run-build-packages-one-target.XXXXXX) + cores=$(let a=$(grep -c processor /proc/cpuinfo )-1; echo $a) + printf -- "---\nBUNDLE_JOBS: \"$cores\"" > $tmpfile # Build packages. if docker run \ --rm \ "${docker_volume_args[@]}" \ + -v $tmpfile:/root/.bundle/config \ --env ARVADOS_BUILDING_VERSION="$ARVADOS_BUILDING_VERSION" \ --env ARVADOS_BUILDING_ITERATION="$ARVADOS_BUILDING_ITERATION" \ --env ARVADOS_DEBUG=$ARVADOS_DEBUG \ --env "ONLY_BUILD=$ONLY_BUILD" \ --env "FORCE_BUILD=$FORCE_BUILD" \ --env "ARCH=$ARCH" \ + --env "MAKE=make --jobs $cores" \ "$IMAGE" $COMMAND then echo @@ -325,6 +333,8 @@ else FINAL_EXITCODE=$? echo "ERROR: build packages on $IMAGE failed with exit status $FINAL_EXITCODE" >&2 fi + # Clean up the bundle config file + rm -f $tmpfile fi if test -n "$package_fails" ; then diff --git a/build/run-library.sh b/build/run-library.sh index 0257e65241..fa2be6ac7a 100755 --- a/build/run-library.sh +++ b/build/run-library.sh @@ -492,7 +492,8 @@ handle_rails_package() { cd "$srcdir" mkdir -p tmp git rev-parse HEAD >git-commit.version - bundle package --all + bundle config set cache_all true + bundle package ) if [[ 0 != "$?" ]] || ! cd "$WORKSPACE/packages/$TARGET"; then echo "ERROR: $pkgname package prep failed" >&2 @@ -603,7 +604,8 @@ handle_workbench () { # We need to bundle to be ready even when we build a package without vendor directory # because asset compilation requires it. 
- bundle install --system >"$STDOUT_IF_DEBUG" + bundle config set --local system 'true' >"$STDOUT_IF_DEBUG" + bundle install >"$STDOUT_IF_DEBUG" # clear the tmp directory; the asset generation step will recreate tmp/cache/assets, # and we want that in the package, so it's easier to not exclude the tmp directory diff --git a/doc/_config.yml b/doc/_config.yml index f2ddd7f58c..9dd7f40529 100644 --- a/doc/_config.yml +++ b/doc/_config.yml @@ -26,9 +26,10 @@ navbar: - user/getting_started/community.html.textile.liquid - Walkthough: - user/tutorials/wgs-tutorial.html.textile.liquid - - Run a workflow using Workbench: + - Using Workbench: - user/getting_started/workbench.html.textile.liquid - user/tutorials/tutorial-workflow-workbench.html.textile.liquid + - user/topics/workbench-migration.html.textile.liquid - Working at the Command Line: - user/getting_started/setup-cli.html.textile.liquid - user/reference/api-tokens.html.textile.liquid diff --git a/doc/api/methods/collections.html.textile.liquid b/doc/api/methods/collections.html.textile.liquid index 01efda2b0c..5ff8d529f8 100644 --- a/doc/api/methods/collections.html.textile.liquid +++ b/doc/api/methods/collections.html.textile.liquid @@ -47,7 +47,7 @@ table(table table-bordered table-condensed). h3. Conditions of creating a Collection -The @portable_data_hash@ and @manifest_text@ attributes must be provided when creating a Collection. The cryptographic digest of the supplied @manifest_text@ must match the supplied @portable_data_hash@. +If a new @portable_data_hash@ is specified when creating or updating a Collection, it must match the cryptographic digest of the supplied @manifest_text@. h3. Side effects of creating a Collection @@ -72,6 +72,9 @@ Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | |collection|object||query|| +|replace_files|object|Initialize files and directories using content from other collections|query|| + +The new collection's content can be initialized by providing a @manifest_text@ key in the provided @collection@ object, or by using the @replace_files@ option (see "replace_files":#replace_files below). h3. delete @@ -116,6 +119,9 @@ table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Collection in question.|path|| |collection|object||query|| +|replace_files|object|Delete and replace files and directories using content from other collections|query|| + +The collection's content can be updated by providing a @manifest_text@ key in the provided @collection@ object, or by using the @replace_files@ option (see "replace_files":#replace_files below). h3. untrash @@ -160,3 +166,56 @@ Arguments: table(table table-bordered table-condensed). |_. Argument |_. Type |_. Description |_. Location |_. Example | {background:#ccffcc}.|uuid|string|The UUID of the Collection to get usage.|path|| + +h2(#replace_files). Using "replace_files" to create/update collections + +The @replace_files@ option can be used with the @create@ and @update@ APIs to efficiently copy individual files and directory trees from other collections, and copy/rename/delete items within an existing collection, without transferring any file data. + +@replace_files@ keys indicate target paths in the new collection, and values specify sources that should be copied to the target paths. +* Each target path must be an absolute canonical path beginning with @/@. 
It must not contain @.@ or @..@ components, consecutive @/@ characters, or a trailing @/@ after the final component.
+* Each source must be either an empty string (signifying that the target path is to be deleted), or @PDH/path@ where @PDH@ is the portable data hash of a collection on the cluster and @/path@ is a file or directory in that collection.
+* In an @update@ request, sources may reference the current portable data hash of the collection being updated.
+
+Example: delete @foo.txt@ from a collection
+
+
+"replace_files": {
+  "/foo.txt": ""
+}
+
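+
+For example, the same delete can be requested through the Go SDK. The following is a minimal sketch, not part of the API itself: the collection UUID is a placeholder, and it assumes the client marshals @replace_files@ into the request like any other update parameter.
+
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"git.arvados.org/arvados.git/sdk/go/arvados"
+)
+
+func main() {
+	client := arvados.NewClientFromEnv()
+	var coll arvados.Collection
+	// zzzzz-4zz18-xxxxxxxxxxxxxxx is a placeholder collection UUID.
+	err := client.RequestAndDecode(&coll, "PUT",
+		"arvados/v1/collections/zzzzz-4zz18-xxxxxxxxxxxxxxx", nil,
+		map[string]interface{}{
+			"replace_files": map[string]string{"/foo.txt": ""},
+		})
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(coll.PortableDataHash)
+}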
+
+Example: rename @foo.txt@ to @bar.txt@ in a collection with portable data hash @fa7aeb5140e2848d39b416daeef4ffc5+45@
+
+
+"replace_files": {
+  "/foo.txt": "",
+  "/bar.txt": "fa7aeb5140e2848d39b416daeef4ffc5+45/foo.txt"
+}
+
+
+Example: delete current contents, then add content from multiple collections
+
+
+"replace_files": {
+  "/": "",
+  "/copy of collection 1": "1f4b0bc7583c2a7f9102c395f4ffc5e3+45/",
+  "/copy of collection 2": "ea10d51bcf88862dbcc36eb292017dfd+45/"
+}
+
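+
+In Go code that uses the controller client types, the same request is expressed with the new @ReplaceFiles@ field on @arvados.CreateOptions@ (added in sdk/go/arvados/api.go below). A sketch mirroring the controller tests in this branch; @conn@ and @ctx@ are assumed to be an established controller connection and request context:
+
+coll, err := conn.CollectionCreate(ctx, arvados.CreateOptions{
+	Attrs: map[string]interface{}{"name": "combined collection"},
+	ReplaceFiles: map[string]string{
+		"/copy of collection 1": "1f4b0bc7583c2a7f9102c395f4ffc5e3+45/",
+		"/copy of collection 2": "ea10d51bcf88862dbcc36eb292017dfd+45/",
+	},
+})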
+
+Example: replace entire collection with a copy of a subdirectory from another collection
+
+
+"replace_files": {
+  "/": "1f4b0bc7583c2a7f9102c395f4ffc5e3+45/subdir"
+}
+
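+
+Note: initializing a new collection from an entire existing collection (@"/": "PDH"@ with no other targets) produces a collection whose portable data hash equals the source's, since the content is identical; the controller tests below check this property.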
+
+A target path with a non-empty source cannot be the ancestor of another target path in the same request. For example, the following request is invalid:
+
+
+"replace_files": {
+  "/foo": "fa7aeb5140e2848d39b416daeef4ffc5+45/",
+  "/foo/this_will_return_an_error": ""
+}
+
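+
+Example: the same layout requested legally. An ancestor target may appear alongside its descendants when its own source is the empty string (a delete), so the directory is cleared first and the new file spliced in:
+
+"replace_files": {
+  "/foo": "",
+  "/foo/this_is_fine.txt": "fa7aeb5140e2848d39b416daeef4ffc5+45/foo.txt"
+}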
diff --git a/doc/images/switch-to-wb1.png b/doc/images/switch-to-wb1.png new file mode 100644 index 0000000000..3787e31535 Binary files /dev/null and b/doc/images/switch-to-wb1.png differ diff --git a/doc/images/switch-to-wb2.png b/doc/images/switch-to-wb2.png new file mode 100644 index 0000000000..177090b329 Binary files /dev/null and b/doc/images/switch-to-wb2.png differ diff --git a/doc/images/wb2-example.png b/doc/images/wb2-example.png new file mode 100644 index 0000000000..7bdea9e78b Binary files /dev/null and b/doc/images/wb2-example.png differ diff --git a/doc/user/getting_started/workbench.html.textile.liquid b/doc/user/getting_started/workbench.html.textile.liquid index 644cf7d208..7091e31eae 100644 --- a/doc/user/getting_started/workbench.html.textile.liquid +++ b/doc/user/getting_started/workbench.html.textile.liquid @@ -10,9 +10,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0 {% endcomment %} {% include 'notebox_begin' %} -This guide covers the classic Arvados Workbench web application, sometimes referred to as "Workbench 1". There is also a new Workbench web application under development called "Workbench 2". Sites which have both Workbench applications installed will have a dropdown menu option "Switch to Workbench 2" to switch between versions. - -This guide will be updated to cover "Workbench 2" in the future. +This guide covers the classic Arvados Workbench web application, sometimes referred to as "Workbench 1". There is also a new Workbench web application under development called "Workbench 2". This guide will be updated to cover "Workbench 2" in the future. See "Workbench 2 migration":{{site.baseurl}}/user/topics/workbench-migration.html for more information. {% include 'notebox_end' %} You can access the Arvados Workbench used in this guide using this link: diff --git a/doc/user/topics/workbench-migration.html.textile.liquid b/doc/user/topics/workbench-migration.html.textile.liquid new file mode 100644 index 0000000000..9a36435eac --- /dev/null +++ b/doc/user/topics/workbench-migration.html.textile.liquid @@ -0,0 +1,49 @@ +--- +layout: default +navsection: userguide +title: "Workbench 2 migration" +... +{% comment %} +Copyright (C) The Arvados Authors. All rights reserved. + +SPDX-License-Identifier: CC-BY-SA-3.0 +{% endcomment %} + +Arvados is in the process of migrating from the classic web application, referred to as "Workbench 1", to a completely new web application, referred to as "Workbench 2". + +!{width: 90%}{{ site.baseurl }}/images/wb2-example.png! + +Workbench 2 is the new Workbench web application that will, over time, replace Workbench 1. Workbench 2 is being built based on user feedback, and it is approaching feature parity with Workbench 1. Workbench 2 has a modern look and feel and offers several advanced features and performance enhancements. Arvados clusters typically have both Workbench applications installed and have a dropdown menu option in the user menu to switch between versions. + +!{{ site.baseurl }}/images/switch-to-wb2.png! + +Workbench 2 is stable and recommended for general use, but still lacks some features available in the classic Workbench 1 application. When necessary, you can easily switch back: + +!{{ site.baseurl }}/images/switch-to-wb1.png! + +Some major improvements of Workbench 2 include: + +h2. General + +* More responsive, only loads data needed for display +* More familiar user interface, modeled on the file explorer of MacOS and Windows. +* Advanced search capabilities + +h2. 
Project browsing + +* Expanded informational columns +* Expanded filtering options +* Right side informational panel providing details about selected item without navigating away from the project +* Support for adding and querying user-supplied metadata properties on Projects + +h2. Collection browsing + +* Able to browse collections with millions of files +* Support for adding and querying user-supplied metadata properties on Collections +* Support for viewing past versions of a collection + +h2. User and Group management + +* Able to create user groups through the GUI +* Able to add/view/remove members of user groups, and what permissions are shared with the group +* Able to add/view/remove permissions shared with individual users diff --git a/lib/controller/localdb/collection.go b/lib/controller/localdb/collection.go index 96c89252ec..868e466e9e 100644 --- a/lib/controller/localdb/collection.go +++ b/lib/controller/localdb/collection.go @@ -6,10 +6,17 @@ package localdb import ( "context" + "fmt" + "net/http" + "os" + "sort" + "strings" "time" "git.arvados.org/arvados.git/sdk/go/arvados" + "git.arvados.org/arvados.git/sdk/go/arvadosclient" "git.arvados.org/arvados.git/sdk/go/auth" + "git.arvados.org/arvados.git/sdk/go/httpserver" ) // CollectionGet defers to railsProxy for everything except blob @@ -61,6 +68,9 @@ func (conn *Conn) CollectionCreate(ctx context.Context, opts arvados.CreateOptio // them. opts.Select = append([]string{"is_trashed", "trash_at"}, opts.Select...) } + if opts.Attrs, err = conn.applyReplaceFilesOption(ctx, "", opts.Attrs, opts.ReplaceFiles); err != nil { + return arvados.Collection{}, err + } resp, err := conn.railsProxy.CollectionCreate(ctx, opts) if err != nil { return resp, err @@ -82,6 +92,9 @@ func (conn *Conn) CollectionUpdate(ctx context.Context, opts arvados.UpdateOptio // them. opts.Select = append([]string{"is_trashed", "trash_at"}, opts.Select...) } + if opts.Attrs, err = conn.applyReplaceFilesOption(ctx, opts.UUID, opts.Attrs, opts.ReplaceFiles); err != nil { + return arvados.Collection{}, err + } resp, err := conn.railsProxy.CollectionUpdate(ctx, opts) if err != nil { return resp, err @@ -108,3 +121,147 @@ func (conn *Conn) signCollection(ctx context.Context, coll *arvados.Collection) } coll.ManifestText = arvados.SignManifest(coll.ManifestText, token, exp, ttl, []byte(conn.cluster.Collections.BlobSigningKey)) } + +// If replaceFiles is non-empty, populate attrs["manifest_text"] by +// starting with the content of fromUUID (or an empty collection if +// fromUUID is empty) and applying the specified file/directory +// replacements. +// +// Return value is the (possibly modified) attrs map. +func (conn *Conn) applyReplaceFilesOption(ctx context.Context, fromUUID string, attrs map[string]interface{}, replaceFiles map[string]string) (map[string]interface{}, error) { + if len(replaceFiles) == 0 { + return attrs, nil + } else if mtxt, ok := attrs["manifest_text"].(string); ok && len(mtxt) > 0 { + return nil, httpserver.Errorf(http.StatusBadRequest, "ambiguous request: both 'replace_files' and attrs['manifest_text'] values provided") + } + + // Load the current collection (if any) and set up an + // in-memory filesystem. 
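+	// Both the destination and any source filesystems are backed
+	// by StubClient (see below), so the operations in this
+	// function only rewrite manifest metadata -- no file data is
+	// read or written.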
+ var dst arvados.Collection + if _, replacingRoot := replaceFiles["/"]; !replacingRoot && fromUUID != "" { + src, err := conn.CollectionGet(ctx, arvados.GetOptions{UUID: fromUUID}) + if err != nil { + return nil, err + } + dst = src + } + dstfs, err := dst.FileSystem(&arvados.StubClient{}, &arvados.StubClient{}) + if err != nil { + return nil, err + } + + // Sort replacements by source collection to avoid redundant + // reloads when a source collection is used more than + // once. Note empty sources (which mean "delete target path") + // sort first. + dstTodo := make([]string, 0, len(replaceFiles)) + { + srcid := make(map[string]string, len(replaceFiles)) + for dst, src := range replaceFiles { + dstTodo = append(dstTodo, dst) + if i := strings.IndexRune(src, '/'); i > 0 { + srcid[dst] = src[:i] + } + } + sort.Slice(dstTodo, func(i, j int) bool { + return srcid[dstTodo[i]] < srcid[dstTodo[j]] + }) + } + + // Reject attempt to replace a node as well as its descendant + // (e.g., a/ and a/b/), which is unsupported, except where the + // source for a/ is empty (i.e., delete). + for _, dst := range dstTodo { + if dst != "/" && (strings.HasSuffix(dst, "/") || + strings.HasSuffix(dst, "/.") || + strings.HasSuffix(dst, "/..") || + strings.Contains(dst, "//") || + strings.Contains(dst, "/./") || + strings.Contains(dst, "/../") || + !strings.HasPrefix(dst, "/")) { + return nil, httpserver.Errorf(http.StatusBadRequest, "invalid replace_files target: %q", dst) + } + for i := 0; i < len(dst)-1; i++ { + if dst[i] != '/' { + continue + } + outerdst := dst[:i] + if outerdst == "" { + outerdst = "/" + } + if outersrc := replaceFiles[outerdst]; outersrc != "" { + return nil, httpserver.Errorf(http.StatusBadRequest, "replace_files: cannot operate on target %q inside non-empty target %q", dst, outerdst) + } + } + } + + var srcidloaded string + var srcfs arvados.FileSystem + // Apply the requested replacements. + for _, dst := range dstTodo { + src := replaceFiles[dst] + if src == "" { + if dst == "/" { + // In this case we started with a + // blank manifest, so there can't be + // anything to delete. + continue + } + err := dstfs.RemoveAll(dst) + if err != nil { + return nil, fmt.Errorf("RemoveAll(%s): %w", dst, err) + } + continue + } + srcspec := strings.SplitN(src, "/", 2) + srcid, srcpath := srcspec[0], "/" + if !arvadosclient.PDHMatch(srcid) { + return nil, httpserver.Errorf(http.StatusBadRequest, "invalid source %q for replace_files[%q]: must be \"\" or \"PDH\" or \"PDH/path\"", src, dst) + } + if len(srcspec) == 2 && srcspec[1] != "" { + srcpath = srcspec[1] + } + if srcidloaded != srcid { + srcfs = nil + srccoll, err := conn.CollectionGet(ctx, arvados.GetOptions{UUID: srcid}) + if err != nil { + return nil, err + } + // We use StubClient here because we don't + // want srcfs to read/write any file data or + // sync collection state to/from the database. + srcfs, err = srccoll.FileSystem(&arvados.StubClient{}, &arvados.StubClient{}) + if err != nil { + return nil, err + } + srcidloaded = srcid + } + snap, err := arvados.Snapshot(srcfs, srcpath) + if err != nil { + return nil, httpserver.Errorf(http.StatusBadRequest, "error getting snapshot of %q from %q: %w", srcpath, srcid, err) + } + // Create intermediate dirs, in case dst is + // "newdir1/newdir2/dst". 
+ for i := 1; i < len(dst)-1; i++ { + if dst[i] == '/' { + err = dstfs.Mkdir(dst[:i], 0777) + if err != nil && !os.IsExist(err) { + return nil, httpserver.Errorf(http.StatusBadRequest, "error creating parent dirs for %q: %w", dst, err) + } + } + } + err = arvados.Splice(dstfs, dst, snap) + if err != nil { + return nil, fmt.Errorf("error splicing snapshot onto path %q: %w", dst, err) + } + } + mtxt, err := dstfs.MarshalManifest(".") + if err != nil { + return nil, err + } + if attrs == nil { + attrs = make(map[string]interface{}, 1) + } + attrs["manifest_text"] = mtxt + return attrs, nil +} diff --git a/lib/controller/localdb/collection_test.go b/lib/controller/localdb/collection_test.go index bbfb811165..dac8b769fe 100644 --- a/lib/controller/localdb/collection_test.go +++ b/lib/controller/localdb/collection_test.go @@ -6,16 +6,22 @@ package localdb import ( "context" + "io/fs" + "path/filepath" "regexp" + "sort" "strconv" + "strings" "time" "git.arvados.org/arvados.git/lib/config" "git.arvados.org/arvados.git/lib/controller/rpc" "git.arvados.org/arvados.git/sdk/go/arvados" + "git.arvados.org/arvados.git/sdk/go/arvadosclient" "git.arvados.org/arvados.git/sdk/go/arvadostest" "git.arvados.org/arvados.git/sdk/go/auth" "git.arvados.org/arvados.git/sdk/go/ctxlog" + "git.arvados.org/arvados.git/sdk/go/keepclient" check "gopkg.in/check.v1" ) @@ -71,7 +77,7 @@ func (s *CollectionSuite) setUpVocabulary(c *check.C, testVocabulary string) { s.localdb.vocabularyCache = voc } -func (s *CollectionSuite) TestCollectionCreateWithProperties(c *check.C) { +func (s *CollectionSuite) TestCollectionCreateAndUpdateWithProperties(c *check.C) { s.setUpVocabulary(c, "") ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}}) @@ -88,6 +94,7 @@ func (s *CollectionSuite) TestCollectionCreateWithProperties(c *check.C) { for _, tt := range tests { c.Log(c.TestName()+" ", tt.name) + // Create with properties coll, err := s.localdb.CollectionCreate(ctx, arvados.CreateOptions{ Select: []string{"uuid", "properties"}, Attrs: map[string]interface{}{ @@ -99,26 +106,9 @@ func (s *CollectionSuite) TestCollectionCreateWithProperties(c *check.C) { } else { c.Assert(err, check.NotNil) } - } -} - -func (s *CollectionSuite) TestCollectionUpdateWithProperties(c *check.C) { - s.setUpVocabulary(c, "") - ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}}) - tests := []struct { - name string - props map[string]interface{} - success bool - }{ - {"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false}, - {"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false}, - {"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true}, - {"Empty properties", map[string]interface{}{}, true}, - } - for _, tt := range tests { - c.Log(c.TestName()+" ", tt.name) - coll, err := s.localdb.CollectionCreate(ctx, arvados.CreateOptions{}) + // Create, then update with properties + coll, err = s.localdb.CollectionCreate(ctx, arvados.CreateOptions{}) c.Assert(err, check.IsNil) coll, err = s.localdb.CollectionUpdate(ctx, arvados.UpdateOptions{ UUID: coll.UUID, @@ -135,6 +125,180 @@ func (s *CollectionSuite) TestCollectionUpdateWithProperties(c *check.C) { } } +func (s *CollectionSuite) TestCollectionReplaceFiles(c *check.C) { + ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.AdminToken}}) + foo, err := 
s.localdb.railsProxy.CollectionCreate(ctx, arvados.CreateOptions{ + Attrs: map[string]interface{}{ + "owner_uuid": arvadostest.ActiveUserUUID, + "manifest_text": ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\n", + }}) + c.Assert(err, check.IsNil) + s.localdb.signCollection(ctx, &foo) + foobarbaz, err := s.localdb.railsProxy.CollectionCreate(ctx, arvados.CreateOptions{ + Attrs: map[string]interface{}{ + "owner_uuid": arvadostest.ActiveUserUUID, + "manifest_text": "./foo/bar 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz.txt\n", + }}) + c.Assert(err, check.IsNil) + s.localdb.signCollection(ctx, &foobarbaz) + wazqux, err := s.localdb.railsProxy.CollectionCreate(ctx, arvados.CreateOptions{ + Attrs: map[string]interface{}{ + "owner_uuid": arvadostest.ActiveUserUUID, + "manifest_text": "./waz d85b1213473c2fd7c2045020a6b9c62b+3 0:3:qux.txt\n", + }}) + c.Assert(err, check.IsNil) + s.localdb.signCollection(ctx, &wazqux) + + ctx = auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}}) + + // Create using content from existing collections + dst, err := s.localdb.CollectionCreate(ctx, arvados.CreateOptions{ + ReplaceFiles: map[string]string{ + "/f": foo.PortableDataHash + "/foo.txt", + "/b": foobarbaz.PortableDataHash + "/foo/bar", + "/q": wazqux.PortableDataHash + "/", + "/w": wazqux.PortableDataHash + "/waz", + }, + Attrs: map[string]interface{}{ + "owner_uuid": arvadostest.ActiveUserUUID, + }}) + c.Assert(err, check.IsNil) + s.expectFiles(c, dst, "f", "b/baz.txt", "q/waz/qux.txt", "w/qux.txt") + + // Delete a file and a directory + dst, err = s.localdb.CollectionUpdate(ctx, arvados.UpdateOptions{ + UUID: dst.UUID, + ReplaceFiles: map[string]string{ + "/f": "", + "/q/waz": "", + }}) + c.Assert(err, check.IsNil) + s.expectFiles(c, dst, "b/baz.txt", "q/", "w/qux.txt") + + // Move and copy content within collection + dst, err = s.localdb.CollectionUpdate(ctx, arvados.UpdateOptions{ + UUID: dst.UUID, + ReplaceFiles: map[string]string{ + // Note splicing content to /b/corge.txt but + // removing everything else from /b + "/b": "", + "/b/corge.txt": dst.PortableDataHash + "/b/baz.txt", + "/quux/corge.txt": dst.PortableDataHash + "/b/baz.txt", + }}) + c.Assert(err, check.IsNil) + s.expectFiles(c, dst, "b/corge.txt", "q/", "w/qux.txt", "quux/corge.txt") + + // Remove everything except one file + dst, err = s.localdb.CollectionUpdate(ctx, arvados.UpdateOptions{ + UUID: dst.UUID, + ReplaceFiles: map[string]string{ + "/": "", + "/b/corge.txt": dst.PortableDataHash + "/b/corge.txt", + }}) + c.Assert(err, check.IsNil) + s.expectFiles(c, dst, "b/corge.txt") + + // Copy entire collection to root + dstcopy, err := s.localdb.CollectionCreate(ctx, arvados.CreateOptions{ + ReplaceFiles: map[string]string{ + "/": dst.PortableDataHash, + }}) + c.Check(err, check.IsNil) + c.Check(dstcopy.PortableDataHash, check.Equals, dst.PortableDataHash) + s.expectFiles(c, dstcopy, "b/corge.txt") + + // Check invalid targets, sources, and combinations + for _, badrepl := range []map[string]string{ + { + "/foo/nope": dst.PortableDataHash + "/b", + "/foo": dst.PortableDataHash + "/b", + }, + { + "/foo": dst.PortableDataHash + "/b", + "/foo/nope": "", + }, + { + "/": dst.PortableDataHash + "/", + "/nope": "", + }, + { + "/": dst.PortableDataHash + "/", + "/nope": dst.PortableDataHash + "/b", + }, + {"/bad/": ""}, + {"/./bad": ""}, + {"/b/./ad": ""}, + {"/b/../ad": ""}, + {"/b/.": ""}, + {".": ""}, + {"bad": ""}, + {"": ""}, + {"/bad": "/b"}, + {"/bad": "bad/b"}, + {"/bad": dst.UUID + "/b"}, 
+ } { + _, err = s.localdb.CollectionUpdate(ctx, arvados.UpdateOptions{ + UUID: dst.UUID, + ReplaceFiles: badrepl, + }) + c.Logf("badrepl %#v\n... got err: %s", badrepl, err) + c.Check(err, check.NotNil) + } + + // Check conflicting replace_files and manifest_text + _, err = s.localdb.CollectionUpdate(ctx, arvados.UpdateOptions{ + UUID: dst.UUID, + ReplaceFiles: map[string]string{"/": ""}, + Attrs: map[string]interface{}{ + "manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:z\n", + }}) + c.Logf("replace_files+manifest_text\n... got err: %s", err) + c.Check(err, check.ErrorMatches, "ambiguous request: both.*replace_files.*manifest_text.*") +} + +// expectFiles checks coll's directory structure against the given +// list of expected files and empty directories. An expected path with +// a trailing slash indicates an empty directory. +func (s *CollectionSuite) expectFiles(c *check.C, coll arvados.Collection, expected ...string) { + client := arvados.NewClientFromEnv() + ac, err := arvadosclient.New(client) + c.Assert(err, check.IsNil) + kc, err := keepclient.MakeKeepClient(ac) + c.Assert(err, check.IsNil) + cfs, err := coll.FileSystem(arvados.NewClientFromEnv(), kc) + c.Assert(err, check.IsNil) + var found []string + nonemptydirs := map[string]bool{} + fs.WalkDir(arvados.FS(cfs), "/", func(path string, d fs.DirEntry, err error) error { + dir, _ := filepath.Split(path) + nonemptydirs[dir] = true + if d.IsDir() { + if path != "/" { + path += "/" + } + if !nonemptydirs[path] { + nonemptydirs[path] = false + } + } else { + found = append(found, path) + } + return nil + }) + for d, nonempty := range nonemptydirs { + if !nonempty { + found = append(found, d) + } + } + for i, path := range found { + if path != "/" { + found[i] = strings.TrimPrefix(path, "/") + } + } + sort.Strings(found) + sort.Strings(expected) + c.Check(found, check.DeepEquals, expected) +} + func (s *CollectionSuite) TestSignatures(c *check.C) { ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}}) diff --git a/sdk/cli/test/test_arv-collection-create.rb b/sdk/cli/test/test_arv-collection-create.rb index 39c50bcc83..1b5a368b7d 100644 --- a/sdk/cli/test/test_arv-collection-create.rb +++ b/sdk/cli/test/test_arv-collection-create.rb @@ -14,14 +14,48 @@ class TestCollectionCreate < Minitest::Test def test_small_collection uuid = Digest::MD5.hexdigest(foo_manifest) + '+' + foo_manifest.size.to_s + ok = nil out, err = capture_subprocess_io do - assert_arv('--format', 'uuid', 'collection', 'create', '--collection', { - uuid: uuid, - manifest_text: foo_manifest - }.to_json) + ok = arv('--format', 'uuid', 'collection', 'create', '--collection', { + uuid: uuid, + manifest_text: foo_manifest + }.to_json) end - assert(/^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/.match(out)) - assert_equal '', err + assert_equal('', err) + assert_equal(true, ok) + assert_match(/^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/, out) + end + + def test_collection_replace_files + ok = nil + uuid, err = capture_subprocess_io do + ok = arv('--format', 'uuid', 'collection', 'create', '--collection', '{}') + end + assert_equal('', err) + assert_equal(true, ok) + assert_match(/^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/, uuid) + uuid = uuid.strip + + out, err = capture_subprocess_io do + ok = arv('--format', 'uuid', + 'collection', 'update', + '--uuid', uuid, + '--collection', '{}', + '--replace-files', { + "/gpl.pdf": "b519d9cb706a29fc7ea24dbea2f05851+93/GNU_General_Public_License,_version_3.pdf", + }.to_json) + end + 
assert_equal('', err) + assert_equal(true, ok) + assert_equal(uuid, out.strip) + + ok = nil + out, err = capture_subprocess_io do + ok = arv('--format', 'json', 'collection', 'get', '--uuid', uuid) + end + assert_equal('', err) + assert_equal(true, ok) + assert_match(/\. 6a4ff0499484c6c79c95cd8c566bd25f\+249025.* 0:249025:gpl.pdf\\n/, out) end def test_read_resource_object_from_file @@ -29,29 +63,22 @@ class TestCollectionCreate < Minitest::Test begin tempfile.write({manifest_text: foo_manifest}.to_json) tempfile.close + ok = nil out, err = capture_subprocess_io do - assert_arv('--format', 'uuid', - 'collection', 'create', '--collection', tempfile.path) + ok = arv('--format', 'uuid', + 'collection', 'create', '--collection', tempfile.path) end - assert(/^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/.match(out)) - assert_equal '', err + assert_equal('', err) + assert_equal(true, ok) + assert_match(/^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/, out) ensure tempfile.unlink end end protected - def assert_arv(*args) - expect = case args.first - when true, false - args.shift - else - true - end - assert_equal(expect, - system(['./bin/arv', 'arv'], *args), - "`arv #{args.join ' '}` " + - "should exit #{if expect then 0 else 'non-zero' end}") + def arv(*args) + system(['./bin/arv', 'arv'], *args) end def foo_manifest diff --git a/sdk/go/arvados/api.go b/sdk/go/arvados/api.go index 7409b18132..d76ece1edd 100644 --- a/sdk/go/arvados/api.go +++ b/sdk/go/arvados/api.go @@ -139,6 +139,8 @@ type CreateOptions struct { EnsureUniqueName bool `json:"ensure_unique_name"` Select []string `json:"select"` Attrs map[string]interface{} `json:"attrs"` + // ReplaceFiles only applies when creating a collection. + ReplaceFiles map[string]string `json:"replace_files"` } type UpdateOptions struct { @@ -146,6 +148,8 @@ type UpdateOptions struct { Attrs map[string]interface{} `json:"attrs"` Select []string `json:"select"` BypassFederation bool `json:"bypass_federation"` + // ReplaceFiles only applies when updating a collection. + ReplaceFiles map[string]string `json:"replace_files"` } type GroupContentsOptions struct { diff --git a/sdk/go/arvados/fs_backend.go b/sdk/go/arvados/fs_backend.go index 32365a5317..cc4c32ffe9 100644 --- a/sdk/go/arvados/fs_backend.go +++ b/sdk/go/arvados/fs_backend.go @@ -6,6 +6,7 @@ package arvados import ( "context" + "errors" "io" ) @@ -30,3 +31,16 @@ type keepClient interface { type apiClient interface { RequestAndDecode(dst interface{}, method, path string, body io.Reader, params interface{}) error } + +var errStubClient = errors.New("stub client") + +type StubClient struct{} + +func (*StubClient) ReadAt(string, []byte, int) (int, error) { return 0, errStubClient } +func (*StubClient) LocalLocator(loc string) (string, error) { return loc, nil } +func (*StubClient) BlockWrite(context.Context, BlockWriteOptions) (BlockWriteResponse, error) { + return BlockWriteResponse{}, errStubClient +} +func (*StubClient) RequestAndDecode(_ interface{}, _, _ string, _ io.Reader, _ interface{}) error { + return errStubClient +} diff --git a/sdk/go/arvados/fs_base.go b/sdk/go/arvados/fs_base.go index 80b8037293..bebb74261e 100644 --- a/sdk/go/arvados/fs_base.go +++ b/sdk/go/arvados/fs_base.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "io" + "io/fs" "log" "net/http" "os" @@ -159,6 +160,18 @@ type FileSystem interface { MemorySize() int64 } +type fsFS struct { + FileSystem +} + +// FS returns an fs.FS interface to the given FileSystem, to enable +// the use of fs.WalkDir, etc. 
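+// For example, as used by the collection tests in this branch:
+//
+//	fs.WalkDir(arvados.FS(collfs), "/", func(path string, d fs.DirEntry, err error) error {
+//		// ... visit path ...
+//		return nil
+//	})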
+func FS(fs FileSystem) fs.FS { return fsFS{fs} } +func (fs fsFS) Open(path string) (fs.File, error) { + f, err := fs.FileSystem.Open(path) + return f, err +} + type inode interface { SetParent(parent inode, name string) Parent() inode @@ -450,14 +463,14 @@ func (fs *fileSystem) openFile(name string, flag int, perm os.FileMode) (*fileha default: return nil, fmt.Errorf("invalid flags 0x%x", flag) } - if !writable && parent.IsDir() { + if parent.IsDir() { // A directory can be opened via "foo/", "foo/.", or // "foo/..". switch name { case ".", "": - return &filehandle{inode: parent}, nil + return &filehandle{inode: parent, readable: readable, writable: writable}, nil case "..": - return &filehandle{inode: parent.Parent()}, nil + return &filehandle{inode: parent.Parent(), readable: readable, writable: writable}, nil } } createMode := flag&os.O_CREATE != 0 @@ -753,7 +766,7 @@ func Splice(fs FileSystem, target string, newsubtree *Subtree) error { f, err = fs.OpenFile(target, os.O_CREATE|os.O_WRONLY, 0700) } if err != nil { - return err + return fmt.Errorf("open %s: %w", target, err) } defer f.Close() return f.Splice(newsubtree) diff --git a/sdk/go/arvados/fs_collection.go b/sdk/go/arvados/fs_collection.go index 0c5819721e..f4dae746e2 100644 --- a/sdk/go/arvados/fs_collection.go +++ b/sdk/go/arvados/fs_collection.go @@ -1565,7 +1565,7 @@ func (dn *dirnode) snapshot() (*dirnode, error) { func (dn *dirnode) Splice(repl inode) error { repl, err := repl.Snapshot() if err != nil { - return err + return fmt.Errorf("cannot copy snapshot: %w", err) } switch repl := repl.(type) { default: @@ -1599,7 +1599,7 @@ func (dn *dirnode) Splice(repl inode) error { defer dn.Unlock() _, err = dn.parent.Child(dn.fileinfo.name, func(inode) (inode, error) { return repl, nil }) if err != nil { - return err + return fmt.Errorf("error replacing filenode: dn.parent.Child(): %w", err) } repl.fs = dn.fs } diff --git a/sdk/go/arvados/fs_collection_test.go b/sdk/go/arvados/fs_collection_test.go index fab91d1f77..b221aaa083 100644 --- a/sdk/go/arvados/fs_collection_test.go +++ b/sdk/go/arvados/fs_collection_test.go @@ -1441,6 +1441,30 @@ func (s *CollectionFSSuite) TestEdgeCaseManifests(c *check.C) { } } +func (s *CollectionFSSuite) TestSnapshotSplice(c *check.C) { + filedata1 := "hello snapshot+splice world\n" + fs, err := (&Collection{}).FileSystem(s.client, s.kc) + c.Assert(err, check.IsNil) + { + f, err := fs.OpenFile("file1", os.O_CREATE|os.O_RDWR, 0700) + c.Assert(err, check.IsNil) + _, err = f.Write([]byte(filedata1)) + c.Assert(err, check.IsNil) + err = f.Close() + c.Assert(err, check.IsNil) + } + + snap, err := Snapshot(fs, "/") + c.Assert(err, check.IsNil) + err = Splice(fs, "dir1", snap) + c.Assert(err, check.IsNil) + f, err := fs.Open("dir1/file1") + c.Assert(err, check.IsNil) + buf, err := io.ReadAll(f) + c.Assert(err, check.IsNil) + c.Check(string(buf), check.Equals, filedata1) +} + func (s *CollectionFSSuite) TestRefreshSignatures(c *check.C) { filedata1 := "hello refresh signatures world\n" fs, err := (&Collection{}).FileSystem(s.client, s.kc) diff --git a/sdk/go/arvados/fs_filehandle.go b/sdk/go/arvados/fs_filehandle.go index 4530a7b06a..f50dd4612b 100644 --- a/sdk/go/arvados/fs_filehandle.go +++ b/sdk/go/arvados/fs_filehandle.go @@ -6,6 +6,7 @@ package arvados import ( "io" + "io/fs" "os" ) @@ -73,6 +74,31 @@ func (f *filehandle) Write(p []byte) (n int, err error) { return } +// dirEntry implements fs.DirEntry, see (*filehandle)ReadDir(). 
+type dirEntry struct { + os.FileInfo +} + +func (ent dirEntry) Type() fs.FileMode { + return ent.Mode().Type() +} +func (ent dirEntry) Info() (fs.FileInfo, error) { + return ent, nil +} + +// ReadDir implements fs.ReadDirFile. +func (f *filehandle) ReadDir(count int) ([]fs.DirEntry, error) { + fis, err := f.Readdir(count) + if len(fis) == 0 { + return nil, err + } + ents := make([]fs.DirEntry, len(fis)) + for i, fi := range fis { + ents[i] = dirEntry{fi} + } + return ents, err +} + func (f *filehandle) Readdir(count int) ([]os.FileInfo, error) { if !f.inode.IsDir() { return nil, ErrInvalidOperation diff --git a/sdk/go/arvados/fs_site_test.go b/sdk/go/arvados/fs_site_test.go index 59fa5fc176..bf24efa7ed 100644 --- a/sdk/go/arvados/fs_site_test.go +++ b/sdk/go/arvados/fs_site_test.go @@ -10,6 +10,7 @@ import ( "io/ioutil" "net/http" "os" + "strings" "syscall" "time" @@ -291,40 +292,41 @@ func (s *SiteFSSuite) TestSnapshotSplice(c *check.C) { c.Check(string(buf), check.Equals, string(thisfile)) } - // Cannot splice a file onto a collection root, or anywhere - // outside a collection + // Cannot splice a file onto a collection root; cannot splice + // anything to a target outside a collection. for _, badpath := range []string{ + dstPath + "/", dstPath, + "/home/A Project/newnodename/", "/home/A Project/newnodename", + "/home/A Project/", "/home/A Project", + "/home/newnodename/", "/home/newnodename", + "/home/", "/home", + "/newnodename/", "/newnodename", + "/", } { err = Splice(s.fs, badpath, snapFile) c.Check(err, check.NotNil) - c.Check(err, ErrorIs, ErrInvalidOperation, check.Commentf("badpath %s")) - if badpath == dstPath { - c.Check(err, check.ErrorMatches, `cannot use Splice to attach a file at top level of \*arvados.collectionFileSystem: invalid operation`, check.Commentf("badpath: %s", badpath)) + if strings.Contains(badpath, "newnodename") && strings.HasSuffix(badpath, "/") { + c.Check(err, ErrorIs, os.ErrNotExist, check.Commentf("badpath %q", badpath)) + } else { + c.Check(err, ErrorIs, ErrInvalidOperation, check.Commentf("badpath %q", badpath)) + } + if strings.TrimSuffix(badpath, "/") == dstPath { + c.Check(err, check.ErrorMatches, `cannot use Splice to attach a file at top level of \*arvados.collectionFileSystem: invalid operation`, check.Commentf("badpath: %q", badpath)) continue } - err = Splice(s.fs, badpath, snap1) - c.Check(err, ErrorIs, ErrInvalidOperation, check.Commentf("badpath %s")) - } - // Destination cannot have trailing slash - for _, badpath := range []string{ - dstPath + "/ctxlog/", - dstPath + "/", - "/home/A Project/", - "/home/", - "/", - "", - } { err = Splice(s.fs, badpath, snap1) - c.Check(err, ErrorIs, ErrInvalidArgument, check.Commentf("badpath %s", badpath)) - err = Splice(s.fs, badpath, snapFile) - c.Check(err, ErrorIs, ErrInvalidArgument, check.Commentf("badpath %s", badpath)) + if strings.Contains(badpath, "newnodename") && strings.HasSuffix(badpath, "/") { + c.Check(err, ErrorIs, os.ErrNotExist, check.Commentf("badpath %q", badpath)) + } else { + c.Check(err, ErrorIs, ErrInvalidOperation, check.Commentf("badpath %q", badpath)) + } } // Destination's parent must already exist @@ -340,9 +342,10 @@ func (s *SiteFSSuite) TestSnapshotSplice(c *check.C) { } snap2, err := Snapshot(s.fs, dstPath+"/ctxlog-copy") - c.Check(err, check.IsNil) - err = Splice(s.fs, dstPath+"/ctxlog-copy-copy", snap2) - c.Check(err, check.IsNil) + if c.Check(err, check.IsNil) { + err = Splice(s.fs, dstPath+"/ctxlog-copy-copy", snap2) + c.Check(err, check.IsNil) + } // Snapshot 
entire collection, splice into same collection at // a new path, remove file from original location, verify @@ -362,9 +365,10 @@ func (s *SiteFSSuite) TestSnapshotSplice(c *check.C) { _, err = s.fs.Open(dstPath + "/arvados/fs_site_test.go") c.Check(err, check.Equals, os.ErrNotExist) f, err = s.fs.Open(dstPath + "/copy2/arvados/fs_site_test.go") - c.Check(err, check.IsNil) - defer f.Close() - buf, err := ioutil.ReadAll(f) - c.Check(err, check.IsNil) - c.Check(string(buf), check.Equals, string(thisfile)) + if c.Check(err, check.IsNil) { + defer f.Close() + buf, err := ioutil.ReadAll(f) + c.Check(err, check.IsNil) + c.Check(string(buf), check.Equals, string(thisfile)) + } } diff --git a/sdk/go/httpserver/error.go b/sdk/go/httpserver/error.go index f1817d3374..75ff85336f 100644 --- a/sdk/go/httpserver/error.go +++ b/sdk/go/httpserver/error.go @@ -6,9 +6,14 @@ package httpserver import ( "encoding/json" + "fmt" "net/http" ) +func Errorf(status int, tmpl string, args ...interface{}) error { + return errorWithStatus{fmt.Errorf(tmpl, args...), status} +} + func ErrorWithStatus(err error, status int) error { return errorWithStatus{err, status} } diff --git a/sdk/python/arvados/keep.py b/sdk/python/arvados/keep.py index 0018687ff3..1a83eae944 100644 --- a/sdk/python/arvados/keep.py +++ b/sdk/python/arvados/keep.py @@ -376,6 +376,7 @@ class KeepClient(object): curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction) if self.insecure: curl.setopt(pycurl.SSL_VERIFYPEER, 0) + curl.setopt(pycurl.SSL_VERIFYHOST, 0) else: curl.setopt(pycurl.CAINFO, arvados.util.ca_certs_path()) if method == "HEAD": @@ -478,6 +479,7 @@ class KeepClient(object): curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction) if self.insecure: curl.setopt(pycurl.SSL_VERIFYPEER, 0) + curl.setopt(pycurl.SSL_VERIFYHOST, 0) else: curl.setopt(pycurl.CAINFO, arvados.util.ca_certs_path()) self._setcurltimeouts(curl, timeout) diff --git a/sdk/python/tests/test_keep_client.py b/sdk/python/tests/test_keep_client.py index aa7e371bf4..605b90301c 100644 --- a/sdk/python/tests/test_keep_client.py +++ b/sdk/python/tests/test_keep_client.py @@ -265,6 +265,9 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock): self.assertEqual( mock.responses[0].getopt(pycurl.SSL_VERIFYPEER), 0) + self.assertEqual( + mock.responses[0].getopt(pycurl.SSL_VERIFYHOST), + 0) api_client.insecure = False with tutil.mock_keep_responses(b'foo', 200) as mock: @@ -276,6 +279,9 @@ class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock): self.assertEqual( mock.responses[0].getopt(pycurl.SSL_VERIFYPEER), None) + self.assertEqual( + mock.responses[0].getopt(pycurl.SSL_VERIFYHOST), + None) def test_refresh_signature(self): blk_digest = '6f5902ac237024bdd0c176cb93063dc4+11' diff --git a/services/api/Gemfile b/services/api/Gemfile index ac6bfdb01f..30d877fac7 100644 --- a/services/api/Gemfile +++ b/services/api/Gemfile @@ -32,8 +32,6 @@ gem 'oj' gem 'jquery-rails' -gem 'rvm-capistrano', :group => :test - gem 'acts_as_api' gem 'passenger' diff --git a/services/api/Gemfile.lock b/services/api/Gemfile.lock index 0dd9166ee4..c5ecbaef7a 100644 --- a/services/api/Gemfile.lock +++ b/services/api/Gemfile.lock @@ -82,12 +82,6 @@ GEM multi_json (>= 1.0.0) builder (3.2.4) byebug (11.0.1) - capistrano (2.15.9) - highline - net-scp (>= 1.0.0) - net-sftp (>= 2.0.0) - net-ssh (>= 2.0.14) - net-ssh-gateway (>= 1.1.0) concurrent-ruby (1.1.9) crass (1.0.6) erubi (1.10.0) @@ -109,7 +103,6 @@ GEM multi_json (~> 1.11) os (>= 0.9, < 2.0) signet (~> 0.7) - highline 
(2.0.1) httpclient (2.8.3) i18n (0.9.5) concurrent-ruby (~> 1.0) @@ -146,13 +139,6 @@ GEM metaclass (~> 0.0.1) multi_json (1.15.0) multipart-post (2.1.1) - net-scp (2.0.0) - net-ssh (>= 2.6.5, < 6.0.0) - net-sftp (2.1.2) - net-ssh (>= 2.6.5) - net-ssh (5.2.0) - net-ssh-gateway (2.0.0) - net-ssh (>= 4.0.0) nio4r (2.5.8) nokogiri (1.13.3) mini_portile2 (~> 2.8.0) @@ -212,8 +198,6 @@ GEM railties (>= 4.2.0, < 6.0) retriable (1.4.1) ruby-prof (0.15.9) - rvm-capistrano (1.5.6) - capistrano (~> 2.15.4) safe_yaml (1.0.5) signet (0.11.0) addressable (~> 2.3) @@ -271,7 +255,6 @@ DEPENDENCIES rails-perftest responders (~> 2.0) ruby-prof (~> 0.15.0) - rvm-capistrano safe_yaml signet (< 0.12) simplecov (~> 0.7.1) diff --git a/services/api/app/controllers/arvados/v1/schema_controller.rb b/services/api/app/controllers/arvados/v1/schema_controller.rb index 100b916817..5508ac0fbd 100644 --- a/services/api/app/controllers/arvados/v1/schema_controller.rb +++ b/services/api/app/controllers/arvados/v1/schema_controller.rb @@ -406,6 +406,20 @@ class Arvados::V1::SchemaController < ApplicationController end end + # The 'replace_files' option is implemented in lib/controller, + # not Rails -- we just need to add it here so discovery-aware + # clients know how to validate it. + [:create, :update].each do |action| + discovery[:resources]['collections'][:methods][action][:parameters]['replace_files'] = { + type: 'object', + description: 'Files and directories to initialize/replace with content from other collections.', + required: false, + location: 'query', + properties: {}, + additionalProperties: {type: 'string'}, + } + end + discovery[:resources]['configs'] = { methods: { get: { diff --git a/services/api/lib/tasks/delete_old_container_logs.rake b/services/api/lib/tasks/delete_old_container_logs.rake index 2146d9bc37..7a0ab3826a 100644 --- a/services/api/lib/tasks/delete_old_container_logs.rake +++ b/services/api/lib/tasks/delete_old_container_logs.rake @@ -11,7 +11,7 @@ namespace :db do desc "Remove old container log entries from the logs table" task delete_old_container_logs: :environment do - delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN containers ON logs.object_uuid = containers.uuid WHERE event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat') AND containers.log IS NOT NULL AND clock_timestamp() - containers.finished_at > interval '#{Rails.configuration.Containers.Logging.MaxAge.to_i} seconds')" + delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN containers ON logs.object_uuid = containers.uuid WHERE event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat') AND containers.log IS NOT NULL AND now() - containers.finished_at > interval '#{Rails.configuration.Containers.Logging.MaxAge.to_i} seconds')" ActiveRecord::Base.connection.execute(delete_sql) end diff --git a/services/api/lib/tasks/delete_old_job_logs.rake b/services/api/lib/tasks/delete_old_job_logs.rake deleted file mode 100644 index a1ae2226a0..0000000000 --- a/services/api/lib/tasks/delete_old_job_logs.rake +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) The Arvados Authors. All rights reserved. -# -# SPDX-License-Identifier: AGPL-3.0 - -# This task finds jobs that have been finished for at least as long as -# the duration specified in the `clean_job_log_rows_after` -# configuration setting, and deletes their stderr logs from the logs table. 
- -namespace :db do - desc "Remove old job stderr entries from the logs table" - task delete_old_job_logs: :environment do - delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN jobs ON logs.object_uuid = jobs.uuid WHERE event_type = 'stderr' AND jobs.log IS NOT NULL AND clock_timestamp() - jobs.finished_at > interval '#{Rails.configuration.Containers.Logging.MaxAge.to_i} seconds')" - - ActiveRecord::Base.connection.execute(delete_sql) - end -end diff --git a/services/api/lib/tasks/symbols.rake b/services/api/lib/tasks/symbols.rake deleted file mode 100644 index dc9ed461dd..0000000000 --- a/services/api/lib/tasks/symbols.rake +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright (C) The Arvados Authors. All rights reserved. -# -# SPDX-License-Identifier: AGPL-3.0 - -require 'current_api_client' - -# This is needed instead of just including CurrentApiClient so that its -# methods don't get imported as Object's class methods; this is a problem because -# the methods would be imported only on test environment. See #15716 for more info. -class CurrentApiClientHelper - extend CurrentApiClient -end - -def has_symbols? x - if x.is_a? Hash - x.each do |k,v| - return true if has_symbols?(k) or has_symbols?(v) - end - elsif x.is_a? Array - x.each do |k| - return true if has_symbols?(k) - end - elsif x.is_a? Symbol - return true - elsif x.is_a? String - return true if x.start_with?(':') && !x.start_with?('::') - end - false -end - -def check_for_serialized_symbols rec - jsonb_cols = rec.class.columns.select{|c| c.type == :jsonb}.collect{|j| j.name} - (jsonb_cols + rec.class.serialized_attributes.keys).uniq.each do |colname| - if has_symbols? rec.attributes[colname] - st = recursive_stringify rec.attributes[colname] - puts "Found value potentially containing Ruby symbols in #{colname} attribute of #{rec.uuid}, current value is\n#{rec.attributes[colname].to_s[0..1024]}\nrake symbols:stringify will update it to:\n#{st.to_s[0..1024]}\n\n" - end - end -end - -def recursive_stringify x - if x.is_a? Hash - Hash[x.collect do |k,v| - [recursive_stringify(k), recursive_stringify(v)] - end] - elsif x.is_a? Array - x.collect do |k| - recursive_stringify k - end - elsif x.is_a? Symbol - x.to_s - elsif x.is_a? String and x.start_with?(':') and !x.start_with?('::') - x[1..-1] - else - x - end -end - -def stringify_serialized_symbols rec - # ensure_serialized_attribute_type should prevent symbols from - # getting into the database in the first place. If someone managed - # to get them into the database (perhaps using an older version) - # we'll convert symbols to strings when loading from the - # database. (Otherwise, loading and saving an object with existing - # symbols in a serialized field will crash.) - jsonb_cols = rec.class.columns.select{|c| c.type == :jsonb}.collect{|j| j.name} - (jsonb_cols + rec.class.serialized_attributes.keys).uniq.each do |colname| - if has_symbols? rec.attributes[colname] - begin - st = recursive_stringify rec.attributes[colname] - puts "Updating #{colname} attribute of #{rec.uuid} from\n#{rec.attributes[colname].to_s[0..1024]}\nto\n#{st.to_s[0..1024]}\n\n" - rec.write_attribute(colname, st) - rec.save! 
- rescue => e - puts "Failed to update #{rec.uuid}: #{e}" - end - end - end -end - -namespace :symbols do - desc 'Warn about serialized values starting with ":" that may be symbols' - task check: :environment do - [ApiClientAuthorization, ApiClient, - AuthorizedKey, Collection, - Container, ContainerRequest, Group, - Human, Job, JobTask, KeepDisk, KeepService, Link, - Node, PipelineInstance, PipelineTemplate, - Repository, Specimen, Trait, User, VirtualMachine, - Workflow].each do |klass| - CurrentApiClientHelper.act_as_system_user do - klass.all.each do |c| - check_for_serialized_symbols c - end - end - end - end - - task stringify: :environment do - [ApiClientAuthorization, ApiClient, - AuthorizedKey, Collection, - Container, ContainerRequest, Group, - Human, Job, JobTask, KeepDisk, KeepService, Link, - Node, PipelineInstance, PipelineTemplate, - Repository, Specimen, Trait, User, VirtualMachine, - Workflow].each do |klass| - CurrentApiClientHelper.act_as_system_user do - klass.all.each do |c| - stringify_serialized_symbols c - end - end - end - end -end diff --git a/services/api/test/tasks/delete_old_job_logs_test.rb b/services/api/test/tasks/delete_old_job_logs_test.rb deleted file mode 100644 index 00660431c3..0000000000 --- a/services/api/test/tasks/delete_old_job_logs_test.rb +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (C) The Arvados Authors. All rights reserved. -# -# SPDX-License-Identifier: AGPL-3.0 - -require 'test_helper' -require 'rake' - -Rake.application.rake_require "tasks/delete_old_job_logs" -Rake::Task.define_task(:environment) - -class DeleteOldJobLogsTaskTest < ActiveSupport::TestCase - TASK_NAME = "db:delete_old_job_logs" - - def log_uuids(*fixture_names) - fixture_names.map { |name| logs(name).uuid } - end - - def run_with_expiry(clean_after) - Rails.configuration.Containers.Logging.MaxAge = clean_after - Rake::Task[TASK_NAME].reenable - Rake.application.invoke_task TASK_NAME - end - - def job_stderr_logs - Log.where("object_uuid LIKE :pattern AND event_type = :etype", - pattern: "_____-8i9sb-_______________", - etype: "stderr") - end - - def check_existence(test_method, fixture_uuids) - uuids_now = job_stderr_logs.map(&:uuid) - fixture_uuids.each do |expect_uuid| - send(test_method, uuids_now, expect_uuid) - end - end - - test "delete all logs" do - uuids_to_keep = log_uuids(:crunchstat_for_running_job) - uuids_to_clean = log_uuids(:crunchstat_for_previous_job, - :crunchstat_for_ancient_job) - run_with_expiry(1) - check_existence(:assert_includes, uuids_to_keep) - check_existence(:refute_includes, uuids_to_clean) - end - - test "delete only old logs" do - uuids_to_keep = log_uuids(:crunchstat_for_running_job, - :crunchstat_for_previous_job) - uuids_to_clean = log_uuids(:crunchstat_for_ancient_job) - run_with_expiry(360.days) - check_existence(:assert_includes, uuids_to_keep) - check_existence(:refute_includes, uuids_to_clean) - end -end diff --git a/tools/compute-images/arvados-images-aws.json b/tools/compute-images/arvados-images-aws.json index 131aa8a878..94cb24adf9 100644 --- a/tools/compute-images/arvados-images-aws.json +++ b/tools/compute-images/arvados-images-aws.json @@ -25,7 +25,7 @@ "region": "{{user `aws_default_region`}}", "ena_support": "true", "source_ami": "{{user `aws_source_ami`}}", - "instance_type": "m4.large", + "instance_type": "m5.large", "vpc_id": "{{user `vpc_id`}}", "subnet_id": "{{user `subnet_id`}}", "associate_public_ip_address": "{{user `associate_public_ip_address`}}", @@ -34,7 +34,7 @@ "launch_block_device_mappings": [{ "device_name": 
"/dev/xvda", "volume_size": 20, - "volume_type": "gp2", + "volume_type": "gp3", "delete_on_termination": true }], "ami_block_device_mappings": [ diff --git a/tools/compute-images/scripts/base.sh b/tools/compute-images/scripts/base.sh index 90b845f1ac..260c5d47ee 100644 --- a/tools/compute-images/scripts/base.sh +++ b/tools/compute-images/scripts/base.sh @@ -15,6 +15,9 @@ wait_for_apt_locks() { done } +# $DIST should not have a dot if there is one in /etc/os-release (e.g. 18.04) +DIST=$(. /etc/os-release; echo $ID$VERSION_ID | tr -d '.') + # Run apt-get update $SUDO DEBIAN_FRONTEND=noninteractive apt-get --yes update @@ -36,6 +39,11 @@ fi TMP_LSB=`/usr/bin/lsb_release -c -s` LSB_RELEASE_CODENAME=${TMP_LSB//[$'\t\r\n ']} +SET_RESOLVER= +if [ -n "$RESOLVER" ]; then + SET_RESOLVER="--dns ${RESOLVER}" +fi + # Add the arvados apt repository echo "# apt.arvados.org" |$SUDO tee --append /etc/apt/sources.list.d/apt.arvados.org.list echo "deb http://apt.arvados.org/$LSB_RELEASE_CODENAME $LSB_RELEASE_CODENAME${REPOSUFFIX} main" |$SUDO tee --append /etc/apt/sources.list.d/apt.arvados.org.list @@ -66,8 +74,45 @@ wait_for_apt_locks && $SUDO DEBIAN_FRONTEND=noninteractive apt-get -qq --yes ins # Install the Arvados packages we need wait_for_apt_locks && $SUDO DEBIAN_FRONTEND=noninteractive apt-get -qq --yes install \ python3-arvados-fuse \ - arvados-docker-cleaner \ - docker.io + arvados-docker-cleaner + +# We want Docker 20.10 or later so that we support glibc 2.33 and up in the container, cf. +# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1005906 +dockerversion=5:20.10.13~3-0 +if [[ "$DIST" =~ ^debian ]]; then + family="debian" + if [ "$DIST" == "debian10" ]; then + distro="buster" + elif [ "$DIST" == "debian11" ]; then + distro="bullseye" + fi +elif [[ "$DIST" =~ ^ubuntu ]]; then + family="ubuntu" + if [ "$DIST" == "ubuntu1804" ]; then + distro="bionic" + elif [ "$DIST" == "ubuntu2004" ]; then + distro="focal" + fi +else + echo "Unsupported distribution $DIST" + exit 1 +fi +curl -fsSL https://download.docker.com/linux/$family/gpg | $SUDO gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg +echo deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/$family/ $distro stable | \ + $SUDO tee /etc/apt/sources.list.d/docker.list +$SUDO apt-get update +$SUDO apt-get -yq --no-install-recommends install docker-ce=${dockerversion}~${family}-${distro} + +# Set a higher ulimit and the resolver (if set) for docker +$SUDO sed "s/ExecStart=\(.*\)/ExecStart=\1 --default-ulimit nofile=10000:10000 ${SET_RESOLVER}/g" \ + /lib/systemd/system/docker.service \ + > /etc/systemd/system/docker.service + +$SUDO systemctl daemon-reload + +# docker should not start on boot: we restart it inside /usr/local/bin/ensure-encrypted-partitions.sh, +# and the BootProbeCommand might be "docker ps -q" +$SUDO systemctl disable docker # Get Go and build singularity goversion=1.17.1 @@ -109,21 +154,6 @@ $SUDO echo -e "{\n \"Quota\": \"10G\",\n \"RemoveStoppedContainers\": \"always $SUDO sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"/g' /etc/default/grub $SUDO update-grub -# Set a higher ulimit and the resolver (if set) for docker -if [ "x$RESOLVER" != "x" ]; then - SET_RESOLVER="--dns ${RESOLVER}" -fi - -$SUDO sed "s/ExecStart=\(.*\)/ExecStart=\1 --default-ulimit nofile=10000:10000 ${SET_RESOLVER}/g" \ - /lib/systemd/system/docker.service \ - > /etc/systemd/system/docker.service - -$SUDO systemctl daemon-reload - -# docker should 
diff --git a/tools/compute-images/scripts/base.sh b/tools/compute-images/scripts/base.sh
index 90b845f1ac..260c5d47ee 100644
--- a/tools/compute-images/scripts/base.sh
+++ b/tools/compute-images/scripts/base.sh
@@ -15,6 +15,9 @@ wait_for_apt_locks() {
   done
 }
 
+# $DIST is $ID$VERSION_ID from /etc/os-release with any dot removed (e.g. ubuntu1804 for Ubuntu 18.04)
+DIST=$(. /etc/os-release; echo $ID$VERSION_ID | tr -d '.')
+
 # Run apt-get update
 $SUDO DEBIAN_FRONTEND=noninteractive apt-get --yes update
 
@@ -36,6 +39,11 @@ fi
 TMP_LSB=`/usr/bin/lsb_release -c -s`
 LSB_RELEASE_CODENAME=${TMP_LSB//[$'\t\r\n ']}
 
+SET_RESOLVER=
+if [ -n "$RESOLVER" ]; then
+  SET_RESOLVER="--dns ${RESOLVER}"
+fi
+
 # Add the arvados apt repository
 echo "# apt.arvados.org" |$SUDO tee --append /etc/apt/sources.list.d/apt.arvados.org.list
 echo "deb http://apt.arvados.org/$LSB_RELEASE_CODENAME $LSB_RELEASE_CODENAME${REPOSUFFIX} main" |$SUDO tee --append /etc/apt/sources.list.d/apt.arvados.org.list
@@ -66,8 +74,45 @@ wait_for_apt_locks && $SUDO DEBIAN_FRONTEND=noninteractive apt-get -qq --yes ins
 # Install the Arvados packages we need
 wait_for_apt_locks && $SUDO DEBIAN_FRONTEND=noninteractive apt-get -qq --yes install \
   python3-arvados-fuse \
-  arvados-docker-cleaner \
-  docker.io
+  arvados-docker-cleaner
+
+# We want Docker 20.10 or later so that we support glibc 2.33 and up in the container, cf.
+# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1005906
+dockerversion=5:20.10.13~3-0
+if [[ "$DIST" =~ ^debian ]]; then
+  family="debian"
+  if [ "$DIST" == "debian10" ]; then
+    distro="buster"
+  elif [ "$DIST" == "debian11" ]; then
+    distro="bullseye"
+  fi
+elif [[ "$DIST" =~ ^ubuntu ]]; then
+  family="ubuntu"
+  if [ "$DIST" == "ubuntu1804" ]; then
+    distro="bionic"
+  elif [ "$DIST" == "ubuntu2004" ]; then
+    distro="focal"
+  fi
+else
+  echo "Unsupported distribution $DIST"
+  exit 1
+fi
+curl -fsSL https://download.docker.com/linux/$family/gpg | $SUDO gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+echo deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/$family/ $distro stable | \
+    $SUDO tee /etc/apt/sources.list.d/docker.list
+$SUDO apt-get update
+$SUDO apt-get -yq --no-install-recommends install docker-ce=${dockerversion}~${family}-${distro}
+
+# Set a higher ulimit and the resolver (if set) for docker
+$SUDO sed "s/ExecStart=\(.*\)/ExecStart=\1 --default-ulimit nofile=10000:10000 ${SET_RESOLVER}/g" \
+  /lib/systemd/system/docker.service \
+  > /etc/systemd/system/docker.service
+
+$SUDO systemctl daemon-reload
+
+# docker should not start on boot: we restart it inside /usr/local/bin/ensure-encrypted-partitions.sh,
+# and the BootProbeCommand might be "docker ps -q"
+$SUDO systemctl disable docker
 
 # Get Go and build singularity
 goversion=1.17.1
@@ -109,21 +154,6 @@ $SUDO echo -e "{\n  \"Quota\": \"10G\",\n  \"RemoveStoppedContainers\": \"always
 $SUDO sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"/g' /etc/default/grub
 $SUDO update-grub
 
-# Set a higher ulimit and the resolver (if set) for docker
-if [ "x$RESOLVER" != "x" ]; then
-  SET_RESOLVER="--dns ${RESOLVER}"
-fi
-
-$SUDO sed "s/ExecStart=\(.*\)/ExecStart=\1 --default-ulimit nofile=10000:10000 ${SET_RESOLVER}/g" \
-  /lib/systemd/system/docker.service \
-  > /etc/systemd/system/docker.service
-
-$SUDO systemctl daemon-reload
-
-# docker should not start on boot: we restart it inside /usr/local/bin/ensure-encrypted-partitions.sh,
-# and the BootProbeCommand might be "docker ps -q"
-$SUDO systemctl disable docker
-
 # Make sure user_allow_other is set in fuse.conf
 $SUDO sed -i 's/#user_allow_other/user_allow_other/g' /etc/fuse.conf
 
@@ -145,7 +175,10 @@ if [ "x$RESOLVER" != "x" ]; then
   $SUDO sed -i "s/#prepend domain-name-servers 127.0.0.1;/prepend domain-name-servers ${RESOLVER};/" /etc/dhcp/dhclient.conf
 fi
 
-if [ "$AWS_EBS_AUTOSCALE" != "1" ]; then
+# AWS_EBS_AUTOSCALE is not always set; work around the unset-variable check
+EBS_AUTOSCALE=${AWS_EBS_AUTOSCALE:-}
+
+if [ "$EBS_AUTOSCALE" != "1" ]; then
   # Set up the cloud-init script that will ensure encrypted disks
   $SUDO mv /tmp/usr-local-bin-ensure-encrypted-partitions.sh /usr/local/bin/ensure-encrypted-partitions.sh
 else
@@ -173,8 +206,6 @@ $SUDO mv /tmp/etc-cloud-cloud.cfg.d-07_compute_arvados_dispatch_cloud.cfg /etc/c
 $SUDO chown root:root /etc/cloud/cloud.cfg.d/07_compute_arvados_dispatch_cloud.cfg
 
 if [ "$NVIDIA_GPU_SUPPORT" == "1" ]; then
-  # $DIST should not have a dot if there is one in /etc/os-release (e.g. 18.04)
-  DIST=$(. /etc/os-release; echo $ID$VERSION_ID | tr -d '.')
   # We need a kernel and matching headers
   if [[ "$DIST" =~ ^debian ]]; then
     $SUDO apt-get -y install linux-image-cloud-amd64 linux-headers-cloud-amd64
@@ -188,7 +219,8 @@ if [ "$NVIDIA_GPU_SUPPORT" == "1" ]; then
   $SUDO apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/$DIST/x86_64/7fa2af80.pub
   $SUDO apt-get -y install software-properties-common
   $SUDO add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/$DIST/x86_64/ /"
-  $SUDO add-apt-repository contrib
+  # Ubuntu 18.04's add-apt-repository does not understand 'contrib'
+  $SUDO add-apt-repository contrib || true
   $SUDO apt-get update
   $SUDO apt-get -y install cuda
 
@@ -210,24 +242,6 @@ if [ "$NVIDIA_GPU_SUPPORT" == "1" ]; then
       $SUDO tee /etc/apt/sources.list.d/libnvidia-container.list
   fi
 
-  if [ "$DIST" == "debian10" ]; then
-    # Debian 10 comes with Docker 18.xx, we need 19.03 or later
-    curl -fsSL https://download.docker.com/linux/debian/gpg | $SUDO gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
-    echo deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian/ buster stable | \
-      $SUDO tee /etc/apt/sources.list.d/docker.list
-    $SUDO apt-get update
-    $SUDO apt-get -yq --no-install-recommends install docker-ce=5:19.03.15~3-0~debian-buster
-
-    $SUDO sed "s/ExecStart=\(.*\)/ExecStart=\1 --default-ulimit nofile=10000:10000 ${SET_RESOLVER}/g" \
-      /lib/systemd/system/docker.service \
-      > /etc/systemd/system/docker.service
-
-    $SUDO systemctl daemon-reload
-
-    # docker should not start on boot: we restart it inside /usr/local/bin/ensure-encrypted-partitions.sh,
-    # and the BootProbeCommand might be "docker ps -q"
-    $SUDO systemctl disable docker
-  fi
   $SUDO apt-get update
   $SUDO apt-get -y install libnvidia-container1 libnvidia-container-tools nvidia-container-toolkit
   # This service fails to start when the image is booted without Nvidia GPUs present, which makes
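The script above customizes dockerd by sed-ing the stock unit file into /etc/systemd/system. A systemd drop-in achieves the same result without copying the whole unit; the following is a sketch of that alternative, not what base.sh actually does, and the ExecStart line mirrors docker-ce's stock unit, which should be verified on the target image:

    # Alternative sketch: override only ExecStart via a systemd drop-in.
    sudo mkdir -p /etc/systemd/system/docker.service.d
    printf '[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --default-ulimit nofile=10000:10000\n' \
      | sudo tee /etc/systemd/system/docker.service.d/override.conf
    sudo systemctl daemon-reload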
diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index 07bc63c11d..0f3c9a1411 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -777,19 +777,6 @@ if [ "${DUMP_CONFIG}" = "yes" ]; then
   exit 0
 fi
 
-# FIXME! #16992 Temporary fix for psql call in arvados-api-server
-if [ -e /root/.psqlrc ]; then
-  if ! ( grep 'pset pager off' /root/.psqlrc ); then
-    RESTORE_PSQL="yes"
-    cp /root/.psqlrc /root/.psqlrc.provision.backup
-  fi
-else
-  DELETE_PSQL="yes"
-fi
-
-echo '\pset pager off' >> /root/.psqlrc
-# END FIXME! #16992 Temporary fix for psql call in arvados-api-server
-
 # Now run the install
 salt-call --local state.apply -l ${LOG_LEVEL}
 
@@ -799,18 +786,6 @@ if [ -d /etc/cloud/cloud.cfg.d ]; then
   sed -i 's/^manage_etc_hosts: true/#manage_etc_hosts: true/g' /etc/cloud/cloud.cfg.d/*
 fi
 
-# FIXME! #16992 Temporary fix for psql call in arvados-api-server
-if [ "x${DELETE_PSQL}" = "xyes" ]; then
-  echo "Removing .psql file"
-  rm /root/.psqlrc
-fi
-
-if [ "x${RESTORE_PSQL}" = "xyes" ]; then
-  echo "Restoring .psql file"
-  mv -v /root/.psqlrc.provision.backup /root/.psqlrc
-fi
-# END FIXME! #16992 Temporary fix for psql call in arvados-api-server
-
 # Leave a copy of the Arvados CA so the user can copy it where it's required
 if [ "$DEV_MODE" = "yes" ]; then
   echo "Copying the Arvados CA certificate to the installer dir, so you can import it"
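The FIXME blocks removed here existed so that psql's pager could not stall a non-interactive provision run. Should the problem ever resurface, the pager can be disabled per invocation instead of editing /root/.psqlrc; an illustrative example, where the query is arbitrary:

    # Illustrative: turn the pager off for a single psql call.
    sudo -u postgres psql --pset=pager=off -c 'SELECT count(*) FROM pg_stat_activity;'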