end
test 'view file' do
+ need_selenium "phantomjs does not follow redirects reliably, maybe https://github.com/ariya/phantomjs/issues/10389"
magic = rand(2**512).to_s 36
owner = api_fixture('groups')['anonymously_accessible_project']['uuid']
col = upload_data_and_get_collection(magic, 'admin', "Hello\\040world.txt", owner)
end
test "can download an entire collection with a reader token" do
+ need_selenium "phantomjs does not follow redirects reliably, maybe https://github.com/ariya/phantomjs/issues/10389"
+
token = api_token('active')
data = "foo\nfile\n"
datablock = `echo -n #{data.shellescape} | ARVADOS_API_TOKEN=#{token.shellescape} arv-put --no-progress --raw -`.strip
token = api_fixture('api_client_authorizations')['active_all_collections']['api_token']
url_head = "/collections/download/#{uuid}/#{token}/"
visit url_head
+ assert_text "You can download individual files listed below"
# It seems that Capybara can't inspect tags outside the body, so this is
# a very blunt approach.
assert_no_match(/<\s*meta[^>]+\bnofollow\b/i, page.html,
"wget prohibited from recursing the collection page")
# Look at all the links that wget would recurse through using our
# recommended options, and check that it's exactly the file list.
- hrefs = page.all('a').map do |anchor|
- link = anchor[:href] || ''
- if link.start_with? url_head
- link[url_head.size .. -1]
- elsif link.start_with? '/'
- nil
- else
- link
- end
- end
- assert_equal(['./foo'], hrefs.compact.sort,
- "download page did provide strictly file links")
+ hrefs = []
+ page.html.scan(/href="(.*?)"/) { |m| hrefs << m[0] }
+ assert_equal(['./foo'], hrefs, "download page did provide strictly file links")
click_link "foo"
assert_text "foo\nfile\n"
end
. build/run-library.sh
-python_sdk_ts=$(cd sdk/python && timestamp_from_git)
-cwl_runner_ts=$(cd sdk/cwl && timestamp_from_git)
-
-python_sdk_version=$(cd sdk/python && nohash_version_from_git 0.1)
-cwl_runner_version=$(cd sdk/cwl && nohash_version_from_git 1.0)
-
-if [[ $python_sdk_ts -gt $cwl_runner_ts ]]; then
- cwl_runner_version=$(cd sdk/python && nohash_version_from_git 1.0)
-fi
+calculate_python_sdk_cwl_package_versions
set -x
docker build --no-cache --build-arg sdk=$sdk --build-arg runner=$runner --build-arg salad=$salad --build-arg cwltool=$cwltool --build-arg pythoncmd=$py --build-arg pipcmd=$pipcmd -f "$WORKSPACE/sdk/dev-jobs.dockerfile" -t arvados/jobs:$cwl_runner_version "$WORKSPACE/sdk"
--- /dev/null
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+# Smoke test: confirm arvados-cwl-runner is installed and on PATH.
+arvados-cwl-runner --version
ARVADOS_BUILDING_ITERATION="1"
fi
-python_sdk_ts=$(cd sdk/python && timestamp_from_git)
-cwl_runner_ts=$(cd sdk/cwl && timestamp_from_git)
-
-python_sdk_version=$(cd sdk/python && nohash_version_from_git 0.1)
-cwl_runner_version=$(cd sdk/cwl && nohash_version_from_git 1.0)
-
-if [[ $python_sdk_ts -gt $cwl_runner_ts ]]; then
- cwl_runner_version=$(cd sdk/python && nohash_version_from_git 1.0)
-fi
+calculate_python_sdk_cwl_package_versions
echo cwl_runner_version $cwl_runner_version python_sdk_version $python_sdk_version
fi
if [[ -n "$(find $WORKSPACE/packages/$TARGET -name '*.deb')" ]] ; then
+ set +e
+ /usr/bin/which dpkg-scanpackages >/dev/null
+ if [[ "$?" != "0" ]]; then
+ echo >&2
+ echo >&2 "Error: please install dpkg-dev. E.g. sudo apt-get install dpkg-dev"
+ echo >&2
+ exit 1
+ fi
+ /usr/bin/which apt-ftparchive >/dev/null
+ if [[ "$?" != "0" ]]; then
+ echo >&2
+ echo >&2 "Error: please install apt-utils. E.g. sudo apt-get install apt-utils"
+ echo >&2
+ exit 1
+ fi
+ set -e
(cd $WORKSPACE/packages/$TARGET
dpkg-scanpackages . 2> >(grep -v 'warning' 1>&2) | tee Packages | gzip -c > Packages.gz
apt-ftparchive -o APT::FTPArchive::Release::Origin=Arvados release . > Release
# Output the version being built, or if we're building a
# dev/prerelease, output a version number based on the git log for
# the given $subdir.
- local minorversion="$1"; shift # unused
local subdir="$1"; shift
if [[ -n "$ARVADOS_BUILDING_VERSION" ]]; then
echo "$ARVADOS_BUILDING_VERSION"
echo "$ARVADOS_BUILDING_VERSION"
return
fi
- version_from_git $1 | cut -d. -f1-4
+ version_from_git | cut -d. -f1-4
}
timestamp_from_git() {
format_last_commit_here "%ct" "$subdir"
}
+# Set python_sdk_version and cwl_runner_version from git metadata.
+# If sdk/python has a newer commit timestamp than sdk/cwl, the CWL
+# runner version is bumped to the SDK version so the two packages
+# stay in lockstep (the runner depends on the SDK).
+calculate_python_sdk_cwl_package_versions() {
+  python_sdk_ts=$(cd sdk/python && timestamp_from_git)
+  cwl_runner_ts=$(cd sdk/cwl && timestamp_from_git)
+
+  python_sdk_version=$(cd sdk/python && nohash_version_from_git)
+  cwl_runner_version=$(cd sdk/cwl && nohash_version_from_git)
+
+  if [[ $python_sdk_ts -gt $cwl_runner_ts ]]; then
+    cwl_runner_version=$python_sdk_version
+  fi
+}
+
handle_python_package () {
# This function assumes the current working directory is the python package directory
if [ -n "$(find dist -name "*-$(nohash_version_from_git).tar.gz" -print -quit)" ]; then
cd "$WORKSPACE"
ts="$(timestamp_from_git "$dir")"
if [[ "$ts" -gt "$timestamp" ]]; then
- version=$(version_from_git "" "$dir")
+ version=$(version_from_git "$dir")
timestamp="$ts"
fi
done
fi
# arvados-python-client sdist should always be built, to be available
- # for other dependant packages.
+ # for other dependent packages.
if [[ -n "$ONLY_BUILD" ]] && [[ "arvados-python-client" != "$PKG" ]] && [[ "$PYTHON_PKG" != "$ONLY_BUILD" ]] && [[ "$PKG" != "$ONLY_BUILD" ]]; then
return 0
fi
# 12271 - As FPM-generated packages don't include scripts by default, the
# packages cleanup on upgrade depends on files being listed on the %files
# section in the generated SPEC files. To remove DIRECTORIES, they need to
- # be listed in that sectiontoo, so we need to add this parameter to properly
+ # be listed in that section too, so we need to add this parameter to properly
# remove lingering dirs. But this only works for python2: if used on
# python33, it includes dirs like /opt/rh/python33 that belong to
# other packages.
done
# make sure the systemd service file ends up in the right place
- # currently only used by arvados-docker-cleaner
+ # used by arvados-docker-cleaner and arvados-node-manager
if [[ -e "${systemd_unit}" ]]; then
- COMMAND_ARR+=("usr/share/python3/dist/$PKG/share/doc/$PKG/$PKG.service=/lib/systemd/system/$PKG.service")
+ COMMAND_ARR+=("usr/share/$python/dist/$PKG/share/doc/$PKG/$PKG.service=/lib/systemd/system/$PKG.service")
fi
COMMAND_ARR+=("${fpm_args[@]}")
echo "== Interactive commands:"
echo "TARGET (short for 'test DIR')"
echo "test TARGET"
+ echo "10 test TARGET (run test 10 times)"
echo "test TARGET:py3 (test with python3)"
echo "test TARGET -check.vv (pass arguments to test)"
echo "install TARGET"
while read -p 'What next? ' -e -i "$nextcmd" nextcmd; do
history -s "$nextcmd"
history -w
+ count=1
+ if [[ "${nextcmd}" =~ ^[0-9] ]]; then
+ read count nextcmd <<<"${nextcmd}"
+ fi
read verb target opts <<<"${nextcmd}"
target="${target%/}"
target="${target/\/:/:}"
${verb}_${target}
;;
*)
- argstarget=${target%:py3}
+ argstarget=${target%:py3}
testargs["$argstarget"]="${opts}"
tt="${testfuncargs[${target}]}"
tt="${tt:-$target}"
- do_$verb $tt
+ while [ $count -gt 0 ]; do
+ do_$verb $tt
+ let "count=count-1"
+ done
;;
esac
;;
A federation of clusters can be configured to use a separate user database per cluster, or delegate a central cluster to manage the database.
-If clusters belong to separate organizations, each cluster will have its own user database for the members of that organization. Through federation, a user from one organization can be granted access to the cluster of another organization. The admin of the seond cluster controls access on a individual basis by choosing to activate or deactivate accounts from other organizations (with the default policy the value of @ActivateUsers@).
+If clusters belong to separate organizations, each cluster will have its own user database for the members of that organization. Through federation, a user from one organization can be granted access to the cluster of another organization. The admin of the second cluster controls access on an individual basis by choosing to activate or deactivate accounts from other organizations (with the default policy the value of @ActivateUsers@).
On the other hand, if all clusters belong to the same organization, and users in that organization should have access to all the clusters, user management can be simplified by setting the @LoginCluster@ which manages the user database used by all other clusters in the federation. To do this, choose one cluster in the federation which will be the 'login cluster'. Set the the @Login.LoginCluster@ configuration value on all clusters in the federation to the cluster id of the login cluster. After setting @LoginCluster@, restart arvados-api-server and arvados-controller.
After migrating your configuration, uninstall the @arvados-sso-provider@ package.
+h3. S3 signatures
+
+Keepstore now uses "V4 signatures":https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html by default for S3 requests. If you are using Amazon S3, no action is needed; all regions support V4 signatures. If you are using a different S3-compatible service that does not support V4 signatures, add @V2Signature: true@ to your volume driver parameters to preserve the old behavior. See "configuring S3 object storage":{{site.baseurl}}/install/configure-s3-object-storage.html.
+
h2(#v2_0_0). v2.0.0 (2020-02-07)
"Upgrading from 1.4":#v1_4_1
|keep-block-check|Given a list of keep block locators, check that each block exists on one of the configured keepstore servers and verify the block hash.|
|keep-exercise|Benchmarking tool to test throughput and reliability of keepstores under various usage patterns.|
|keep-rsync|Get lists of blocks from two clusters, copy blocks which exist on source cluster but are missing from destination cluster.|
-|sync-groups|Take a CSV file listing (group, username) pairs and synchronize membership in Arvados groups.|
+|sync-groups|Take a CSV file listing with (group, user, permission) records and synchronize membership in Arvados groups.|
h2(#git). Clone the repository
-Clone the repository and nagivate to the @arvados-kubernetes/charts/arvados@ directory:
+Clone the repository and navigate to the @arvados-k8s/charts/arvados@ directory:
<pre>
-$ git clone https://github.com/arvados/arvados-kubernetes.git
-$ cd arvados-kubernetes/charts/arvados
+$ git clone https://github.com/arvados/arvados-k8s.git
+$ cd arvados-k8s/charts/arvados
</pre>
h2(#Start). Start the Arvados cluster
h2(#git). Clone the repository
-Clone the repository and nagivate to the @arvados-kubernetes/charts/arvados@ directory:
+Clone the repository and navigate to the @arvados-k8s/charts/arvados@ directory:
<pre>
-$ git clone https://github.com/arvados/arvados-kubernetes.git
-$ cd arvados-kubernetes/charts/arvados
+$ git clone https://github.com/arvados/arvados-k8s.git
+$ cd arvados-k8s/charts/arvados
</pre>
h2(#Start). Start the Arvados cluster
# declaration.
LocationConstraint: false
+ # Use V2 signatures instead of the default V4. Amazon S3
+ # supports V4 signatures in all regions, but this option
+ # might be needed for other S3-compatible services.
+ V2Signature: false
+
# Requested page size for "list bucket contents" requests.
IndexPageSize: 1000
This tool reads a CSV (comma-separated values) file having information about external groups and their members. When running it for the first time, it'll create a special group named 'Externally synchronized groups' meant to be the parent of all the remote groups.
-Every line on the file should have 2 values: a group name and a local user identifier, meaning that the named user is a member of the group. The tool will create the group if it doesn't exist, and add the user to it. If group member is not present on the input file, the account will be removed from the group.
+Every line in the file should have 3 values: a group name, a local user identifier and a permission level, meaning that the named user is a member of the group with the provided permission. The tool will create the group if it doesn't exist, and add the user to it. If any group member is not present in the input file, it will be removed from the group.
Users can be identified by their email address or username: the tool will check if every user exist on the system, and report back when not found. Groups on the other hand, are identified by their name.
+Permission level can be one of the following: @can_read@, @can_write@ or @can_manage@, giving the group member read, read/write or managing privileges on the group. For backwards compatibility purposes, if any record omits the third (permission) field, it will default to @can_write@ permission. You can read more about permissions on the "group management admin guide":/admin/group-management.html.
+
This tool is designed to be run periodically reading a file created by a remote auth system (ie: LDAP) dump script, applying what's included on the file as the source of truth.
# "ou=Users,dc=example,dc=com"
SearchBase: ""
- # Additional filters for username lookup. Special characters
- # in assertion values must be escaped (see RFC4515). Example:
- # "(objectClass=person)"
+ # Additional filters to apply when looking up users' LDAP
+ # entries. This can be used to restrict access to a subset of
+ # LDAP users, or to disambiguate users from other directory
+ # entries that have the SearchAttribute present.
+ #
+ # Special characters in assertion values must be escaped (see
+ # RFC4515).
+ #
+ # Example: "(objectClass=person)"
SearchFilters: ""
# LDAP attribute to use as the user's email address.
Region: us-east-1a
Bucket: aaaaa
LocationConstraint: false
+ V2Signature: false
IndexPageSize: 1000
ConnectTimeout: 1m
ReadTimeout: 10m
# "ou=Users,dc=example,dc=com"
SearchBase: ""
- # Additional filters for username lookup. Special characters
- # in assertion values must be escaped (see RFC4515). Example:
- # "(objectClass=person)"
+ # Additional filters to apply when looking up users' LDAP
+ # entries. This can be used to restrict access to a subset of
+ # LDAP users, or to disambiguate users from other directory
+ # entries that have the SearchAttribute present.
+ #
+ # Special characters in assertion values must be escaped (see
+ # RFC4515).
+ #
+ # Example: "(objectClass=person)"
SearchFilters: ""
# LDAP attribute to use as the user's email address.
Region: us-east-1a
Bucket: aaaaa
LocationConstraint: false
+ V2Signature: false
IndexPageSize: 1000
ConnectTimeout: 1m
ReadTimeout: 10m
}
defer func() {
cmd.Process.Signal(syscall.SIGTERM)
- logger.Infof("sent SIGTERM; waiting for postgres to shut down")
+ logger.Info("sent SIGTERM; waiting for postgres to shut down")
cmd.Wait()
}()
- for deadline := time.Now().Add(10 * time.Second); ; {
- output, err2 := exec.Command("pg_isready").CombinedOutput()
- if err2 == nil {
- break
- } else if time.Now().After(deadline) {
- err = fmt.Errorf("timed out waiting for pg_isready (%q)", output)
- return 1
- } else {
- time.Sleep(time.Second)
- }
+ err = waitPostgreSQLReady()
+ if err != nil {
+ return 1
}
}
// might never have been run.
}
+	var needcoll []string
+	// If the en_US.UTF-8 locale wasn't installed when
+	// postgresql initdb ran, it needs to be added
+	// explicitly before we can use it in our test suite.
+	for _, collname := range []string{"en_US", "en_US.UTF-8"} {
+		cmd := exec.Command("sudo", "-u", "postgres", "psql", "-t", "-c", "SELECT 1 FROM pg_catalog.pg_collation WHERE collname='"+collname+"' AND collcollate IN ('en_US.UTF-8', 'en_US.utf8')")
+		cmd.Dir = "/"
+		out, err2 := cmd.CombinedOutput()
+		// Check err2 (this command's error), not the outer err --
+		// otherwise a failing psql invocation is silently ignored.
+		if err2 != nil {
+			err = fmt.Errorf("error while checking postgresql collations: %s", err2)
+			return 1
+		}
+		if strings.Contains(string(out), "1") {
+			logger.Infof("postgresql supports collation %s", collname)
+		} else {
+			needcoll = append(needcoll, collname)
+		}
+	}
+ if len(needcoll) > 0 && os.Getpid() != 1 {
+ // In order for the CREATE COLLATION statement
+ // below to work, the locale must have existed
+ // when PostgreSQL started up. If we're
+ // running as init, we must have started
+ // PostgreSQL ourselves after installing the
+ // locales. Otherwise, it might need a
+ // restart, so we attempt to restart it with
+ // systemd.
+ if err = runBash(`sudo systemctl restart postgresql`, stdout, stderr); err != nil {
+ logger.Warn("`systemctl restart postgresql` failed; hoping postgresql does not need to be restarted")
+ } else if err = waitPostgreSQLReady(); err != nil {
+ return 1
+ }
+ }
+ for _, collname := range needcoll {
+ cmd := exec.Command("sudo", "-u", "postgres", "psql", "-c", "CREATE COLLATION \""+collname+"\" (LOCALE = \"en_US.UTF-8\")")
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ cmd.Dir = "/"
+ err = cmd.Run()
+ if err != nil {
+ err = fmt.Errorf("error adding postgresql collation %s: %s", collname, err)
+ return 1
+ }
+ }
+
withstuff := "WITH LOGIN SUPERUSER ENCRYPTED PASSWORD " + pq.QuoteLiteral(devtestDatabasePassword)
cmd := exec.Command("sudo", "-u", "postgres", "psql", "-c", "ALTER ROLE arvados "+withstuff)
cmd.Dir = "/"
return osv, nil
}
+// waitPostgreSQLReady polls pg_isready once per second until it
+// reports the server is accepting connections, returning an error
+// (including pg_isready's output) if that doesn't happen within 10
+// seconds.
+func waitPostgreSQLReady() error {
+	for deadline := time.Now().Add(10 * time.Second); ; {
+		output, err := exec.Command("pg_isready").CombinedOutput()
+		if err == nil {
+			return nil
+		} else if time.Now().After(deadline) {
+			return fmt.Errorf("timed out waiting for pg_isready (%q)", output)
+		} else {
+			time.Sleep(time.Second)
+		}
+	}
+}
+
func runBash(script string, stdout, stderr io.Writer) error {
cmd := exec.Command("bash", "-")
cmd.Stdin = bytes.NewBufferString("set -ex -o pipefail\n" + script)
debian8)
fpm_depends+=(libgnutls-deb0-28 libcurl3-gnutls)
;;
+ debian9 | ubuntu1604)
+ fpm_depends+=(libcurl3-gnutls)
+ ;;
debian* | ubuntu*)
- fpm_depends+=(libcurl3-gnutls libpython2.7)
+ fpm_depends+=(libcurl3-gnutls python3-distutils)
;;
esac
# file to determine what version of cwltool and schema-salad to
# build.
install_requires=[
- 'cwltool==3.0.20200317203547',
+ 'cwltool==3.0.20200324120055',
'schema-salad==5.0.20200302192450',
'arvados-python-client{}'.format(pysdk_dep),
'setuptools',
set -x
-if [ \$PYCMD = "python3" ]; then
+if [ "\$PYCMD" = "python3" ]; then
pip3 install cwltest
else
pip install cwltest
git clone https://github.com/common-workflow-language/cwl-\${version}.git
fi
cd cwl-\${version}
+elif [[ "$suite" != "integration" ]] ; then
+ echo "ERROR: unknown suite '$suite'"
+ exit 1
fi
if [[ "$suite" != "integration" ]] ; then
elif test "$tag" = "latest" ; then
arv-keepdocker --pull arvados/jobs $tag
else
- jobsimg=\$(curl https://versions.arvados.org/v1/commit/$tag | python -c "import json; import sys; sys.stdout.write(json.load(sys.stdin)['Versions']['Docker']['arvados/jobs'])")
- arv-keepdocker --pull arvados/jobs \$jobsimg
- docker tag arvados/jobs:\$jobsimg arvados/jobs:latest
+ set +u
+ export WORKSPACE=/usr/src/arvados
+ . /usr/src/arvados/build/run-library.sh
+ TMPHERE=\$(pwd)
+ cd /usr/src/arvados
+ calculate_python_sdk_cwl_package_versions
+ cd \$TMPHERE
+ set -u
+
+ arv-keepdocker --pull arvados/jobs \$cwl_runner_version
+ docker tag arvados/jobs:\$cwl_runner_version arvados/jobs:latest
arv-keepdocker arvados/jobs latest
fi
EXTRA=--compute-checksum
-if [[ $devcwl == 1 ]] ; then
+if [[ $devcwl -eq 1 ]] ; then
EXTRA="\$EXTRA --enable-dev"
fi
}
type S3VolumeDriverParameters struct {
+ IAMRole string
AccessKey string
SecretKey string
Endpoint string
Region string
Bucket string
LocationConstraint bool
+ V2Signature bool
IndexPageSize int
ConnectTimeout Duration
ReadTimeout Duration
curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)
if self.insecure:
curl.setopt(pycurl.SSL_VERIFYPEER, 0)
+ else:
+ curl.setopt(pycurl.CAINFO, arvados.util.ca_certs_path())
if method == "HEAD":
curl.setopt(pycurl.NOBODY, True)
self._setcurltimeouts(curl, timeout, method=="HEAD")
curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)
if self.insecure:
curl.setopt(pycurl.SSL_VERIFYPEER, 0)
+ else:
+ curl.setopt(pycurl.CAINFO, arvados.util.ca_certs_path())
self._setcurltimeouts(curl, timeout)
try:
curl.perform()
it returns the value of `fallback` (httplib2's CA certs by default).
"""
for ca_certs_path in [
+ # SSL_CERT_FILE and SSL_CERT_DIR are openssl overrides - note
+ # that httplib2 itself also supports HTTPLIB2_CA_CERTS.
+ os.environ.get('SSL_CERT_FILE'),
# Arvados specific:
'/etc/arvados/ca-certificates.crt',
# Debian:
# Red Hat:
'/etc/pki/tls/certs/ca-bundle.crt',
]:
- if os.path.exists(ca_certs_path):
+ if ca_certs_path and os.path.exists(ca_certs_path):
return ca_certs_path
return fallback
dbcfg.declare_config "PostgreSQL.Connection.dbname", String, :database
dbcfg.declare_config "PostgreSQL.Connection.template", String, :template
dbcfg.declare_config "PostgreSQL.Connection.encoding", String, :encoding
+dbcfg.declare_config "PostgreSQL.Connection.collation", String, :collation
application_config = {}
%w(application.default application).each do |cfgfile|
# Use template0 when creating a new database. Avoids
# character-encoding/collation problems.
$arvados_config["PostgreSQL"]["Connection"]["template"] = "template0"
+ # Some test cases depend on en_US.UTF-8 collation.
+ $arvados_config["PostgreSQL"]["Connection"]["collation"] = "en_US.UTF-8"
end
if $arvados_config["PostgreSQL"]["Connection"]["password"].empty?
"#{dbhost}/#{$arvados_config["PostgreSQL"]["Connection"]["dbname"]}?"+
"template=#{$arvados_config["PostgreSQL"]["Connection"]["template"]}&"+
"encoding=#{$arvados_config["PostgreSQL"]["Connection"]["client_encoding"]}&"+
+ "collation=#{$arvados_config["PostgreSQL"]["Connection"]["collation"]}&"+
"pool=#{$arvados_config["PostgreSQL"]["ConnectionPool"]}"
Server::Application.configure do
// S3Volume implements Volume using an S3 bucket.
type S3Volume struct {
- AccessKey string
- SecretKey string
- AuthToken string // populated automatically when IAMRole is used
- AuthExpiration time.Time // populated automatically when IAMRole is used
- IAMRole string
- Endpoint string
- Region string
- Bucket string
- LocationConstraint bool
- IndexPageSize int
- ConnectTimeout arvados.Duration
- ReadTimeout arvados.Duration
- RaceWindow arvados.Duration
- UnsafeDelete bool
+ arvados.S3VolumeDriverParameters
+ AuthToken string // populated automatically when IAMRole is used
+ AuthExpiration time.Time // populated automatically when IAMRole is used
cluster *arvados.Cluster
volume arvados.Volume
func (v *S3Volume) newS3Client() *s3.S3 {
auth := aws.NewAuth(v.AccessKey, v.SecretKey, v.AuthToken, v.AuthExpiration)
client := s3.New(*auth, v.region)
- if v.region.EC2Endpoint.Signer == aws.V4Signature {
- // Currently affects only eu-central-1
+ if !v.V2Signature {
client.Signature = aws.V4Signature
}
client.ConnectTimeout = time.Duration(v.ConnectTimeout)
}
}
+// TestSignatureVersion verifies that the S3 volume driver signs
+// requests with AWS V4 signatures by default, and falls back to
+// legacy V2 signatures when the V2Signature driver parameter is set.
+// It inspects the Authorization header received by a stub S3 server.
+func (s *StubbedS3Suite) TestSignatureVersion(c *check.C) {
+	var header http.Header
+	stub := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		header = r.Header
+	}))
+	defer stub.Close()
+
+	// Default V4 signature
+	vol := S3Volume{
+		S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
+			AccessKey: "xxx",
+			SecretKey: "xxx",
+			Endpoint:  stub.URL,
+			Region:    "test-region-1",
+			Bucket:    "test-bucket-name",
+		},
+		cluster: s.cluster,
+		logger:  ctxlog.TestLogger(c),
+		metrics: newVolumeMetricsVecs(prometheus.NewRegistry()),
+	}
+	err := vol.check()
+	c.Check(err, check.IsNil)
+	err = vol.Put(context.Background(), "acbd18db4cc2f85cedef654fccc4a4d8", []byte("foo"))
+	c.Check(err, check.IsNil)
+	c.Check(header.Get("Authorization"), check.Matches, `AWS4-HMAC-SHA256 .*`)
+
+	// Force V2 signature
+	vol = S3Volume{
+		S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
+			AccessKey:   "xxx",
+			SecretKey:   "xxx",
+			Endpoint:    stub.URL,
+			Region:      "test-region-1",
+			Bucket:      "test-bucket-name",
+			V2Signature: true,
+		},
+		cluster: s.cluster,
+		logger:  ctxlog.TestLogger(c),
+		metrics: newVolumeMetricsVecs(prometheus.NewRegistry()),
+	}
+	err = vol.check()
+	c.Check(err, check.IsNil)
+	err = vol.Put(context.Background(), "acbd18db4cc2f85cedef654fccc4a4d8", []byte("foo"))
+	c.Check(err, check.IsNil)
+	c.Check(header.Get("Authorization"), check.Matches, `AWS xxx:.*`)
+}
+
func (s *StubbedS3Suite) TestIAMRoleCredentials(c *check.C) {
s.metadata = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
upd := time.Now().UTC().Add(-time.Hour).Format(time.RFC3339)
w.WriteHeader(http.StatusNotFound)
}))
deadv := &S3Volume{
- IAMRole: s.metadata.URL + "/fake-metadata/test-role",
- Endpoint: "http://localhost:12345",
- Region: "test-region-1",
- Bucket: "test-bucket-name",
- cluster: s.cluster,
- logger: ctxlog.TestLogger(c),
- metrics: newVolumeMetricsVecs(prometheus.NewRegistry()),
+ S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
+ IAMRole: s.metadata.URL + "/fake-metadata/test-role",
+ Endpoint: "http://localhost:12345",
+ Region: "test-region-1",
+ Bucket: "test-bucket-name",
+ },
+ cluster: s.cluster,
+ logger: ctxlog.TestLogger(c),
+ metrics: newVolumeMetricsVecs(prometheus.NewRegistry()),
}
err := deadv.check()
c.Check(err, check.ErrorMatches, `.*/fake-metadata/test-role.*`)
v := &TestableS3Volume{
S3Volume: &S3Volume{
- AccessKey: accessKey,
- SecretKey: secretKey,
- IAMRole: iamRole,
- Bucket: TestBucketName,
- Endpoint: endpoint,
- Region: "test-region-1",
- LocationConstraint: true,
- UnsafeDelete: true,
- IndexPageSize: 1000,
- cluster: cluster,
- volume: volume,
- logger: ctxlog.TestLogger(c),
- metrics: metrics,
+ S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
+ IAMRole: iamRole,
+ AccessKey: accessKey,
+ SecretKey: secretKey,
+ Bucket: TestBucketName,
+ Endpoint: endpoint,
+ Region: "test-region-1",
+ LocationConstraint: true,
+ UnsafeDelete: true,
+ IndexPageSize: 1000,
+ },
+ cluster: cluster,
+ volume: volume,
+ logger: ctxlog.TestLogger(c),
+ metrics: metrics,
},
c: c,
server: srv,
return e
}
defer v.unlockfile(f)
- ts := syscall.NsecToTimespec(time.Now().UnixNano())
+ ts := time.Now()
v.os.stats.TickOps("utimes")
v.os.stats.Tick(&v.os.stats.UtimesOps)
- err = syscall.UtimesNano(p, []syscall.Timespec{ts, ts})
+ err = os.Chtimes(p, ts, ts)
v.os.stats.TickErr(err)
return err
}
v.os.Remove(tmpfile.Name())
return err
}
+ // ext4 uses a low-precision clock and effectively backdates
+ // files by up to 10 ms, sometimes across a 1-second boundary,
+ // which produces confusing results in logs and tests. We
+ // avoid this by setting the output file's timestamps
+ // explicitly, using a higher resolution clock.
+ ts := time.Now()
+ v.os.stats.TickOps("utimes")
+ v.os.stats.Tick(&v.os.stats.UtimesOps)
+ if err = os.Chtimes(tmpfile.Name(), ts, ts); err != nil {
+ err = fmt.Errorf("error setting timestamps on %s: %s", tmpfile.Name(), err)
+ v.os.Remove(tmpfile.Name())
+ return err
+ }
if err := v.os.Rename(tmpfile.Name(), bpath); err != nil {
err = fmt.Errorf("error renaming %s to %s: %s", tmpfile.Name(), bpath, err)
v.os.Remove(tmpfile.Name())
c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
c.Check(stats(), check.Matches, `.*"CreateOps":1,.*`)
c.Check(stats(), check.Matches, `.*"OpenOps":0,.*`)
- c.Check(stats(), check.Matches, `.*"UtimesOps":0,.*`)
+ c.Check(stats(), check.Matches, `.*"UtimesOps":1,.*`)
err = vol.Touch(loc)
c.Check(err, check.IsNil)
c.Check(stats(), check.Matches, `.*"FlockOps":1,.*`)
c.Check(stats(), check.Matches, `.*"OpenOps":1,.*`)
- c.Check(stats(), check.Matches, `.*"UtimesOps":1,.*`)
+ c.Check(stats(), check.Matches, `.*"UtimesOps":2,.*`)
_, err = vol.Get(context.Background(), loc, make([]byte, 3))
c.Check(err, check.IsNil)
TrustAllContent: true
Login:
SSO:
+ Enable: true
ProviderAppSecret: $sso_app_secret
ProviderAppID: arvados-server
Users:
popd
if [ "$PYCMD" = "python3" ]; then
- if ! pip3 install --prefix /usr/local --no-index --find-links /var/lib/pip $1 ; then
+ if ! pip3 install --prefix /usr/local --no-index --find-links /var/lib/pip $1 ; then
pip3 install --prefix /usr/local $1
- fi
+ fi
else
- if ! pip install --no-index --find-links /var/lib/pip $1 ; then
+ if ! pip install --no-index --find-links /var/lib/pip $1 ; then
pip install $1
- fi
+ fi
fi
}
GetItems() []interface{}
}
-// GroupInfo tracks previous and current members of a particular Group
+// GroupPermissions maps permission levels on groups (can_read, can_write, can_manage)
+type GroupPermissions map[string]bool
+
+// GroupInfo tracks previous and current member's permissions on a particular Group
type GroupInfo struct {
Group arvados.Group
- PreviousMembers map[string]bool
- CurrentMembers map[string]bool
+ PreviousMembers map[string]GroupPermissions
+ CurrentMembers map[string]GroupPermissions
}
// GetUserID returns the correct user id value depending on the selector
// Set up usage message
flags.Usage = func() {
- usageStr := `Synchronize remote groups into Arvados from a CSV format file with 2 columns:
- * 1st column: Group name
- * 2nd column: User identifier`
+ usageStr := `Synchronize remote groups into Arvados from a CSV format file with 3 columns:
+ * 1st: Group name
+ * 2nd: User identifier
+ * 3rd (Optional): User permission on the group: can_read, can_write or can_manage. (Default: can_write)`
fmt.Fprintf(os.Stderr, "%s\n\n", usageStr)
fmt.Fprintf(os.Stderr, "Usage:\n%s [OPTIONS] <input-file.csv>\n\n", os.Args[0])
fmt.Fprintf(os.Stderr, "Options:\n")
// Remove previous members not listed on this run
for groupUUID := range remoteGroups {
gi := remoteGroups[groupUUID]
- evictedMembers := subtract(gi.PreviousMembers, gi.CurrentMembers)
+ evictedMemberPerms := subtract(gi.PreviousMembers, gi.CurrentMembers)
groupName := gi.Group.Name
- if len(evictedMembers) > 0 {
- log.Printf("Removing %d users from group %q", len(evictedMembers), groupName)
- }
- for evictedUser := range evictedMembers {
- if err := RemoveMemberFromGroup(cfg, allUsers[userIDToUUID[evictedUser]], gi.Group); err != nil {
+ if len(evictedMemberPerms) > 0 {
+ log.Printf("Removing permissions from %d users on group %q", len(evictedMemberPerms), groupName)
+ }
+ for member := range evictedMemberPerms {
+ var perms []string
+ completeMembershipRemoval := false
+ if _, ok := gi.CurrentMembers[member]; !ok {
+ completeMembershipRemoval = true
+ membershipsRemoved++
+ } else {
+ // Collect which user->group permission links should be removed
+ for p := range evictedMemberPerms[member] {
+ if evictedMemberPerms[member][p] {
+ perms = append(perms, p)
+ }
+ }
+ membershipsRemoved += len(perms)
+ }
+ if err := RemoveMemberLinksFromGroup(cfg, allUsers[userIDToUUID[member]],
+ perms, completeMembershipRemoval, gi.Group); err != nil {
return err
}
- membershipsRemoved++
}
}
log.Printf("Groups created: %d. Memberships added: %d, removed: %d, skipped: %d", groupsCreated, membershipsAdded, membershipsRemoved, membershipsSkipped)
) (groupsCreated, membersAdded, membersSkipped int, err error) {
lineNo := 0
csvReader := csv.NewReader(f)
- csvReader.FieldsPerRecord = 2
+ // Allow variable number of fields.
+ csvReader.FieldsPerRecord = -1
for {
record, e := csvReader.Read()
if e == io.EOF {
err = fmt.Errorf("error parsing %q, line %d", cfg.Path, lineNo)
return
}
+ // Only allow 2 or 3 fields per record for backwards compatibility.
+ if len(record) < 2 || len(record) > 3 {
+ err = fmt.Errorf("error parsing %q, line %d: found %d fields but only 2 or 3 are allowed", cfg.Path, lineNo, len(record))
+ return
+ }
groupName := strings.TrimSpace(record[0])
groupMember := strings.TrimSpace(record[1]) // User ID (username or email)
- if groupName == "" || groupMember == "" {
- log.Printf("Warning: CSV record has at least one empty field (%s, %s). Skipping", groupName, groupMember)
+ groupPermission := "can_write"
+ if len(record) == 3 {
+ groupPermission = strings.ToLower(record[2])
+ }
+ if groupName == "" || groupMember == "" || groupPermission == "" {
+ log.Printf("Warning: CSV record has at least one empty field (%s, %s, %s). Skipping", groupName, groupMember, groupPermission)
+ membersSkipped++
+ continue
+ }
+ if !(groupPermission == "can_read" || groupPermission == "can_write" || groupPermission == "can_manage") {
+ log.Printf("Warning: 3rd field should be 'can_read', 'can_write' or 'can_manage'. Found: %q at line %d, skipping.", groupPermission, lineNo)
membersSkipped++
continue
}
groupNameToUUID[groupName] = newGroup.UUID
remoteGroups[newGroup.UUID] = &GroupInfo{
Group: newGroup,
- PreviousMembers: make(map[string]bool), // Empty set
- CurrentMembers: make(map[string]bool), // Empty set
+ PreviousMembers: make(map[string]GroupPermissions),
+ CurrentMembers: make(map[string]GroupPermissions),
}
groupsCreated++
}
// Both group & user exist, check if user is a member
groupUUID := groupNameToUUID[groupName]
gi := remoteGroups[groupUUID]
- if !gi.PreviousMembers[groupMember] && !gi.CurrentMembers[groupMember] {
+ if !gi.PreviousMembers[groupMember][groupPermission] && !gi.CurrentMembers[groupMember][groupPermission] {
if cfg.Verbose {
log.Printf("Adding %q to group %q", groupMember, groupName)
}
- // User wasn't a member, but should be.
- if e := AddMemberToGroup(cfg, allUsers[userIDToUUID[groupMember]], gi.Group); e != nil {
+ // User permission wasn't there, but should be. Avoid duplicating the
+ // group->user link when necessary.
+ createG2ULink := true
+ if _, ok := gi.PreviousMembers[groupMember]; ok {
+ createG2ULink = false // User is already member of the group
+ }
+ if e := AddMemberToGroup(cfg, allUsers[userIDToUUID[groupMember]], gi.Group, groupPermission, createG2ULink); e != nil {
err = e
return
}
membersAdded++
}
- gi.CurrentMembers[groupMember] = true
+ if _, ok := gi.CurrentMembers[groupMember]; ok {
+ gi.CurrentMembers[groupMember][groupPermission] = true
+ } else {
+ gi.CurrentMembers[groupMember] = GroupPermissions{groupPermission: true}
+ }
+
}
return
}
return allItems, nil
}
-func subtract(setA map[string]bool, setB map[string]bool) map[string]bool {
- result := make(map[string]bool)
+func subtract(setA map[string]GroupPermissions, setB map[string]GroupPermissions) map[string]GroupPermissions {
+ result := make(map[string]GroupPermissions)
for element := range setA {
- if !setB[element] {
- result[element] = true
+ if _, ok := setB[element]; !ok {
+ result[element] = setA[element]
+ } else {
+ for perm := range setA[element] {
+ if _, ok := setB[element][perm]; !ok {
+ result[element] = GroupPermissions{perm: true}
+ }
+ }
}
}
return result
Operand: "permission",
}, {
Attr: "name",
- Operator: "=",
- Operand: "can_write",
+ Operator: "in",
+ Operand: []string{"can_read", "can_write", "can_manage"},
}, {
Attr: "head_uuid",
Operator: "=",
}
g2uLinks, err := GetAll(cfg.Client, "links", g2uFilter, &LinkList{})
if err != nil {
- return remoteGroups, groupNameToUUID, fmt.Errorf("error getting member (can_read) links for group %q: %s", group.Name, err)
+ return remoteGroups, groupNameToUUID, fmt.Errorf("error getting group->user 'can_read' links for group %q: %s", group.Name, err)
}
u2gLinks, err := GetAll(cfg.Client, "links", u2gFilter, &LinkList{})
if err != nil {
- return remoteGroups, groupNameToUUID, fmt.Errorf("error getting member (can_write) links for group %q: %s", group.Name, err)
+ return remoteGroups, groupNameToUUID, fmt.Errorf("error getting user->group links for group %q: %s", group.Name, err)
}
- // Build a list of user ids (email or username) belonging to this group
- membersSet := make(map[string]bool)
- u2gLinkSet := make(map[string]bool)
+ // Build a list of user ids (email or username) belonging to this group.
+ membersSet := make(map[string]GroupPermissions)
+ u2gLinkSet := make(map[string]GroupPermissions)
for _, l := range u2gLinks {
- linkedMemberUUID := l.(arvados.Link).TailUUID
- u2gLinkSet[linkedMemberUUID] = true
+ link := l.(arvados.Link)
+ // Also save the member's group access level.
+ if _, ok := u2gLinkSet[link.TailUUID]; ok {
+ u2gLinkSet[link.TailUUID][link.Name] = true
+ } else {
+ u2gLinkSet[link.TailUUID] = GroupPermissions{link.Name: true}
+ }
}
for _, item := range g2uLinks {
link := item.(arvados.Link)
if err != nil {
return remoteGroups, groupNameToUUID, err
}
- membersSet[memberID] = true
+ membersSet[memberID] = u2gLinkSet[link.HeadUUID]
}
remoteGroups[group.UUID] = &GroupInfo{
Group: group,
PreviousMembers: membersSet,
- CurrentMembers: make(map[string]bool), // Empty set
+ CurrentMembers: make(map[string]GroupPermissions),
}
groupNameToUUID[group.Name] = group.UUID
}
return remoteGroups, groupNameToUUID, nil
}
-// RemoveMemberFromGroup remove all links related to the membership
-func RemoveMemberFromGroup(cfg *ConfigParams, user arvados.User, group arvados.Group) error {
+// RemoveMemberLinksFromGroup remove all links related to the membership
+func RemoveMemberLinksFromGroup(cfg *ConfigParams, user arvados.User, linkNames []string, completeRemoval bool, group arvados.Group) error {
if cfg.Verbose {
log.Printf("Getting group membership links for user %q (%s) on group %q (%s)", user.Username, user.UUID, group.Name, group.UUID)
}
var links []interface{}
- // Search for all group<->user links (both ways)
- for _, filterset := range [][]arvados.Filter{
- // Group -> User
- {{
- Attr: "link_class",
- Operator: "=",
- Operand: "permission",
- }, {
- Attr: "tail_uuid",
- Operator: "=",
- Operand: group.UUID,
- }, {
- Attr: "head_uuid",
- Operator: "=",
- Operand: user.UUID,
- }},
- // Group <- User
- {{
- Attr: "link_class",
- Operator: "=",
- Operand: "permission",
- }, {
- Attr: "tail_uuid",
- Operator: "=",
- Operand: user.UUID,
- }, {
- Attr: "head_uuid",
- Operator: "=",
- Operand: group.UUID,
- }},
- } {
+ var filters [][]arvados.Filter
+ if completeRemoval {
+ // Search for all group<->user links (both ways)
+ filters = [][]arvados.Filter{
+ // Group -> User
+ {{
+ Attr: "link_class",
+ Operator: "=",
+ Operand: "permission",
+ }, {
+ Attr: "tail_uuid",
+ Operator: "=",
+ Operand: group.UUID,
+ }, {
+ Attr: "head_uuid",
+ Operator: "=",
+ Operand: user.UUID,
+ }},
+ // Group <- User
+ {{
+ Attr: "link_class",
+ Operator: "=",
+ Operand: "permission",
+ }, {
+ Attr: "tail_uuid",
+ Operator: "=",
+ Operand: user.UUID,
+ }, {
+ Attr: "head_uuid",
+ Operator: "=",
+ Operand: group.UUID,
+ }},
+ }
+ } else {
+ // Search only for the requested Group <- User permission links
+ filters = [][]arvados.Filter{
+ {{
+ Attr: "link_class",
+ Operator: "=",
+ Operand: "permission",
+ }, {
+ Attr: "tail_uuid",
+ Operator: "=",
+ Operand: user.UUID,
+ }, {
+ Attr: "head_uuid",
+ Operator: "=",
+ Operand: group.UUID,
+ }, {
+ Attr: "name",
+ Operator: "in",
+ Operand: linkNames,
+ }},
+ }
+ }
+
+ for _, filterset := range filters {
l, err := GetAll(cfg.Client, "links", arvados.ResourceListParams{Filters: filterset}, &LinkList{})
if err != nil {
userID, _ := GetUserID(user, cfg.UserID)
}
// AddMemberToGroup create membership links
-func AddMemberToGroup(cfg *ConfigParams, user arvados.User, group arvados.Group) error {
+func AddMemberToGroup(cfg *ConfigParams, user arvados.User, group arvados.Group, perm string, createG2ULink bool) error {
var newLink arvados.Link
- linkData := map[string]string{
- "owner_uuid": cfg.SysUserUUID,
- "link_class": "permission",
- "name": "can_read",
- "tail_uuid": group.UUID,
- "head_uuid": user.UUID,
- }
- if err := CreateLink(cfg, &newLink, linkData); err != nil {
- userID, _ := GetUserID(user, cfg.UserID)
- return fmt.Errorf("error adding group %q -> user %q read permission: %s", group.Name, userID, err)
+ var linkData map[string]string
+ if createG2ULink {
+ linkData = map[string]string{
+ "owner_uuid": cfg.SysUserUUID,
+ "link_class": "permission",
+ "name": "can_read",
+ "tail_uuid": group.UUID,
+ "head_uuid": user.UUID,
+ }
+ if err := CreateLink(cfg, &newLink, linkData); err != nil {
+ userID, _ := GetUserID(user, cfg.UserID)
+ return fmt.Errorf("error adding group %q -> user %q read permission: %s", group.Name, userID, err)
+ }
}
linkData = map[string]string{
"owner_uuid": cfg.SysUserUUID,
"link_class": "permission",
- "name": "can_write",
+ "name": perm,
"tail_uuid": user.UUID,
"head_uuid": group.UUID,
}
if err := CreateLink(cfg, &newLink, linkData); err != nil {
userID, _ := GetUserID(user, cfg.UserID)
- return fmt.Errorf("error adding user %q -> group %q write permission: %s", userID, group.Name, err)
+ return fmt.Errorf("error adding user %q -> group %q %s permission: %s", userID, group.Name, perm, err)
}
return nil
}
}
// GroupMembershipExists checks that both needed links exist between user and group
-func GroupMembershipExists(ac *arvados.Client, userUUID string, groupUUID string) bool {
+func GroupMembershipExists(ac *arvados.Client, userUUID string, groupUUID string, perm string) bool {
ll := LinkList{}
// Check Group -> User can_read permission
params := arvados.ResourceListParams{
}, {
Attr: "name",
Operator: "=",
- Operand: "can_write",
+ Operand: perm,
}, {
Attr: "tail_uuid",
Operator: "=",
groupUUID, err := RemoteGroupExists(s.cfg, groupName)
c.Assert(err, IsNil)
c.Assert(groupUUID, Not(Equals), "")
- c.Assert(GroupMembershipExists(s.cfg.Client, activeUserUUID, groupUUID), Equals, true)
+ c.Assert(GroupMembershipExists(s.cfg.Client, activeUserUUID, groupUUID, "can_write"), Equals, true)
}
}
+// Error out when records have <2 or >3 fields
+func (s *TestSuite) TestWrongNumberOfFields(c *C) {
+ for _, testCase := range [][][]string{
+ {{"field1"}},
+ {{"field1", "field2", "field3", "field4"}},
+ {{"field1", "field2", "field3", "field4", "field5"}},
+ } {
+ tmpfile, err := MakeTempCSVFile(testCase)
+ c.Assert(err, IsNil)
+ defer os.Remove(tmpfile.Name())
+ s.cfg.Path = tmpfile.Name()
+ err = doMain(s.cfg)
+ c.Assert(err, Not(IsNil))
+ }
+}
+
+// Check different membership permissions
+func (s *TestSuite) TestMembershipLevels(c *C) {
+ userEmail := s.users[arvadostest.ActiveUserUUID].Email
+ userUUID := s.users[arvadostest.ActiveUserUUID].UUID
+ data := [][]string{
+ {"TestGroup1", userEmail, "can_read"},
+ {"TestGroup2", userEmail, "can_write"},
+ {"TestGroup3", userEmail, "can_manage"},
+ {"TestGroup4", userEmail, "invalid_permission"},
+ }
+ tmpfile, err := MakeTempCSVFile(data)
+ c.Assert(err, IsNil)
+ defer os.Remove(tmpfile.Name()) // clean up
+ s.cfg.Path = tmpfile.Name()
+ err = doMain(s.cfg)
+ c.Assert(err, IsNil)
+ for _, record := range data {
+ groupName := record[0]
+ permLevel := record[2]
+ if permLevel != "invalid_permission" {
+ groupUUID, err := RemoteGroupExists(s.cfg, groupName)
+ c.Assert(err, IsNil)
+ c.Assert(groupUUID, Not(Equals), "")
+ c.Assert(GroupMembershipExists(s.cfg.Client, userUUID, groupUUID, permLevel), Equals, true)
+ } else {
+ groupUUID, err := RemoteGroupExists(s.cfg, groupName)
+ c.Assert(err, IsNil)
+ c.Assert(groupUUID, Equals, "")
+ }
+ }
+}
+
+// Check membership level change
+func (s *TestSuite) TestMembershipLevelUpdate(c *C) {
+ userEmail := s.users[arvadostest.ActiveUserUUID].Email
+ userUUID := s.users[arvadostest.ActiveUserUUID].UUID
+ groupName := "TestGroup1"
+ // Give read permissions
+ tmpfile, err := MakeTempCSVFile([][]string{{groupName, userEmail, "can_read"}})
+ c.Assert(err, IsNil)
+ defer os.Remove(tmpfile.Name()) // clean up
+ s.cfg.Path = tmpfile.Name()
+ err = doMain(s.cfg)
+ c.Assert(err, IsNil)
+ // Check permissions
+ groupUUID, err := RemoteGroupExists(s.cfg, groupName)
+ c.Assert(err, IsNil)
+ c.Assert(groupUUID, Not(Equals), "")
+ c.Assert(GroupMembershipExists(s.cfg.Client, userUUID, groupUUID, "can_read"), Equals, true)
+ c.Assert(GroupMembershipExists(s.cfg.Client, userUUID, groupUUID, "can_write"), Equals, false)
+ c.Assert(GroupMembershipExists(s.cfg.Client, userUUID, groupUUID, "can_manage"), Equals, false)
+
+ // Give write permissions
+ tmpfile, err = MakeTempCSVFile([][]string{{groupName, userEmail, "can_write"}})
+ c.Assert(err, IsNil)
+ defer os.Remove(tmpfile.Name()) // clean up
+ s.cfg.Path = tmpfile.Name()
+ err = doMain(s.cfg)
+ c.Assert(err, IsNil)
+ // Check permissions
+ c.Assert(GroupMembershipExists(s.cfg.Client, userUUID, groupUUID, "can_read"), Equals, false)
+ c.Assert(GroupMembershipExists(s.cfg.Client, userUUID, groupUUID, "can_write"), Equals, true)
+ c.Assert(GroupMembershipExists(s.cfg.Client, userUUID, groupUUID, "can_manage"), Equals, false)
+
+ // Give manage permissions
+ tmpfile, err = MakeTempCSVFile([][]string{{groupName, userEmail, "can_manage"}})
+ c.Assert(err, IsNil)
+ defer os.Remove(tmpfile.Name()) // clean up
+ s.cfg.Path = tmpfile.Name()
+ err = doMain(s.cfg)
+ c.Assert(err, IsNil)
+ // Check permissions
+ c.Assert(GroupMembershipExists(s.cfg.Client, userUUID, groupUUID, "can_read"), Equals, false)
+ c.Assert(GroupMembershipExists(s.cfg.Client, userUUID, groupUUID, "can_write"), Equals, false)
+ c.Assert(GroupMembershipExists(s.cfg.Client, userUUID, groupUUID, "can_manage"), Equals, true)
+}
+
// The absence of a user membership on the CSV file implies its removal
func (s *TestSuite) TestMembershipRemoval(c *C) {
localUserEmail := s.users[arvadostest.ActiveUserUUID].Email
groupUUID, err := RemoteGroupExists(s.cfg, groupName)
c.Assert(err, IsNil)
c.Assert(groupUUID, Not(Equals), "")
- c.Assert(GroupMembershipExists(s.cfg.Client, localUserUUID, groupUUID), Equals, true)
- c.Assert(GroupMembershipExists(s.cfg.Client, remoteUserUUID, groupUUID), Equals, true)
+ c.Assert(GroupMembershipExists(s.cfg.Client, localUserUUID, groupUUID, "can_write"), Equals, true)
+ c.Assert(GroupMembershipExists(s.cfg.Client, remoteUserUUID, groupUUID, "can_write"), Equals, true)
}
// New CSV with some previous membership missing
data = [][]string{
groupUUID, err := RemoteGroupExists(s.cfg, "TestGroup1")
c.Assert(err, IsNil)
c.Assert(groupUUID, Not(Equals), "")
- c.Assert(GroupMembershipExists(s.cfg.Client, localUserUUID, groupUUID), Equals, true)
- c.Assert(GroupMembershipExists(s.cfg.Client, remoteUserUUID, groupUUID), Equals, false)
+ c.Assert(GroupMembershipExists(s.cfg.Client, localUserUUID, groupUUID, "can_write"), Equals, true)
+ c.Assert(GroupMembershipExists(s.cfg.Client, remoteUserUUID, groupUUID, "can_write"), Equals, false)
// Confirm TestGroup1 memberships
groupUUID, err = RemoteGroupExists(s.cfg, "TestGroup2")
c.Assert(err, IsNil)
c.Assert(groupUUID, Not(Equals), "")
- c.Assert(GroupMembershipExists(s.cfg.Client, localUserUUID, groupUUID), Equals, false)
- c.Assert(GroupMembershipExists(s.cfg.Client, remoteUserUUID, groupUUID), Equals, true)
+ c.Assert(GroupMembershipExists(s.cfg.Client, localUserUUID, groupUUID, "can_write"), Equals, false)
+ c.Assert(GroupMembershipExists(s.cfg.Client, remoteUserUUID, groupUUID, "can_write"), Equals, true)
}
// If a group doesn't exist on the system, create it before adding users
c.Assert(err, IsNil)
c.Assert(groupUUID, Not(Equals), "")
// active user should be a member
- c.Assert(GroupMembershipExists(s.cfg.Client, arvadostest.ActiveUserUUID, groupUUID), Equals, true)
+ c.Assert(GroupMembershipExists(s.cfg.Client, arvadostest.ActiveUserUUID, groupUUID, "can_write"), Equals, true)
}
// Users listed on the file that don't exist on the system are ignored
groupUUID, err = RemoteGroupExists(s.cfg, "TestGroup4")
c.Assert(err, IsNil)
c.Assert(groupUUID, Not(Equals), "")
- c.Assert(GroupMembershipExists(s.cfg.Client, activeUserUUID, groupUUID), Equals, true)
+ c.Assert(GroupMembershipExists(s.cfg.Client, activeUserUUID, groupUUID, "can_write"), Equals, true)
}
// Users listed on the file that don't exist on the system are ignored
activeUserEmail := s.users[arvadostest.ActiveUserUUID].Email
activeUserUUID := s.users[arvadostest.ActiveUserUUID].UUID
// Confirm that group doesn't exist
- groupUUID, err := RemoteGroupExists(s.cfg, "TestGroup4")
- c.Assert(err, IsNil)
- c.Assert(groupUUID, Equals, "")
+ for _, groupName := range []string{"TestGroup4", "TestGroup5"} {
+ groupUUID, err := RemoteGroupExists(s.cfg, groupName)
+ c.Assert(err, IsNil)
+ c.Assert(groupUUID, Equals, "")
+ }
// Create file & run command
data := [][]string{
- {"", activeUserEmail}, // Empty field
- {"TestGroup5", ""}, // Empty field
+ {"", activeUserEmail}, // Empty field
+ {"TestGroup5", ""}, // Empty field
+ {"TestGroup5", activeUserEmail, ""}, // 3rd field is optional, but when present it cannot be empty
{"TestGroup4", activeUserEmail},
}
tmpfile, err := MakeTempCSVFile(data)
s.cfg.Path = tmpfile.Name()
err = doMain(s.cfg)
c.Assert(err, IsNil)
- // Confirm that memberships exist
+ // Confirm that records about TestGroup5 were skipped
+ groupUUID, err := RemoteGroupExists(s.cfg, "TestGroup5")
+ c.Assert(err, IsNil)
+ c.Assert(groupUUID, Equals, "")
+ // Confirm that membership exists
groupUUID, err = RemoteGroupExists(s.cfg, "TestGroup4")
c.Assert(err, IsNil)
c.Assert(groupUUID, Not(Equals), "")
- c.Assert(GroupMembershipExists(s.cfg.Client, activeUserUUID, groupUUID), Equals, true)
+ c.Assert(GroupMembershipExists(s.cfg.Client, activeUserUUID, groupUUID, "can_write"), Equals, true)
}
// Instead of emails, use username as identifier
groupUUID, err = RemoteGroupExists(s.cfg, "TestGroup1")
c.Assert(err, IsNil)
c.Assert(groupUUID, Not(Equals), "")
- c.Assert(GroupMembershipExists(s.cfg.Client, activeUserUUID, groupUUID), Equals, true)
+ c.Assert(GroupMembershipExists(s.cfg.Client, activeUserUUID, groupUUID, "can_write"), Equals, true)
}