12199: Merge branch 'master' into 12199-dispatch-to-node-type
authorTom Clegg <tclegg@veritasgenetics.com>
Mon, 12 Feb 2018 22:28:22 +0000 (17:28 -0500)
committerTom Clegg <tclegg@veritasgenetics.com>
Mon, 12 Feb 2018 22:28:22 +0000 (17:28 -0500)
Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg@veritasgenetics.com>

102 files changed:
.gitignore
apps/workbench/app/assets/javascripts/application.js
apps/workbench/app/assets/javascripts/components/search.js
apps/workbench/app/assets/javascripts/components/sessions.js
apps/workbench/app/assets/javascripts/models/session_db.js
apps/workbench/app/controllers/container_requests_controller.rb
apps/workbench/app/models/container_work_unit.rb
apps/workbench/app/models/pipeline_instance_work_unit.rb
apps/workbench/app/models/proxy_work_unit.rb
apps/workbench/app/views/container_requests/_show_recent_rows.html.erb
apps/workbench/app/views/pipeline_instances/_show_components_running.html.erb
apps/workbench/app/views/projects/_show_dashboard.html.erb
apps/workbench/config/application.default.yml
apps/workbench/npm_packages
apps/workbench/test/controllers/container_requests_controller_test.rb
apps/workbench/test/integration/pipeline_instances_test.rb
apps/workbench/test/unit/work_unit_test.rb
build/libcloud-pin.sh
build/run-tests.sh
doc/Rakefile
doc/_config.yml
doc/_includes/_mount_types.liquid
doc/sdk/R/R.html.textile.liquid [new file with mode: 0644]
doc/sdk/index.html.textile.liquid
sdk/R/.Rbuildignore [new file with mode: 0644]
sdk/R/ArvadosR.Rproj [new file with mode: 0644]
sdk/R/DESCRIPTION [new file with mode: 0644]
sdk/R/NAMESPACE [new file with mode: 0644]
sdk/R/R/Arvados.R [new file with mode: 0644]
sdk/R/R/ArvadosFile.R [new file with mode: 0644]
sdk/R/R/Collection.R [new file with mode: 0644]
sdk/R/R/CollectionTree.R [new file with mode: 0644]
sdk/R/R/HttpParser.R [new file with mode: 0644]
sdk/R/R/HttpRequest.R [new file with mode: 0644]
sdk/R/R/RESTService.R [new file with mode: 0644]
sdk/R/R/Subcollection.R [new file with mode: 0644]
sdk/R/R/util.R [new file with mode: 0644]
sdk/R/README.Rmd [new file with mode: 0644]
sdk/R/man/Arvados.Rd [new file with mode: 0644]
sdk/R/man/ArvadosFile.Rd [new file with mode: 0644]
sdk/R/man/Collection.Rd [new file with mode: 0644]
sdk/R/man/Subcollection.Rd [new file with mode: 0644]
sdk/R/man/print.Arvados.Rd [new file with mode: 0644]
sdk/R/man/print.ArvadosFile.Rd [new file with mode: 0644]
sdk/R/man/print.Collection.Rd [new file with mode: 0644]
sdk/R/man/print.Subcollection.Rd [new file with mode: 0644]
sdk/R/run_test.R [new file with mode: 0644]
sdk/R/tests/testthat.R [new file with mode: 0644]
sdk/R/tests/testthat/fakes/FakeArvados.R [new file with mode: 0644]
sdk/R/tests/testthat/fakes/FakeHttpParser.R [new file with mode: 0644]
sdk/R/tests/testthat/fakes/FakeHttpRequest.R [new file with mode: 0644]
sdk/R/tests/testthat/fakes/FakeRESTService.R [new file with mode: 0644]
sdk/R/tests/testthat/test-Arvados.R [new file with mode: 0644]
sdk/R/tests/testthat/test-ArvadosFile.R [new file with mode: 0644]
sdk/R/tests/testthat/test-Collection.R [new file with mode: 0644]
sdk/R/tests/testthat/test-CollectionTree.R [new file with mode: 0644]
sdk/R/tests/testthat/test-HttpParser.R [new file with mode: 0644]
sdk/R/tests/testthat/test-HttpRequest.R [new file with mode: 0644]
sdk/R/tests/testthat/test-RESTService.R [new file with mode: 0644]
sdk/R/tests/testthat/test-Subcollection.R [new file with mode: 0644]
sdk/R/tests/testthat/test-util.R [new file with mode: 0644]
sdk/cwl/arvados_cwl/__init__.py
sdk/cwl/arvados_cwl/arvcontainer.py
sdk/cwl/arvados_cwl/arvtool.py
sdk/cwl/arvados_cwl/pathmapper.py
sdk/cwl/arvados_cwl/runner.py
sdk/cwl/setup.py
sdk/cwl/tests/arvados-tests.sh
sdk/cwl/tests/hg19/hg19.fa [new file with mode: 0644]
sdk/cwl/tests/hg19/hg19.fa.amb [new file with mode: 0644]
sdk/cwl/tests/hg19/hg19.fa.ann [new file with mode: 0644]
sdk/cwl/tests/hg19/hg19.fa.fai [new file with mode: 0644]
sdk/cwl/tests/test_make_output.py
sdk/go/arvadostest/fixtures.go
services/api/app/models/container.rb
services/api/app/models/container_request.rb
services/api/test/fixtures/container_requests.yml
services/api/test/fixtures/pipeline_instances.yml
services/api/test/fixtures/users.yml
services/api/test/unit/container_test.rb
services/crunch-run/crunchrun.go
services/crunch-run/crunchrun_test.go
services/keepstore/azure_blob_volume.go
services/keepstore/azure_blob_volume_test.go
services/nodemanager/setup.py
tools/arvbox/bin/arvbox
tools/arvbox/lib/arvbox/docker/Dockerfile.base
tools/arvbox/lib/arvbox/docker/Dockerfile.demo
tools/arvbox/lib/arvbox/docker/common.sh
tools/arvbox/lib/arvbox/docker/crunch-setup.sh
tools/arvbox/lib/arvbox/docker/go-setup.sh [new file with mode: 0644]
tools/arvbox/lib/arvbox/docker/keep-setup.sh
tools/arvbox/lib/arvbox/docker/service/arv-git-httpd/run-service
tools/arvbox/lib/arvbox/docker/service/composer/run-service
tools/arvbox/lib/arvbox/docker/service/crunch-dispatch-local/run-service
tools/arvbox/lib/arvbox/docker/service/doc/run-service
tools/arvbox/lib/arvbox/docker/service/keep-web/run-service
tools/arvbox/lib/arvbox/docker/service/keepproxy/run-service
tools/arvbox/lib/arvbox/docker/service/postgres/run
tools/arvbox/lib/arvbox/docker/service/websockets/run-service
tools/sync-groups/sync-groups.go
tools/sync-groups/sync-groups_test.go

index e61f485237b6b1145e3527982d0fbbbadaf56727..cc4f17e9ab2611357eab383624fc18158447609b 100644 (file)
@@ -12,6 +12,7 @@ docker/*/generated
 docker/config.yml
 doc/.site
 doc/sdk/python/arvados
+doc/sdk/R/arvados
 sdk/perl/MYMETA.*
 sdk/perl/Makefile
 sdk/perl/blib
@@ -27,3 +28,4 @@ services/api/config/arvados-clients.yml
 *#*
 .DS_Store
 .vscode
+.Rproj.user
index b90081f46fe9d5ccdec360165e6bc2528817d7b2..270a4c766d3152f3edd487561cc40ae4e2bdb256 100644 (file)
@@ -34,6 +34,7 @@
 //= require npm-dependencies
 //= require mithril/stream/stream
 //= require awesomplete
+//= require jssha
 //= require_tree .
 
 Es6ObjectAssign.polyfill()
index 2fe73193e7f116fcda6c2831c8c2250c2a266aee..04572ec3cc9ebd82e5ef896d086339337771a431 100644 (file)
@@ -41,6 +41,8 @@ window.SearchResultsTable = {
             collections: m('i.fa.fa-fw.fa-archive'),
             projects: m('i.fa.fa-fw.fa-folder'),
         }
+        var db = new SessionDB()
+        var sessions = db.loadActive()
         return m('table.table.table-condensed', [
             m('thead', m('tr', [
                 m('th'),
@@ -50,18 +52,29 @@ window.SearchResultsTable = {
             ])),
             m('tbody', [
                 loader.items().map(function(item) {
+                    var session = sessions[item.uuid.slice(0,5)]
+                    var tokenParam = ''
+                    // Add the salted token to search result links from federated
+                    // remote hosts.
+                    if (!session.isFromRails && session.token.indexOf('v2/') == 0) {
+                        tokenParam = session.token
+                    }
                     return m('tr', [
-                        m('td', [
+                        m('td', m('form', {
+                            action: item.workbenchBaseURL() + '/' + item.objectType.wb_path + '/' + item.uuid,
+                            method: 'GET'
+                        }, [
+                            tokenParam !== '' &&
+                                m('input[type=hidden][name=api_token]', {value: tokenParam}),
                             item.workbenchBaseURL() &&
-                                m('a.btn.btn-xs.btn-default', {
+                                m('button.btn.btn-xs.btn-default[type=submit]', {
                                     'data-original-title': 'show '+item.objectType.description,
                                     'data-placement': 'top',
                                     'data-toggle': 'tooltip',
-                                    href: item.workbenchBaseURL()+'/'+item.objectType.wb_path+'/'+item.uuid,
                                     // Bootstrap's tooltip feature
                                     oncreate: function(vnode) { $(vnode.dom).tooltip() },
                                 }, iconsMap[item.objectType.wb_path]),
-                        ]),
+                        ])),
                         m('td.arvados-uuid', item.uuid),
                         m('td', item.name || '(unnamed)'),
                         m('td', m(LocalizedDateTime, {parse: item.modified_at})),
@@ -91,6 +104,7 @@ window.SearchResultsTable = {
 window.Search = {
     oninit: function(vnode) {
         vnode.state.sessionDB = new SessionDB()
+        vnode.state.sessionDB.autoRedirectToHomeCluster('/search')
         vnode.state.searchEntered = m.stream()
         vnode.state.searchActive = m.stream()
         // When searchActive changes (e.g., when restoring state
@@ -154,7 +168,6 @@ window.Search = {
         })
     },
     view: function(vnode) {
-        var sessions = vnode.state.sessionDB.loadAll()
         return m('form', {
             onsubmit: function() {
                 vnode.state.searchActive(vnode.state.searchEntered())
index e7cc5055734d4edaa2144d7eb4d704ec7e737736..e172d3a3f3500c36b3b978b3cf3da79ce7693f01 100644 (file)
@@ -3,26 +3,29 @@
 // SPDX-License-Identifier: AGPL-3.0
 
 $(document).on('ready', function() {
-    var db = new SessionDB()
-    db.checkForNewToken()
-    db.fillMissingUUIDs()
-})
+    var db = new SessionDB();
+    db.checkForNewToken();
+    db.fillMissingUUIDs();
+    db.autoLoadRemoteHosts();
+});
 
 window.SessionsTable = {
     oninit: function(vnode) {
-        vnode.state.db = new SessionDB()
-        vnode.state.hostToAdd = m.stream('')
-        vnode.state.error = m.stream()
-        vnode.state.checking = m.stream()
+        vnode.state.db = new SessionDB();
+        vnode.state.db.autoRedirectToHomeCluster('/sessions');
+        vnode.state.db.migrateNonFederatedSessions();
+        vnode.state.hostToAdd = m.stream('');
+        vnode.state.error = m.stream();
+        vnode.state.checking = m.stream();
     },
     view: function(vnode) {
-        var db = vnode.state.db
-        var sessions = db.loadAll()
+        var db = vnode.state.db;
+        var sessions = db.loadAll();
         return m('.container', [
             m('p', [
                 'You can log in to multiple Arvados sites here, then use the ',
                 m('a[href="/search"]', 'multi-site search'),
-                ' page to search collections and projects on all sites at once.',
+                ' page to search collections and projects on all sites at once.'
             ]),
             m('table.table.table-condensed.table-hover', [
                 m('thead', m('tr', [
@@ -31,21 +34,23 @@ window.SessionsTable = {
                     m('th', 'username'),
                     m('th', 'email'),
                     m('th', 'actions'),
-                    m('th'),
+                    m('th')
                 ])),
                 m('tbody', [
                     Object.keys(sessions).map(function(uuidPrefix) {
-                        var session = sessions[uuidPrefix]
+                        var session = sessions[uuidPrefix];
                         return m('tr', [
                             session.token && session.user ? [
-                                m('td', m('span.label.label-success', 'logged in')),
+                                m('td', session.user.is_active ?
+                                    m('span.label.label-success', 'logged in') :
+                                    m('span.label.label-warning', 'inactive')),
                                 m('td', {title: session.baseURL}, uuidPrefix),
                                 m('td', session.user.username),
                                 m('td', session.user.email),
                                 m('td', session.isFromRails ? null : m('button.btn.btn-xs.btn-default', {
                                     uuidPrefix: uuidPrefix,
                                     onclick: m.withAttr('uuidPrefix', db.logout),
-                                }, 'Log out ', m('span.glyphicon.glyphicon-log-out'))),
+                                }, session.listedHost ? 'Disable ':'Log out ', m('span.glyphicon.glyphicon-log-out')))
                             ] : [
                                 m('td', m('span.label.label-default', 'logged out')),
                                 m('td', {title: session.baseURL}, uuidPrefix),
@@ -54,7 +59,7 @@ window.SessionsTable = {
                                 m('td', m('a.btn.btn-xs.btn-primary', {
                                     uuidPrefix: uuidPrefix,
                                     onclick: db.login.bind(db, session.baseURL),
-                                }, 'Log in ', m('span.glyphicon.glyphicon-log-in'))),
+                                }, session.listedHost ? 'Enable ':'Log in ', m('span.glyphicon.glyphicon-log-in')))
                             ],
                             m('td', session.isFromRails ? null : m('button.btn.btn-xs.btn-default', {
                                 uuidPrefix: uuidPrefix,
index d5cd98d386499adf68f049980c036fbd1c4155e7..fab8fe3925c00da42f5223a763a659ff18e70441 100644 (file)
@@ -3,77 +3,85 @@
 // SPDX-License-Identifier: AGPL-3.0
 
 window.SessionDB = function() {
-    var db = this
+    var db = this;
     Object.assign(db, {
         discoveryCache: {},
+        tokenUUIDCache: null,
         loadFromLocalStorage: function() {
             try {
-                return JSON.parse(window.localStorage.getItem('sessions')) || {}
+                return JSON.parse(window.localStorage.getItem('sessions')) || {};
             } catch(e) {}
-            return {}
+            return {};
         },
         loadAll: function() {
-            var all = db.loadFromLocalStorage()
+            var all = db.loadFromLocalStorage();
             if (window.defaultSession) {
-                window.defaultSession.isFromRails = true
-                all[window.defaultSession.user.uuid.slice(0, 5)] = window.defaultSession
+                window.defaultSession.isFromRails = true;
+                all[window.defaultSession.user.uuid.slice(0, 5)] = window.defaultSession;
             }
-            return all
+            return all;
         },
         loadActive: function() {
-            var sessions = db.loadAll()
+            var sessions = db.loadAll();
             Object.keys(sessions).forEach(function(key) {
-                if (!sessions[key].token)
-                    delete sessions[key]
-            })
-            return sessions
+                if (!sessions[key].token || (sessions[key].user && !sessions[key].user.is_active)) {
+                    delete sessions[key];
+                }
+            });
+            return sessions;
         },
         loadLocal: function() {
-            var sessions = db.loadActive()
-            var s = false
-            Object.values(sessions).forEach(function(session) {
-                if (session.isFromRails) {
-                    s = session
-                    return
+            var sessions = db.loadActive();
+            var s = false;
+            Object.keys(sessions).forEach(function(key) {
+                if (sessions[key].isFromRails) {
+                    s = sessions[key];
+                    return;
                 }
-            })
-            return s
+            });
+            return s;
         },
         save: function(k, v) {
-            var sessions = db.loadAll()
-            sessions[k] = v
+            var sessions = db.loadAll();
+            sessions[k] = v;
             Object.keys(sessions).forEach(function(key) {
-                if (sessions[key].isFromRails)
-                    delete sessions[key]
-            })
-            window.localStorage.setItem('sessions', JSON.stringify(sessions))
+                if (sessions[key].isFromRails) {
+                    delete sessions[key];
+                }
+            });
+            window.localStorage.setItem('sessions', JSON.stringify(sessions));
         },
         trash: function(k) {
-            var sessions = db.loadAll()
-            delete sessions[k]
-            window.localStorage.setItem('sessions', JSON.stringify(sessions))
+            var sessions = db.loadAll();
+            delete sessions[k];
+            window.localStorage.setItem('sessions', JSON.stringify(sessions));
         },
         findAPI: function(url) {
             // Given a Workbench or API host or URL, return a promise
             // for the corresponding API server's base URL.  Typical
             // use:
             // sessionDB.findAPI('https://workbench.example/foo').then(sessionDB.login)
-            if (url.indexOf('://') < 0)
-                url = 'https://' + url
-            url = new URL(url)
+            if (url.length === 5 && url.indexOf('.') < 0) {
+                url += '.arvadosapi.com';
+            }
+            if (url.indexOf('://') < 0) {
+                url = 'https://' + url;
+            }
+            url = new URL(url);
             return m.request(url.origin + '/discovery/v1/apis/arvados/v1/rest').then(function() {
-                return url.origin + '/'
+                return url.origin + '/';
             }).catch(function(err) {
                 // If url is a Workbench site (and isn't too old),
                 // /status.json will tell us its API host.
                 return m.request(url.origin + '/status.json').then(function(resp) {
-                    if (!resp.apiBaseURL)
-                        throw 'no apiBaseURL in status response'
-                    return resp.apiBaseURL
-                })
-            })
+                    if (!resp.apiBaseURL) {
+                        throw 'no apiBaseURL in status response';
+                    }
+                    return resp.apiBaseURL;
+                });
+            });
         },
-        login: function(baseURL) {
+        login: function(baseURL, fallbackLogin) {
             // Initiate login procedure with given API base URL (e.g.,
             // "http://api.example/").
             //
@@ -81,92 +89,246 @@ window.SessionDB = function() {
             // also call checkForNewToken() on (at least) its first
             // render. Otherwise, the login procedure can't be
             // completed.
-            document.location = baseURL + 'login?return_to=' + encodeURIComponent(document.location.href.replace(/\?.*/, '')+'?baseURL='+encodeURIComponent(baseURL))
-            return false
+            if (fallbackLogin === undefined) {
+                fallbackLogin = true;
+            }
+            var session = db.loadLocal();
+            var apiHostname = new URL(session.baseURL).hostname;
+            m.request(session.baseURL+'discovery/v1/apis/arvados/v1/rest').then(function(localDD) {
+                var uuidPrefix = localDD.uuidPrefix;
+                m.request(baseURL+'discovery/v1/apis/arvados/v1/rest').then(function(dd) {
+                    if (uuidPrefix in dd.remoteHosts ||
+                        (dd.remoteHostsViaDNS && apiHostname.endsWith('.arvadosapi.com'))) {
+                        // Federated identity login via salted token
+                        db.saltedToken(dd.uuidPrefix).then(function(token) {
+                            m.request(baseURL+'arvados/v1/users/current', {
+                                headers: {
+                                    authorization: 'Bearer '+token
+                                }
+                            }).then(function(user) {
+                                // Federated login successful.
+                                var remoteSession = {
+                                    user: user,
+                                    baseURL: baseURL,
+                                    token: token,
+                                    listedHost: (dd.uuidPrefix in localDD.remoteHosts)
+                                };
+                                db.save(dd.uuidPrefix, remoteSession);
+                            }).catch(function(e) {
+                                if (dd.uuidPrefix in localDD.remoteHosts) {
+                                    // If the remote system is configured to allow federated
+                                    // logins from this cluster, but rejected the salted
+                                    // token, save as a logged out session anyways.
+                                    var remoteSession = {
+                                        baseURL: baseURL,
+                                        listedHost: true
+                                    };
+                                    db.save(dd.uuidPrefix, remoteSession);
+                                } else if (fallbackLogin) {
+                                    // Remote cluster not listed as a remote host and rejecting
+                                    // the salted token, try classic login.
+                                    db.loginClassic(baseURL);
+                                }
+                            });
+                        });
+                    } else if (fallbackLogin) {
+                        // Classic login will be used when the remote system doesn't list this
+                        // cluster as part of the federation.
+                        db.loginClassic(baseURL);
+                    }
+                });
+            });
+            return false;
+        },
+        loginClassic: function(baseURL) {
+            document.location = baseURL + 'login?return_to=' + encodeURIComponent(document.location.href.replace(/\?.*/, '')+'?baseURL='+encodeURIComponent(baseURL));
         },
         logout: function(k) {
             // Forget the token, but leave the other info in the db so
             // the user can log in again without providing the login
             // host again.
-            var sessions = db.loadAll()
-            delete sessions[k].token
-            db.save(k, sessions[k])
+            var sessions = db.loadAll();
+            delete sessions[k].token;
+            db.save(k, sessions[k]);
+        },
+        saltedToken: function(uuid_prefix) {
+            // Takes a cluster UUID prefix and returns a salted token to allow
+            // log into said cluster using federated identity.
+            var session = db.loadLocal();
+            return db.tokenUUID().then(function(token_uuid) {
+                var shaObj = new jsSHA("SHA-1", "TEXT");
+                shaObj.setHMACKey(session.token, "TEXT");
+                shaObj.update(uuid_prefix);
+                var hmac = shaObj.getHMAC("HEX");
+                return 'v2/' + token_uuid + '/' + hmac;
+            });
         },
         checkForNewToken: function() {
             // If there's a token and baseURL in the location bar (i.e.,
             // we just landed here after a successful login), save it and
             // scrub the location bar.
-            if (document.location.search[0] != '?')
-                return
-            var params = {}
+            if (document.location.search[0] != '?') { return; }
+            var params = {};
             document.location.search.slice(1).split('&').map(function(kv) {
-                var e = kv.indexOf('=')
-                if (e < 0)
-                    return
-                params[decodeURIComponent(kv.slice(0, e))] = decodeURIComponent(kv.slice(e+1))
-            })
-            if (!params.baseURL || !params.api_token)
+                var e = kv.indexOf('=');
+                if (e < 0) {
+                    return;
+                }
+                params[decodeURIComponent(kv.slice(0, e))] = decodeURIComponent(kv.slice(e+1));
+            });
+            if (!params.baseURL || !params.api_token) {
                 // Have a query string, but it's not a login callback.
-                return
-            params.token = params.api_token
-            delete params.api_token
-            db.save(params.baseURL, params)
-            history.replaceState({}, '', document.location.origin + document.location.pathname)
+                return;
+            }
+            params.token = params.api_token;
+            delete params.api_token;
+            db.save(params.baseURL, params);
+            history.replaceState({}, '', document.location.origin + document.location.pathname);
         },
         fillMissingUUIDs: function() {
-            var sessions = db.loadAll()
+            var sessions = db.loadAll();
             Object.keys(sessions).map(function(key) {
-                if (key.indexOf('://') < 0)
-                    return
+                if (key.indexOf('://') < 0) {
+                    return;
+                }
                 // key is the baseURL placeholder. We need to get our user
                 // record to find out the cluster's real uuid prefix.
-                var session = sessions[key]
+                var session = sessions[key];
                 m.request(session.baseURL+'arvados/v1/users/current', {
                     headers: {
-                        authorization: 'OAuth2 '+session.token,
-                    },
+                        authorization: 'OAuth2 '+session.token
+                    }
                 }).then(function(user) {
-                    session.user = user
-                    db.save(user.owner_uuid.slice(0, 5), session)
-                    db.trash(key)
-                })
-            })
+                    session.user = user;
+                    db.save(user.owner_uuid.slice(0, 5), session);
+                    db.trash(key);
+                });
+            });
         },
         // Return the Workbench base URL advertised by the session's
         // API server, or a reasonable guess, or (if neither strategy
         // works out) null.
         workbenchBaseURL: function(session) {
-            var dd = db.discoveryDoc(session)()
-            if (!dd)
+            var dd = db.discoveryDoc(session)();
+            if (!dd) {
                 // Don't fall back to guessing until we receive the discovery doc
-                return null
-            if (dd.workbenchUrl)
-                return dd.workbenchUrl
+                return null;
+            }
+            if (dd.workbenchUrl) {
+                return dd.workbenchUrl;
+            }
             // Guess workbench.{apihostport} is a Workbench... unless
             // the host part of apihostport is an IPv4 or [IPv6]
             // address.
             if (!session.baseURL.match('://(\\[|\\d+\\.\\d+\\.\\d+\\.\\d+[:/])')) {
-                var wbUrl = session.baseURL.replace('://', '://workbench.')
+                var wbUrl = session.baseURL.replace('://', '://workbench.');
                 // Remove the trailing slash, if it's there.
-                return wbUrl.slice(-1) == '/' ? wbUrl.slice(0, -1) : wbUrl
+                return wbUrl.slice(-1) === '/' ? wbUrl.slice(0, -1) : wbUrl;
             }
-            return null
+            return null;
         },
         // Return a m.stream that will get fulfilled with the
         // discovery doc from a session's API server.
         discoveryDoc: function(session) {
-            var cache = db.discoveryCache[session.baseURL]
+            var cache = db.discoveryCache[session.baseURL];
+            if (!cache) {
+                db.discoveryCache[session.baseURL] = cache = m.stream();
+                m.request(session.baseURL+'discovery/v1/apis/arvados/v1/rest').then(cache);
+            }
+            return cache;
+        },
+        // Return a promise with the local session token's UUID from the API server.
+        tokenUUID: function() {
+            var cache = db.tokenUUIDCache;
             if (!cache) {
-                db.discoveryCache[session.baseURL] = cache = m.stream()
-                m.request(session.baseURL+'discovery/v1/apis/arvados/v1/rest').then(cache)
+                var session = db.loadLocal();
+                return db.request(session, '/arvados/v1/api_client_authorizations', {
+                    data: {
+                        filters: JSON.stringify([['api_token', '=', session.token]])
+                    }
+                }).then(function(resp) {
+                    var uuid = resp.items[0].uuid;
+                    db.tokenUUIDCache = uuid;
+                    return uuid;
+                });
+            } else {
+                return new Promise(function(resolve, reject) {
+                    resolve(cache);
+                });
             }
-            return cache
         },
         request: function(session, path, opts) {
-            opts = opts || {}
-            opts.headers = opts.headers || {}
-            opts.headers.authorization = 'OAuth2 '+ session.token
-            return m.request(session.baseURL + path, opts)
+            opts = opts || {};
+            opts.headers = opts.headers || {};
+            opts.headers.authorization = 'OAuth2 '+ session.token;
+            return m.request(session.baseURL + path, opts);
         },
-    })
-}
+        // Check non-federated remote active sessions if they should be migrated to
+        // a salted token.
+        migrateNonFederatedSessions: function() {
+            var sessions = db.loadActive();
+            Object.keys(sessions).map(function(uuidPrefix) {
+                session = sessions[uuidPrefix];
+                if (!session.isFromRails && session.token) {
+                    db.saltedToken(uuidPrefix).then(function(saltedToken) {
+                        if (session.token != saltedToken) {
+                            // Only try the federated login
+                            db.login(session.baseURL, false);
+                        }
+                    });
+                }
+            });
+        },
+        // If remoteHosts is populated on the local API discovery doc, try to
+        // add any listed missing session.
+        autoLoadRemoteHosts: function() {
+            var sessions = db.loadAll();
+            var doc = db.discoveryDoc(db.loadLocal());
+            doc.map(function(d) {
+                Object.keys(d.remoteHosts).map(function(uuidPrefix) {
+                    if (!(sessions[uuidPrefix])) {
+                        db.findAPI(d.remoteHosts[uuidPrefix]).then(function(baseURL) {
+                            db.login(baseURL, false);
+                        });
+                    }
+                });
+            });
+        },
+        // If the current logged in account is from a remote federated cluster,
+        // redirect the user to their home cluster's workbench.
+        // This is meant to avoid confusion when the user clicks through a search
+        // result on the home cluster's multi site search page, landing on the
+        // remote workbench and later trying to do another search by just clicking
+        // on the multi site search button instead of going back with the browser.
+        autoRedirectToHomeCluster: function(path) {
+            path = path || '/';
+            var session = db.loadLocal();
+            var userUUIDPrefix = session.user.uuid.slice(0, 5);
+            // If the current user is local to the cluster, do nothing.
+            if (userUUIDPrefix === session.user.owner_uuid.slice(0, 5)) {
+                return;
+            }
+            var doc = db.discoveryDoc(session);
+            doc.map(function(d) {
+                // Guess the remote host from the local discovery doc settings
+                var rHost = null;
+                if (d.remoteHosts[userUUIDPrefix]) {
+                    rHost = d.remoteHosts[userUUIDPrefix];
+                } else if (d.remoteHostsViaDNS) {
+                    rHost = userUUIDPrefix + '.arvadosapi.com';
+                } else {
+                    // This should not happen: having remote user whose uuid prefix
+                    // isn't listed on remoteHosts and dns mechanism is deactivated
+                    return;
+                }
+                // Get the remote cluster workbench url & redirect there.
+                db.findAPI(rHost).then(function (apiUrl) {
+                    var doc = db.discoveryDoc({baseURL: apiUrl});
+                    doc.map(function (d) {
+                        document.location = d.workbenchUrl + path;
+                    });
+                });
+            });
+        }
+    });
+};
index f61596ecc766ce7b305f4db3d5e2a4e95c466749..783cafa117d9c23b3641f5f9883b90ea066be384 100644 (file)
@@ -77,6 +77,17 @@ class ContainerRequestsController < ApplicationController
   end
 
   def cancel
+    if @object.container_uuid
+      c = Container.select(['state']).where(uuid: @object.container_uuid).first
+      if c && c.state != 'Running'
+        # If the container hasn't started yet, setting priority=0
+        # leaves our request in "Committed" state and doesn't cancel
+        # the container (even if no other requests are giving it
+        # priority). To avoid showing this container request as "on
+        # hold" after hitting the Cancel button, set state=Final too.
+        @object.state = 'Final'
+      end
+    end
     @object.update_attributes! priority: 0
     if params[:return_to]
       redirect_to params[:return_to]
index a5b26f0d6d3d5534fd19e1df76d45ac97e7aadd8..dbc81c52a376940231094dbf4415e5625016814f 100644 (file)
@@ -58,7 +58,10 @@ class ContainerWorkUnit < ProxyWorkUnit
   end
 
   def can_cancel?
-    @proxied.is_a?(ContainerRequest) && @proxied.state == "Committed" && @proxied.priority > 0 && @proxied.editable?
+    @proxied.is_a?(ContainerRequest) &&
+      @proxied.state == "Committed" &&
+      (@proxied.priority > 0 || get(:state, @container) != 'Running') &&
+      @proxied.editable?
   end
 
   def container_uuid
@@ -95,14 +98,29 @@ class ContainerWorkUnit < ProxyWorkUnit
   end
 
   def state_label
-    ec = exit_code
-    return "Failed" if (ec && ec != 0)
-
-    state = get_combined(:state)
-
-    return "Queued" if state == "Locked"
-    return "Cancelled" if ((priority == 0) and (state == "Queued"))
-    state
+    if get(:state) == 'Final' && get(:state, @container) != 'Complete'
+      # Request was finalized before its container started (or the
+      # container was cancelled)
+      return 'Cancelled'
+    end
+    state = get(:state, @container) || get(:state, @proxied)
+    case state
+    when 'Locked', 'Queued'
+      if priority == 0
+        'On hold'
+      else
+        'Queued'
+      end
+    when 'Complete'
+      if exit_code == 0
+        state
+      else
+        'Failed'
+      end
+    else
+      # Cancelled, Running, or Uncommitted (no container assigned)
+      state
+    end
   end
 
   def exit_code
index 517dbc7a352a78502117c85662cc1e7318739eb8..a9bc9cfb5941d3ee559d6dab0b4131df93f2d48d 100644 (file)
@@ -59,4 +59,22 @@ class PipelineInstanceWorkUnit < ProxyWorkUnit
   def template_uuid
     get(:pipeline_template_uuid)
   end
+
+  def state_label
+    if get(:state) != "Failed"
+      return super
+    end
+    if get(:components_summary).andand[:failed].andand > 0
+      return super
+    end
+    # Show "Cancelled" instead of "Failed" if there are no failed
+    # components. #12840
+    get(:components).each do |_, c|
+      jstate = c[:job][:state] rescue nil
+      if jstate == "Failed"
+        return "Failed"
+      end
+    end
+    "Cancelled"
+  end
 end
index f570563b751ee66ec074733a548b41e6fc4a98fe..02f6b42fd926eb432bec264c87b8def198a06151 100644 (file)
@@ -255,7 +255,9 @@ class ProxyWorkUnit < WorkUnit
       if state_label == 'Complete'
         resp << "completed in "
       elsif state_label == 'Failed'
-         resp << "failed after "
+        resp << "failed after "
+      elsif state_label == 'Cancelled'
+        resp << "was cancelled after "
       else
         resp << "has been active for "
       end
index 32de59cde831af32bb7d972afdf5169c11dadd0a..0212162fccb15e26d460b80d9969672b15959e70 100644 (file)
@@ -24,7 +24,7 @@ SPDX-License-Identifier: AGPL-3.0 %>
     <td>
       <span class="label label-<%= wu.state_bootstrap_class %>"><%= wu.state_label %></span>
     </td><td>
-      <%= link_to_if_arvados_object obj, friendly_name: true, link_text: if !obj.name.empty? then obj.name else obj.uuid end %>
+      <%= link_to_if_arvados_object obj, friendly_name: true, link_text: if obj.name && !obj.name.empty? then obj.name else obj.uuid end %>
     </td><td>
       <%= obj.description || '' %>
     </td><td>
index 1cd9445a5a1212a693ab6f80c64f7b98cd670fd7..60d4c2abc2077e44d3472f6528d6697374829d20 100644 (file)
@@ -34,6 +34,8 @@ SPDX-License-Identifier: AGPL-3.0 %>
       completed in
     <% elsif @object.state == 'Failed' %>
       failed after
+    <% elsif @object.state == 'Cancelled' %>
+      was cancelled after
     <% else %>
       has been active for
     <% end %>
index 713582654fee93b3c9bd11432701e002b6fc5d0c..e51cf5314d275137260f214f2e0bbbc6852bd373 100644 (file)
@@ -125,12 +125,15 @@ SPDX-License-Identifier: AGPL-3.0 %>
               </div>
 
               <div class="clearfix">
-                Started at <%= render_localized_date(wu.started_at || wu.created_at, "noseconds") %>.
-                <% wu_time = Time.now - (wu.started_at || wu.created_at) %>
-                Active for <%= render_runtime(wu_time, false) %>.
-
-                <div class="pull-right">
-                </div>
+                <% if wu.started_at %>
+                  Started at <%= render_localized_date(wu.started_at, "noseconds") %>.
+                  Active for <%= render_runtime(Time.now - wu.started_at, false) %>.
+                <% else %>
+                  Created at <%= render_localized_date(wu.created_at, "noseconds") %>.
+                  <% if wu.state_label == 'Queued' %>
+                    Queued for <%= render_runtime(Time.now - wu.created_at, false) %>.
+                  <% end %>
+                <% end %>
               </div>
             </div>
             <% end %>
index 187845038ea3c48449ccd1e7d1c002657ffe6e37..76f7a3081751df228bb32b810b50208902aac8bc 100644 (file)
@@ -313,4 +313,4 @@ common:
   #
   # Link to use for Arvados Workflow Composer app, or false if not available.
   #
-  composer_url: false
\ No newline at end of file
+  composer_url: false
index c126b559fb138387b27424773cd931053f6bcc86..64f58ac4686f4686eccc18854bfc9da6a91d7432 100644 (file)
@@ -8,6 +8,7 @@
 npm 'browserify', require: false
 npm 'jquery'
 npm 'awesomplete'
+npm 'jssha'
 
 npm 'mithril'
 npm 'es6-object-assign'
index 206352a2afa8d77abca7404abf5ad3143fbacf51..261169cd1f954c352aaba6e58e577c9d0a955b4d 100644 (file)
@@ -42,7 +42,21 @@ class ContainerRequestsControllerTest < ActionController::TestCase
     get :show, {id: uuid}, session_for(:active)
     assert_response :success
 
-   assert_includes @response.body, "action=\"/container_requests/#{uuid}/copy\""
+    assert_includes @response.body, "action=\"/container_requests/#{uuid}/copy\""
+  end
+
+  test "cancel request for queued container" do
+    cr_fixture = api_fixture('container_requests')['queued']
+    post :cancel, {id: cr_fixture['uuid']}, session_for(:active)
+    assert_response 302
+
+    use_token 'active'
+    cr = ContainerRequest.find(cr_fixture['uuid'])
+    assert_equal 'Final', cr.state
+    assert_equal 0, cr.priority
+    c = Container.find(cr_fixture['container_uuid'])
+    assert_equal 'Queued', c.state
+    assert_equal 0, c.priority
   end
 
   [
index f3c0789cb6aee9fc302c14bba6a3c4622bfa8476..47e385a4e27830b4ab5293c9996a933f1496c3f6 100644 (file)
@@ -476,30 +476,22 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
       need_selenium 'to parse timestamps correctly across DST boundaries'
       visit page_with_token(user, "/pipeline_instances/#{uuid}")
 
-      assert page.has_text? 'This pipeline started at'
-      page_text = page.text
-
+      regexp = "This pipeline started at (.+?)\\. "
       if run_time
-        match = /This pipeline started at (.*)\. It failed after (.*) at (.*)\. Check the Log/.match page_text
+        regexp += "It failed after (.+?) at (.+?)\\. Check the Log"
       else
-        match = /This pipeline started at (.*). It has been active for(.*)/.match page_text
+        regexp += "It has been active for \\d"
       end
-      assert_not_nil(match, 'Did not find text - This pipeline started at . . . ')
+      assert_match /#{regexp}/, page.text
 
-      start_at = match[1]
-      assert_not_nil(start_at, 'Did not find start_at time')
+      return if !run_time
 
-      start_time = parse_browser_timestamp start_at
-      if run_time
-        finished_at = match[3]
-        assert_not_nil(finished_at, 'Did not find finished_at time')
-        finished_time = parse_browser_timestamp finished_at
-        assert_equal(run_time, finished_time-start_time,
-          "Time difference did not match for start_at #{start_at}, finished_at #{finished_at}, ran_for #{match[2]}")
-      else
-        match = /\d(.*)/.match match[2]
-        assert_not_nil match, 'Did not find expected match for running component'
-      end
+      # match again to capture (.*)
+      _, started, duration, finished = *(/#{regexp}/.match(page.text))
+      assert_equal(
+        run_time,
+        parse_browser_timestamp(finished) - parse_browser_timestamp(started),
+        "expected: #{run_time}, got: started #{started}, finished #{finished}, duration #{duration}")
     end
   end
 
index 5cf9499aad5201cb9e61074fc3294c49bc981785..1daf582553bf5ce0283e2cd4dfc0550753ca846e 100644 (file)
@@ -17,11 +17,12 @@ class WorkUnitTest < ActiveSupport::TestCase
     [PipelineInstance, 'pipeline_in_running_state', nil, 1, "Running", nil, 0.0],
     [PipelineInstance, 'has_component_with_completed_jobs', nil, 3, "Complete", true, 1.0],
     [PipelineInstance, 'pipeline_with_tagged_collection_input', "pwu", 1, "Ready", nil, 0.0],
+    [PipelineInstance, 'failed_pipeline_with_two_jobs', nil, 2, "Cancelled", false, 0.0],
     [Container, 'requester', 'cwu', 1, "Complete", true, 1.0],
     [ContainerRequest, 'cr_for_requester', 'cwu', 1, "Complete", true, 1.0],
     [ContainerRequest, 'queued', 'cwu', 0, "Queued", nil, 0.0],   # priority 1
     [ContainerRequest, 'canceled_with_queued_container', 'cwu', 0, "Cancelled", false, 0.0],
-    [ContainerRequest, 'canceled_with_locked_container', 'cwu', 0, "Queued", nil, 0.0],
+    [ContainerRequest, 'canceled_with_locked_container', 'cwu', 0, "Cancelled", false, 0.0],
     [ContainerRequest, 'canceled_with_running_container', 'cwu', 1, "Running", nil, 0.0],
   ].each do |type, fixture, label, num_children, state, success, progress|
     test "children of #{fixture}" do
index 4182575651ff91ec5e48376f7c1a2b2270b43340..c795bb8a24b94c3aed4e551fa0143b1838945af4 100644 (file)
@@ -2,4 +2,4 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
-LIBCLOUD_PIN=2.2.2.dev3
+LIBCLOUD_PIN=2.2.2.dev4
index 57ce41e3a52b1e57bf1ec9e2afbb5c2f173de9ae..1ef15cd3dbe9974f2fe37619f06845b8964a4ecf 100755 (executable)
@@ -109,6 +109,7 @@ sdk/go/asyncbuf
 sdk/go/stats
 sdk/go/crunchrunner
 sdk/cwl
+sdk/R
 tools/sync-groups
 tools/crunchstat-summary
 tools/keep-exercise
@@ -133,6 +134,7 @@ VENV3DIR=
 PYTHONPATH=
 GEMHOME=
 PERLINSTALLBASE=
+R_LIBS=
 
 short=
 only_install=
@@ -240,6 +242,16 @@ sanity_checks() {
     which Xvfb || fatal "No xvfb. Try: apt-get install xvfb"
     echo -n 'graphviz: '
     dot -V || fatal "No graphviz. Try: apt-get install graphviz"
+
+    # R SDK stuff
+    echo -n 'R: '
+    which R || fatal "No R. Try: apt-get install r-base"
+    echo -n 'testthat: '
+    R -q -e "library('testthat')" || fatal "No testthat. Try: apt-get install r-cran-testthat"
+    # needed for roxygen2, needed for devtools, needed for R sdk
+    pkg-config --exists libxml-2.0 || fatal "No libxml2. Try: apt-get install libxml2-dev"
+    # needed for pkgdown, builds R SDK doc pages
+    which pandoc || fatal "No pandoc. Try: apt-get install pandoc"
 }
 
 rotate_logfile() {
@@ -368,7 +380,7 @@ if [[ -z "$temp" ]]; then
 fi
 
 # Set up temporary install dirs (unless existing dirs were supplied)
-for tmpdir in VENVDIR VENV3DIR GOPATH GEMHOME PERLINSTALLBASE
+for tmpdir in VENVDIR VENV3DIR GOPATH GEMHOME PERLINSTALLBASE R_LIBS
 do
     if [[ -z "${!tmpdir}" ]]; then
         eval "$tmpdir"="$temp/$tmpdir"
@@ -477,6 +489,7 @@ setup_virtualenv() {
 export PERLINSTALLBASE
 export PERLLIB="$PERLINSTALLBASE/lib/perl5:${PERLLIB:+$PERLLIB}"
 
+export R_LIBS
 
 export GOPATH
 mkdir -p "$GOPATH/src/git.curoverse.com"
@@ -766,6 +779,24 @@ install_ruby_sdk() {
 }
 do_install sdk/ruby ruby_sdk
 
+install_R_sdk() {
+    cd "$WORKSPACE/sdk/R" \
+       && R --quiet --vanilla <<EOF
+options(repos=structure(c(CRAN="http://cran.wustl.edu/")))
+if (!requireNamespace("devtools")) {
+  install.packages("devtools")
+}
+if (!requireNamespace("roxygen2")) {
+  install.packages("roxygen2")
+}
+if (!requireNamespace("pkgdown")) {
+  devtools::install_github("hadley/pkgdown")
+}
+devtools::install_dev_deps()
+EOF
+}
+do_install sdk/R R_sdk
+
 install_perl_sdk() {
     cd "$WORKSPACE/sdk/perl" \
         && perl Makefile.PL INSTALL_BASE="$PERLINSTALLBASE" \
@@ -937,6 +968,12 @@ test_ruby_sdk() {
 }
 do_test sdk/ruby ruby_sdk
 
+test_R_sdk() {
+    cd "$WORKSPACE/sdk/R" \
+        && R --quiet --file=run_test.R
+}
+do_test sdk/R R_sdk
+
 test_cli() {
     cd "$WORKSPACE/sdk/cli" \
         && mkdir -p /tmp/keep \
index f68a96471d51060a4adc88451916a561c2f8b075..eaa5410100992dfc8bc4482e2b6974d54b0ff8a0 100644 (file)
@@ -6,7 +6,7 @@
 require "rubygems"
 require "colorize"
 
-task :generate => [ :realclean, 'sdk/python/arvados/index.html' ] do
+task :generate => [ :realclean, 'sdk/python/arvados/index.html', 'sdk/R/arvados/index.html' ] do
   vars = ['baseurl', 'arvados_api_host', 'arvados_workbench_host']
   vars.each do |v|
     if ENV[v]
@@ -25,6 +25,19 @@ file "sdk/python/arvados/index.html" do |t|
   end
 end
 
+file "sdk/R/arvados/index.html" do |t|
+  `which R`
+  if $? == 0
+    Dir.chdir("../sdk/R/") do
+      STDERR.puts `R --quiet --vanilla -e 'pkgdown::build_site()' 2>&1`
+    end
+    raise if $? != 0
+    cp_r("../sdk/R/docs", "sdk/R/arvados")
+  else
+    puts "Warning: R not found, R documentation will not be generated".colorize(:light_red)
+  end
+end
+
 task :linkchecker => [ :generate ] do
   Dir.chdir(".site") do
     `which linkchecker`
@@ -38,6 +51,8 @@ end
 
 task :clean do
   rm_rf "sdk/python/arvados"
+  rm_rf "sdk/R/arvados"
+  rm_rf "../sdk/R/docs"
 end
 
 require "zenweb/tasks"
index 08b581c0d71de7c3fe7ab831191735ee4e7c05b1..680f940f26b04f3c4fa9885ccd0fa291d59fc7a8 100644 (file)
@@ -103,6 +103,8 @@ navbar:
     - Go:
       - sdk/go/index.html.textile.liquid
       - sdk/go/example.html.textile.liquid
+    - R:
+      - sdk/R/R.html.textile.liquid
     - Perl:
       - sdk/perl/index.html.textile.liquid
       - sdk/perl/example.html.textile.liquid
index 734b07c8b7970c00bdd6e99be3815e9d8d31f1f0..fc8a7991b38c65616704d0d096d7500a1b9e1473 100644 (file)
@@ -64,7 +64,7 @@ When a container's output_path is a tmp mount backed by local disk, this output
 
 1. Only mount points of kind @collection@ are supported.
 
-2. Mount points underneath output_path must not use @"writable":true@. If any of them are set as @writable@, the API will refuse to create/update the container request, and crunch-run will fail the container.
+2. Mount points underneath output_path which have "writable":true are copied into output_path during container initialization and may be updated, renamed, or deleted by the running container.  The original collection is not modified.  On container completion, files remaining in the output are saved to the output collection.   The mount at output_path must be big enough to accommodate copies of the inner writable mounts.
 
 3. If any such mount points are configured as @"exclude_from_output":true@, they will be excluded from the output.
 
diff --git a/doc/sdk/R/R.html.textile.liquid b/doc/sdk/R/R.html.textile.liquid
new file mode 100644 (file)
index 0000000..7b788a1
--- /dev/null
@@ -0,0 +1,15 @@
+---
+layout: default
+navsection: sdk
+navmenu: R
+title: "R Reference"
+
+no_nav_left: true
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+notextile. <iframe src="arvados/" style="width:100%; height:100%; border:none"></iframe>
index e2b6eba8995434181279e39d321e93d9f3c83e2c..fbc5ca11b68ae09dfffefde5ddc3274835ceb940 100644 (file)
@@ -14,6 +14,7 @@ This section documents language bindings for the "Arvados API":{{site.baseurl}}/
 * "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html
 * "Command line SDK":{{site.baseurl}}/sdk/cli/install.html ("arv")
 * "Go SDK":{{site.baseurl}}/sdk/go/index.html
+* "R SDK":{{site.baseurl}}/sdk/R/R.html
 * "Perl SDK":{{site.baseurl}}/sdk/perl/index.html
 * "Ruby SDK":{{site.baseurl}}/sdk/ruby/index.html
 * "Java SDK":{{site.baseurl}}/sdk/java/index.html
diff --git a/sdk/R/.Rbuildignore b/sdk/R/.Rbuildignore
new file mode 100644 (file)
index 0000000..b9136fe
--- /dev/null
@@ -0,0 +1,4 @@
+^.*\.Rproj$
+^\.Rproj\.user$
+^docs$
+^pkgdown$
diff --git a/sdk/R/ArvadosR.Rproj b/sdk/R/ArvadosR.Rproj
new file mode 100644 (file)
index 0000000..a648ce1
--- /dev/null
@@ -0,0 +1,20 @@
+Version: 1.0
+
+RestoreWorkspace: Default
+SaveWorkspace: Default
+AlwaysSaveHistory: Default
+
+EnableCodeIndexing: Yes
+UseSpacesForTab: Yes
+NumSpacesForTab: 4
+Encoding: UTF-8
+
+RnwWeave: Sweave
+LaTeX: pdfLaTeX
+
+AutoAppendNewline: Yes
+StripTrailingWhitespace: Yes
+
+BuildType: Package
+PackageUseDevtools: Yes
+PackageInstallArgs: --no-multiarch --with-keep.source
diff --git a/sdk/R/DESCRIPTION b/sdk/R/DESCRIPTION
new file mode 100644 (file)
index 0000000..19cf8fb
--- /dev/null
@@ -0,0 +1,20 @@
+Package: ArvadosR
+Type: Package
+Title: Arvados R SDK
+Version: 0.0.2
+Authors@R: person("Fuad", "Muhic", role = c("aut", "cre"), email = "fmuhic@capeannenterprises.com")
+Maintainer: Ward Vandewege <wvandewege@veritasgenetics.com>
+Description: This is the Arvados R SDK
+URL: http://doc.arvados.org
+License: Apache-2.0
+Encoding: UTF-8
+LazyData: true
+RoxygenNote: 6.0.1.9000
+Imports:
+    R6,
+    httr,
+    stringr,
+    jsonlite,
+    curl,
+    XML
+Suggests: testthat
diff --git a/sdk/R/NAMESPACE b/sdk/R/NAMESPACE
new file mode 100644 (file)
index 0000000..f2cae86
--- /dev/null
@@ -0,0 +1,10 @@
+# Generated by roxygen2: do not edit by hand
+
+S3method(print,Arvados)
+S3method(print,ArvadosFile)
+S3method(print,Collection)
+S3method(print,Subcollection)
+export(Arvados)
+export(ArvadosFile)
+export(Collection)
+export(Subcollection)
diff --git a/sdk/R/R/Arvados.R b/sdk/R/R/Arvados.R
new file mode 100644 (file)
index 0000000..8b0e92b
--- /dev/null
@@ -0,0 +1,236 @@
+source("./R/RESTService.R")
+source("./R/HttpRequest.R")
+source("./R/HttpParser.R")
+
+#' Arvados
+#' 
+#' Arvados class gives users ability to manipulate collections and projects.
+#' 
+#' @section Usage:
+#' \preformatted{arv = Arvados$new(authToken = NULL, hostName = NULL, numRetries = 0)}
+#'
+#' @section Arguments:
+#' \describe{
+#'   \item{authToken}{Authentication token. If not specified, the ARVADOS_API_TOKEN environment variable will be used.}
+#'   \item{hostName}{Host name. If not specified ARVADOS_API_HOST environment variable will be used.}
+#'   \item{numRetries}{Number which specifies how many times to retry failed service requests.}
+#' }
+#' 
+#' @section Methods:
+#' \describe{
+#'   \item{getToken()}{Returns the authentication token currently in use.}
+#'   \item{getHostName()}{Returns host name currently in use.}
+#'   \item{getNumRetries()}{Returns number which specifies how many times to retry failed service requests.}
+#'   \item{setNumRetries(newNumOfRetries)}{Sets number which specifies how many times to retry failed service requests.}
+#'   \item{getCollection(uuid)}{Get collection with specified UUID.}
+#'   \item{listCollections(filters = NULL, limit = 100, offset = 0)}{Returns list of collections based on filters parameter.}
+#'   \item{listAllCollections(filters = NULL)}{Lists all collections, based on filters parameter, even if the number of items is greater than maximum API limit.}
+#'   \item{deleteCollection(uuid)}{Deletes collection with specified UUID.}
+#'   \item{updateCollection(uuid, newContent)}{Updates collection with specified UUID.}
+#'   \item{createCollection(content)}{Creates new collection.}
+#'   \item{getProject(uuid)}{Get project with specified UUID.}
+#'   \item{listProjects(filters = NULL, limit = 100, offset = 0)}{Returns list of projects based on filters parameter.}
+#'   \item{listAllProjects(filters = NULL)}{Lists all projects, based on filters parameter, even if the number of items is greater than maximum API limit.}
+#'   \item{deleteProject(uuid)}{Deletes project with specified UUID.}
+#'   \item{updateProject(uuid, newContent)}{Updates project with specified UUID.}
+#'   \item{createProject(content)}{Creates new project.}
+#' }
+#'
+#' @name Arvados
+#' @examples
+#' \dontrun{
+#' arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+#'
+#' collection <- arv$getCollection("uuid")
+#'
+#' collectionList <- arv$listCollections(list(list("name", "like", "Test%")))
+#' collectionList <- arv$listAllCollections(list(list("name", "like", "Test%")))
+#'
+#' deletedCollection <- arv$deleteCollection("uuid")
+#'
+#' updatedCollection <- arv$updateCollection("uuid", list(name = "New name",
+#'                                                        description = "New description"))
+#'
+#' createdCollection <- arv$createCollection(list(name = "Example",
+#'                                                description = "This is a test collection"))
+#' }
+NULL
+
+#' @export
+Arvados <- R6::R6Class(
+
+    "Arvados",
+
+    public = list(
+
+        initialize = function(authToken = NULL, hostName = NULL, numRetries = 0)
+        {
+            if(!is.null(hostName))
+               Sys.setenv(ARVADOS_API_HOST = hostName)
+
+            if(!is.null(authToken))
+                Sys.setenv(ARVADOS_API_TOKEN = authToken)
+
+            hostName  <- Sys.getenv("ARVADOS_API_HOST");
+            token     <- Sys.getenv("ARVADOS_API_TOKEN");
+
+            if(hostName == "" | token == "")
+                stop(paste("Please provide host name and authentification token",
+                           "or set ARVADOS_API_HOST and ARVADOS_API_TOKEN",
+                           "environment variables."))
+
+            private$numRetries  <- numRetries
+            private$REST  <- RESTService$new(token, hostName,
+                                             HttpRequest$new(), HttpParser$new(),
+                                             numRetries)
+
+            private$token <- private$REST$token
+            private$host  <- private$REST$hostName
+        },
+
+        getToken          = function() private$REST$token,
+        getHostName       = function() private$REST$hostName,
+        getWebDavHostName = function() private$REST$getWebDavHostName(),
+        getRESTService    = function() private$REST,
+        setRESTService    = function(newRESTService) private$REST <- newRESTService,
+
+        getNumRetries = function() private$REST$numRetries,
+        setNumRetries = function(newNumOfRetries)
+        {
+            private$REST$setNumRetries(newNumOfRetries)
+        },
+
+        getCollection = function(uuid)
+        {
+            collection <- private$REST$getResource("collections", uuid)
+            collection
+        },
+
+        listCollections = function(filters = NULL, limit = 100, offset = 0)
+        {
+            if(!is.null(filters))
+                names(filters) <- c("collection")
+
+            collections <- private$REST$listResources("collections", filters,
+                                                      limit, offset)
+            collections
+        },
+
+        listAllCollections = function(filters = NULL)
+        {
+            if(!is.null(filters))
+                names(filters) <- c("collection")
+
+            collectionURL <- paste0(private$host, "collections")
+            allCollection <- private$REST$fetchAllItems(collectionURL, filters)
+            allCollection
+        },
+
+        deleteCollection = function(uuid)
+        {
+            removedCollection <- private$REST$deleteResource("collections", uuid)
+            removedCollection
+        },
+
+        updateCollection = function(uuid, newContent)
+        {
+            body <- list(list())
+            names(body) <- c("collection")
+            body$collection <- newContent
+
+            updatedCollection <- private$REST$updateResource("collections",
+                                                             uuid, body)
+            updatedCollection
+        },
+
+        createCollection = function(content)
+        {
+            body <- list(list())
+            names(body) <- c("collection")
+            body$collection <- content
+
+            newCollection <- private$REST$createResource("collections", body)
+            newCollection
+        },
+
+        getProject = function(uuid)
+        {
+            project <- private$REST$getResource("groups", uuid)
+            project
+        },
+
+        createProject = function(content)
+        {
+            body <- list(list())
+            names(body) <- c("group")
+            body$group <- c("group_class" = "project", content)
+
+            newProject <- private$REST$createResource("groups", body)
+            newProject
+        },
+
+        updateProject = function(uuid, newContent)
+        {
+            body <- list(list())
+            names(body) <- c("group")
+            body$group <- newContent
+
+            updatedProject <- private$REST$updateResource("groups", uuid, body)
+            updatedProject
+        },
+
+        listProjects = function(filters = NULL, limit = 100, offset = 0)
+        {
+            if(!is.null(filters))
+                names(filters) <- c("groups")
+
+            filters[[length(filters) + 1]] <- list("group_class", "=", "project")
+
+            projects <- private$REST$listResources("groups", filters, limit, offset)
+            projects
+        },
+
+        listAllProjects = function(filters = NULL)
+        {
+            if(!is.null(filters))
+                names(filters) <- c("groups")
+
+            filters[[length(filters) + 1]] <- list("group_class", "=", "project")
+
+            projectURL <- paste0(private$host, "groups")
+
+            result <- private$REST$fetchAllItems(projectURL, filters)
+            result
+        },
+
+        deleteProject = function(uuid)
+        {
+            removedProject <- private$REST$deleteResource("groups", uuid)
+            removedProject
+        }
+    ),
+
+    private = list(
+
+        token      = NULL,
+        host       = NULL,
+        REST       = NULL,
+        numRetries = NULL
+    ),
+
+    cloneable = FALSE
+)
+
+#' print.Arvados
+#'
+#' Custom print function for Arvados class
+#'
+#' @param x Instance of Arvados class
+#' @param ... Optional arguments.
+#' @export 
+print.Arvados = function(x, ...)
+{
+    cat(paste0("Type:  ", "\"", "Arvados",       "\""), sep = "\n")
+    cat(paste0("Host:  ", "\"", x$getHostName(), "\""), sep = "\n")
+    cat(paste0("Token: ", "\"", x$getToken(),    "\""), sep = "\n")
+}
diff --git a/sdk/R/R/ArvadosFile.R b/sdk/R/R/ArvadosFile.R
new file mode 100644 (file)
index 0000000..e28ba96
--- /dev/null
@@ -0,0 +1,270 @@
+source("./R/util.R")
+
+#' ArvadosFile
+#' 
+#' ArvadosFile class represents a file inside Arvados collection.
+#' 
+#' @section Usage:
+#' \preformatted{file = ArvadosFile$new(name)}
+#'
+#' @section Arguments:
+#' \describe{
+#'   \item{name}{Name of the file.}
+#' }
+#' 
+#' @section Methods:
+#' \describe{
+#'   \item{getName()}{Returns name of the file.}
+#'   \item{getRelativePath()}{Returns file path relative to the root.}
+#'   \item{read(contentType = "raw", offset = 0, length = 0)}{Read file content.}
+#'   \item{write(content, contentType = "text/html")}{Write to file (override current content of the file).}
+#'   \item{connection(rw)}{Get connection opened in "read" or "write" mode.}
+#'   \item{flush()}{Write connections content to a file (override current content of the file).}
+#'   \item{remove(name)}{Removes ArvadosFile or Subcollection specified by name from the subcollection.}
+#'   \item{getSizeInBytes()}{Returns file size in bytes.}
+#'   \item{move(newLocation)}{Moves file to a new location inside collection.}
+#' }
+#'
+#' @name ArvadosFile
+#' @examples
+#' \dontrun{
+#' myFile <- ArvadosFile$new("myFile")
+#'
+#' myFile$write("This is new file content")
+#' fileContent <- myFile$read()
+#' fileContent <- myFile$read("text")
+#' fileContent <- myFile$read("raw", offset = 8, length = 4) 
+#'
+#' #Write a table:
+#' arvConnection <- myFile$connection("w")
+#' write.table(mytable, arvConnection)
+#' arvadosFile$flush()
+#'
+#' #Read a table:
+#' arvConnection <- myFile$connection("r")
+#' mytable <- read.table(arvConnection)
+#'
+#' myFile$move("newFolder/myFile")
+#' }
+NULL
+
+#' @export
+ArvadosFile <- R6::R6Class(
+
+    "ArvadosFile",
+
+    public = list(
+
+        initialize = function(name)
+        {
+            if(name == "")
+                stop("Invalid name.")
+
+            private$name <- name
+        },
+
+        getName = function() private$name,
+
+        getFileListing = function(fullpath = TRUE)
+        {
+            self$getName()
+        },
+
+        getSizeInBytes = function()
+        {
+            if(is.null(private$collection))
+                return(0)
+
+            REST <- private$collection$getRESTService()
+
+            fileSize <- REST$getResourceSize(self$getRelativePath(),
+                                             private$collection$uuid)
+
+            fileSize
+        },
+
+        get = function(fileLikeObjectName)
+        {
+            return(NULL)
+        },
+
+        getFirst = function()
+        {
+            return(NULL)
+        },
+
+        getCollection = function() private$collection,
+
+        setCollection = function(collection)
+        {
+            private$collection <- collection
+        },
+
+        getRelativePath = function()
+        {
+            relativePath <- c(private$name)
+            parent <- private$parent
+
+            while(!is.null(parent))
+            {
+                relativePath <- c(parent$getName(), relativePath)
+                parent <- parent$getParent()
+            }
+
+            relativePath <- relativePath[relativePath != ""]
+            paste0(relativePath, collapse = "/")
+        },
+
+        getParent = function() private$parent,
+
+        setParent = function(newParent) private$parent <- newParent,
+
+        read = function(contentType = "raw", offset = 0, length = 0)
+        {
+            if(is.null(private$collection))
+                stop("ArvadosFile doesn't belong to any collection.")
+
+            if(offset < 0 || length < 0)
+                stop("Offset and length must be positive values.")
+
+            REST <- private$collection$getRESTService()
+
+            fileContent <- REST$read(self$getRelativePath(),
+                                     private$collection$uuid,
+                                     contentType, offset, length)
+            fileContent
+        },
+
+        connection = function(rw)
+        {
+            if (rw == "r" || rw == "rb") 
+            {
+                REST <- private$collection$getRESTService()
+                return(REST$getConnection(private$collection$uuid,
+                                          self$getRelativePath(),
+                                          rw))
+            }
+            else if (rw == "w") 
+            {
+                private$buffer <- textConnection(NULL, "w")
+
+                return(private$buffer)
+            }
+        },
+
+        flush = function() 
+        {
+            v <- textConnectionValue(private$buffer)
+            close(private$buffer)
+            self$write(paste(v, collapse='\n'))
+        },
+
+        write = function(content, contentType = "text/html")
+        {
+            if(is.null(private$collection))
+                stop("ArvadosFile doesn't belong to any collection.")
+
+            REST <- private$collection$getRESTService()
+
+            writeResult <- REST$write(self$getRelativePath(),
+                                      private$collection$uuid,
+                                      content, contentType)
+            writeResult
+        },
+
+        move = function(newLocation)
+        {
+            if(is.null(private$collection))
+                stop("ArvadosFile doesn't belong to any collection")
+
+            newLocation <- trimFromEnd(newLocation, "/")
+            nameAndPath <- splitToPathAndName(newLocation)
+
+            newParent <- private$collection$get(nameAndPath$path)
+
+            if(is.null(newParent))
+            {
+                stop("Unable to get destination subcollection")
+            }
+
+            childWithSameName <- newParent$get(nameAndPath$name)
+
+            if(!is.null(childWithSameName))
+                stop("Destination already contains content with same name.")
+
+            REST <- private$collection$getRESTService()
+            REST$move(self$getRelativePath(),
+                      paste0(newParent$getRelativePath(), "/", nameAndPath$name),
+                      private$collection$uuid)
+
+            private$dettachFromCurrentParent()
+            private$attachToNewParent(newParent)
+
+            private$name <- nameAndPath$name
+
+            "Content moved successfully."
+        }
+    ),
+
+    private = list(
+
+        name       = NULL,
+        size       = NULL,
+        parent     = NULL,
+        collection = NULL,
+        buffer     = NULL,
+
+        attachToNewParent = function(newParent)
+        {
+            #Note: We temporarily set the parent's collection to NULL. This ensures
+            #      that the add method doesn't post the file to REST.
+            parentsCollection <- newParent$getCollection()
+            newParent$setCollection(NULL, setRecursively = FALSE)
+
+            newParent$add(self)
+
+            newParent$setCollection(parentsCollection, setRecursively = FALSE)
+
+            private$parent <- newParent
+        },
+
+        dettachFromCurrentParent = function()
+        {
+            #Note: We temporarily set the parent's collection to NULL. This ensures
+            #      that the remove method doesn't remove this subcollection from REST.
+            parent <- private$parent
+            parentsCollection <- parent$getCollection()
+            parent$setCollection(NULL, setRecursively = FALSE)
+
+            parent$remove(private$name)
+
+            parent$setCollection(parentsCollection, setRecursively = FALSE)
+        }
+    ),
+
+    cloneable = FALSE
+)
+
+#' print.ArvadosFile
+#'
+#' Custom print function for ArvadosFile class
+#'
+#' @param x Instance of ArvadosFile class
+#' @param ... Optional arguments.
+#' @export 
+print.ArvadosFile = function(x, ...)
+{
+    collection   <- NULL
+    relativePath <- x$getRelativePath()
+
+    if(!is.null(x$getCollection()))
+    {
+        collection <- x$getCollection()$uuid
+        relativePath <- paste0("/", relativePath)
+    }
+
+    cat(paste0("Type:          ", "\"", "ArvadosFile",         "\""), sep = "\n")
+    cat(paste0("Name:          ", "\"", x$getName(),           "\""), sep = "\n")
+    cat(paste0("Relative path: ", "\"", relativePath,          "\""), sep = "\n")
+    cat(paste0("Collection:    ", "\"", collection,            "\""), sep = "\n")
+}
diff --git a/sdk/R/R/Collection.R b/sdk/R/R/Collection.R
new file mode 100644 (file)
index 0000000..ed5b4f4
--- /dev/null
@@ -0,0 +1,228 @@
+source("./R/Subcollection.R")
+source("./R/ArvadosFile.R")
+source("./R/RESTService.R")
+source("./R/util.R")
+
+#' Collection
+#' 
+#' Collection class provides interface for working with Arvados collections.
+#' 
+#' @section Usage:
+#' \preformatted{collection = Collection$new(arv, uuid)}
+#'
+#' @section Arguments:
+#' \describe{
+#'   \item{arv}{Arvados object.}
+#'   \item{uuid}{UUID of a collection.}
+#' }
+#' 
+#' @section Methods:
+#' \describe{
+#'   \item{add(content)}{Adds ArvadosFile or Subcollection specified by content to the collection.}
+#'   \item{create(fileNames, relativePath = "")}{Creates one or more ArvadosFiles and adds them to the collection at specified path.}
+#'   \item{remove(fileNames)}{Remove one or more files from the collection.}
+#'   \item{move(content, newLocation)}{Moves ArvadosFile or Subcollection to another location in the collection.}
+#'   \item{getFileListing()}{Returns the collection's file content as a character vector.}
+#'   \item{get(relativePath)}{If relativePath is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL.}
+#' }
+#'
+#' @name Collection
+#' @examples
+#' \dontrun{
+#' arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+#' collection <- Collection$new(arv, "uuid")
+#'
+#' newFile <- ArvadosFile$new("myFile")
+#' collection$add(newFile, "myFolder")
+#'
+#' createdFiles <- collection$create(c("main.cpp", "lib.dll"), "cpp/src/")
+#'
+#' collection$remove("location/to/my/file.cpp")
+#'
+#' collection$move("folder/file.cpp", "file.cpp")
+#'
+#' arvadosFile <- collection$get("location/to/my/file.cpp")
+#' arvadosSubcollection <- collection$get("location/to/my/directory/")
+#' }
+NULL
+
+#' @export
+Collection <- R6::R6Class(
+
+    "Collection",
+
+    public = list(
+
+        api  = NULL,
+        uuid = NULL,
+
+        # Fetches the collection's file listing over REST and builds the
+        # in-memory tree of Subcollection/ArvadosFile nodes.
+        initialize = function(api, uuid)
+        {
+            self$api <- api
+            private$REST <- api$getRESTService()
+
+            self$uuid <- uuid
+
+            private$fileContent <- private$REST$getCollectionContent(uuid)
+            private$tree <- CollectionTree$new(private$fileContent, self)
+        },
+
+        # Adds an existing ArvadosFile or Subcollection to the folder given
+        # by relativePath ("", "." and "./" all mean the collection root).
+        # Returns the added content; stops with an error on invalid input or
+        # a missing destination folder.
+        add = function(content, relativePath = "")
+        {
+            if(relativePath == ""  ||
+               relativePath == "." ||
+               relativePath == "./")
+            {
+                subcollection <- private$tree$getTree()
+            }
+            else
+            {
+                relativePath <- trimFromEnd(relativePath, "/")
+                subcollection <- self$get(relativePath)
+            }
+
+            if(is.null(subcollection))
+                stop(paste("Subcollection", relativePath, "doesn't exist."))
+
+            if("ArvadosFile"   %in% class(content) ||
+               "Subcollection" %in% class(content))
+            {
+
+                if(content$getName() == "")
+                    stop("Content has invalid name.")
+
+                subcollection$add(content)
+                content
+            }
+            else
+            {
+                # Fixed typo in the error message ("AravodsFile").
+                stop(paste0("Expected ArvadosFile or Subcollection object, got ",
+                            paste0("(", paste0(class(content), collapse = ", "), ")"),
+                            "."))
+            }
+        },
+
+        # Creates one or more empty ArvadosFiles (fileNames must be a
+        # character vector) in the folder given by relativePath.  Returns a
+        # single ArvadosFile when one name was given, otherwise a list.
+        create = function(fileNames, relativePath = "")
+        {
+            if(relativePath == ""  ||
+               relativePath == "." ||
+               relativePath == "./")
+            {
+                subcollection <- private$tree$getTree()
+            }
+            else
+            {
+                relativePath  <- trimFromEnd(relativePath, "/") 
+                subcollection <- self$get(relativePath)
+            }
+
+            if(is.null(subcollection))
+                stop(paste("Subcollection", relativePath, "doesn't exist."))
+
+            if(is.character(fileNames))
+            {
+                arvadosFiles <- NULL
+                sapply(fileNames, function(fileName)
+                {
+                    childWithSameName <- subcollection$get(fileName)
+                    if(!is.null(childWithSameName))
+                        stop("Destination already contains file with same name.")
+
+                    newFile <- ArvadosFile$new(fileName)
+                    subcollection$add(newFile)
+
+                    # <<- accumulates into the enclosing function's scope.
+                    arvadosFiles <<- c(arvadosFiles, newFile)
+                })
+
+                if(length(arvadosFiles) == 1)
+                    return(arvadosFiles[[1]])
+                else
+                    return(arvadosFiles)
+            }
+            else 
+            {
+                stop(paste0("Expected character vector, got ",
+                            paste0("(", paste0(class(fileNames), collapse = ", "), ")"),
+                            "."))
+            }
+        },
+
+        # Removes one or more files/folders (paths must be a character
+        # vector) from the collection.  The root folder cannot be removed.
+        remove = function(paths)
+        {
+            if(is.character(paths))
+            {
+                sapply(paths, function(filePath)
+                {
+                    filePath <- trimFromEnd(filePath, "/")
+                    file <- self$get(filePath)
+
+                    if(is.null(file))
+                        stop(paste("File", filePath, "doesn't exist."))
+
+                    parent <- file$getParent()
+
+                    if(is.null(parent))
+                        stop("You can't delete root folder.")
+
+                    parent$remove(file$getName())
+                })
+
+                "Content removed"
+            }
+            else 
+            {
+                stop(paste0("Expected character vector, got ",
+                            paste0("(", paste0(class(paths), collapse = ", "), ")"),
+                            "."))
+            }
+        },
+
+        # Moves the file/folder at `content` to newLocation inside the
+        # collection (delegates to the element's own move method).
+        move = function(content, newLocation)
+        {
+            content <- trimFromEnd(content, "/")
+
+            elementToMove <- self$get(content)
+
+            if(is.null(elementToMove))
+                stop("Content you want to move doesn't exist in the collection.")
+
+            elementToMove$move(newLocation)
+        },
+
+        # Re-fetches the collection's file listing from REST and returns it
+        # sorted case-insensitively.
+        getFileListing = function()
+        {
+            content <- private$REST$getCollectionContent(self$uuid)
+            content[order(tolower(content))]
+        },
+
+        # Returns the ArvadosFile/Subcollection at relativePath, or NULL.
+        get = function(relativePath)
+        {
+            private$tree$getElement(relativePath)
+        },
+
+        getRESTService = function() private$REST,
+        setRESTService = function(newRESTService) private$REST <- newRESTService
+    ),
+
+    private = list(
+
+        REST        = NULL,  # RESTService used for all server communication
+        tree        = NULL,  # CollectionTree mirroring the server-side layout
+        fileContent = NULL   # flat character vector of paths fetched at init
+    ),
+
+    cloneable = FALSE
+)
+
+#' print.Collection
+#'
+#' Custom print method for the Collection class.  Prints the object's type
+#' and the collection UUID.
+#'
+#' @param x Instance of Collection class
+#' @param ... Optional arguments.
+#' @export 
+print.Collection = function(x, ...)
+{
+    typeLine <- paste0("Type: \"Arvados Collection\"")
+    uuidLine <- paste0("uuid: \"", x$uuid, "\"")
+
+    cat(typeLine, sep = "\n")
+    cat(uuidLine, sep = "\n")
+}
diff --git a/sdk/R/R/CollectionTree.R b/sdk/R/R/CollectionTree.R
new file mode 100644 (file)
index 0000000..91e4ec8
--- /dev/null
@@ -0,0 +1,119 @@
+source("./R/Subcollection.R")
+source("./R/ArvadosFile.R")
+source("./R/util.R")
+
+# CollectionTree builds and navigates an in-memory tree of Subcollection /
+# ArvadosFile nodes from the flat list of paths returned by the REST server.
+CollectionTree <- R6::R6Class(
+    "CollectionTree",
+    public = list(
+
+        pathsList = NULL,  # the raw character vector of paths used to build the tree
+
+        # Builds the tree (rooted at an unnamed Subcollection) and attaches
+        # it to the given collection.
+        initialize = function(fileContent, collection)
+        {
+            self$pathsList <- fileContent
+
+            # Turn each "a/b/c" path into a single-chain branch of nodes.
+            treeBranches <- sapply(fileContent, function(filePath)
+            {
+                splitPath <- unlist(strsplit(filePath, "/", fixed = TRUE))
+                branch <- private$createBranch(splitPath)      
+            })
+
+            root <- Subcollection$new("")
+
+            # Merge every branch into the shared root.
+            sapply(treeBranches, function(branch)
+            {
+                private$addBranch(root, branch)
+            })
+
+            root$setCollection(collection)
+            private$tree <- root
+        },
+
+        # Walks the tree along relativePath ("./" prefix and trailing "/"
+        # are tolerated); returns the matching node or NULL if any path
+        # segment is missing.
+        getElement = function(relativePath)
+        {
+            relativePath <- trimFromStart(relativePath, "./")
+            relativePath <- trimFromEnd(relativePath, "/")
+
+            # NOTE(review): trimFromEnd above appears to strip the trailing
+            # "/" already, which would make this check redundant — confirm
+            # trimFromEnd's behavior in util.R before removing.
+            if(endsWith(relativePath, "/"))
+                relativePath <- substr(relativePath, 0, nchar(relativePath) - 1)
+
+            splitPath <- unlist(strsplit(relativePath, "/", fixed = TRUE))
+            returnElement <- private$tree
+
+            for(pathFragment in splitPath)
+            {
+                returnElement <- returnElement$get(pathFragment)
+
+                if(is.null(returnElement))
+                    return(NULL)
+            }
+
+            returnElement
+        },
+
+        getTree = function() private$tree
+    ),
+
+    private = list(
+
+        tree = NULL,  # root Subcollection of the built tree
+
+        # Converts a split path (e.g. c("a", "b", "c.txt")) into a chain:
+        # Subcollection "a" -> Subcollection "b" -> ArvadosFile "c.txt".
+        # The last segment is always created as a file (see addBranch for
+        # how that assumption is corrected later).
+        createBranch = function(splitPath)
+        {
+            branch <- NULL
+            lastElementIndex <- length(splitPath)
+
+            for(elementIndex in lastElementIndex:1)
+            {
+                if(elementIndex == lastElementIndex)
+                {
+                    branch <- ArvadosFile$new(splitPath[[elementIndex]])
+                }
+                else
+                {
+                    newFolder <- Subcollection$new(splitPath[[elementIndex]])
+                    newFolder$add(branch)
+                    branch <- newFolder
+                }
+            }
+            
+            branch
+        },
+
+        # Merges a single-chain branch into container, reusing nodes that
+        # already exist along the way.
+        addBranch = function(container, node)
+        {
+            child <- container$get(node$getName())
+
+            if(is.null(child))
+            {
+                container$add(node)
+            }
+            else
+            {
+                # Note: REST always returns a folder's name alone before the
+                # folder's content, so on first sight a bare name could be a
+                # file or a folder — we assume a file.  If the same name
+                # shows up again it must be a folder, so the ArvadosFile is
+                # replaced with a Subcollection here.
+                if("ArvadosFile" %in% class(child))
+                {
+                    child = private$replaceFileWithSubcollection(child)
+                }
+
+                private$addBranch(child, node$getFirst())
+            }
+        },
+
+        # Swaps an ArvadosFile node for an empty Subcollection of the same
+        # name under the same parent, and returns the new Subcollection.
+        replaceFileWithSubcollection = function(arvadosFile)
+        {
+            subcollection <- Subcollection$new(arvadosFile$getName())
+            fileParent <- arvadosFile$getParent()
+            fileParent$remove(arvadosFile$getName())
+            fileParent$add(subcollection)
+
+            arvadosFile$setParent(NULL)
+
+            subcollection
+        }
+    )
+)
diff --git a/sdk/R/R/HttpParser.R b/sdk/R/R/HttpParser.R
new file mode 100644 (file)
index 0000000..5df8287
--- /dev/null
@@ -0,0 +1,53 @@
+# HttpParser turns raw httr responses into R values: JSON bodies into lists,
+# and WebDAV PROPFIND XML into file-name / file-size vectors.
+HttpParser <- R6::R6Class(
+
+    # NOTE(review): the R6 classname string is misspelled ("HttrParser",
+    # presumably meant "HttpParser"); left unchanged because class(x) is
+    # observable behavior — confirm no caller depends on it before fixing.
+    "HttrParser",
+
+    public = list(
+
+        validContentTypes = NULL,  # content types accepted by parseResponse
+
+        initialize = function() 
+        {
+            self$validContentTypes <- c("text", "raw")
+        },
+
+        # Parses an httr response body as JSON and returns the resulting list.
+        parseJSONResponse = function(serverResponse) 
+        {
+            parsed_response <- httr::content(serverResponse,
+                                             as = "parsed",
+                                             type = "application/json")
+        },
+
+        # Parses an httr response body as the given outputType ("text"/"raw").
+        parseResponse = function(serverResponse, outputType)
+        {
+            parsed_response <- httr::content(serverResponse, as = outputType)
+        },
+
+        # Extracts file names from a WebDAV PROPFIND XML response, made
+        # relative to uri.  Empty entries are dropped, and the first entry
+        # is discarded — presumably the listing's own root href; verify
+        # against the WebDAV response format.
+        getFileNamesFromResponse = function(response, uri)
+        {
+            text <- rawToChar(response$content)
+            doc <- XML::xmlParse(text, asText=TRUE)
+            # base = scheme://host stripped from uri, with a trailing "/".
+            base <- paste(paste("/", strsplit(uri, "/")[[1]][-1:-3], sep="", collapse=""), "/", sep="")
+            result <- unlist(
+                XML::xpathApply(doc, "//D:response/D:href", function(node) {
+                    sub(base, "", URLdecode(XML::xmlValue(node)), fixed=TRUE)
+                })
+            )
+            result <- result[result != ""]
+            result[-1]
+        },
+
+        # Extracts the getcontentlength values (as characters) from a WebDAV
+        # PROPFIND XML response.
+        getFileSizesFromResponse = function(response, uri)    
+        {
+            text <- rawToChar(response$content)
+            doc <- XML::xmlParse(text, asText=TRUE)
+
+            base <- paste(paste("/", strsplit(uri, "/")[[1]][-1:-3], sep="", collapse=""), "/", sep="")
+            result <- XML::xpathApply(doc, "//D:response/D:propstat/D:prop/D:getcontentlength", function(node) {
+              XML::xmlValue(node)
+            })
+
+            unlist(result)
+        }
+    )
+)
diff --git a/sdk/R/R/HttpRequest.R b/sdk/R/R/HttpRequest.R
new file mode 100644 (file)
index 0000000..f8ad0a6
--- /dev/null
@@ -0,0 +1,98 @@
+source("./R/util.R")
+
+# HttpRequest is a thin wrapper around httr that validates the HTTP verb,
+# assembles the query string and retries failed requests.
+HttpRequest <- R6::R6Class(
+
+    # NOTE(review): the R6 classname string is misspelled ("HttrRequest",
+    # presumably meant "HttpRequest"); left unchanged because class(x) is
+    # observable behavior — confirm no caller depends on it before fixing.
+    "HttrRequest",
+
+    public = list(
+
+        validContentTypes = NULL,  # accepted response content types
+        validVerbs = NULL,         # HTTP verbs accepted by execute()
+
+        initialize = function() 
+        {
+            self$validContentTypes <- c("text", "raw")
+            self$validVerbs <- c("GET", "POST", "PUT", "DELETE", "PROPFIND", "MOVE")
+        },
+
+        # Executes an HTTP request and returns the raw httr response object.
+        # query/limit/offset are folded into the URL via createQuery();
+        # retryTimes is the number of retries on top of the initial attempt.
+        execute = function(verb, url, headers = NULL, body = NULL, query = NULL,
+                           limit = NULL, offset = NULL, retryTimes = 0)
+        {
+            if(!(verb %in% self$validVerbs))
+                stop("Http verb is not valid.")
+
+            headers  <- httr::add_headers(unlist(headers))
+            urlQuery <- self$createQuery(query, limit, offset)
+            url      <- paste0(url, urlQuery)
+
+            # times = 1 regular call + numberOfRetries
+            response <- httr::RETRY(verb, url = url, body = body,
+                                    config = headers, times = retryTimes + 1)
+        },
+
+        # Builds "/?filters=...&limit=...&offset=..." from the non-NULL
+        # components, or "" when none are supplied.
+        createQuery = function(filters, limit, offset)
+        {
+            finalQuery <- NULL
+
+            # c() silently drops NULL components, so only the supplied query
+            # parts are collected here.  (The original additionally ran
+            # `finalQuery[!is.null(finalQuery)]`, which was a no-op: is.null
+            # on a vector is always a single FALSE.)
+            finalQuery <- c(finalQuery, private$createFiltersQuery(filters))
+            finalQuery <- c(finalQuery, private$createLimitQuery(limit))
+            finalQuery <- c(finalQuery, private$createOffsetQuery(offset))
+
+            finalQuery <- paste0(finalQuery, collapse = "&")
+
+            if(finalQuery != "")
+                finalQuery <- paste0("/?", finalQuery)
+
+            finalQuery
+        }
+    ),
+
+    private = list(
+
+        # Serializes filters to the Python-list style the API expects and
+        # URL-encodes them; returns NULL when filters is NULL.
+        createFiltersQuery = function(filters)
+        {
+            if(!is.null(filters))
+            {
+                filters <- RListToPythonList(filters, ",")
+                encodedQuery <- URLencode(filters, reserved = T, repeated = T)
+
+                return(paste0("filters=", encodedQuery))
+            }
+
+            return(NULL)
+        },
+
+        # Returns "limit=<n>" or NULL; stops if limit is not numeric.
+        createLimitQuery = function(limit)
+        {
+            if(!is.null(limit))
+            {
+                limit <- suppressWarnings(as.numeric(limit))
+
+                if(is.na(limit))
+                    stop("Limit must be a numeric type.")
+                
+                return(paste0("limit=", limit))
+            }
+
+            return(NULL)
+        },
+
+        # Returns "offset=<n>" or NULL; stops if offset is not numeric.
+        createOffsetQuery = function(offset)
+        {
+            if(!is.null(offset))
+            {
+                offset <- suppressWarnings(as.numeric(offset))
+
+                if(is.na(offset))
+                    stop("Offset must be a numeric type.")
+                
+                return(paste0("offset=", offset))
+            }
+
+            return(NULL)
+        }
+    ),
+
+    cloneable = FALSE
+)
diff --git a/sdk/R/R/RESTService.R b/sdk/R/R/RESTService.R
new file mode 100644 (file)
index 0000000..12e6591
--- /dev/null
@@ -0,0 +1,349 @@
+# RESTService performs all server communication for the SDK: resource CRUD
+# against the Arvados API server, and file operations against the Keep
+# WebDAV server (discovered lazily via the discovery document).
+RESTService <- R6::R6Class(
+
+    "RESTService",
+
+    public = list(
+
+        hostName   = NULL,  # full API base URL ("https://<host>/arvados/v1/")
+        token      = NULL,  # OAuth2 token sent on every request
+        http       = NULL,  # HttpRequest-like executor
+        httpParser = NULL,  # HttpParser-like response parser
+        numRetries = NULL,  # retries passed to every http$execute call
+
+        initialize = function(token, hostName,
+                              http, httpParser,
+                              numRetries     = 0,
+                              webDavHostName = NULL)
+        {
+            version <- "v1"
+
+            self$token       <- token
+            self$hostName    <- paste0("https://", hostName,
+                                       "/arvados/", version, "/")
+            self$http        <- http
+            self$httpParser  <- httpParser
+            self$numRetries  <- numRetries
+
+            private$rawHostName    <- hostName
+            private$webDavHostName <- webDavHostName
+        },
+
+        setNumConnRetries = function(newNumOfRetries)
+        {
+            self$numRetries <- newNumOfRetries
+        },
+
+        # Returns the WebDAV (Keep) host URL, fetching it from the discovery
+        # document on first use and caching it afterwards.
+        getWebDavHostName = function()
+        {
+            if(is.null(private$webDavHostName))
+            {
+                discoveryDocumentURL <- paste0("https://", private$rawHostName,
+                                               "/discovery/v1/apis/arvados/v1/rest")
+
+                headers <- list(Authorization = paste("OAuth2", self$token))
+
+                serverResponse <- self$http$execute("GET", discoveryDocumentURL, headers,
+                                                    retryTimes = self$numRetries)
+
+                discoveryDocument <- self$httpParser$parseJSONResponse(serverResponse)
+                private$webDavHostName <- discoveryDocument$keepWebServiceUrl
+
+                if(is.null(private$webDavHostName))
+                    stop("Unable to find WebDAV server.")
+            }
+
+            private$webDavHostName
+        },
+
+        # GET a single API resource by UUID; stops on API-reported errors.
+        getResource = function(resource, uuid)
+        {
+            resourceURL <- paste0(self$hostName, resource, "/", uuid)
+            headers <- list(Authorization = paste("OAuth2", self$token))
+
+            serverResponse <- self$http$execute("GET", resourceURL, headers,
+                                                retryTimes = self$numRetries)
+
+            resource <- self$httpParser$parseJSONResponse(serverResponse)
+
+            if(!is.null(resource$errors))
+                stop(resource$errors)
+
+            resource
+        },
+
+        # GET a page of API resources with optional filters/limit/offset.
+        listResources = function(resource, filters = NULL, limit = 100, offset = 0)
+        {
+            resourceURL <- paste0(self$hostName, resource)
+            headers <- list(Authorization = paste("OAuth2", self$token))
+            body <- NULL
+
+            serverResponse <- self$http$execute("GET", resourceURL, headers, body,
+                                                filters, limit, offset,
+                                                self$numRetries)
+
+            resources <- self$httpParser$parseJSONResponse(serverResponse)
+
+            if(!is.null(resources$errors))
+                stop(resources$errors)
+
+            resources
+        },
+
+        # Pages through a listing endpoint until items_available is reached,
+        # concatenating all items.
+        fetchAllItems = function(resourceURL, filters)
+        {
+            headers <- list(Authorization = paste("OAuth2", self$token))
+
+            offset <- 0
+            itemsAvailable <- .Machine$integer.max
+            items <- c()
+            while(length(items) < itemsAvailable)
+            {
+                serverResponse <- self$http$execute(verb       = "GET",
+                                                    url        = resourceURL,
+                                                    headers    = headers,
+                                                    body       = NULL,
+                                                    query      = filters,
+                                                    limit      = NULL,
+                                                    offset     = offset,
+                                                    retryTimes = self$numRetries)
+
+                parsedResponse <- self$httpParser$parseJSONResponse(serverResponse)
+
+                if(!is.null(parsedResponse$errors))
+                    stop(parsedResponse$errors)
+
+                items          <- c(items, parsedResponse$items)
+                offset         <- length(items)
+                itemsAvailable <- parsedResponse$items_available
+            }
+
+            items
+        },
+
+        # DELETE an API resource by UUID; stops on API-reported errors.
+        deleteResource = function(resource, uuid)
+        {
+            collectionURL <- paste0(self$hostName, resource, "/", uuid)
+            headers <- list("Authorization" = paste("OAuth2", self$token),
+                            "Content-Type"  = "application/json")
+
+            serverResponse <- self$http$execute("DELETE", collectionURL, headers,
+                                                retryTimes = self$numRetries)
+
+            removedResource <- self$httpParser$parseJSONResponse(serverResponse)
+
+            if(!is.null(removedResource$errors))
+                stop(removedResource$errors)
+
+            removedResource
+        },
+
+        # PUT newContent (JSON-encoded) to an API resource by UUID.
+        updateResource = function(resource, uuid, newContent)
+        {
+            resourceURL <- paste0(self$hostName, resource, "/", uuid)
+            headers <- list("Authorization" = paste("OAuth2", self$token),
+                            "Content-Type"  = "application/json")
+
+            newContent <- jsonlite::toJSON(newContent, auto_unbox = T)
+
+            serverResponse <- self$http$execute("PUT", resourceURL, headers, newContent,
+                                                retryTimes = self$numRetries)
+
+            updatedResource <- self$httpParser$parseJSONResponse(serverResponse)
+
+            if(!is.null(updatedResource$errors))
+                stop(updatedResource$errors)
+
+            updatedResource
+        },
+
+        # POST content (JSON-encoded) to create a new API resource.
+        createResource = function(resource, content)
+        {
+            resourceURL <- paste0(self$hostName, resource)
+            headers <- list("Authorization" = paste("OAuth2", self$token),
+                            "Content-Type"  = "application/json")
+
+            content <- jsonlite::toJSON(content, auto_unbox = T)
+
+            serverResponse <- self$http$execute("POST", resourceURL, headers, content,
+                                                retryTimes = self$numRetries)
+
+            newResource <- self$httpParser$parseJSONResponse(serverResponse)
+
+            if(!is.null(newResource$errors))
+                stop(newResource$errors)
+
+            newResource
+        },
+
+        # Creates each file in `files` (relative paths) inside collection
+        # `uuid` via WebDAV PUT.
+        create = function(files, uuid)
+        {
+            sapply(files, function(filePath)
+            {
+                private$createNewFile(filePath, uuid, "text/html")
+            })
+        },
+
+        # Deletes the file/folder at relativePath inside collection `uuid`
+        # via WebDAV DELETE; stops on non-2xx status.
+        delete = function(relativePath, uuid)
+        {
+            fileURL <- paste0(self$getWebDavHostName(), "c=",
+                              uuid, "/", relativePath);
+            headers <- list(Authorization = paste("OAuth2", self$token)) 
+
+            serverResponse <- self$http$execute("DELETE", fileURL, headers,
+                                                retryTimes = self$numRetries)
+
+            if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)
+                stop(paste("Server code:", serverResponse$status_code))
+
+            serverResponse
+        },
+
+        # Moves `from` to `to` inside collection `uuid` via WebDAV MOVE
+        # (Destination header); stops on non-2xx status.
+        move = function(from, to, uuid)
+        {
+            collectionURL <- paste0(self$getWebDavHostName(), "c=", uuid, "/")
+            fromURL <- paste0(collectionURL, from)
+            toURL <- paste0(collectionURL, to)
+
+            headers <- list("Authorization" = paste("OAuth2", self$token),
+                           "Destination" = toURL)
+
+            serverResponse <- self$http$execute("MOVE", fromURL, headers,
+                                                retryTimes = self$numRetries)
+
+            if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)
+                stop(paste("Server code:", serverResponse$status_code))
+
+            serverResponse
+        },
+
+        # Lists a collection's file paths via WebDAV PROPFIND.
+        getCollectionContent = function(uuid)
+        {
+            collectionURL <- URLencode(paste0(self$getWebDavHostName(),
+                                              "c=", uuid))
+
+            headers <- list("Authorization" = paste("OAuth2", self$token))
+
+            response <- self$http$execute("PROPFIND", collectionURL, headers,
+                                          retryTimes = self$numRetries)
+
+            # NOTE(review): comparing the response object itself to "" looks
+            # fragile for an httr response — confirm what http$execute
+            # returns on an empty reply.
+            if(all(response == ""))
+                stop("Response is empty, request may be misconfigured")
+
+            if(response$status_code < 200 || response$status_code >= 300)
+                stop(paste("Server code:", response$status_code))
+
+            self$httpParser$getFileNamesFromResponse(response, collectionURL)
+        },
+
+        # Returns the size(s) in bytes reported by WebDAV PROPFIND for the
+        # file/folder at relativePath.
+        getResourceSize = function(relativePath, uuid)
+        {
+            collectionURL <- URLencode(paste0(self$getWebDavHostName(),
+                                              "c=", uuid))
+
+            subcollectionURL <- paste0(collectionURL, "/", relativePath);
+
+            headers <- list("Authorization" = paste("OAuth2", self$token))
+
+            response <- self$http$execute("PROPFIND", subcollectionURL, headers,
+                                          retryTimes = self$numRetries)
+
+            if(all(response == ""))
+                stop("Response is empty, request may be misconfigured")
+
+            if(response$status_code < 200 || response$status_code >= 300)
+                stop(paste("Server code:", response$status_code))
+
+            sizes <- self$httpParser$getFileSizesFromResponse(response,
+                                                              collectionURL)
+            as.numeric(sizes)
+        },
+
+        # Reads file content ("text" or "raw") from WebDAV; offset/length
+        # select a byte range via the Range header (length = 0 means "to
+        # end of file").
+        read = function(relativePath, uuid, contentType = "raw", offset = 0, length = 0)
+        {
+            fileURL <- paste0(self$getWebDavHostName(),
+                             "c=", uuid, "/", relativePath);
+
+            range <- paste0("bytes=", offset, "-")
+
+            if(length > 0)
+                range = paste0(range, offset + length - 1)
+
+            if(offset == 0 && length == 0)
+            {
+                headers <- list(Authorization = paste("OAuth2", self$token))
+            }
+            else
+            {
+                headers <- list(Authorization = paste("OAuth2", self$token),
+                                Range = range)
+            }
+
+            if(!(contentType %in% self$httpParser$validContentTypes))
+                stop("Invalid contentType. Please use text or raw.")
+
+            serverResponse <- self$http$execute("GET", fileURL, headers,
+                                                retryTimes = self$numRetries)
+
+            if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)
+                stop(paste("Server code:", serverResponse$status_code))
+
+            self$httpParser$parseResponse(serverResponse, contentType)
+        },
+
+        # Writes content to a file via WebDAV PUT with the given contentType.
+        write = function(relativePath, uuid, content, contentType)
+        {
+            fileURL <- paste0(self$getWebDavHostName(),
+                             "c=", uuid, "/", relativePath);
+            headers <- list(Authorization = paste("OAuth2", self$token),
+                            "Content-Type" = contentType)
+            body <- content
+
+            serverResponse <- self$http$execute("PUT", fileURL, headers, body,
+                                                retryTimes = self$numRetries)
+
+            if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)
+                stop(paste("Server code:", serverResponse$status_code))
+
+            self$httpParser$parseResponse(serverResponse, "text")
+        },
+
+        # Opens a curl connection to a file in the collection.
+        # NOTE(review): parameter order (uuid, relativePath) is the reverse
+        # of the other WebDAV methods here — easy to misuse.
+        getConnection = function(uuid, relativePath, openMode)
+        {
+            fileURL <- paste0(self$getWebDavHostName(), 
+                              "c=", uuid, "/", relativePath);
+            headers <- list(Authorization = paste("OAuth2", self$token))
+
+            h <- curl::new_handle()
+            curl::handle_setheaders(h, .list = headers)
+
+            conn <- curl::curl(url = fileURL, open = openMode, handle = h)
+
+            conn
+        }
+    ),
+
+    private = list(
+
+        webDavHostName = NULL,  # cached Keep WebDAV URL (lazily discovered)
+        rawHostName    = NULL,  # bare host name used for discovery requests
+
+        # Creates an empty file at relativePath via WebDAV PUT and returns a
+        # confirmation string; stops on non-2xx status.
+        createNewFile = function(relativePath, uuid, contentType)
+        {
+            fileURL <- paste0(self$getWebDavHostName(), "c=",
+                              uuid, "/", relativePath)
+            headers <- list(Authorization = paste("OAuth2", self$token), 
+                            "Content-Type" = contentType)
+            body <- NULL
+
+            serverResponse <- self$http$execute("PUT", fileURL, headers, body,
+                                                retryTimes = self$numRetries)
+
+            if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)
+                stop(paste("Server code:", serverResponse$status_code))
+
+            paste("File created:", relativePath)
+        }
+    ),
+
+    cloneable = FALSE
+)
diff --git a/sdk/R/R/Subcollection.R b/sdk/R/R/Subcollection.R
new file mode 100644 (file)
index 0000000..863e3f7
--- /dev/null
@@ -0,0 +1,329 @@
+source("./R/util.R")
+
+#' Subcollection
+#' 
+#' Subcollection class represents a folder inside Arvados collection.
+#' It is essentially a composite of ArvadosFiles and other Subcollections.
+#' 
+#' @section Usage:
+#' \preformatted{subcollection = Subcollection$new(name)}
+#'
+#' @section Arguments:
+#' \describe{
+#'   \item{name}{Name of the subcollection.}
+#' }
+#' 
+#' @section Methods:
+#' \describe{
+#'   \item{getName()}{Returns name of the subcollection.}
+#'   \item{getRelativePath()}{Returns subcollection path relative to the root.}
+#'   \item{add(content)}{Adds ArvadosFile or Subcollection specified by content to the subcollection.}
+#'   \item{remove(name)}{Removes ArvadosFile or Subcollection specified by name from the subcollection.}
+#'   \item{get(relativePath)}{If relativePath is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL.}
+#'   \item{getFileListing()}{Returns subcollections file content as character vector.}
+#'   \item{getSizeInBytes()}{Returns subcollections content size in bytes.}
+#'   \item{move(newLocation)}{Moves subcollection to a new location inside collection.}
+#' }
+#'
+#' @name Subcollection
+#' @examples
+#' \dontrun{
+#' myFolder <- Subcollection$new("myFolder")
+#' myFile   <- ArvadosFile$new("myFile")
+#'
+#' myFolder$add(myFile)
+#' myFolder$get("myFile")
+#' myFolder$remove("myFile")
+#'
+#' myFolder$move("newLocation/myFolder")
+#' }
+NULL
+
+#' @export
+Subcollection <- R6::R6Class(
+
+    "Subcollection",
+
+    public = list(
+
+        initialize = function(name)
+        {
+            private$name <- name
+        },
+
+        # Returns the name of this subcollection.
+        getName = function() private$name,
+        
+        # Builds this subcollection's path relative to the collection root by
+        # walking up the parent chain.
+        getRelativePath = function()
+        {
+            relativePath <- c(private$name)
+            parent <- private$parent
+
+            while(!is.null(parent))
+            {
+                relativePath <- c(parent$getName(), relativePath)
+                parent <- parent$getParent()
+            }
+
+            # Drop the unnamed root component before joining.
+            relativePath <- relativePath[relativePath != ""]
+            paste0(relativePath, collapse = "/")
+        },
+
+        # Adds an ArvadosFile or Subcollection as a child. When this
+        # subcollection is attached to a collection, the content is also
+        # created on the server via REST before being linked locally.
+        add = function(content)
+        {
+            if("ArvadosFile"   %in% class(content) ||
+               "Subcollection" %in% class(content))
+            {
+                if(content$getName() == "")
+                    stop("Content has invalid name.")
+
+                childWithSameName <- self$get(content$getName())
+
+                if(!is.null(childWithSameName))
+                    stop(paste("Subcollection already contains ArvadosFile",
+                               "or Subcollection with same name."))
+
+                if(!is.null(private$collection))
+                {
+                    if(self$getRelativePath() != "")
+                        contentPath <- paste0(self$getRelativePath(),
+                                              "/", content$getFileListing())
+                    else
+                        contentPath <- content$getFileListing()
+
+                    REST <- private$collection$getRESTService()
+                    REST$create(contentPath, private$collection$uuid)
+                    content$setCollection(private$collection)
+                }
+
+                private$children <- c(private$children, content)
+                content$setParent(self)
+
+                "Content added successfully."
+            }
+            else
+            {
+                # Fixed typo in the error message ("AravodsFile").
+                stop(paste0("Expected ArvadosFile or Subcollection object, got ",
+                            paste0("(", paste0(class(content), collapse = ", "), ")"),
+                            "."))
+            }
+        },
+
+        # Removes the child with the given name. When this subcollection is
+        # attached to a collection, the child is also deleted on the server.
+        remove = function(name)
+        {
+            if(is.character(name))
+            {
+                child <- self$get(name)
+
+                if(is.null(child))
+                    stop(paste("Subcollection doesn't contain ArvadosFile",
+                               "or Subcollection with specified name."))
+
+                if(!is.null(private$collection))
+                {
+                    REST <- private$collection$getRESTService()
+                    REST$delete(child$getRelativePath(), private$collection$uuid)
+
+                    child$setCollection(NULL)
+                }
+
+                private$removeChild(name)
+                child$setParent(NULL)
+
+                "Content removed"
+            }
+            else
+            {
+                stop(paste0("Expected character, got ",
+                            paste0("(", paste0(class(name), collapse = ", "), ")"),
+                            "."))
+            }
+        },
+
+        # Returns this subcollection's file listing as a character vector,
+        # sorted case-insensitively.
+        getFileListing = function(fullPath = TRUE)
+        {
+            content <- private$getContentAsCharVector(fullPath)
+            content[order(tolower(content))]
+        },
+
+        # Returns the total size in bytes of this subcollection's content as
+        # reported by the server, or 0 when not attached to a collection.
+        getSizeInBytes = function()
+        {
+            if(is.null(private$collection))
+                return(0)
+
+            REST <- private$collection$getRESTService()
+
+            fileSizes <- REST$getResourceSize(paste0(self$getRelativePath(), "/"),
+                                              private$collection$uuid)
+            return(sum(fileSizes))
+        },
+
+        # Moves this subcollection to newLocation (which must include the new
+        # name) inside the same collection, updating both the server and the
+        # local tree.
+        move = function(newLocation)
+        {
+            if(is.null(private$collection))
+                stop("Subcollection doesn't belong to any collection")
+
+            newLocation <- trimFromEnd(newLocation, "/")
+            nameAndPath <- splitToPathAndName(newLocation)
+
+            newParent <- private$collection$get(nameAndPath$path)
+
+            if(is.null(newParent))
+            {
+                stop("Unable to get destination subcollection")
+            }
+
+            childWithSameName <- newParent$get(nameAndPath$name)
+
+            if(!is.null(childWithSameName))
+                stop("Destination already contains content with same name.")
+
+            REST <- private$collection$getRESTService()
+            REST$move(self$getRelativePath(),
+                      paste0(newParent$getRelativePath(), "/", nameAndPath$name),
+                      private$collection$uuid)
+
+            private$detachFromCurrentParent()
+            private$attachToNewParent(newParent)
+
+            private$name <- nameAndPath$name
+
+            "Content moved successfully."
+        },
+
+        # Returns the direct child with the given name, or NULL.
+        get = function(name)
+        {
+            for(child in private$children)
+            {
+                if(child$getName() == name)
+                    return(child)
+            }
+
+            return(NULL)
+        },
+
+        # Returns the first child, or NULL when there are no children.
+        getFirst = function()
+        {
+            if(length(private$children) == 0)
+               return(NULL)
+
+            private$children[[1]]
+        },
+
+        # Attaches this subcollection (and, by default, all of its children)
+        # to the given collection.
+        setCollection = function(collection, setRecursively = TRUE)
+        {
+            private$collection <- collection
+
+            if(setRecursively)
+            {
+                for(child in private$children)
+                    child$setCollection(collection)
+            }
+        },
+
+        getCollection = function() private$collection,
+
+        getParent = function() private$parent,
+
+        setParent = function(newParent) private$parent <- newParent
+    ),
+
+    private = list(
+
+        name       = NULL,
+        children   = NULL,
+        parent     = NULL,
+        collection = NULL,
+
+        # Drops the child with the given name from private$children
+        # (local bookkeeping only; no REST call).
+        removeChild = function(name)
+        {
+            numberOfChildren = length(private$children)
+            if(numberOfChildren > 0)
+            {
+                for(childIndex in 1:numberOfChildren)
+                {
+                    if(private$children[[childIndex]]$getName() == name)
+                    {
+                        private$children = private$children[-childIndex]
+                        return()
+                    }
+                }
+            }
+        },
+
+        attachToNewParent = function(newParent)
+        {
+            #Note: We temporarily set the new parent's collection to NULL.
+            #      This ensures that add() doesn't post this content to REST
+            #      again (the move was already performed on the server).
+            parentsCollection <- newParent$getCollection()
+            newParent$setCollection(NULL, setRecursively = FALSE)
+
+            newParent$add(self)
+
+            newParent$setCollection(parentsCollection, setRecursively = FALSE)
+
+            private$parent <- newParent
+        },
+
+        # Renamed from the misspelled "dettachFromCurrentParent"; private, so
+        # only the in-class call site in move() is affected.
+        detachFromCurrentParent = function()
+        {
+            #Note: We temporarily set the parent's collection to NULL. This
+            #      ensures that remove() doesn't delete this subcollection on
+            #      REST (the move was already performed on the server).
+            parent <- private$parent
+            parentsCollection <- parent$getCollection()
+            parent$setCollection(NULL, setRecursively = FALSE)
+
+            parent$remove(private$name)
+
+            parent$setCollection(parentsCollection, setRecursively = FALSE)
+        },
+
+        # Flattens this subcollection into a character vector. With fullPath,
+        # children's listings are prefixed with this folder's name; otherwise
+        # only the direct children's names are returned.
+        getContentAsCharVector = function(fullPath = TRUE)
+        {
+            content <- NULL
+
+            if(fullPath)
+            {
+                for(child in private$children)
+                    content <- c(content, child$getFileListing())
+
+                if(private$name != "")
+                    content <- unlist(paste0(private$name, "/", content))
+            }
+            else
+            {
+                for(child in private$children)
+                    content <- c(content, child$getName())
+            }
+
+            content
+        }
+    ),
+    
+    cloneable = FALSE
+)
+
+#' print.Subcollection
+#'
+#' Custom print function for Subcollection class
+#'
+#' @param x Instance of Subcollection class
+#' @param ... Optional arguments.
+#' @export 
+print.Subcollection = function(x, ...)
+{
+    relativePath <- x$getRelativePath()
+    parentCollection <- x$getCollection()
+
+    collection <- NULL
+
+    if(!is.null(parentCollection))
+    {
+        collection <- parentCollection$uuid
+
+        # Anchor the path at the collection root, unless this is the
+        # (unnamed) root subcollection itself.
+        if(x$getName() != "")
+            relativePath <- paste0("/", relativePath)
+    }
+
+    cat(paste0("Type:          ", "\"", "Arvados Subcollection", "\""), sep = "\n")
+    cat(paste0("Name:          ", "\"", x$getName(),             "\""), sep = "\n")
+    cat(paste0("Relative path: ", "\"", relativePath,            "\""), sep = "\n")
+    cat(paste0("Collection:    ", "\"", collection,              "\""), sep = "\n")
+}
diff --git a/sdk/R/R/util.R b/sdk/R/R/util.R
new file mode 100644 (file)
index 0000000..d9af8b0
--- /dev/null
@@ -0,0 +1,54 @@
+# Removes the prefix trimCharacters from sample, when present;
+# otherwise returns sample unchanged.
+trimFromStart <- function(sample, trimCharacters)
+{
+    hasPrefix <- startsWith(sample, trimCharacters)
+
+    if(hasPrefix)
+    {
+        prefixLength <- nchar(trimCharacters)
+        sample <- substring(sample, prefixLength + 1)
+    }
+
+    sample
+}
+
+# Removes the suffix trimCharacters from sample, when present;
+# otherwise returns sample unchanged.
+trimFromEnd <- function(sample, trimCharacters)
+{
+    if(endsWith(sample, trimCharacters))
+        # substr is 1-based: the original used 0 as the start index, which R
+        # silently coerces to 1; use 1 explicitly.
+        sample <- substr(sample, 1, nchar(sample) - nchar(trimCharacters))
+
+    sample
+}
+
+# Serializes an R list/vector into a Python-style array literal, e.g.
+# list("a", "b") -> "[\"a\", \"b\"]". Elements that are themselves vectors or
+# lists of length > 1 are rendered recursively as nested arrays; everything
+# else is stringified and wrapped in double quotes.
+# NOTE(review): a length-1 vector/list element is quoted rather than nested —
+# presumably intentional for manifest/filter generation; confirm with callers.
+RListToPythonList <- function(RList, separator = ", ")
+{
+    pythonArrayContent <- sapply(RList, function(elementInList)
+    {
+        if((is.vector(elementInList) || is.list(elementInList)) &&
+            length(elementInList) > 1)
+        {
+            return(RListToPythonList(elementInList, separator))
+        }
+        else
+        {
+            return(paste0("\"", elementInList, "\""))
+        }
+    })
+
+    # Join the rendered elements and wrap them in brackets.
+    pythonArray <- paste0("[", paste0(pythonArrayContent, collapse = separator), "]")
+    pythonArray
+}
+
+# Ensures that sample begins with characters, prepending them when absent.
+appendToStartIfNotExist <- function(sample, characters)
+{
+    alreadyPrefixed <- startsWith(sample, characters)
+
+    if(!alreadyPrefixed)
+        sample <- paste0(characters, sample)
+
+    sample
+}
+
+# Splits path into its final component (name) and the remainder (path),
+# e.g. "a/b/c.txt" -> list(name = "c.txt", path = "a/b"); a bare file name
+# yields path = "".
+splitToPathAndName <- function(path)
+{
+    path <- appendToStartIfNotExist(path, "/")
+    # Use base strsplit instead of stringr::str_split to drop the third-party
+    # dependency. strsplit omits a trailing empty component when the string
+    # ends with the separator, while str_split keeps it — restore it so the
+    # behavior is identical for trailing-slash inputs.
+    components <- strsplit(path, "/", fixed = TRUE)[[1]]
+    if(endsWith(path, "/"))
+        components <- c(components, "")
+    nameAndPath <- list()
+    nameAndPath$name <- components[length(components)]
+    nameAndPath$path <- trimFromStart(paste0(components[-length(components)], collapse = "/"),
+                                      "/")
+
+    nameAndPath
+}
diff --git a/sdk/R/README.Rmd b/sdk/R/README.Rmd
new file mode 100644 (file)
index 0000000..098bfe1
--- /dev/null
@@ -0,0 +1,298 @@
+## R SDK for Arvados
+
+This SDK focuses on providing support for accessing Arvados projects, collections, and the files within collections.
+The API is not final and feedback is solicited from users on ways in which it could be improved.
+
+### Installation
+
+```install.packages("ArvadosR", repos=c("http://r.arvados.org", getOption("repos")["CRAN"]), dependencies=TRUE)```
+
+Note: on Linux, you may have to install supporting packages.
+
+On Centos 7, this is:
+
+```yum install libxml2-devel openssl-devel curl-devel```
+
+On Debian, this is:
+
+```apt-get install build-essential libxml2-dev libssl-dev libcurl4-gnutls-dev```
+
+
+### Usage
+
+#### Initializing API
+
+```{r include=FALSE}
+knitr::opts_chunk$set(eval = FALSE)
+```
+
+* Load Library and Initialize API:
+
+    ```{r}
+    library('ArvadosR')
+    # use environment variables ARVADOS_API_TOKEN and ARVADOS_API_HOST
+    arv <- Arvados$new()
+
+    # provide them explicitly
+    arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+    ```
+
+    Optionally, add numRetries parameter to specify number of times to retry failed service requests.
+    Default is 0.
+
+    ```{r}
+    arv <- Arvados$new("your Arvados token", "example.arvadosapi.com", numRetries = 3)
+    ```
+
+    This parameter can be set at any time using setNumRetries
+
+    ```{r}
+    arv$setNumRetries(5)
+    ```
+
+
+#### Working with collections
+
+* Get a collection:
+
+    ```{r}
+    collection <- arv$getCollection("uuid")
+    ```
+
+* List collections:
+
+    ```{r}
+    # offset of 0 and default limit of 100
+    collectionList <- arv$listCollections(list(list("name", "like", "Test%")))
+
+    collectionList <- arv$listCollections(list(list("name", "like", "Test%")), limit = 10, offset = 2)
+    ```
+
+    ```{r}
+    # count of total number of items (may be more than returned due to paging)
+    collectionList$items_available
+
+    # items which match the filter criteria
+    collectionList$items
+    ```
+
+* List all collections even if the number of items is greater than maximum API limit:
+
+    ```{r}
+    collectionList <- arv$listAllCollections(list(list("name", "like", "Test%")))
+    ```
+
+* Delete a collection:
+
+    ```{r}
+    deletedCollection <- arv$deleteCollection("uuid")
+    ```
+
+* Update a collection's metadata:
+
+    ```{r}
+    updatedCollection <- arv$updateCollection("uuid", list(name = "New name", description = "New description"))
+    ```
+
+* Create collection:
+
+    ```{r}
+    createdCollection <- arv$createCollection(list(name = "Example", description = "This is a test collection"))
+    ```
+
+
+#### Manipulating collection content
+
+* Create collection object:
+
+    ```{r}
+    collection <- Collection$new(arv, "uuid")
+    ```
+
+* Get list of files:
+
+    ```{r}
+    files <- collection$getFileListing()
+    ```
+
+* Get ArvadosFile or Subcollection from internal tree-like structure:
+
+    ```{r}
+    arvadosFile <- collection$get("location/to/my/file.cpp")
+    ```
+
+    or
+
+    ```{r}
+    arvadosSubcollection <- collection$get("location/to/my/directory/")
+    ```
+
+* Read a table:
+
+    ```{r}
+    arvadosFile   <- collection$get("myinput.txt")
+    arvConnection <- arvadosFile$connection("r")
+    mytable       <- read.table(arvConnection)
+    ```
+
+* Write a table:
+
+    ```{r}
+    arvadosFile   <- collection$create("myoutput.txt")
+    arvConnection <- arvadosFile$connection("w")
+    write.table(mytable, arvConnection)
+    arvadosFile$flush()
+    ```
+
+* Write to existing file (override current content of the file):
+
+    ```{r}
+    arvadosFile <- collection$get("location/to/my/file.cpp")
+    arvadosFile$write("This is new file content")
+    ```
+
+* Read whole file or just a portion of it:
+
+    ```{r}
+    fileContent <- arvadosFile$read()
+    fileContent <- arvadosFile$read("text")
+    fileContent <- arvadosFile$read("raw", offset = 1024, length = 512)
+    ```
+
+* Get ArvadosFile or Subcollection size:
+
+    ```{r}
+    size <- arvadosFile$getSizeInBytes()
+    ```
+
+    or
+
+    ```{r}
+    size <- arvadosSubcollection$getSizeInBytes()
+    ```
+
+* Create new file in a collection:
+
+    ```{r}
+    collection$create(fileNames, optionalRelativePath)
+    ```
+
+    Example:
+
+    ```{r}
+    mainFile <- collection$create("main.cpp", "cpp/src/")
+    fileList <- collection$create(c("main.cpp", lib.dll), "cpp/src/")
+    ```
+
+* Add existing ArvadosFile or Subcollection to a collection:
+
+    ```{r}
+    folder <- Subcollection$new("src")
+    file   <- ArvadosFile$new("main.cpp")
+    folder$add(file)
+    ```
+
+    ```{r}
+    collection$add(folder, "cpp")
+    ```
+
+    This example will add the file "main.cpp" to the "./cpp/src/" folder, if that folder exists.
+    If subcollection contains more files or folders they will be added recursively.
+
+* Delete file from a collection:
+
+    ```{r}
+    collection$remove("location/to/my/file.cpp")
+    ```
+
+    You can remove both Subcollection and ArvadosFile.
+    If subcollection contains more files or folders they will be removed recursively.
+
+    You can also remove multiple files at once:
+
+    ```{r}
+    collection$remove(c("path/to/my/file.cpp", "path/to/other/file.cpp"))
+    ```
+
+* Delete file or folder from a Subcollection:
+
+    ```{r}
+    subcollection <- collection$get("mySubcollection/")
+    subcollection$remove("fileInsideSubcollection.exe")
+    subcollection$remove("folderInsideSubcollection/")
+    ```
+
+* Move file or folder inside collection:
+
+    Directly from the collection
+
+    ```{r}
+    collection$move("folder/file.cpp", "file.cpp")
+    ```
+
+    Or from file
+
+    ```{r}
+    file <- collection$get("location/to/my/file.cpp")
+    file$move("newDestination/file.cpp")
+    ```
+
+    Or from subcollection
+
+    ```{r}
+    subcollection <- collection$get("location/to/folder")
+    subcollection$move("newDestination/folder")
+    ```
+
+    Make sure to include the new file name in the destination.
+    In the second example, file$move("newDestination/") would not work.
+
+#### Working with Arvados projects
+
+* Get a project:
+
+    ```{r}
+    project <- arv$getProject("uuid")
+    ```
+
+* List projects:
+
+    ```{r}
+    # list subprojects of a project
+    projects <- arv$listProjects(list(list("owner_uuid", "=", "aaaaa-j7d0g-ccccccccccccccc")))
+
+    # list projects which have names beginning with Example
+    arv$listProjects(list(list("name","like","Example%")))
+    ```
+
+* List all projects even if the number of items is greater than maximum API limit:
+
+    ```{r}
+    collectionList <- arv$listAllProjects(list(list("name","like","Example%")))
+    ```
+
+* Delete a project:
+
+    ```{r}
+    deletedProject <- arv$deleteProject("uuid")
+    ```
+
+* Update project:
+
+    ```{r}
+    updatedProject <- arv$updateProject("uuid", list(name = "new_name", description = "new description"))
+    ```
+
+* Create project:
+
+    ```{r}
+    createdProject <- arv$createProject(list(name = "project_name", description = "project description"))
+    ```
+
+### Building the ArvadosR package
+
+  ```
+  cd arvados/sdk && R CMD build R
+  ```
+
+This will create a tarball of the ArvadosR package in the current directory.
diff --git a/sdk/R/man/Arvados.Rd b/sdk/R/man/Arvados.Rd
new file mode 100644 (file)
index 0000000..3c66437
--- /dev/null
@@ -0,0 +1,62 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{Arvados}
+\alias{Arvados}
+\title{Arvados}
+\description{
+Arvados class gives users ability to manipulate collections and projects.
+}
+\section{Usage}{
+
+\preformatted{arv = Arvados$new(authToken = NULL, hostName = NULL, numRetries = 0)}
+}
+
+\section{Arguments}{
+
+\describe{
+  \item{authToken}{Authentication token. If not specified, the ARVADOS_API_TOKEN environment variable will be used.}
+  \item{hostName}{Host name. If not specified ARVADOS_API_HOST environment variable will be used.}
+  \item{numRetries}{Number which specifies how many times to retry failed service requests.}
+}
+}
+
+\section{Methods}{
+
+\describe{
+  \item{getToken()}{Returns the authentication token currently in use.}
+  \item{getHostName()}{Returns host name currently in use.}
+  \item{getNumRetries()}{Returns number which specifies how many times to retry failed service requests.}
+  \item{setNumRetries(newNumOfRetries)}{Sets number which specifies how many times to retry failed service requests.}
+  \item{getCollection(uuid)}{Get collection with specified UUID.}
+  \item{listCollections(filters = NULL, limit = 100, offset = 0)}{Returns list of collections based on filters parameter.}
+  \item{listAllCollections(filters = NULL)}{Lists all collections, based on filters parameter, even if the number of items is greater than maximum API limit.}
+  \item{deleteCollection(uuid)}{Deletes collection with specified UUID.}
+  \item{updateCollection(uuid, newContent)}{Updates collection with specified UUID.}
+  \item{createCollection(content)}{Creates new collection.}
+  \item{getProject(uuid)}{Get project with specified UUID.}
+  \item{listProjects(filters = NULL, limit = 100, offset = 0)}{Returns list of projects based on filters parameter.}
+  \item{listAllProjects(filters = NULL)}{Lists all projects, based on filters parameter, even if the number of items is greater than maximum API limit.}
+  \item{deleteProject(uuid)}{Deletes project with specified UUID.}
+  \item{updateProject(uuid, newContent)}{Updates project with specified UUID.}
+  \item{createProject(content)}{Creates new project.}
+}
+}
+
+\examples{
+\dontrun{
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+
+collection <- arv$getCollection("uuid")
+
+collectionList <- arv$listCollections(list(list("name", "like", "Test\%")))
+collectionList <- arv$listAllCollections(list(list("name", "like", "Test\%")))
+
+deletedCollection <- arv$deleteCollection("uuid")
+
+updatedCollection <- arv$updateCollection("uuid", list(name = "New name",
+                                                       description = "New description"))
+
+createdCollection <- arv$createCollection(list(name = "Example",
+                                               description = "This is a test collection"))
+}
+}
diff --git a/sdk/R/man/ArvadosFile.Rd b/sdk/R/man/ArvadosFile.Rd
new file mode 100644 (file)
index 0000000..b7840dc
--- /dev/null
@@ -0,0 +1,56 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/ArvadosFile.R
+\name{ArvadosFile}
+\alias{ArvadosFile}
+\title{ArvadosFile}
+\description{
+ArvadosFile class represents a file inside Arvados collection.
+}
+\section{Usage}{
+
+\preformatted{file = ArvadosFile$new(name)}
+}
+
+\section{Arguments}{
+
+\describe{
+  \item{name}{Name of the file.}
+}
+}
+
+\section{Methods}{
+
+\describe{
+  \item{getName()}{Returns name of the file.}
+  \item{getRelativePath()}{Returns file path relative to the root.}
+  \item{read(contentType = "raw", offset = 0, length = 0)}{Read file content.}
+  \item{write(content, contentType = "text/html")}{Write to file (override current content of the file).}
+  \item{connection(rw)}{Get connection opened in "read" or "write" mode.}
+  \item{flush()}{Write connections content to a file (override current content of the file).}
+  \item{remove(name)}{Removes ArvadosFile or Subcollection specified by name from the subcollection.}
+  \item{getSizeInBytes()}{Returns file size in bytes.}
+  \item{move(newLocation)}{Moves file to a new location inside collection.}
+}
+}
+
+\examples{
+\dontrun{
+myFile <- ArvadosFile$new("myFile")
+
+myFile$write("This is new file content")
+fileContent <- myFile$read()
+fileContent <- myFile$read("text")
+fileContent <- myFile$read("raw", offset = 8, length = 4) 
+
+#Write a table:
+arvConnection <- myFile$connection("w")
+write.table(mytable, arvConnection)
+arvadosFile$flush()
+
+#Read a table:
+arvConnection <- myFile$connection("r")
+mytable <- read.table(arvConnection)
+
+myFile$move("newFolder/myFile")
+}
+}
diff --git a/sdk/R/man/Collection.Rd b/sdk/R/man/Collection.Rd
new file mode 100644 (file)
index 0000000..8cf29a2
--- /dev/null
@@ -0,0 +1,51 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Collection.R
+\name{Collection}
+\alias{Collection}
+\title{Collection}
+\description{
+Collection class provides interface for working with Arvados collections.
+}
+\section{Usage}{
+
+\preformatted{collection = Collection$new(arv, uuid)}
+}
+
+\section{Arguments}{
+
+\describe{
+  \item{arv}{Arvados object.}
+  \item{uuid}{UUID of a collection.}
+}
+}
+
+\section{Methods}{
+
+\describe{
+  \item{add(content)}{Adds ArvadosFile or Subcollection specified by content to the collection.}
+  \item{create(fileNames, relativePath = "")}{Creates one or more ArvadosFiles and adds them to the collection at specified path.}
+  \item{remove(fileNames)}{Remove one or more files from the collection.}
+  \item{move(content, newLocation)}{Moves ArvadosFile or Subcollection to another location in the collection.}
+  \item{getFileListing()}{Returns collections file content as character vector.}
+  \item{get(relativePath)}{If relativePath is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL.}
+}
+}
+
+\examples{
+\dontrun{
+arv <- Arvados$new("your Arvados token", "example.arvadosapi.com")
+collection <- Collection$new(arv, "uuid")
+
+newFile <- ArvadosFile$new("myFile")
+collection$add(newFile, "myFolder")
+
+createdFiles <- collection$create(c("main.cpp", lib.dll), "cpp/src/")
+
+collection$remove("location/to/my/file.cpp")
+
+collection$move("folder/file.cpp", "file.cpp")
+
+arvadosFile <- collection$get("location/to/my/file.cpp")
+arvadosSubcollection <- collection$get("location/to/my/directory/")
+}
+}
diff --git a/sdk/R/man/Subcollection.Rd b/sdk/R/man/Subcollection.Rd
new file mode 100644 (file)
index 0000000..df0970b
--- /dev/null
@@ -0,0 +1,47 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Subcollection.R
+\name{Subcollection}
+\alias{Subcollection}
+\title{Subcollection}
+\description{
+Subcollection class represents a folder inside Arvados collection.
+It is essentially a composite of ArvadosFiles and other Subcollections.
+}
+\section{Usage}{
+
+\preformatted{subcollection = Subcollection$new(name)}
+}
+
+\section{Arguments}{
+
+\describe{
+  \item{name}{Name of the subcollection.}
+}
+}
+
+\section{Methods}{
+
+\describe{
+  \item{getName()}{Returns name of the subcollection.}
+  \item{getRelativePath()}{Returns subcollection path relative to the root.}
+  \item{add(content)}{Adds ArvadosFile or Subcollection specified by content to the subcollection.}
+  \item{remove(name)}{Removes ArvadosFile or Subcollection specified by name from the subcollection.}
+  \item{get(relativePath)}{If relativePath is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL.}
+  \item{getFileListing()}{Returns subcollections file content as character vector.}
+  \item{getSizeInBytes()}{Returns subcollections content size in bytes.}
+  \item{move(newLocation)}{Moves subcollection to a new location inside collection.}
+}
+}
+
+\examples{
+\dontrun{
+myFolder <- Subcollection$new("myFolder")
+myFile   <- ArvadosFile$new("myFile")
+
+myFolder$add(myFile)
+myFolder$get("myFile")
+myFolder$remove("myFile")
+
+myFolder$move("newLocation/myFolder")
+}
+}
diff --git a/sdk/R/man/print.Arvados.Rd b/sdk/R/man/print.Arvados.Rd
new file mode 100644 (file)
index 0000000..0833d15
--- /dev/null
@@ -0,0 +1,16 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Arvados.R
+\name{print.Arvados}
+\alias{print.Arvados}
+\title{print.Arvados}
+\usage{
+\method{print}{Arvados}(x, ...)
+}
+\arguments{
+\item{x}{Instance of Arvados class}
+
+\item{...}{Optional arguments.}
+}
+\description{
+Custom print function for Arvados class
+}
diff --git a/sdk/R/man/print.ArvadosFile.Rd b/sdk/R/man/print.ArvadosFile.Rd
new file mode 100644 (file)
index 0000000..566ec8b
--- /dev/null
@@ -0,0 +1,16 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/ArvadosFile.R
+\name{print.ArvadosFile}
+\alias{print.ArvadosFile}
+\title{print.ArvadosFile}
+\usage{
+\method{print}{ArvadosFile}(x, ...)
+}
+\arguments{
+\item{x}{Instance of ArvadosFile class}
+
+\item{...}{Optional arguments.}
+}
+\description{
+Custom print function for ArvadosFile class
+}
diff --git a/sdk/R/man/print.Collection.Rd b/sdk/R/man/print.Collection.Rd
new file mode 100644 (file)
index 0000000..8852383
--- /dev/null
@@ -0,0 +1,16 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Collection.R
+\name{print.Collection}
+\alias{print.Collection}
+\title{print.Collection}
+\usage{
+\method{print}{Collection}(x, ...)
+}
+\arguments{
+\item{x}{Instance of Collection class}
+
+\item{...}{Optional arguments.}
+}
+\description{
+Custom print function for Collection class
+}
diff --git a/sdk/R/man/print.Subcollection.Rd b/sdk/R/man/print.Subcollection.Rd
new file mode 100644 (file)
index 0000000..621350f
--- /dev/null
@@ -0,0 +1,16 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Subcollection.R
+\name{print.Subcollection}
+\alias{print.Subcollection}
+\title{print.Subcollection}
+\usage{
+\method{print}{Subcollection}(x, ...)
+}
+\arguments{
+\item{x}{Instance of Subcollection class}
+
+\item{...}{Optional arguments.}
+}
+\description{
+Custom print function for Subcollection class
+}
diff --git a/sdk/R/run_test.R b/sdk/R/run_test.R
new file mode 100644 (file)
index 0000000..1f8931d
--- /dev/null
@@ -0,0 +1,7 @@
+# Run the package test suite and exit with a non-zero status when any test
+# raised an error, so callers (e.g. CI scripts) can detect failure from the
+# process exit code.
+results <- devtools::test()
+resultsFrame <- as.data.frame(results)
+exitStatus <- if (any(resultsFrame$error)) 1 else 0
+quit(save = "no", status = exitStatus)
diff --git a/sdk/R/tests/testthat.R b/sdk/R/tests/testthat.R
new file mode 100644 (file)
index 0000000..18ef411
--- /dev/null
@@ -0,0 +1,4 @@
+# Standard testthat entry point: discovers and runs all tests under
+# tests/testthat/ against the installed ArvadosR package.
+library(testthat)
+library(ArvadosR)
+
+test_check("ArvadosR")
diff --git a/sdk/R/tests/testthat/fakes/FakeArvados.R b/sdk/R/tests/testthat/fakes/FakeArvados.R
new file mode 100644 (file)
index 0000000..5886ff7
--- /dev/null
@@ -0,0 +1,35 @@
+# Test double for the Arvados class: stores whatever collaborators the test
+# injects (token, host names, HTTP client/parser) and exposes the same getter
+# methods as the real class, returning those injected values unchanged.
+FakeArvados <- R6::R6Class(
+
+    "FakeArvados",
+
+    public = list(
+
+        token      = NULL,
+        host       = NULL,
+        webdavHost = NULL,
+        http       = NULL,
+        httpParser = NULL,
+        REST       = NULL,
+
+        initialize = function(token      = NULL,
+                              host       = NULL,
+                              webdavHost = NULL,
+                              http       = NULL,
+                              httpParser = NULL)
+        {
+            self$token      <- token
+            self$host       <- host
+            self$webdavHost <- webdavHost
+            self$http       <- http
+            self$httpParser <- httpParser
+        },
+
+        # Getters mirror the real Arvados class interface.
+        getToken    = function() self$token,
+        getHostName = function() self$host,
+        getHttpClient = function() self$http,
+        getHttpParser = function() self$httpParser,
+        getWebDavHostName = function() self$webdavHost
+    ),
+
+    cloneable = FALSE
+)
diff --git a/sdk/R/tests/testthat/fakes/FakeHttpParser.R b/sdk/R/tests/testthat/fakes/FakeHttpParser.R
new file mode 100644 (file)
index 0000000..865234d
--- /dev/null
@@ -0,0 +1,56 @@
+FakeHttpParser <- R6::R6Class(
+
+    "FakeHttpParser",
+
+    public = list(
+
+        validContentTypes = NULL,
+        parserCallCount = NULL,
+
+        initialize = function() 
+        {
+            self$parserCallCount <- 0
+            self$validContentTypes <- c("text", "raw")
+        },
+
+        parseJSONResponse = function(serverResponse) 
+        {
+            self$parserCallCount <- self$parserCallCount + 1
+
+            if(!is.null(serverResponse$content))
+                return(serverResponse$content)
+
+            serverResponse
+        },
+
+        parseResponse = function(serverResponse, outputType)
+        {
+            self$parserCallCount <- self$parserCallCount + 1
+
+            if(!is.null(serverResponse$content))
+                return(serverResponse$content)
+
+            serverResponse
+        },
+
+        getFileNamesFromResponse = function(serverResponse, uri)
+        {
+            self$parserCallCount <- self$parserCallCount + 1
+
+            if(!is.null(serverResponse$content))
+                return(serverResponse$content)
+
+            serverResponse
+        },
+
+        getFileSizesFromResponse = function(serverResponse, uri)    
+        {
+            self$parserCallCount <- self$parserCallCount + 1
+
+            if(!is.null(serverResponse$content))
+                return(serverResponse$content)
+
+            serverResponse
+        }
+    )
+)
diff --git a/sdk/R/tests/testthat/fakes/FakeHttpRequest.R b/sdk/R/tests/testthat/fakes/FakeHttpRequest.R
new file mode 100644 (file)
index 0000000..5336028
--- /dev/null
@@ -0,0 +1,166 @@
+FakeHttpRequest <- R6::R6Class(
+
+    "FakeHttpRequest",
+
+    public = list(
+
+        serverMaxElementsPerRequest = NULL,
+
+        content                                 = NULL,
+        expectedURL                             = NULL,
+        URLIsProperlyConfigured                 = NULL,
+        expectedQueryFilters                    = NULL,
+        queryFiltersAreCorrect                  = NULL,
+        requestHeaderContainsAuthorizationField = NULL,
+        requestHeaderContainsDestinationField   = NULL,
+        requestHeaderContainsRangeField         = NULL,
+        requestHeaderContainsContentTypeField   = NULL,
+        JSONEncodedBodyIsProvided               = NULL,
+        requestBodyIsProvided                   = NULL,
+
+        numberOfGETRequests    = NULL,
+        numberOfDELETERequests = NULL,
+        numberOfPUTRequests    = NULL,
+        numberOfPOSTRequests   = NULL,
+        numberOfMOVERequests   = NULL,
+
+        initialize = function(expectedURL      = NULL,
+                              serverResponse   = NULL,
+                              expectedFilters  = NULL)
+        {
+            if(is.null(serverResponse))
+            {
+                self$content <- list()
+                self$content$status_code <- 200
+            }
+            else
+                self$content <- serverResponse
+
+            self$expectedURL                             <- expectedURL
+            self$URLIsProperlyConfigured                 <- FALSE
+            self$expectedQueryFilters                    <- expectedFilters
+            self$queryFiltersAreCorrect                  <- FALSE
+            self$requestHeaderContainsAuthorizationField <- FALSE
+            self$requestHeaderContainsDestinationField   <- FALSE
+            self$requestHeaderContainsRangeField         <- FALSE
+            self$requestHeaderContainsContentTypeField   <- FALSE
+            self$JSONEncodedBodyIsProvided               <- FALSE
+            self$requestBodyIsProvided                   <- FALSE
+
+            self$numberOfGETRequests    <- 0
+            self$numberOfDELETERequests <- 0
+            self$numberOfPUTRequests    <- 0
+            self$numberOfPOSTRequests   <- 0
+            self$numberOfMOVERequests   <- 0
+
+            self$serverMaxElementsPerRequest <- 5
+        },
+
+        execute = function(verb, url, headers = NULL, body = NULL, query = NULL,
+                           limit = NULL, offset = NULL, retryTimes = 0)
+        {
+            private$validateURL(url)
+            private$validateHeaders(headers)
+            private$validateFilters(query)
+            private$validateBody(body)
+
+            if(verb == "GET")
+                self$numberOfGETRequests <- self$numberOfGETRequests + 1
+            else if(verb == "POST")
+                self$numberOfPOSTRequests <- self$numberOfPOSTRequests + 1
+            else if(verb == "PUT")
+                self$numberOfPUTRequests <- self$numberOfPUTRequests + 1
+            else if(verb == "DELETE")
+                self$numberOfDELETERequests <- self$numberOfDELETERequests + 1
+            else if(verb == "MOVE")
+                self$numberOfMOVERequests <- self$numberOfMOVERequests + 1
+            else if(verb == "PROPFIND")
+            {
+                return(self$content)
+            }
+
+            if(!is.null(self$content$items_available))
+                return(private$getElements(offset, limit))
+            else
+                return(self$content)
+        }
+    ),
+
+    private = list(
+
+        validateURL = function(url) 
+        {
+            if(!is.null(self$expectedURL) && url == self$expectedURL)
+                self$URLIsProperlyConfigured <- TRUE
+        },
+
+        validateHeaders = function(headers) 
+        {
+            if(!is.null(headers$Authorization))
+                self$requestHeaderContainsAuthorizationField <- TRUE
+
+            if(!is.null(headers$Destination))
+                self$requestHeaderContainsDestinationField <- TRUE
+
+            if(!is.null(headers$Range))
+                self$requestHeaderContainsRangeField <- TRUE
+
+            if(!is.null(headers[["Content-Type"]]))
+                self$requestHeaderContainsContentTypeField <- TRUE
+        },
+
+        validateBody = function(body)
+        {
+            if(!is.null(body))           
+            {
+                self$requestBodyIsProvided <- TRUE
+
+                if(inherits(body, "json"))
+                    self$JSONEncodedBodyIsProvided <- TRUE
+            }
+        },
+
+        validateFilters = function(filters)
+        {
+            if(!is.null(self$expectedQueryFilters) &&
+               !is.null(filters) &&
+               all.equal(unname(filters), self$expectedQueryFilters))
+            {
+                self$queryFiltersAreCorrect <- TRUE
+            }
+        },
+
+        getElements = function(offset, limit)
+        {
+            start <- 1
+            elementCount <- self$serverMaxElementsPerRequest
+
+            if(!is.null(offset))
+            {
+                if(offset > self$content$items_available)
+                    stop("Invalid offset")
+                
+                start <- offset + 1
+            }
+
+            if(!is.null(limit))
+                if(limit < self$serverMaxElementsPerRequest)
+                    elementCount <- limit - 1
+
+
+            serverResponse <- list()
+            serverResponse$items_available <- self$content$items_available
+            serverResponse$items <- self$content$items[start:(start + elementCount - 1)]
+
+            if(start + elementCount > self$content$items_available)
+            {
+                elementCount = self$content$items_available - start
+                serverResponse$items <- self$content$items[start:(start + elementCount)]
+            }
+
+            serverResponse
+        }
+    ),
+
+    cloneable = FALSE
+)
diff --git a/sdk/R/tests/testthat/fakes/FakeRESTService.R b/sdk/R/tests/testthat/fakes/FakeRESTService.R
new file mode 100644 (file)
index 0000000..d370e87
--- /dev/null
@@ -0,0 +1,167 @@
+FakeRESTService <- R6::R6Class(
+
+    "FakeRESTService",
+
+    public = list(
+
+        getResourceCallCount    = NULL,
+        createResourceCallCount = NULL,
+        listResourcesCallCount  = NULL,
+        deleteResourceCallCount = NULL,
+        updateResourceCallCount = NULL,
+        fetchAllItemsCallCount  = NULL,
+
+        createCallCount               = NULL,
+        deleteCallCount               = NULL,
+        moveCallCount                 = NULL,
+        getCollectionContentCallCount = NULL,
+        getResourceSizeCallCount      = NULL,
+        readCallCount                 = NULL,
+        writeCallCount                = NULL,
+        getConnectionCallCount        = NULL,
+        writeBuffer                   = NULL,
+        filtersAreConfiguredCorrectly = NULL,
+        bodyIsConfiguredCorrectly     = NULL,
+        expectedFilterContent         = NULL,
+
+        collectionContent = NULL,
+        returnContent     = NULL,
+
+        initialize = function(collectionContent = NULL, returnContent = NULL, 
+                              expectedFilterContent = NULL)
+        {
+            self$getResourceCallCount    <- 0
+            self$createResourceCallCount <- 0
+            self$listResourcesCallCount  <- 0
+            self$deleteResourceCallCount <- 0
+            self$updateResourceCallCount <- 0
+            self$fetchAllItemsCallCount  <- 0
+
+            self$createCallCount               <- 0
+            self$deleteCallCount               <- 0
+            self$moveCallCount                 <- 0
+            self$getCollectionContentCallCount <- 0
+            self$getResourceSizeCallCount      <- 0
+            self$readCallCount                 <- 0
+            self$writeCallCount                <- 0
+            self$getConnectionCallCount        <- 0
+            self$filtersAreConfiguredCorrectly <- FALSE
+            self$bodyIsConfiguredCorrectly     <- FALSE
+
+            self$collectionContent     <- collectionContent
+            self$returnContent         <- returnContent
+            self$expectedFilterContent <- expectedFilterContent
+        },
+
+        getWebDavHostName = function()
+        {
+        },
+
+        getResource = function(resource, uuid)
+        {
+            self$getResourceCallCount <- self$getResourceCallCount + 1
+            self$returnContent
+        },
+
+        listResources = function(resource, filters = NULL, limit = 100, offset = 0)
+        {
+            self$listResourcesCallCount <- self$listResourcesCallCount + 1
+
+            if(!is.null(self$expectedFilterContent) && !is.null(filters))
+               if(all.equal(filters, self$expectedFilterContent))
+                    self$filtersAreConfiguredCorrectly <- TRUE
+
+            self$returnContent
+        },
+
+        fetchAllItems = function(resourceURL, filters)
+        {
+            self$fetchAllItemsCallCount <- self$fetchAllItemsCallCount + 1
+
+            if(!is.null(self$expectedFilterContent) && !is.null(filters))
+               if(all.equal(filters, self$expectedFilterContent))
+                    self$filtersAreConfiguredCorrectly <- TRUE
+
+            self$returnContent
+        },
+
+        deleteResource = function(resource, uuid)
+        {
+            self$deleteResourceCallCount <- self$deleteResourceCallCount + 1
+            self$returnContent
+        },
+
+        updateResource = function(resource, uuid, newContent)
+        {
+            self$updateResourceCallCount <- self$updateResourceCallCount + 1
+
+            if(!is.null(self$returnContent) && !is.null(newContent))
+               if(all.equal(newContent, self$returnContent))
+                    self$bodyIsConfiguredCorrectly <- TRUE
+
+            self$returnContent
+        },
+
+        createResource = function(resource, content)
+        {
+            self$createResourceCallCount <- self$createResourceCallCount + 1
+
+            if(!is.null(self$returnContent) && !is.null(content))
+               if(all.equal(content, self$returnContent))
+                    self$bodyIsConfiguredCorrectly <- TRUE
+
+            self$returnContent
+        },
+
+        create = function(files, uuid)
+        {
+            self$createCallCount <- self$createCallCount + 1
+            self$returnContent
+        },
+
+        delete = function(relativePath, uuid)
+        {
+            self$deleteCallCount <- self$deleteCallCount + 1
+            self$returnContent
+        },
+
+        move = function(from, to, uuid)
+        {
+            self$moveCallCount <- self$moveCallCount + 1
+            self$returnContent
+        },
+
+        getCollectionContent = function(uuid)
+        {
+            self$getCollectionContentCallCount <- self$getCollectionContentCallCount + 1
+            self$collectionContent
+        },
+
+        getResourceSize = function(uuid, relativePathToResource)
+        {
+            self$getResourceSizeCallCount <- self$getResourceSizeCallCount + 1
+            self$returnContent
+        },
+        
+        read = function(relativePath, uuid, contentType = "text", offset = 0, length = 0)
+        {
+            self$readCallCount <- self$readCallCount + 1
+            self$returnContent
+        },
+
+        write = function(uuid, relativePath, content, contentType)
+        {
+            self$writeBuffer <- content
+            self$writeCallCount <- self$writeCallCount + 1
+            self$returnContent
+        },
+
+        getConnection = function(relativePath, uuid, openMode)
+        {
+            self$getConnectionCallCount <- self$getConnectionCallCount + 1
+            self$returnContent
+        }
+    ),
+
+    cloneable = FALSE
+)
diff --git a/sdk/R/tests/testthat/test-Arvados.R b/sdk/R/tests/testthat/test-Arvados.R
new file mode 100644 (file)
index 0000000..25cf88f
--- /dev/null
@@ -0,0 +1,306 @@
+context("Arvados API")
+
+source("fakes/FakeRESTService.R")
+
+test_that("Constructor will use environment variables if no parameters are passed to it", {
+
+    Sys.setenv(ARVADOS_API_HOST  = "environment_api_host")
+    Sys.setenv(ARVADOS_API_TOKEN = "environment_api_token")
+
+    arv <- Arvados$new()
+
+    Sys.unsetenv("ARVADOS_API_HOST")
+    Sys.unsetenv("ARVADOS_API_TOKEN")
+
+    expect_that("https://environment_api_host/arvados/v1/",
+                equals(arv$getHostName())) 
+
+    expect_that("environment_api_token",
+                equals(arv$getToken())) 
+}) 
+
+test_that("Constructor prefers constructor fields over environment variables", {
+
+    Sys.setenv(ARVADOS_API_HOST  = "environment_api_host")
+    Sys.setenv(ARVADOS_API_TOKEN = "environment_api_token")
+
+    arv <- Arvados$new("constructor_api_token", "constructor_api_host")
+
+    Sys.unsetenv("ARVADOS_API_HOST")
+    Sys.unsetenv("ARVADOS_API_TOKEN")
+
+    expect_that("https://constructor_api_host/arvados/v1/",
+                equals(arv$getHostName())) 
+
+    expect_that("constructor_api_token",
+                equals(arv$getToken())) 
+}) 
+
+test_that("Constructor raises exception if fields and environment variables are not provided", {
+
+    expect_that(Arvados$new(),
+                throws_error(paste0("Please provide host name and authentification token",
+                                    " or set ARVADOS_API_HOST and ARVADOS_API_TOKEN",
+                                    " environment variables.")))
+}) 
+
+test_that("getCollection delegates operation to RESTService class", {
+
+    arv <- Arvados$new("token", "hostName")
+    fakeREST <- FakeRESTService$new()
+    arv$setRESTService(fakeREST)
+    collectionUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+
+    arv$getCollection(collectionUUID)
+
+    expect_that(fakeREST$getResourceCallCount, equals(1))
+}) 
+
+test_that("listCollections delegates operation to RESTService class", {
+
+    arv <- Arvados$new("token", "hostName")
+    fakeREST <- FakeRESTService$new()
+    arv$setRESTService(fakeREST)
+
+    arv$listCollections()
+
+    expect_that(fakeREST$listResourcesCallCount, equals(1))
+}) 
+
+test_that("listCollections filter parameter must be named 'collection'", {
+
+    filters <- list(list("name", "like", "MyCollection"))
+    names(filters) <- c("collection")
+    fakeREST <- FakeRESTService$new(expectedFilterContent = filters)
+    arv <- Arvados$new("token", "hostName")
+    arv$setRESTService(fakeREST)
+
+    arv$listCollections(list(list("name", "like", "MyCollection")))
+
+    expect_that(fakeREST$filtersAreConfiguredCorrectly, is_true())
+}) 
+
+test_that("listAllCollections delegates operation to RESTService class", {
+
+    arv <- Arvados$new("token", "hostName")
+    fakeREST <- FakeRESTService$new()
+    arv$setRESTService(fakeREST)
+
+    arv$listAllCollections()
+
+    expect_that(fakeREST$fetchAllItemsCallCount, equals(1))
+}) 
+
+test_that("listAllCollections filter parameter must be named 'collection'", {
+
+    filters <- list(list("name", "like", "MyCollection"))
+    names(filters) <- c("collection")
+    fakeREST <- FakeRESTService$new(expectedFilterContent = filters)
+    arv <- Arvados$new("token", "hostName")
+    arv$setRESTService(fakeREST)
+
+    arv$listAllCollections(list(list("name", "like", "MyCollection")))
+
+    expect_that(fakeREST$filtersAreConfiguredCorrectly, is_true())
+}) 
+
+test_that("deleteCollection delegates operation to RESTService class", {
+
+    arv <- Arvados$new("token", "hostName")
+    fakeREST <- FakeRESTService$new()
+    arv$setRESTService(fakeREST)
+    collectionUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+
+    arv$deleteCollection(collectionUUID)
+
+    expect_that(fakeREST$deleteResourceCallCount, equals(1))
+}) 
+
+test_that("updateCollection delegates operation to RESTService class", {
+
+    arv <- Arvados$new("token", "hostName")
+    fakeREST <- FakeRESTService$new()
+    arv$setRESTService(fakeREST)
+    newCollectionContent <- list(newName = "Brand new shiny name")
+    collectionUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+
+    arv$updateCollection(collectionUUID, newCollectionContent)
+
+    expect_that(fakeREST$updateResourceCallCount, equals(1))
+}) 
+
+test_that("updateCollection adds content to request parameter named 'collection'", {
+
+    collectionUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+    body <- list(list())
+    names(body) <- c("collection")
+    body$collection <- list(name = "MyCollection", desc = "No description")
+    fakeREST <- FakeRESTService$new(returnContent = body)
+    arv <- Arvados$new("token", "hostName")
+    arv$setRESTService(fakeREST)
+
+    arv$updateCollection(collectionUUID, 
+                         list(name = "MyCollection", desc = "No description"))
+
+    expect_that(fakeREST$bodyIsConfiguredCorrectly, is_true())
+}) 
+
+test_that("createCollection delegates operation to RESTService class", {
+
+    arv <- Arvados$new("token", "hostName")
+    fakeREST <- FakeRESTService$new()
+    arv$setRESTService(fakeREST)
+    collectionContent <- list(newName = "Brand new shiny name")
+
+    arv$createCollection(collectionContent)
+
+    expect_that(fakeREST$createResourceCallCount, equals(1))
+}) 
+
+test_that("createCollection adds content to request parameter named 'collection'", {
+
+    body <- list(list())
+    names(body) <- c("collection")
+    body$collection <- list(name = "MyCollection", desc = "No description")
+    fakeREST <- FakeRESTService$new(returnContent = body)
+    arv <- Arvados$new("token", "hostName")
+    arv$setRESTService(fakeREST)
+
+    arv$createCollection(list(name = "MyCollection", desc = "No description"))
+
+    expect_that(fakeREST$bodyIsConfiguredCorrectly, is_true())
+}) 
+
+test_that("getProject delegates operation to RESTService class", {
+
+    arv <- Arvados$new("token", "hostName")
+    fakeREST <- FakeRESTService$new()
+    arv$setRESTService(fakeREST)
+    projectUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+
+    arv$getProject(projectUUID)
+
+    expect_that(fakeREST$getResourceCallCount, equals(1))
+}) 
+
+test_that("listProjects delegates operation to RESTService class", {
+
+    arv <- Arvados$new("token", "hostName")
+    fakeREST <- FakeRESTService$new()
+    arv$setRESTService(fakeREST)
+
+    arv$listProjects()
+
+    expect_that(fakeREST$listResourcesCallCount, equals(1))
+}) 
+
+test_that("listProjects filter contains additional 'group_class' field by default", {
+
+    filters <- list(list("name", "like", "MyProject"))
+    names(filters) <- c("groups")
+    filters[[length(filters) + 1]] <- list("group_class", "=", "project")
+
+    fakeREST <- FakeRESTService$new(expectedFilterContent = filters)
+    arv <- Arvados$new("token", "hostName")
+    arv$setRESTService(fakeREST)
+
+    arv$listProjects(list(list("name", "like", "MyProject")))
+
+    expect_that(fakeREST$filtersAreConfiguredCorrectly, is_true())
+}) 
+
+test_that("listAllProjects delegates operation to RESTService class", {
+
+    arv <- Arvados$new("token", "hostName")
+    fakeREST <- FakeRESTService$new()
+    arv$setRESTService(fakeREST)
+
+    arv$listAllProjects()
+
+    expect_that(fakeREST$fetchAllItemsCallCount, equals(1))
+}) 
+
+test_that("listAllProjects filter contains additional 'group_class' field by default", {
+
+    filters <- list(list("name", "like", "MyProject"))
+    names(filters) <- c("groups")
+    filters[[length(filters) + 1]] <- list("group_class", "=", "project")
+
+    fakeREST <- FakeRESTService$new(expectedFilterContent = filters)
+    arv <- Arvados$new("token", "hostName")
+    arv$setRESTService(fakeREST)
+
+    arv$listAllProjects(list(list("name", "like", "MyProject")))
+
+    expect_that(fakeREST$filtersAreConfiguredCorrectly, is_true())
+}) 
+
+test_that("deleteProject delegates operation to RESTService class", {
+
+    arv <- Arvados$new("token", "hostName")
+    fakeREST <- FakeRESTService$new()
+    arv$setRESTService(fakeREST)
+    projectUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+
+    arv$deleteProject(projectUUID)
+
+    expect_that(fakeREST$deleteResourceCallCount, equals(1))
+}) 
+
+test_that("updateProject delegates operation to RESTService class", {
+
+    arv <- Arvados$new("token", "hostName")
+    fakeREST <- FakeRESTService$new()
+    arv$setRESTService(fakeREST)
+    newProjectContent <- list(newName = "Brand new shiny name")
+    projectUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+
+    arv$updateProject(projectUUID, newProjectContent)
+
+    expect_that(fakeREST$updateResourceCallCount, equals(1))
+}) 
+
+test_that("updateProject adds content to request parameter named 'group'", {
+
+    projectUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+    body <- list(list())
+    names(body) <- c("group")
+    body$group <- list(name = "MyProject", desc = "No description")
+
+    fakeREST <- FakeRESTService$new(returnContent = body)
+    arv <- Arvados$new("token", "hostName")
+    arv$setRESTService(fakeREST)
+
+    arv$updateProject(projectUUID,
+                      list(name = "MyProject", desc = "No description"))
+
+    expect_that(fakeREST$bodyIsConfiguredCorrectly, is_true())
+}) 
+
+test_that("createProject delegates operation to RESTService class", {
+
+    arv <- Arvados$new("token", "hostName")
+    fakeREST <- FakeRESTService$new()
+    arv$setRESTService(fakeREST)
+    projectContent <- list(newName = "Brand new shiny name")
+
+    arv$createProject(projectContent)
+
+    expect_that(fakeREST$createResourceCallCount, equals(1))
+}) 
+
+test_that("createProject request body contains 'group_class' field", {
+
+    body <- list(list())
+    names(body) <- c("group")
+    body$group <- c("group_class" = "project",
+                    list(name = "MyProject", desc = "No description"))
+
+    fakeREST <- FakeRESTService$new(returnContent = body)
+    arv <- Arvados$new("token", "hostName")
+    arv$setRESTService(fakeREST)
+
+    arv$createProject(list(name = "MyProject", desc = "No description"))
+
+    expect_that(fakeREST$bodyIsConfiguredCorrectly, is_true())
+}) 
diff --git a/sdk/R/tests/testthat/test-ArvadosFile.R b/sdk/R/tests/testthat/test-ArvadosFile.R
new file mode 100644 (file)
index 0000000..90cc149
--- /dev/null
@@ -0,0 +1,271 @@
+source("fakes/FakeRESTService.R")
+
+context("ArvadosFile")
+
+test_that("constructor raises error if  file name is empty string", {
+
+    expect_that(ArvadosFile$new(""), throws_error("Invalid name."))
+}) 
+
+test_that("getFileListing always returns file name", {
+
+    dog <- ArvadosFile$new("dog")
+
+    expect_that(dog$getFileListing(), equals("dog"))
+}) 
+
+test_that("get always returns NULL", {
+
+    dog <- ArvadosFile$new("dog")
+    
+    responseIsNull <- is.null(dog$get("something"))
+    expect_that(responseIsNull, is_true())
+}) 
+
+test_that("getFirst always returns NULL", {
+
+    dog <- ArvadosFile$new("dog")
+    
+    responseIsNull <- is.null(dog$getFirst())
+    expect_that(responseIsNull, is_true())
+}) 
+
+test_that(paste("getSizeInBytes returns zero if arvadosFile",
+                "is not part of a collection"), {
+
+    dog <- ArvadosFile$new("dog")
+
+    expect_that(dog$getSizeInBytes(), equals(0))
+}) 
+
+test_that(paste("getSizeInBytes delegates size calculation",
+                "to REST service class"), {
+
+    collectionContent <- c("animal", "animal/fish")
+    returnSize <- 100
+    fakeREST <- FakeRESTService$new(collectionContent, returnSize)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    resourceSize <- fish$getSizeInBytes()
+
+    expect_that(resourceSize, equals(100))
+}) 
+
+test_that("getRelativePath returns path relative to the tree root", {
+
+    animal <- Subcollection$new("animal")
+    fish <- Subcollection$new("fish")
+    shark <- ArvadosFile$new("shark")
+
+    animal$add(fish)
+    fish$add(shark)
+
+    expect_that(shark$getRelativePath(), equals("animal/fish/shark"))
+}) 
+
+test_that("read raises exception if file doesn't belong to a collection", {
+
+    dog <- ArvadosFile$new("dog")
+
+    expect_that(dog$read(),
+                throws_error("ArvadosFile doesn't belong to any collection."))
+}) 
+
+test_that("read raises exception offset or length is negative number", {
+
+
+    collectionContent <- c("animal", "animal/fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    expect_that(fish$read(contentType = "text", offset = -1),
+                throws_error("Offset and length must be positive values."))
+    expect_that(fish$read(contentType = "text", length = -1),
+                throws_error("Offset and length must be positive values."))
+    expect_that(fish$read(contentType = "text", offset = -1, length = -1),
+                throws_error("Offset and length must be positive values."))
+}) 
+
+test_that("read delegates reading operation to REST service class", {
+
+    collectionContent <- c("animal", "animal/fish")
+    readContent <- "my file"
+    fakeREST <- FakeRESTService$new(collectionContent, readContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+    
+    fileContent <- fish$read("text")
+
+    expect_that(fileContent, equals("my file"))
+    expect_that(fakeREST$readCallCount, equals(1))
+}) 
+
+test_that(paste("connection delegates connection creation to RESTService class",
+                "which returns curl connection opened in read mode when", 
+                "'r' or 'rb' is passed as argument"), {
+
+    collectionContent <- c("animal", "animal/fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    connection <- fish$connection("r")
+
+    expect_that(fakeREST$getConnectionCallCount, equals(1))
+}) 
+
+test_that(paste("connection returns textConnection opened",
+                "in write mode when 'w' is passed as argument"), {
+
+    collectionContent <- c("animal", "animal/fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    connection <- fish$connection("w")
+
+    writeLines("file", connection)
+    writeLines("content", connection)
+
+    writeResult <- textConnectionValue(connection)
+
+    expect_that(writeResult[1], equals("file"))
+    expect_that(writeResult[2], equals("content"))
+}) 
+
+test_that("flush sends data stored in a connection to a REST server", {
+
+
+    collectionContent <- c("animal", "animal/fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    connection <- fish$connection("w")
+
+    writeLines("file content", connection)
+
+    fish$flush()
+
+    expect_that(fakeREST$writeBuffer, equals("file content"))
+}) 
+
+test_that("write raises exception if file doesn't belong to a collection", {
+
+    dog <- ArvadosFile$new("dog")
+
+    expect_that(dog$write(),
+                throws_error("ArvadosFile doesn't belong to any collection."))
+}) 
+
+test_that("write delegates writing operation to REST service class", {
+
+
+    collectionContent <- c("animal", "animal/fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+    
+    fileContent <- fish$write("new file content")
+
+    expect_that(fakeREST$writeBuffer, equals("new file content"))
+}) 
+
+test_that(paste("move raises exception if arvados file",
+                "doesn't belong to any collection"), {
+
+    animal <- ArvadosFile$new("animal")
+
+    expect_that(animal$move("new/location"),
+                throws_error("ArvadosFile doesn't belong to any collection"))
+}) 
+
+test_that(paste("move raises exception if newLocationInCollection",
+                "parameter is invalid"), {
+
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "ball")
+
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+
+    collection <- Collection$new(api, "myUUID")
+    dog <- collection$get("animal/dog")
+
+    expect_that(dog$move("objects/dog"),
+                throws_error("Unable to get destination subcollection"))
+}) 
+
+test_that("move raises exception if new location contains content with the same name", {
+
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "dog")
+
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    dog <- collection$get("animal/dog")
+
+    expect_that(dog$move("dog"),
+                throws_error("Destination already contains content with same name."))
+
+}) 
+
+test_that("move moves arvados file inside collection tree", {
+
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "ball")
+
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    dog <- collection$get("animal/dog")
+
+    dog$move("dog")
+    dogIsNullOnOldLocation <- is.null(collection$get("animal/dog"))
+    dogExistsOnNewLocation <- !is.null(collection$get("dog"))
+
+    expect_that(dogIsNullOnOldLocation, is_true())
+    expect_that(dogExistsOnNewLocation, is_true())
+}) 
diff --git a/sdk/R/tests/testthat/test-Collection.R b/sdk/R/tests/testthat/test-Collection.R
new file mode 100644 (file)
index 0000000..ec00ca3
--- /dev/null
@@ -0,0 +1,260 @@
+source("fakes/FakeRESTService.R")
+
+context("Collection")
+
+test_that(paste("constructor creates file tree from text content",
+                "retrieved from REST service"), {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    root <- collection$get("")
+
+    expect_that(fakeREST$getCollectionContentCallCount, equals(1))
+    expect_that(root$getName(), equals(""))
+})
+
+test_that(paste("add raises exception if passed argument is not",
+                "ArvadosFile or Subcollection"), {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    newNumber <- 10
+
+    expect_that(collection$add(newNumber), # NOTE(review): "AravodsFile" below is misspelled but must match Collection$add's actual message — fix both together
+    throws_error(paste("Expected AravodsFile or Subcollection",
+                       "object, got (numeric)."), fixed = TRUE))
+})
+
+test_that("add raises exception if relative path is not valid", {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    newPen <- ArvadosFile$new("pen")
+
+    expect_that(collection$add(newPen, "objects"),
+                throws_error("Subcollection objects doesn't exist.",
+                              fixed = TRUE))
+})
+
+test_that("add raises exception if content name is empty string", {
+
+    collectionContent <- c("animal", "animal/fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    rootFolder <- Subcollection$new("")
+
+    expect_that(collection$add(rootFolder),
+                throws_error("Content has invalid name.", fixed = TRUE))
+})
+
+test_that(paste("add adds ArvadosFile or Subcollection",
+                "to local tree structure and remote REST service"), {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    newDog <- ArvadosFile$new("dog")
+    collection$add(newDog, "animal")
+
+    dog <- collection$get("animal/dog")
+    dogExistsInCollection <- !is.null(dog) && dog$getName() == "dog"
+
+    expect_that(dogExistsInCollection, is_true())
+    expect_that(fakeREST$createCallCount, equals(1))
+})
+
+test_that("create raises exception if passed argument is not character vector", {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    expect_that(collection$create(10),
+                throws_error("Expected character vector, got (numeric).", 
+                             fixed = TRUE))
+})
+
+test_that("create raises exception if relative path is not valid", {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "ball")
+
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    newPen <- ArvadosFile$new("pen")
+
+    expect_that(collection$create(newPen, "objects"),
+                throws_error("Subcollection objects doesn't exist.",
+                              fixed = TRUE))
+})
+
+test_that(paste("create adds files specified by fileNames",
+                "to local tree structure and remote REST service"), {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    files <- c("dog", "cat")
+    collection$create(files, "animal")
+
+    dog <- collection$get("animal/dog")
+    cat <- collection$get("animal/cat")
+    dogExistsInCollection <- !is.null(dog) && dog$getName() == "dog"
+    catExistsInCollection <- !is.null(cat) && cat$getName() == "cat"
+
+    expect_that(dogExistsInCollection, is_true())
+    expect_that(catExistsInCollection, is_true())
+    expect_that(fakeREST$createCallCount, equals(2))
+})
+
+test_that("remove raises exception if passed argument is not character vector", {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    expect_that(collection$remove(10),
+                throws_error("Expected character vector, got (numeric).", 
+                             fixed = TRUE))
+})
+
+test_that("remove raises exception if user tries to remove root folder", {
+
+    collectionContent <- c("animal", "animal/fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    expect_that(collection$remove(""),
+                throws_error("You can't delete root folder.", fixed = TRUE))
+})
+
+test_that(paste("remove removes files specified by paths",
+                "from local tree structure and from remote REST service"), {
+
+    collectionContent <- c("animal", "animal/fish", "animal/dog", "animal/cat", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    collection$remove(c("animal/dog", "animal/cat"))
+
+    dog <- collection$get("animal/dog")
+    cat <- collection$get("animal/cat") # was "animal/dog" (copy-paste bug): the cat assertion never checked the cat path
+    dogExistsInCollection <- !is.null(dog) && dog$getName() == "dog"
+    catExistsInCollection <- !is.null(cat) && cat$getName() == "cat"
+
+    expect_that(dogExistsInCollection, is_false())
+    expect_that(catExistsInCollection, is_false())
+    expect_that(fakeREST$deleteCallCount, equals(2))
+})
+
+test_that(paste("move moves content to a new location inside file tree",
+                "and on REST service"), {
+
+    collectionContent <- c("animal", "animal/dog", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    collection$move("animal/dog", "dog")
+
+    dogIsNullOnOldLocation <- is.null(collection$get("animal/dog"))
+    dogExistsOnNewLocation <- !is.null(collection$get("dog"))
+
+    expect_that(dogIsNullOnOldLocation, is_true())
+    expect_that(dogExistsOnNewLocation, is_true())
+    expect_that(fakeREST$moveCallCount, equals(1))
+})
+
+test_that("move raises exception if new location is not valid", {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    expect_that(collection$move("fish", "object"),
+                throws_error("Content you want to move doesn't exist in the collection.",
+                             fixed = TRUE))
+})
+
+test_that("getFileListing returns sorted collection content received from REST service", {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    contentMatchExpected <- all(collection$getFileListing() == 
+                                c("animal", "animal/fish", "ball"))
+
+    expect_that(contentMatchExpected, is_true())
+    #2 calls because Collection$new calls getFileListing once
+    expect_that(fakeREST$getCollectionContentCallCount, equals(2))
+
+})
+
+test_that("get returns arvados file or subcollection from internal tree structure", {
+
+    collectionContent <- c("animal", "animal/fish", "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+
+    fish <- collection$get("animal/fish")
+    fishIsNotNull <- !is.null(fish)
+
+    expect_that(fishIsNotNull, is_true())
+    expect_that(fish$getName(), equals("fish"))
+})
diff --git a/sdk/R/tests/testthat/test-CollectionTree.R b/sdk/R/tests/testthat/test-CollectionTree.R
new file mode 100644 (file)
index 0000000..42a54bf
--- /dev/null
@@ -0,0 +1,102 @@
+context("CollectionTree")
+
+test_that("constructor creates file tree from character array properly", {
+
+    collection <- "myCollection"
+    characterArray <- c("animal", 
+                        "animal/dog",
+                        "boat")
+
+    collectionTree <- CollectionTree$new(characterArray, collection)
+
+    root   <- collectionTree$getTree()
+    animal <- collectionTree$getElement("animal")
+    dog    <- collectionTree$getElement("animal/dog")
+    boat   <- collectionTree$getElement("boat")
+
+    rootHasNoParent             <- is.null(root$getParent())
+    rootIsOfTypeSubcollection   <- "Subcollection" %in% class(root)
+    animalIsOfTypeSubcollection <- "Subcollection" %in% class(animal)
+    dogIsOfTypeArvadosFile      <- "ArvadosFile" %in% class(dog)
+    boatIsOfTypeArvadosFile     <- "ArvadosFile" %in% class(boat)
+    animalsParentIsRoot         <- animal$getParent()$getName() == root$getName()
+    animalContainsDog           <- animal$getFirst()$getName() == dog$getName()
+    dogsParentIsAnimal          <- dog$getParent()$getName() == animal$getName()
+    boatsParentIsRoot           <- boat$getParent()$getName() == root$getName()
+
+    allElementsBelongToSameCollection <- root$getCollection()   == "myCollection" &&
+                                         animal$getCollection() == "myCollection" &&
+                                         dog$getCollection()    == "myCollection" &&
+                                         boat$getCollection()   == "myCollection"
+
+    expect_that(root$getName(), equals(""))
+    expect_that(rootIsOfTypeSubcollection, is_true())
+    expect_that(rootHasNoParent, is_true())
+    expect_that(animalIsOfTypeSubcollection, is_true())
+    expect_that(animalsParentIsRoot, is_true())
+    expect_that(animalContainsDog, is_true())
+    expect_that(dogIsOfTypeArvadosFile, is_true())
+    expect_that(dogsParentIsAnimal, is_true())
+    expect_that(boatIsOfTypeArvadosFile, is_true())
+    expect_that(boatsParentIsRoot, is_true())
+    expect_that(allElementsBelongToSameCollection, is_true())
+}) 
+
+test_that("getElement returns element from tree if element exists on specified path", {
+
+    collection <- "myCollection"
+    characterArray <- c("animal", 
+                        "animal/dog",
+                        "boat")
+
+    collectionTree <- CollectionTree$new(characterArray, collection)
+
+    dog <- collectionTree$getElement("animal/dog")
+
+    expect_that(dog$getName(), equals("dog"))
+}) 
+
+test_that("getElement returns NULL from tree if element doesn't exists on specified path", {
+
+    collection <- "myCollection"
+    characterArray <- c("animal", 
+                        "animal/dog",
+                        "boat")
+
+    collectionTree <- CollectionTree$new(characterArray, collection)
+
+    fish <- collectionTree$getElement("animal/fish")
+    fishIsNULL <- is.null(fish)
+
+    expect_that(fishIsNULL, is_true())
+}) 
+
+test_that("getElement trims ./ from start of relativePath", {
+
+    collection <- "myCollection"
+    characterArray <- c("animal", 
+                        "animal/dog",
+                        "boat")
+
+    collectionTree <- CollectionTree$new(characterArray, collection)
+
+    dog <- collectionTree$getElement("animal/dog")
+    dogWithDotSlash <- collectionTree$getElement("./animal/dog")
+
+    expect_that(dogWithDotSlash$getName(), equals(dog$getName()))
+}) 
+
+test_that("getElement trims / from end of relativePath", {
+
+    collection <- "myCollection"
+    characterArray <- c("animal", 
+                        "animal/dog",
+                        "boat")
+
+    collectionTree <- CollectionTree$new(characterArray, collection)
+
+    animal <- collectionTree$getElement("animal")
+    animalWithSlash <- collectionTree$getElement("animal/")
+
+    expect_that(animalWithSlash$getName(), equals(animal$getName()))
+}) 
diff --git a/sdk/R/tests/testthat/test-HttpParser.R b/sdk/R/tests/testthat/test-HttpParser.R
new file mode 100644 (file)
index 0000000..b286212
--- /dev/null
@@ -0,0 +1,92 @@
+context("Http Parser")
+
+
+test_that("parseJSONResponse generates and returns JSON object from server response", {
+
+    JSONContent <- "{\"bar\":{\"foo\":[10]}}"
+    serverResponse <- list()
+    serverResponse$content <- charToRaw(JSONContent)
+    serverResponse$headers[["Content-Type"]] <- "application/json; charset=utf-8"
+    class(serverResponse) <- c("response")
+
+    parser <- HttpParser$new()
+
+    result <- parser$parseJSONResponse(serverResponse)
+    barExists <- !is.null(result$bar)
+
+    expect_that(barExists, is_true())
+    expect_that(unlist(result$bar$foo), equals(10))
+}) 
+
+test_that(paste("parseResponse generates and returns character vector",
+                "from server response if outputType is text"), {
+
+    content <- "random text"
+    serverResponse <- list()
+    serverResponse$content <- charToRaw(content)
+    serverResponse$headers[["Content-Type"]] <- "text/plain; charset=utf-8"
+    class(serverResponse) <- c("response")
+
+    parser <- HttpParser$new()
+    parsedResponse <- parser$parseResponse(serverResponse, "text")
+
+    expect_that(parsedResponse, equals("random text"))
+}) 
+
+
+webDAVResponseSample <-
+    paste0("<?xml version=\"1.0\" encoding=\"UTF-8\"?><D:multistatus xmlns:",
+           "D=\"DAV:\"><D:response><D:href>/c=aaaaa-bbbbb-ccccccccccccccc</D",
+           ":href><D:propstat><D:prop><D:resourcetype><D:collection xmlns:D=",
+           "\"DAV:\"/></D:resourcetype><D:getlastmodified>Fri, 11 Jan 2018 1",
+           "1:11:11 GMT</D:getlastmodified><D:displayname></D:displayname><D",
+           ":supportedlock><D:lockentry xmlns:D=\"DAV:\"><D:lockscope><D:exc",
+           "lusive/></D:lockscope><D:locktype><D:write/></D:locktype></D:loc",
+           "kentry></D:supportedlock></D:prop><D:status>HTTP/1.1 200 OK</D:s",
+           "tatus></D:propstat></D:response><D:response><D:href>/c=aaaaa-bbb",
+           "bb-ccccccccccccccc/myFile.exe</D:href><D:propstat><D:prop><D:r",
+           "esourcetype></D:resourcetype><D:getlastmodified>Fri, 12 Jan 2018",
+           " 22:22:22 GMT</D:getlastmodified><D:getcontenttype>text/x-c++src",
+           "; charset=utf-8</D:getcontenttype><D:displayname>myFile.exe</D",
+           ":displayname><D:getcontentlength>25</D:getcontentlength><D:getet",
+           "ag>\"123b12dd1234567890\"</D:getetag><D:supportedlock><D:lockent",
+           "ry xmlns:D=\"DAV:\"><D:lockscope><D:exclusive/></D:lockscope><D:",
+           "locktype><D:write/></D:locktype></D:lockentry></D:supportedlock>",
+           "</D:prop><D:status>HTTP/1.1 200 OK</D:status></D:propstat></D:re",
+           "sponse></D:multistatus>")
+
+
+
+test_that(paste("getFileNamesFromResponse returns file names belonging to specific",
+                "collection parsed from webDAV server response"), {
+
+    serverResponse <- list()
+    serverResponse$content <- charToRaw(webDAVResponseSample)
+    serverResponse$headers[["Content-Type"]] <- "text/xml; charset=utf-8"
+    class(serverResponse) <- c("response")
+    url <- URLencode("https://webdav/c=aaaaa-bbbbb-ccccccccccccccc")
+
+    parser <- HttpParser$new()
+    result <- parser$getFileNamesFromResponse(serverResponse, url)
+    expectedResult <- "myFile.exe"
+    resultMatchExpected <- all.equal(result, expectedResult)
+
+    expect_that(resultMatchExpected, is_true())
+}) 
+
+test_that(paste("getFileSizesFromResponse returns file sizes",
+                "parsed from webDAV server response"), {
+
+    serverResponse <- list()
+    serverResponse$content <- charToRaw(webDAVResponseSample)
+    serverResponse$headers[["Content-Type"]] <- "text/xml; charset=utf-8"
+    class(serverResponse) <- c("response")
+    url <- URLencode("https://webdav/c=aaaaa-bbbbb-ccccccccccccccc")
+
+    parser <- HttpParser$new()
+    expectedResult <- "25"
+    result <- parser$getFileSizesFromResponse(serverResponse, url)
+    resultMatchExpected <- result == expectedResult
+
+    expect_that(resultMatchExpected, is_true())
+}) 
diff --git a/sdk/R/tests/testthat/test-HttpRequest.R b/sdk/R/tests/testthat/test-HttpRequest.R
new file mode 100644 (file)
index 0000000..66ab9af
--- /dev/null
@@ -0,0 +1,28 @@
+context("Http Request")
+
+
+test_that("execute raises exception if http verb is not valid", {
+
+    http <- HttpRequest$new()
+    expect_that(http$execute("FAKE VERB", "url"),
+               throws_error("Http verb is not valid."))
+}) 
+
+test_that(paste("createQuery generates and encodes query portion of http",
+                "request based on filters, limit and offset parameters"), {
+
+    http <- HttpRequest$new()
+    filters <- list(list("color", "=", "red"))
+    limit <- 20
+    offset <- 50
+    expect_that(http$createQuery(filters, limit, offset),
+                equals(paste0("/?filters=%5B%5B%22color%22%2C%22%3D%22%2C%22red",
+                              "%22%5D%5D&limit=20&offset=50")))
+}) 
+
+test_that(paste("createQuery generates an empty string",
+                "when filters, limit and offset parameters are set to NULL"), {
+
+    http <- HttpRequest$new()
+    expect_that(http$createQuery(NULL, NULL, NULL), equals(""))
+}) 
diff --git a/sdk/R/tests/testthat/test-RESTService.R b/sdk/R/tests/testthat/test-RESTService.R
new file mode 100644 (file)
index 0000000..d4f3c2c
--- /dev/null
@@ -0,0 +1,633 @@
+source("fakes/FakeArvados.R")
+source("fakes/FakeHttpRequest.R")
+source("fakes/FakeHttpParser.R")
+
+context("REST service")
+
+test_that("getWebDavHostName calls REST service properly", {
+
+    expectedURL <- "https://host/discovery/v1/apis/arvados/v1/rest"
+    serverResponse <- list(keepWebServiceUrl = "https://myWebDavServer.com")
+    httpRequest <- FakeHttpRequest$new(expectedURL, serverResponse)
+
+    REST <- RESTService$new("token", "host",
+                            httpRequest, FakeHttpParser$new())
+
+    REST$getWebDavHostName()
+
+    expect_that(httpRequest$URLIsProperlyConfigured, is_true())
+    expect_that(httpRequest$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(httpRequest$numberOfGETRequests, equals(1))
+}) 
+
+test_that("getWebDavHostName returns webDAV host name properly", {
+
+    serverResponse <- list(keepWebServiceUrl = "https://myWebDavServer.com")
+    httpRequest <- FakeHttpRequest$new(expectedURL = NULL, serverResponse)
+
+    REST <- RESTService$new("token", "host",
+                            httpRequest, FakeHttpParser$new())
+
+    expect_that("https://myWebDavServer.com", equals(REST$getWebDavHostName())) 
+}) 
+
+test_that("getResource calls REST service properly", {
+
+    serverResponse <- NULL
+    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL    <- paste0("https://host/arvados/v1/collections/", resourceUUID)
+
+    httpRequest <- FakeHttpRequest$new(expectedURL, serverResponse)
+
+    REST <- RESTService$new("token", "host", 
+                            httpRequest, FakeHttpParser$new(),
+                            0, "webDavHost")
+
+    REST$getResource("collections", resourceUUID)
+
+    expect_that(httpRequest$URLIsProperlyConfigured, is_true())
+    expect_that(httpRequest$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(httpRequest$numberOfGETRequests, equals(1))
+}) 
+
+test_that("getResource parses server response", {
+
+    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+    httpParser <- FakeHttpParser$new()
+    REST <- RESTService$new("token", "host", 
+                            FakeHttpRequest$new(), httpParser,
+                            0, "webDavHost")
+
+    REST$getResource("collections", resourceUUID)
+
+    expect_that(httpParser$parserCallCount, equals(1))
+}) 
+
+test_that("getResource raises exception if response contains errors field", {
+
+    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+    serverResponse <- list(errors = 404)
+
+    REST <- RESTService$new("token", "host",
+                            FakeHttpRequest$new(NULL, serverResponse),
+                            FakeHttpParser$new(),
+                            0, "webDavHost")
+    
+    expect_that(REST$getResource("collections", resourceUUID), throws_error("404", fixed = TRUE))
+}) 
+
+test_that("listResources calls REST service properly", {
+
+    serverResponse <- NULL
+    expectedURL    <- paste0("https://host/arvados/v1/collections")
+    httpRequest <- FakeHttpRequest$new(expectedURL, serverResponse)
+
+    REST <- RESTService$new("token", "host", 
+                            httpRequest, FakeHttpParser$new(),
+                            0, "webDavHost")
+
+    REST$listResources("collections")
+
+    expect_that(httpRequest$URLIsProperlyConfigured, is_true())
+    expect_that(httpRequest$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(httpRequest$numberOfGETRequests, equals(1))
+}) 
+
+test_that("listResources parses server response", {
+
+    httpParser <- FakeHttpParser$new()
+    REST <- RESTService$new("token", "host", 
+                            FakeHttpRequest$new(), httpParser,
+                            0, "webDavHost")
+
+    REST$listResources("collections")
+
+    expect_that(httpParser$parserCallCount, equals(1))
+}) 
+
+test_that("listResources raises exception if response contains errors field", {
+
+    serverResponse <- list(errors = 404)
+
+    REST <- RESTService$new("token", "host", 
+                            FakeHttpRequest$new(NULL, serverResponse),
+                            FakeHttpParser$new(),
+                            0, "webDavHost")
+    
+    expect_that(REST$listResources("collections"), throws_error("404", fixed = TRUE))
+}) 
+
+test_that("fetchAllItems always returns all resource items from server", {
+
+    expectedURL <- NULL
+    serverResponse <- list(items_available = 8,
+                           items = list("collection1",
+                                        "collection2",
+                                        "collection3",
+                                        "collection4",
+                                        "collection5",
+                                        "collection6",
+                                        "collection7",
+                                        "collection8"))
+
+    httpParser <- FakeHttpParser$new()
+    httpRequest <- FakeHttpRequest$new(expectedURL, serverResponse)
+    httpRequest$serverMaxElementsPerRequest <- 3
+
+    REST <- RESTService$new("token", "host", 
+                            httpRequest, httpParser,
+                            0, "webDavHost")
+
+    result <- REST$fetchAllItems(NULL, NULL)
+
+    expect_that(length(result), equals(8))
+    expect_that(httpRequest$numberOfGETRequests, equals(3))
+    expect_that(httpParser$parserCallCount, equals(3))
+}) 
+
+test_that("deleteResource calls REST service properly", {
+
+    serverResponse <- NULL
+    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL    <- paste0("https://host/arvados/v1/collections/", resourceUUID)
+
+    httpRequest <- FakeHttpRequest$new(expectedURL, serverResponse)
+
+    REST <- RESTService$new("token", "host", 
+                            httpRequest, FakeHttpParser$new(),
+                            0, "webDavHost")
+
+    REST$deleteResource("collections", resourceUUID)
+
+    expect_that(httpRequest$URLIsProperlyConfigured, is_true())
+    expect_that(httpRequest$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(httpRequest$numberOfDELETERequests, equals(1))
+}) 
+
+test_that("deleteResource parses server response", {
+
+    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+    httpParser <- FakeHttpParser$new()
+    REST <- RESTService$new("token", "host", 
+                            FakeHttpRequest$new(), httpParser,
+                            0, "webDavHost")
+
+    REST$deleteResource("collections", resourceUUID)
+
+    expect_that(httpParser$parserCallCount, equals(1))
+}) 
+
+test_that("deleteResource raises exception if response contains errors field", {
+
+    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+    serverResponse <- list(errors = 404)
+
+    REST <- RESTService$new("token", "host", 
+                            FakeHttpRequest$new(NULL, serverResponse),
+                            FakeHttpParser$new(),
+                            0, "webDavHost")
+    
+    expect_that(REST$deleteResource("collections", resourceUUID), throws_error("404", fixed = TRUE))
+}) 
+
+test_that("updateResource calls REST service properly", {
+
+    serverResponse <- NULL
+    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL    <- paste0("https://host/arvados/v1/collections/", resourceUUID)
+    newResourceContent <- list(newName = "Brand new shiny name")
+
+    httpRequest <- FakeHttpRequest$new(expectedURL, serverResponse)
+
+    REST <- RESTService$new("token", "host", 
+                            httpRequest, FakeHttpParser$new(),
+                            0, "webDavHost")
+
+    REST$updateResource("collections", resourceUUID, newResourceContent)
+
+    expect_that(httpRequest$URLIsProperlyConfigured, is_true())
+    expect_that(httpRequest$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(httpRequest$JSONEncodedBodyIsProvided, is_true())
+    expect_that(httpRequest$numberOfPUTRequests, equals(1))
+}) 
+
+test_that("updateResource parses server response", {
+
+    newResourceContent <- list(newName = "Brand new shiny name")
+    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+    httpParser <- FakeHttpParser$new()
+    REST <- RESTService$new("token", "host", 
+                            FakeHttpRequest$new(), httpParser,
+                            0, "webDavHost")
+
+    REST$updateResource("collections", resourceUUID, newResourceContent)
+
+    expect_that(httpParser$parserCallCount, equals(1))
+}) 
+
+test_that("updateResource raises exception if response contains errors field", {
+
+    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+    serverResponse <- list(errors = 404)
+    newResourceContent <- list(newName = "Brand new shiny name")
+    REST <- RESTService$new("token", "host", 
+                            FakeHttpRequest$new(NULL, serverResponse),
+                            FakeHttpParser$new(),
+                            0, "webDavHost")
+    
+    expect_that(REST$updateResource("collections", resourceUUID, newResourceContent),
+                throws_error("404", fixed = TRUE))
+}) 
+
+test_that("createResource calls REST service properly", {
+
+    resourceContent <- list(name = "My favorite collection")
+    serverResponse <- NULL
+    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL <- "https://host/arvados/v1/collections"
+    newResourceContent <- list(newName = "Brand new shiny name")
+
+    httpRequest <- FakeHttpRequest$new(expectedURL, serverResponse)
+
+    REST <- RESTService$new("token", "host", 
+                            httpRequest, FakeHttpParser$new(),
+                            0, "webDavHost")
+
+    REST$createResource("collections", resourceContent)
+
+    expect_that(httpRequest$URLIsProperlyConfigured, is_true())
+    expect_that(httpRequest$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(httpRequest$JSONEncodedBodyIsProvided, is_true())
+    expect_that(httpRequest$numberOfPOSTRequests, equals(1))
+}) 
+
+test_that("createResource parses server response", {
+
+    resourceContent <- list(newName = "Brand new shiny name")
+    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+    httpParser <- FakeHttpParser$new()
+    REST <- RESTService$new("token", "host",
+                            FakeHttpRequest$new(), httpParser,
+                            0, "webDavHost")
+
+    REST$createResource("collections", resourceContent)
+
+    expect_that(httpParser$parserCallCount, equals(1))
+}) 
+
+test_that("createResource raises exception if response contains errors field", {
+
+    resourceUUID <- "aaaaa-j7d0g-ccccccccccccccc"
+    serverResponse <- list(errors = 404)
+    resourceContent <- list(newName = "Brand new shiny name")
+    REST <- RESTService$new("token", "host", 
+                            FakeHttpRequest$new(NULL, serverResponse),
+                            FakeHttpParser$new(),
+                            0, "webDavHost")
+    
+    expect_that(REST$createResource("collections", resourceContent),
+                throws_error("404", fixed = TRUE))
+}) 
+
+test_that("create calls REST service properly", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
+    fakeHttp <- FakeHttpRequest$new(expectedURL)
+    fakeHttpParser <- FakeHttpParser$new()
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, fakeHttpParser,
+                            0, "https://webDavHost/")
+
+    REST$create("file", uuid)
+
+    expect_that(fakeHttp$URLIsProperlyConfigured, is_true())
+    expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(fakeHttp$numberOfPUTRequests, equals(1))
+}) 
+
+test_that("create raises exception if server response code is not between 200 and 300", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- list()
+    response$status_code <- 404
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/", 
+                            fakeHttp, HttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$create("file", uuid),
+                throws_error("Server code: 404"))
+}) 
+
+test_that("delete calls REST service properly", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
+    fakeHttp <- FakeHttpRequest$new(expectedURL)
+    fakeHttpParser <- FakeHttpParser$new()
+
+    REST <- RESTService$new("token", "https://host/", 
+                            fakeHttp, fakeHttpParser,
+                            0, "https://webDavHost/")
+
+    REST$delete("file", uuid)
+
+    expect_that(fakeHttp$URLIsProperlyConfigured, is_true())
+    expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(fakeHttp$numberOfDELETERequests, equals(1))
+}) 
+
+test_that("delete raises exception if server response code is not between 200 and 300", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- list()
+    response$status_code <- 404
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, HttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$delete("file", uuid),
+                throws_error("Server code: 404"))
+}) 
+
+test_that("move calls REST service properly", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
+    fakeHttp <- FakeHttpRequest$new(expectedURL)
+    fakeHttpParser <- FakeHttpParser$new()
+
+    REST <- RESTService$new("token", "https://host/", 
+                            fakeHttp, fakeHttpParser,
+                            0, "https://webDavHost/")
+
+    REST$move("file", "newDestination/file", uuid)
+
+    expect_that(fakeHttp$URLIsProperlyConfigured, is_true())
+    expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(fakeHttp$requestHeaderContainsDestinationField, is_true())
+    expect_that(fakeHttp$numberOfMOVERequests, equals(1))
+}) 
+
+test_that("move raises exception if server response code is not between 200 and 300", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- list()
+    response$status_code <- 404
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/", 
+                            fakeHttp, HttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$move("file", "newDestination/file", uuid),
+                throws_error("Server code: 404"))
+}) 
+
+test_that("getCollectionContent retrieves correct content from WebDAV server", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc"
+    returnContent <- list()
+    returnContent$status_code <- 200
+    returnContent$content <- c("animal", "animal/dog", "ball")
+
+    fakeHttp <- FakeHttpRequest$new(expectedURL, returnContent)
+
+    REST <- RESTService$new("token", "https://host/", 
+                            fakeHttp, FakeHttpParser$new(),
+                            0, "https://webDavHost/")
+
+    returnResult <- REST$getCollectionContent(uuid)
+    returnedContentMatchExpected <- all.equal(returnResult,
+                                              c("animal", "animal/dog", "ball"))
+
+    expect_that(returnedContentMatchExpected, is_true())
+    expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
+}) 
+
+test_that("getCollectionContent raises exception if server returns empty response", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- ""
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/", 
+                            fakeHttp, FakeHttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$getCollectionContent(uuid),
+                throws_error("Response is empty, request may be misconfigured"))
+}) 
+
+test_that("getCollectionContent parses server response", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    fakeHttpParser <- FakeHttpParser$new()
+    REST <- RESTService$new("token", "https://host/", 
+                            FakeHttpRequest$new(), fakeHttpParser,
+                            0, "https://webDavHost/")
+
+    REST$getCollectionContent(uuid)
+
+    expect_that(fakeHttpParser$parserCallCount, equals(1))
+}) 
+
+# NOTE(review): exact duplicate of the "getCollectionContent raises exception if
+# server returns empty response" test defined earlier in this file — consider
+# removing one copy (kept here to avoid changing this patch's hunk line counts).
+test_that("getCollectionContent raises exception if server returns empty response", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- ""
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/", 
+                            fakeHttp, FakeHttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$getCollectionContent(uuid),
+                throws_error("Response is empty, request may be misconfigured"))
+}) 
+
+test_that(paste("getCollectionContent raises exception if server",
+                "response code is not between 200 and 300"), {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- list()
+    response$status_code <- 404
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/", 
+                            fakeHttp, HttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$getCollectionContent(uuid),
+                throws_error("Server code: 404"))
+}) 
+
+
+test_that("getResourceSize calls REST service properly", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
+    response <- list()
+    response$status_code <- 200
+    response$content <- c(6, 2, 931, 12003)
+    fakeHttp <- FakeHttpRequest$new(expectedURL, response)
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, FakeHttpParser$new(),
+                            0, "https://webDavHost/")
+
+    returnResult <- REST$getResourceSize("file", uuid)
+    returnedContentMatchExpected <- all.equal(returnResult,
+                                              c(6, 2, 931, 12003))
+
+    expect_that(fakeHttp$URLIsProperlyConfigured, is_true())
+    expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(returnedContentMatchExpected, is_true())
+}) 
+
+test_that("getResourceSize raises exception if server returns empty response", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- ""
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/", 
+                            fakeHttp, FakeHttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$getResourceSize("file", uuid),
+                throws_error("Response is empty, request may be misconfigured"))
+}) 
+
+test_that(paste("getResourceSize raises exception if server",
+                "response code is not between 200 and 300"), {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- list()
+    response$status_code <- 404
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/", 
+                            fakeHttp, HttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$getResourceSize("file", uuid),
+                throws_error("Server code: 404"))
+}) 
+
+test_that("getResourceSize parses server response", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    fakeHttpParser <- FakeHttpParser$new()
+    REST <- RESTService$new("token", "https://host/", 
+                            FakeHttpRequest$new(), fakeHttpParser,
+                            0, "https://webDavHost/")
+
+    REST$getResourceSize("file", uuid)
+
+    expect_that(fakeHttpParser$parserCallCount, equals(1))
+}) 
+
+test_that("read calls REST service properly", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
+    serverResponse <- list()
+    serverResponse$status_code <- 200
+    serverResponse$content <- "file content"
+
+    fakeHttp <- FakeHttpRequest$new(expectedURL, serverResponse)
+
+    REST <- RESTService$new("token", "https://host/", 
+                            fakeHttp, FakeHttpParser$new(),
+                            0, "https://webDavHost/")
+
+    returnResult <- REST$read("file", uuid, "text", 1024, 512)
+
+    expect_that(fakeHttp$URLIsProperlyConfigured, is_true())
+    expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(fakeHttp$requestHeaderContainsRangeField, is_true())
+    expect_that(returnResult, equals("file content"))
+}) 
+
+test_that("read raises exception if server response code is not between 200 and 300", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    response <- list()
+    response$status_code <- 404
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/", 
+                            fakeHttp, HttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$read("file", uuid),
+                throws_error("Server code: 404"))
+}) 
+
+test_that("read raises exception if contentType is not valid", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    fakeHttp <- FakeHttpRequest$new()
+
+    REST <- RESTService$new("token", "https://host/", 
+                            fakeHttp, HttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$read("file", uuid, "some invalid content type"),
+                throws_error("Invalid contentType. Please use text or raw."))
+}) 
+
+test_that("read parses server response", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    fakeHttpParser <- FakeHttpParser$new()
+    REST <- RESTService$new("token", "https://host/", 
+                            FakeHttpRequest$new(), fakeHttpParser,
+                            0, "https://webDavHost/")
+
+    REST$read("file", uuid, "text", 1024, 512)
+
+    expect_that(fakeHttpParser$parserCallCount, equals(1))
+}) 
+
+test_that("write calls REST service properly", {
+
+    fileContent <- "new file content" 
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    expectedURL <- "https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file"
+    fakeHttp <- FakeHttpRequest$new(expectedURL)
+
+    REST <- RESTService$new("token", "https://host/", 
+                            fakeHttp, FakeHttpParser$new(),
+                            0, "https://webDavHost/")
+
+    REST$write("file", uuid, fileContent, "text/html")
+
+    expect_that(fakeHttp$URLIsProperlyConfigured, is_true())
+    expect_that(fakeHttp$requestBodyIsProvided, is_true())
+    expect_that(fakeHttp$requestHeaderContainsAuthorizationField, is_true())
+    expect_that(fakeHttp$requestHeaderContainsContentTypeField, is_true())
+}) 
+
+test_that("write raises exception if server response code is not between 200 and 300", {
+
+    uuid <- "aaaaa-j7d0g-ccccccccccccccc"
+    fileContent <- "new file content" 
+    response <- list()
+    response$status_code <- 404
+    fakeHttp <- FakeHttpRequest$new(serverResponse = response)
+
+    REST <- RESTService$new("token", "https://host/",
+                            fakeHttp, HttpParser$new(),
+                            0, "https://webDavHost/")
+
+    expect_that(REST$write("file", uuid, fileContent, "text/html"),
+                throws_error("Server code: 404"))
+}) 
diff --git a/sdk/R/tests/testthat/test-Subcollection.R b/sdk/R/tests/testthat/test-Subcollection.R
new file mode 100644 (file)
index 0000000..1b141e1
--- /dev/null
@@ -0,0 +1,356 @@
+source("fakes/FakeRESTService.R")
+
+context("Subcollection")
+
+test_that("getRelativePath returns path relative to the tree root", {
+
+    animal <- Subcollection$new("animal")
+
+    fish <- Subcollection$new("fish")
+    animal$add(fish)
+
+    expect_that(animal$getRelativePath(), equals("animal"))
+    expect_that(fish$getRelativePath(), equals("animal/fish"))
+}) 
+
+test_that(paste("getFileListing by default returns sorted path of all files",
+                "relative to the current subcollection"), {
+
+    animal   <- Subcollection$new("animal")
+    fish     <- Subcollection$new("fish")
+    shark    <- ArvadosFile$new("shark")
+    blueFish <- ArvadosFile$new("blueFish")
+
+    animal$add(fish)
+    fish$add(shark)
+    fish$add(blueFish)
+
+    result <- animal$getFileListing()
+
+    #expect sorted array
+    expectedResult <- c("animal/fish/blueFish", "animal/fish/shark")
+
+    resultsMatch <- length(expectedResult) == length(result) &&
+                    all(expectedResult == result)
+
+    expect_that(resultsMatch, is_true())
+}) 
+
+test_that(paste("getFileListing returns sorted names of all direct children",
+                "if fullPath is set to FALSE"), {
+
+    animal <- Subcollection$new("animal")
+    fish   <- Subcollection$new("fish")
+    shark  <- ArvadosFile$new("shark")
+    dog    <- ArvadosFile$new("dog")
+
+    animal$add(fish)
+    animal$add(dog)
+    fish$add(shark)
+
+    result <- animal$getFileListing(fullPath = FALSE)
+    expectedResult <- c("dog", "fish")
+
+    resultsMatch <- length(expectedResult) == length(result) &&
+                    all(expectedResult == result)
+
+    expect_that(resultsMatch, is_true())
+}) 
+
+test_that("add adds content to inside collection tree", {
+
+    animal <- Subcollection$new("animal")
+    fish   <- Subcollection$new("fish")
+    dog    <- ArvadosFile$new("dog")
+
+    animal$add(fish)
+    animal$add(dog)
+
+    animalContainsFish <- animal$get("fish")$getName() == fish$getName()
+    animalContainsDog  <- animal$get("dog")$getName()  == dog$getName()
+
+    expect_that(animalContainsFish, is_true())
+    expect_that(animalContainsDog, is_true())
+}) 
+
+test_that("add raises exception if content name is empty string", {
+
+    animal     <- Subcollection$new("animal")
+    rootFolder <- Subcollection$new("")
+
+    expect_that(animal$add(rootFolder),
+                throws_error("Content has invalid name.", fixed = TRUE))
+})
+
+test_that(paste("add raises exception if ArvadosFile/Subcollection", 
+                "with same name already exists in the subcollection"), {
+
+    animal     <- Subcollection$new("animal")
+    fish       <- Subcollection$new("fish")
+    secondFish <- Subcollection$new("fish")
+    thirdFish  <- ArvadosFile$new("fish")
+
+    animal$add(fish)
+
+    expect_that(animal$add(secondFish),
+                throws_error(paste("Subcollection already contains ArvadosFile or",
+                                   "Subcollection with same name."), fixed = TRUE))
+    expect_that(animal$add(thirdFish),
+                throws_error(paste("Subcollection already contains ArvadosFile or",
+                                   "Subcollection with same name."), fixed = TRUE))
+}) 
+
+test_that(paste("add raises exception if passed argument is", 
+                "not ArvadosFile or Subcollection"), {
+
+    animal <- Subcollection$new("animal")
+    number <- 10
+
+    expect_that(animal$add(number),
+                throws_error(paste("Expected AravodsFile or Subcollection object,",
+                                   "got (numeric)."), fixed = TRUE))
+}) 
+
+test_that(paste("add post content to a REST service", 
+                "if subcollection belongs to a collection"), {
+    
+    collectionContent <- c("animal", "animal/fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+
+    collection <- Collection$new(api, "myUUID")
+    animal <- collection$get("animal")
+    dog <- ArvadosFile$new("dog")
+
+    animal$add(dog)
+
+    expect_that(fakeREST$createCallCount, equals(1))
+}) 
+
+test_that("remove removes content from subcollection", {
+
+    animal <- Subcollection$new("animal")
+    fish   <- Subcollection$new("fish")
+
+    animal$add(fish)
+    animal$remove("fish")
+
+    returnValueAfterRemovalIsNull <- is.null(animal$get("fish"))
+
+    expect_that(returnValueAfterRemovalIsNull, is_true())
+}) 
+
+test_that(paste("remove raises exception", 
+                "if content to remove doesn't exist in the subcollection"), {
+
+    animal <- Subcollection$new("animal")
+
+    expect_that(animal$remove("fish"),
+                throws_error(paste("Subcollection doesn't contains ArvadosFile",
+                                   "or Subcollection with specified name.")))
+}) 
+
+test_that("remove raises exception if passed argument is not character vector", {
+
+    animal <- Subcollection$new("animal")
+    number <- 10
+
+    expect_that(animal$remove(number),
+                throws_error(paste("Expected character,",
+                                   "got (numeric)."), fixed = TRUE))
+}) 
+
+test_that(paste("remove removes content from REST service", 
+                "if subcollection belongs to a collection"), {
+    
+    collectionContent <- c("animal", "animal/fish", "animal/dog")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    animal <- collection$get("animal")
+
+    animal$remove("fish")
+
+    expect_that(fakeREST$deleteCallCount, equals(1))
+}) 
+
+test_that(paste("get returns ArvadosFile or Subcollection", 
+                "if file or folder with given name exists"), {
+
+    animal <- Subcollection$new("animal")
+    fish   <- Subcollection$new("fish")
+    dog    <- ArvadosFile$new("dog")
+
+    animal$add(fish)
+    animal$add(dog)
+
+    returnedFish <- animal$get("fish")
+    returnedDog  <- animal$get("dog")
+
+    returnedFishIsSubcollection <- "Subcollection" %in% class(returnedFish)
+    returnedDogIsArvadosFile    <- "ArvadosFile"   %in% class(returnedDog)
+
+    expect_that(returnedFishIsSubcollection, is_true())
+    expect_that(returnedFish$getName(), equals("fish"))
+
+    expect_that(returnedDogIsArvadosFile, is_true())
+    expect_that(returnedDog$getName(), equals("dog"))
+}) 
+
+test_that(paste("get returns NULL if file or folder", 
+                "with given name doesn't exist"), {
+
+    animal <- Subcollection$new("animal")
+    fish   <- Subcollection$new("fish")
+
+    animal$add(fish)
+
+    returnedDogIsNull <- is.null(animal$get("dog"))
+
+    expect_that(returnedDogIsNull, is_true())
+}) 
+
+test_that("getFirst returns first child in the subcollection", {
+
+    animal <- Subcollection$new("animal")
+    fish   <- Subcollection$new("fish")
+
+    animal$add(fish)
+
+    expect_that(animal$getFirst()$getName(), equals("fish"))
+}) 
+
+test_that("getFirst returns NULL if subcollection contains no children", {
+
+    animal <- Subcollection$new("animal")
+
+    returnedElementIsNull <- is.null(animal$getFirst())
+
+    expect_that(returnedElementIsNull, is_true())
+}) 
+
+test_that(paste("setCollection by default sets collection",
+                "field of subcollection and all its children"), {
+
+    animal <- Subcollection$new("animal")
+    fish   <- Subcollection$new("fish")
+    animal$add(fish)
+
+    animal$setCollection("myCollection")
+
+    expect_that(animal$getCollection(), equals("myCollection"))
+    expect_that(fish$getCollection(), equals("myCollection"))
+}) 
+
+test_that(paste("setCollection sets collection field of subcollection only",
+                "if parameter setRecursively is set to FALSE"), {
+
+    animal <- Subcollection$new("animal")
+    fish   <- Subcollection$new("fish")
+    animal$add(fish)
+
+    animal$setCollection("myCollection", setRecursively = FALSE)
+    fishCollectionIsNull <- is.null(fish$getCollection())
+
+    expect_that(animal$getCollection(), equals("myCollection"))
+    expect_that(fishCollectionIsNull, is_true())
+}) 
+
+test_that(paste("move raises exception if subcollection",
+                "doesn't belong to any collection"), {
+
+    animal <- Subcollection$new("animal")
+
+    expect_that(animal$move("new/location"),
+                throws_error("Subcollection doesn't belong to any collection"))
+}) 
+
+test_that("move raises exception if new location contains content with the same name", {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "fish")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    expect_that(fish$move("fish"),
+                throws_error("Destination already contains content with same name."))
+
+}) 
+
+test_that(paste("move raises exception if newLocationInCollection",
+                "parameter is invalid"), {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    expect_that(fish$move("objects/dog"),
+                throws_error("Unable to get destination subcollection"))
+}) 
+
+test_that("move moves subcollection inside collection tree", {
+
+    collectionContent <- c("animal",
+                           "animal/fish",
+                           "animal/dog",
+                           "animal/fish/shark",
+                           "ball")
+    fakeREST <- FakeRESTService$new(collectionContent)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    fish <- collection$get("animal/fish")
+
+    fish$move("fish")
+    fishIsNullOnOldLocation <- is.null(collection$get("animal/fish"))
+    fishExistsOnNewLocation <- !is.null(collection$get("fish"))
+
+    expect_that(fishIsNullOnOldLocation, is_true())
+    expect_that(fishExistsOnNewLocation, is_true())
+}) 
+
+test_that(paste("getSizeInBytes returns zero if subcollection",
+                "is not part of a collection"), {
+
+    animal <- Subcollection$new("animal")
+
+    expect_that(animal$getSizeInBytes(), equals(0))
+}) 
+
+test_that(paste("getSizeInBytes delegates size calculation",
+                "to REST service class"), {
+
+    collectionContent <- c("animal", "animal/fish")
+    returnSize <- 100
+    fakeREST <- FakeRESTService$new(collectionContent, returnSize)
+
+    api <- Arvados$new("myToken", "myHostName")
+    api$setRESTService(fakeREST)
+    collection <- Collection$new(api, "myUUID")
+    animal <- collection$get("animal")
+
+    resourceSize <- animal$getSizeInBytes()
+
+    expect_that(resourceSize, equals(100))
+}) 
diff --git a/sdk/R/tests/testthat/test-util.R b/sdk/R/tests/testthat/test-util.R
new file mode 100644 (file)
index 0000000..62065f8
--- /dev/null
@@ -0,0 +1,85 @@
+context("Utility function")
+
+test_that("trimFromStart trims string correctly if string starts with trimCharacters", {
+
+    sample <- "./something/random"
+    trimCharacters <- "./something/"
+
+    result <- trimFromStart(sample, trimCharacters)
+
+    expect_that(result, equals("random"))
+}) 
+
+test_that("trimFromStart returns original string if string doesn't start with trimCharacters", {
+
+    sample <- "./something/random"
+    trimCharacters <- "./nothing/"
+
+    result <- trimFromStart(sample, trimCharacters)
+
+    expect_that(result, equals("./something/random"))
+}) 
+
+test_that("trimFromEnd trims string correctly if string ends with trimCharacters", {
+
+    sample <- "./something/random"
+    trimCharacters <- "/random"
+
+    result <- trimFromEnd(sample, trimCharacters)
+
+    expect_that(result, equals("./something"))
+}) 
+
+test_that("trimFromEnd returns original string if string doesn't end with trimCharacters", {
+
+    sample <- "./something/random"
+    trimCharacters <- "specific"
+
+    result <- trimFromEnd(sample, trimCharacters) # fixed: called trimFromStart (copy-paste bug), so trimFromEnd was never tested
+
+    expect_that(result, equals("./something/random"))
+}) 
+
+test_that("RListToPythonList converts nested R list to char representation of Python list", {
+
+    sample <- list("insert", list("random", list("text")), list("here")) 
+
+    result              <- RListToPythonList(sample)
+    resultWithSeparator <- RListToPythonList(sample, separator = ",+")
+
+    expect_that(result, equals("[\"insert\", [\"random\", \"text\"], \"here\"]"))
+    expect_that(resultWithSeparator,
+                equals("[\"insert\",+[\"random\",+\"text\"],+\"here\"]"))
+}) 
+
+test_that("appendToStartIfNotExist appends characters to beginning of a string", {
+
+    sample <- "New Year"
+    charactersToAppend <- "Happy "
+
+    result <- appendToStartIfNotExist(sample, charactersToAppend)
+
+    expect_that(result, equals("Happy New Year"))
+}) 
+
+test_that(paste("appendToStartIfNotExist returns original string if string",
+                "doesn't start with specified characters"), {
+
+    sample <- "Happy New Year"
+    charactersToAppend <- "Happy"
+
+    result <- appendToStartIfNotExist(sample, charactersToAppend)
+
+    expect_that(result, equals("Happy New Year"))
+}) 
+
+test_that(paste("splitToPathAndName splits relative path to file/folder",
+                "name and rest of the path"), {
+
+    relativePath <- "path/to/my/file.exe"
+
+    result <- splitToPathAndName( relativePath)
+
+    expect_that(result$name, equals("file.exe"))
+    expect_that(result$path, equals("path/to/my"))
+}) 
index e82fd9feef1f3d88c4068e7d0d6cbfee6232c3f2..71ddd172214c4dac1b20907c8cf5a18bce6c37b2 100644 (file)
@@ -224,13 +224,16 @@ class ArvCwlRunner(object):
 
     def check_features(self, obj):
         if isinstance(obj, dict):
-            if obj.get("writable"):
-                raise SourceLine(obj, "writable", UnsupportedRequirement).makeError("InitialWorkDir feature 'writable: true' not supported")
+            if obj.get("writable") and self.work_api != "containers":
+                raise SourceLine(obj, "writable", UnsupportedRequirement).makeError("InitialWorkDir feature 'writable: true' not supported with --api=jobs")
             if obj.get("class") == "DockerRequirement":
                 if obj.get("dockerOutputDirectory"):
-                    # TODO: can be supported by containers API, but not jobs API.
-                    raise SourceLine(obj, "dockerOutputDirectory", UnsupportedRequirement).makeError(
-                        "Option 'dockerOutputDirectory' of DockerRequirement not supported.")
+                    if self.work_api != "containers":
+                        raise SourceLine(obj, "dockerOutputDirectory", UnsupportedRequirement).makeError(
+                            "Option 'dockerOutputDirectory' of DockerRequirement not supported with --api=jobs.")
+                    if not obj.get("dockerOutputDirectory").startswith('/'):
+                        raise SourceLine(obj, "dockerOutputDirectory", validate.ValidationException).makeError(
+                            "Option 'dockerOutputDirectory' must be an absolute path.")
             for v in obj.itervalues():
                 self.check_features(v)
         elif isinstance(obj, list):
@@ -279,7 +282,7 @@ class ArvCwlRunner(object):
 
         def rewrite(fileobj):
             fileobj["location"] = generatemapper.mapper(fileobj["location"]).target
-            for k in ("basename", "listing", "contents", "nameext", "nameroot", "dirname"):
+            for k in ("listing", "contents", "nameext", "nameroot", "dirname"):
                 if k in fileobj:
                     del fileobj[k]
 
index 014e1b94aae5b283ea851fd74e480dd5df926f55..abe67c8fb3c552c7e66925093ae1377b1e26b4e9 100644 (file)
@@ -105,17 +105,32 @@ class ArvadosContainer(object):
                 generatemapper = NoFollowPathMapper([self.generatefiles], "", "",
                                                     separateDirs=False)
 
+                logger.debug("generatemapper is %s", generatemapper._pathmap)
+
                 with Perf(metrics, "createfiles %s" % self.name):
                     for f, p in generatemapper.items():
                         if not p.target:
                             pass
-                        elif p.type in ("File", "Directory"):
-                            source, path = self.arvrunner.fs_access.get_collection(p.resolved)
-                            vwd.copy(path, p.target, source_collection=source)
+                        elif p.type in ("File", "Directory", "WritableFile", "WritableDirectory"):
+                            if p.resolved.startswith("_:"):
+                                vwd.mkdirs(p.target)
+                            else:
+                                source, path = self.arvrunner.fs_access.get_collection(p.resolved)
+                                vwd.copy(path, p.target, source_collection=source)
                         elif p.type == "CreateFile":
                             with vwd.open(p.target, "w") as n:
                                 n.write(p.resolved.encode("utf-8"))
 
+                def keepemptydirs(p):
+                    if isinstance(p, arvados.collection.RichCollectionBase):
+                        if len(p) == 0:
+                            p.open(".keep", "w").close()
+                        else:
+                            for c in p:
+                                keepemptydirs(p[c])
+
+                keepemptydirs(vwd)
+
                 with Perf(metrics, "generatefiles.save_new %s" % self.name):
                     vwd.save_new()
 
@@ -126,6 +141,8 @@ class ArvadosContainer(object):
                     mounts[mountpoint] = {"kind": "collection",
                                           "portable_data_hash": vwd.portable_data_hash(),
                                           "path": p.target}
+                    if p.type.startswith("Writable"):
+                        mounts[mountpoint]["writable"] = True
 
         container_request["environment"] = {"TMPDIR": self.tmpdir, "HOME": self.outdir}
         if self.environment:
index b667dac1ca5cec6f272c390be8fcd17e1628764c..81faff44e6297476d28d84c7f78d7201dff29122 100644 (file)
@@ -36,8 +36,13 @@ class ArvadosCommandTool(CommandLineTool):
 
     def job(self, joborder, output_callback, **kwargs):
         if self.work_api == "containers":
-            kwargs["outdir"] = "/var/spool/cwl"
-            kwargs["docker_outdir"] = "/var/spool/cwl"
+            dockerReq, is_req = self.get_requirement("DockerRequirement")
+            if dockerReq and dockerReq.get("dockerOutputDirectory"):
+                kwargs["outdir"] = dockerReq.get("dockerOutputDirectory")
+                kwargs["docker_outdir"] = dockerReq.get("dockerOutputDirectory")
+            else:
+                kwargs["outdir"] = "/var/spool/cwl"
+                kwargs["docker_outdir"] = "/var/spool/cwl"
         elif self.work_api == "jobs":
             kwargs["outdir"] = "$(task.outdir)"
             kwargs["docker_outdir"] = "$(task.outdir)"
index 914ccaa5a1049868cfe7f840f6bf7d56e957218c..998890a31c50acac0513479d0fad9675fd790647 100644 (file)
@@ -225,12 +225,16 @@ class StagingPathMapper(PathMapper):
         tgt = os.path.join(stagedir, obj["basename"])
         basetgt, baseext = os.path.splitext(tgt)
         n = 1
-        while tgt in self.targets:
-            n += 1
-            tgt = "%s_%i%s" % (basetgt, n, baseext)
+        if tgt in self.targets and (self.reversemap(tgt)[0] != loc):
+            while tgt in self.targets:
+                n += 1
+                tgt = "%s_%i%s" % (basetgt, n, baseext)
         self.targets.add(tgt)
         if obj["class"] == "Directory":
-            self._pathmap[loc] = MapperEnt(loc, tgt, "Directory", staged)
+            if obj.get("writable"):
+                self._pathmap[loc] = MapperEnt(loc, tgt, "WritableDirectory", staged)
+            else:
+                self._pathmap[loc] = MapperEnt(loc, tgt, "Directory", staged)
             if loc.startswith("_:") or self._follow_dirs:
                 self.visitlisting(obj.get("listing", []), tgt, basedir)
         elif obj["class"] == "File":
@@ -239,7 +243,7 @@ class StagingPathMapper(PathMapper):
             if "contents" in obj and loc.startswith("_:"):
                 self._pathmap[loc] = MapperEnt(obj["contents"], tgt, "CreateFile", staged)
             else:
-                if copy:
+                if copy or obj.get("writable"):
                     self._pathmap[loc] = MapperEnt(loc, tgt, "WritableFile", staged)
                 else:
                     self._pathmap[loc] = MapperEnt(loc, tgt, "File", staged)
index 2ca63cfe5048a62f0a1853e2aca06be865ea1fd4..fb5d036e941969df71b6a3062d09bb87d4328739 100644 (file)
@@ -161,7 +161,7 @@ def upload_docker(arvrunner, tool):
     if isinstance(tool, CommandLineTool):
         (docker_req, docker_is_req) = get_feature(tool, "DockerRequirement")
         if docker_req:
-            if docker_req.get("dockerOutputDirectory"):
+            if docker_req.get("dockerOutputDirectory") and arvrunner.work_api != "containers":
                 # TODO: can be supported by containers API, but not jobs API.
                 raise SourceLine(docker_req, "dockerOutputDirectory", UnsupportedRequirement).makeError(
                     "Option 'dockerOutputDirectory' of DockerRequirement not supported.")
index 88e3d80db35293a5fab4aec7dbd4db023e26a943..e5484651edcd44d0f2ef67c3bf0dbd1244ffca60 100644 (file)
@@ -41,7 +41,7 @@ setup(name='arvados-cwl-runner',
       # Note that arvados/build/run-build-packages.sh looks at this
       # file to determine what version of cwltool and schema-salad to build.
       install_requires=[
-          'cwltool==1.0.20180116213856',
+          'cwltool==1.0.20180130110340',
           'schema-salad==2.6.20171201034858',
           'typing==3.5.3.0',
           'ruamel.yaml==0.13.7',
index 26b31615ea41950a7f3f13535571b2755981eb7a..d3c1e90637d5419320b0115b386780d6321d9975 100755 (executable)
@@ -6,4 +6,7 @@
 if ! arv-get d7514270f356df848477718d58308cc4+94 > /dev/null ; then
     arv-put --portable-data-hash testdir/*
 fi
+if ! arv-get f225e6259bdd63bc7240599648dde9f1+97 > /dev/null ; then
+    arv-put --portable-data-hash hg19/*
+fi
 exec cwltest --test arvados-tests.yml --tool arvados-cwl-runner $@ -- --disable-reuse --compute-checksum
diff --git a/sdk/cwl/tests/hg19/hg19.fa b/sdk/cwl/tests/hg19/hg19.fa
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/hg19/hg19.fa.amb b/sdk/cwl/tests/hg19/hg19.fa.amb
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/hg19/hg19.fa.ann b/sdk/cwl/tests/hg19/hg19.fa.ann
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/cwl/tests/hg19/hg19.fa.fai b/sdk/cwl/tests/hg19/hg19.fa.fai
new file mode 100644 (file)
index 0000000..e69de29
index 05c9ee7410ac7b12ac1ae29770db0617709ce710..806d63ab85f3f1a9a08c73f9ea76f3dc7b3ecc09 100644 (file)
@@ -59,11 +59,13 @@ class TestMakeOutput(unittest.TestCase):
         final.save_new.assert_has_calls([mock.call(ensure_unique_name=True, name='Test output', owner_uuid='zzzzz-j7d0g-zzzzzzzzzzzzzzz')])
         self.assertEqual("""{
     "bar": {
+        "basename": "baz.txt",
         "class": "File",
         "location": "baz.txt",
         "size": 4
     },
     "foo": {
+        "basename": "foo.txt",
         "class": "File",
         "location": "foo.txt",
         "size": 3
index 5e530658abe96534d9ed3488a975a85a778af16d..d057c09b227e9f375d2b3d04e95d9327044c4f33 100644 (file)
@@ -13,6 +13,7 @@ const (
        DataManagerToken        = "320mkve8qkswstz7ff61glpk3mhgghmg67wmic7elw4z41pke1"
        ManagementToken         = "jg3ajndnq63sywcd50gbs5dskdc9ckkysb0nsqmfz08nwf17nl"
        ActiveUserUUID          = "zzzzz-tpzed-xurymjxw79nv3jz"
+       FederatedActiveUserUUID = "zbbbb-tpzed-xurymjxw79nv3jz"
        SpectatorUserUUID       = "zzzzz-tpzed-l1s2piq4t4mps8r"
        UserAgreementCollection = "zzzzz-4zz18-uukreo9rbgwsujr" // user_agreement_in_anonymously_accessible_project
        FooCollection           = "zzzzz-4zz18-fy296fx3hot09f7"
index edcb8501a4621aa71be9eef5d68c719aac49a267..b013776b98d3690db6cd5921bc8a3c11e6ce4ad4 100644 (file)
@@ -90,7 +90,7 @@ class Container < ArvadosModel
       self.priority = ContainerRequest.
         where(container_uuid: uuid,
               state: ContainerRequest::Committed).
-        maximum('priority')
+        maximum('priority') || 0
       self.save!
     end
   end
@@ -515,7 +515,7 @@ class Container < ArvadosModel
             cr.with_lock do
               # Use row locking because this increments container_count
               cr.container_uuid = c.uuid
-              cr.save
+              cr.save!
             end
           end
         end
@@ -526,11 +526,21 @@ class Container < ArvadosModel
           cr.finalize!
         end
 
-        # Try to cancel any outstanding container requests made by this container.
-        ContainerRequest.where(requesting_container_uuid: uuid,
-                               state: ContainerRequest::Committed).each do |cr|
-          cr.priority = 0
-          cr.save
+        # Cancel outstanding container requests made by this container.
+        ContainerRequest.
+          includes(:container).
+          where(requesting_container_uuid: uuid,
+                state: ContainerRequest::Committed).each do |cr|
+          cr.update_attributes!(priority: 0)
+          cr.container.reload
+          if cr.container.state == Container::Queued || cr.container.state == Container::Locked
+            # If the child container hasn't started yet, finalize the
+            # child CR now instead of leaving it "on hold", i.e.,
+            # Queued with priority 0.  (OTOH, if the child is already
+            # running, leave it alone so it can get cancelled the
+            # usual way, get a copy of the log collection, etc.)
+            cr.update_attributes!(state: ContainerRequest::Final)
+          end
         end
       end
     end
index 3596bf3d67551352c7ba404b3605c7dabe15956b..bcca40700bd9efbaf57c74332a94f3325763299a 100644 (file)
@@ -10,6 +10,8 @@ class ContainerRequest < ArvadosModel
   include CommonApiTemplate
   include WhitelistUpdate
 
+  belongs_to :container, foreign_key: :container_uuid, primary_key: :uuid
+
   serialize :properties, Hash
   serialize :environment, Hash
   serialize :mounts, Hash
@@ -237,12 +239,13 @@ class ContainerRequest < ArvadosModel
       end
 
     when Final
-      if self.state_changed? and not current_user.andand.is_admin
-        self.errors.add :state, "of container request can only be set to Final by system."
-      end
-
       if self.state_was == Committed
-        permitted.push :output_uuid, :log_uuid
+        # "Cancel" means setting priority=0, state=Committed
+        permitted.push :priority
+
+        if current_user.andand.is_admin
+          permitted.push :output_uuid, :log_uuid
+        end
       end
 
     end
index 85e40ffe34d10f26198c3bab6523e234e7360eec..29ce4f5aea5ffb29489f38fc49e9235bfa979b00 100644 (file)
@@ -219,7 +219,7 @@ canceled_with_queued_container:
   uuid: zzzzz-xvhdp-canceledqueuedc
   owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
   name: canceled with queued container
-  state: Committed
+  state: Final
   priority: 0
   created_at: 2016-01-11 11:11:11.111111111 Z
   updated_at: 2016-01-11 11:11:11.111111111 Z
@@ -238,7 +238,7 @@ canceled_with_locked_container:
   uuid: zzzzz-xvhdp-canceledlocekdc
   owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
   name: canceled with locked container
-  state: Committed
+  state: Final
   priority: 0
   created_at: 2016-01-11 11:11:11.111111111 Z
   updated_at: 2016-01-11 11:11:11.111111111 Z
index 5267a0b2e196fb055d47b03c6d6f8b86c9245d64..013f03c47e55b154da228e20a934bee106239676 100644 (file)
@@ -486,6 +486,8 @@ pipeline_<%=i%>_of_10:
           required: true
           dataclass: Collection
           title: foo instance input
+      job:
+        state: Failed
 <% end %>
 
 # pipelines in project_with_2_pipelines_and_60_crs
index 808795213351870986689299f3a9cbcbf087265c..8fb800c5f94f8a93bdc2f3990282ea76df7bb51b 100644 (file)
@@ -84,6 +84,22 @@ active:
       role: Computational biologist
     getting_started_shown: 2015-03-26 12:34:56.789000000 Z
 
+federated_active:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zbbbb-tpzed-xurymjxw79nv3jz
+  email: zbbbb-active-user@arvados.local
+  first_name: Active
+  last_name: User
+  identity_url: https://active-user.openid.local
+  is_active: true
+  is_admin: false
+  username: federatedactive
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+    getting_started_shown: 2015-03-26 12:34:56.789000000 Z
+
 project_viewer:
   owner_uuid: zzzzz-tpzed-000000000000000
   uuid: zzzzz-tpzed-projectviewer1a
index ed86befbace2133bd06213b809279478418cf5a2..0e13ee950c3aa57a4eab704d1003b378e2b4f4d2 100644 (file)
@@ -473,10 +473,12 @@ class ContainerTest < ActiveSupport::TestCase
   end
 
   test "Container queued cancel" do
-    c, _ = minimal_new
+    c, cr = minimal_new({container_count_max: 1})
     set_user_from_auth :dispatch1
     assert c.update_attributes(state: Container::Cancelled), show_errors(c)
     check_no_change_from_cancelled c
+    cr.reload
+    assert_equal ContainerRequest::Final, cr.state
   end
 
   test "Container queued count" do
index 7eefb1aaaded35be0b4ab99a03f4d82e30076c06..653e0b4949da882cbcc185894ffdaee369c19318 100644 (file)
@@ -103,21 +103,23 @@ type ContainerRunner struct {
        LogsPDH       *string
        RunArvMount
        MkTempDir
-       ArvMount       *exec.Cmd
-       ArvMountPoint  string
-       HostOutputDir  string
-       CleanupTempDir []string
-       Binds          []string
-       Volumes        map[string]struct{}
-       OutputPDH      *string
-       SigChan        chan os.Signal
-       ArvMountExit   chan error
-       finalState     string
-
-       statLogger   io.WriteCloser
-       statReporter *crunchstat.Reporter
-       statInterval time.Duration
-       cgroupRoot   string
+       ArvMount      *exec.Cmd
+       ArvMountPoint string
+       HostOutputDir string
+       Binds         []string
+       Volumes       map[string]struct{}
+       OutputPDH     *string
+       SigChan       chan os.Signal
+       ArvMountExit  chan error
+       finalState    string
+       parentTemp    string
+
+       statLogger       io.WriteCloser
+       statReporter     *crunchstat.Reporter
+       hoststatLogger   io.WriteCloser
+       hoststatReporter *crunchstat.Reporter
+       statInterval     time.Duration
+       cgroupRoot       string
        // What we expect the container's cgroup parent to be.
        expectCgroupParent string
        // What we tell docker to use as the container's cgroup
@@ -150,16 +152,18 @@ func (runner *ContainerRunner) setupSignals() {
 
        go func(sig chan os.Signal) {
                for s := range sig {
-                       runner.CrunchLog.Printf("caught signal: %v", s)
-                       runner.stop()
+                       runner.stop(s)
                }
        }(runner.SigChan)
 }
 
 // stop the underlying Docker container.
-func (runner *ContainerRunner) stop() {
+func (runner *ContainerRunner) stop(sig os.Signal) {
        runner.cStateLock.Lock()
        defer runner.cStateLock.Unlock()
+       if sig != nil {
+               runner.CrunchLog.Printf("caught signal: %v", sig)
+       }
        if runner.ContainerID == "" {
                return
        }
@@ -171,12 +175,6 @@ func (runner *ContainerRunner) stop() {
        }
 }
 
-func (runner *ContainerRunner) stopSignals() {
-       if runner.SigChan != nil {
-               signal.Stop(runner.SigChan)
-       }
-}
-
 var errorBlacklist = []string{
        "(?ms).*[Cc]annot connect to the Docker daemon.*",
        "(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*",
@@ -325,11 +323,42 @@ func (runner *ContainerRunner) ArvMountCmd(arvMountCmd []string, token string) (
 
 func (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {
        if runner.ArvMountPoint == "" {
-               runner.ArvMountPoint, err = runner.MkTempDir("", prefix)
+               runner.ArvMountPoint, err = runner.MkTempDir(runner.parentTemp, prefix)
        }
        return
 }
 
+func copyfile(src string, dst string) (err error) {
+       srcfile, err := os.Open(src)
+       if err != nil {
+               return
+       }
+
+       os.MkdirAll(path.Dir(dst), 0777)
+
+       dstfile, err := os.Create(dst)
+       if err != nil {
+               return
+       }
+       _, err = io.Copy(dstfile, srcfile)
+       if err != nil {
+               return
+       }
+
+       err = srcfile.Close()
+       err2 := dstfile.Close()
+
+       if err != nil {
+               return
+       }
+
+       if err2 != nil {
+               return err2
+       }
+
+       return nil
+}
+
 func (runner *ContainerRunner) SetupMounts() (err error) {
        err = runner.SetupArvMountPoint("keep")
        if err != nil {
@@ -357,6 +386,11 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
        runner.Binds = nil
        runner.Volumes = make(map[string]struct{})
        needCertMount := true
+       type copyFile struct {
+               src  string
+               bind string
+       }
+       var copyFiles []copyFile
 
        var binds []string
        for bind := range runner.Container.Mounts {
@@ -412,7 +446,7 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                                pdhOnly = false
                                src = fmt.Sprintf("%s/by_id/%s", runner.ArvMountPoint, mnt.UUID)
                        } else if mnt.PortableDataHash != "" {
-                               if mnt.Writable {
+                               if mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
                                        return fmt.Errorf("Can never write to a collection specified by portable data hash")
                                }
                                idx := strings.Index(mnt.PortableDataHash, "/")
@@ -439,10 +473,12 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                        if mnt.Writable {
                                if bind == runner.Container.OutputPath {
                                        runner.HostOutputDir = src
+                                       runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
                                } else if strings.HasPrefix(bind, runner.Container.OutputPath+"/") {
-                                       return fmt.Errorf("Writable mount points are not permitted underneath the output_path: %v", bind)
+                                       copyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})
+                               } else {
+                                       runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
                                }
-                               runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", src, bind))
                        } else {
                                runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", src, bind))
                        }
@@ -450,7 +486,7 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
 
                case mnt.Kind == "tmp":
                        var tmpdir string
-                       tmpdir, err = runner.MkTempDir("", "")
+                       tmpdir, err = runner.MkTempDir(runner.parentTemp, "tmp")
                        if err != nil {
                                return fmt.Errorf("While creating mount temp dir: %v", err)
                        }
@@ -462,7 +498,6 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                        if staterr != nil {
                                return fmt.Errorf("While Chmod temp dir: %v", err)
                        }
-                       runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
                        runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s", tmpdir, bind))
                        if bind == runner.Container.OutputPath {
                                runner.HostOutputDir = tmpdir
@@ -478,11 +513,10 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                        // can ensure the file is world-readable
                        // inside the container, without having to
                        // make it world-readable on the docker host.
-                       tmpdir, err := runner.MkTempDir("", "")
+                       tmpdir, err := runner.MkTempDir(runner.parentTemp, "json")
                        if err != nil {
                                return fmt.Errorf("creating temp dir: %v", err)
                        }
-                       runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
                        tmpfn := filepath.Join(tmpdir, "mountdata.json")
                        err = ioutil.WriteFile(tmpfn, jsondata, 0644)
                        if err != nil {
@@ -491,11 +525,10 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                        runner.Binds = append(runner.Binds, fmt.Sprintf("%s:%s:ro", tmpfn, bind))
 
                case mnt.Kind == "git_tree":
-                       tmpdir, err := runner.MkTempDir("", "")
+                       tmpdir, err := runner.MkTempDir(runner.parentTemp, "git_tree")
                        if err != nil {
                                return fmt.Errorf("creating temp dir: %v", err)
                        }
-                       runner.CleanupTempDir = append(runner.CleanupTempDir, tmpdir)
                        err = gitMount(mnt).extractTree(runner.ArvClient, tmpdir, token)
                        if err != nil {
                                return err
@@ -537,58 +570,118 @@ func (runner *ContainerRunner) SetupMounts() (err error) {
                }
        }
 
+       for _, cp := range copyFiles {
+               st, err := os.Stat(cp.src)
+               if err != nil {
+                       return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
+               }
+               if st.IsDir() {
+                       err = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {
+                               if walkerr != nil {
+                                       return walkerr
+                               }
+                               target := path.Join(cp.bind, walkpath[len(cp.src):])
+                               if walkinfo.Mode().IsRegular() {
+                                       copyerr := copyfile(walkpath, target)
+                                       if copyerr != nil {
+                                               return copyerr
+                                       }
+                                       return os.Chmod(target, walkinfo.Mode()|0777)
+                               } else if walkinfo.Mode().IsDir() {
+                                       mkerr := os.MkdirAll(target, 0777)
+                                       if mkerr != nil {
+                                               return mkerr
+                                       }
+                                       return os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)
+                               } else {
+                                       return fmt.Errorf("Source %q is not a regular file or directory", cp.src)
+                               }
+                       })
+               } else if st.Mode().IsRegular() {
+                       err = copyfile(cp.src, cp.bind)
+                       if err == nil {
+                               err = os.Chmod(cp.bind, st.Mode()|0777)
+                       }
+               }
+               if err != nil {
+                       return fmt.Errorf("While staging writable file from %q to %q: %v", cp.src, cp.bind, err)
+               }
+       }
+
        return nil
 }
 
 func (runner *ContainerRunner) ProcessDockerAttach(containerReader io.Reader) {
        // Handle docker log protocol
        // https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#attach-to-a-container
+       defer close(runner.loggingDone)
 
        header := make([]byte, 8)
-       for {
-               _, readerr := io.ReadAtLeast(containerReader, header, 8)
-
-               if readerr == nil {
-                       readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
-                       if header[0] == 1 {
-                               // stdout
-                               _, readerr = io.CopyN(runner.Stdout, containerReader, readsize)
-                       } else {
-                               // stderr
-                               _, readerr = io.CopyN(runner.Stderr, containerReader, readsize)
+       var err error
+       for err == nil {
+               _, err = io.ReadAtLeast(containerReader, header, 8)
+               if err != nil {
+                       if err == io.EOF {
+                               err = nil
                        }
+                       break
                }
+               readsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)
+               if header[0] == 1 {
+                       // stdout
+                       _, err = io.CopyN(runner.Stdout, containerReader, readsize)
+               } else {
+                       // stderr
+                       _, err = io.CopyN(runner.Stderr, containerReader, readsize)
+               }
+       }
 
-               if readerr != nil {
-                       if readerr != io.EOF {
-                               runner.CrunchLog.Printf("While reading docker logs: %v", readerr)
-                       }
-
-                       closeerr := runner.Stdout.Close()
-                       if closeerr != nil {
-                               runner.CrunchLog.Printf("While closing stdout logs: %v", closeerr)
-                       }
+       if err != nil {
+               runner.CrunchLog.Printf("error reading docker logs: %v", err)
+       }
 
-                       closeerr = runner.Stderr.Close()
-                       if closeerr != nil {
-                               runner.CrunchLog.Printf("While closing stderr logs: %v", closeerr)
-                       }
+       err = runner.Stdout.Close()
+       if err != nil {
+               runner.CrunchLog.Printf("error closing stdout logs: %v", err)
+       }
 
-                       if runner.statReporter != nil {
-                               runner.statReporter.Stop()
-                               closeerr = runner.statLogger.Close()
-                               if closeerr != nil {
-                                       runner.CrunchLog.Printf("While closing crunchstat logs: %v", closeerr)
-                               }
-                       }
+       err = runner.Stderr.Close()
+       if err != nil {
+               runner.CrunchLog.Printf("error closing stderr logs: %v", err)
+       }
 
-                       close(runner.loggingDone)
-                       return
+       if runner.statReporter != nil {
+               runner.statReporter.Stop()
+               err = runner.statLogger.Close()
+               if err != nil {
+                       runner.CrunchLog.Printf("error closing crunchstat logs: %v", err)
                }
        }
 }
 
-func (runner *ContainerRunner) StartCrunchstat() {
+func (runner *ContainerRunner) stopHoststat() error {
+       if runner.hoststatReporter == nil {
+               return nil
+       }
+       runner.hoststatReporter.Stop()
+       err := runner.hoststatLogger.Close()
+       if err != nil {
+               return fmt.Errorf("error closing hoststat logs: %v", err)
+       }
+       return nil
+}
+
+func (runner *ContainerRunner) startHoststat() {
+       runner.hoststatLogger = NewThrottledLogger(runner.NewLogWriter("hoststat"))
+       runner.hoststatReporter = &crunchstat.Reporter{
+               Logger:     log.New(runner.hoststatLogger, "", 0),
+               CgroupRoot: runner.cgroupRoot,
+               PollPeriod: runner.statInterval,
+       }
+       runner.hoststatReporter.Start()
+}
+
+func (runner *ContainerRunner) startCrunchstat() {
        runner.statLogger = NewThrottledLogger(runner.NewLogWriter("crunchstat"))
        runner.statReporter = &crunchstat.Reporter{
                CID:          runner.ContainerID,
@@ -794,7 +887,7 @@ func (runner *ContainerRunner) AttachStreams() (err error) {
                        _, err := io.Copy(response.Conn, stdinRdr)
                        if err != nil {
                                runner.CrunchLog.Print("While writing stdin collection to docker container %q", err)
-                               runner.stop()
+                               runner.stop(nil)
                        }
                        stdinRdr.Close()
                        response.CloseWrite()
@@ -804,7 +897,7 @@ func (runner *ContainerRunner) AttachStreams() (err error) {
                        _, err := io.Copy(response.Conn, bytes.NewReader(stdinJson))
                        if err != nil {
                                runner.CrunchLog.Print("While writing stdin json to docker container %q", err)
-                               runner.stop()
+                               runner.stop(nil)
                        }
                        response.CloseWrite()
                }()
@@ -855,6 +948,7 @@ func (runner *ContainerRunner) CreateContainer() error {
 
        runner.ContainerConfig.Volumes = runner.Volumes
 
+       maxRAM := int64(runner.Container.RuntimeConstraints.RAM)
        runner.HostConfig = dockercontainer.HostConfig{
                Binds: runner.Binds,
                LogConfig: dockercontainer.LogConfig{
@@ -862,6 +956,10 @@ func (runner *ContainerRunner) CreateContainer() error {
                },
                Resources: dockercontainer.Resources{
                        CgroupParent: runner.setCgroupParent,
+                       NanoCPUs:     int64(runner.Container.RuntimeConstraints.VCPUs) * 1000000000,
+                       Memory:       maxRAM, // RAM
+                       MemorySwap:   maxRAM, // RAM+swap
+                       KernelMemory: maxRAM, // kernel portion
                },
        }
 
@@ -944,7 +1042,7 @@ func (runner *ContainerRunner) WaitFinish() error {
 
                case <-arvMountExit:
                        runner.CrunchLog.Printf("arv-mount exited while container is still running.  Stopping container.")
-                       runner.stop()
+                       runner.stop(nil)
                        // arvMountExit will always be ready now that
                        // it's closed, but that doesn't interest us.
                        arvMountExit = nil
@@ -1078,7 +1176,7 @@ func (runner *ContainerRunner) UploadOutputFile(
        // go through mounts and try reverse map to collection reference
        for _, bind := range binds {
                mnt := runner.Container.Mounts[bind]
-               if tgt == bind || strings.HasPrefix(tgt, bind+"/") {
+               if (tgt == bind || strings.HasPrefix(tgt, bind+"/")) && !mnt.Writable {
                        // get path relative to bind
                        targetSuffix := tgt[len(bind):]
 
@@ -1217,7 +1315,7 @@ func (runner *ContainerRunner) CaptureOutput() error {
                        continue
                }
 
-               if mnt.ExcludeFromOutput == true {
+               if mnt.ExcludeFromOutput == true || mnt.Writable {
                        continue
                }
 
@@ -1339,29 +1437,33 @@ func (runner *ContainerRunner) CleanupDirs() {
                }
        }
 
-       for _, tmpdir := range runner.CleanupTempDir {
-               if rmerr := os.RemoveAll(tmpdir); rmerr != nil {
-                       runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", tmpdir, rmerr)
-               }
+       if rmerr := os.RemoveAll(runner.parentTemp); rmerr != nil {
+               runner.CrunchLog.Printf("While cleaning up temporary directory %s: %v", runner.parentTemp, rmerr)
        }
 }
 
 // CommitLogs posts the collection containing the final container logs.
 func (runner *ContainerRunner) CommitLogs() error {
-       runner.CrunchLog.Print(runner.finalState)
+       func() {
+               // Hold cStateLock to prevent races on CrunchLog (e.g., stop()).
+               runner.cStateLock.Lock()
+               defer runner.cStateLock.Unlock()
 
-       if runner.arvMountLog != nil {
-               runner.arvMountLog.Close()
-       }
-       runner.CrunchLog.Close()
+               runner.CrunchLog.Print(runner.finalState)
+
+               if runner.arvMountLog != nil {
+                       runner.arvMountLog.Close()
+               }
+               runner.CrunchLog.Close()
 
-       // Closing CrunchLog above allows them to be committed to Keep at this
-       // point, but re-open crunch log with ArvClient in case there are any
-       // other further errors (such as failing to write the log to Keep!)
-       // while shutting down
-       runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{ArvClient: runner.ArvClient,
-               UUID: runner.Container.UUID, loggingStream: "crunch-run", writeCloser: nil})
-       runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
+               // Closing CrunchLog above allows them to be committed to Keep at this
+               // point, but re-open crunch log with ArvClient in case there are any
+               // other further errors (such as failing to write the log to Keep!)
+               // while shutting down
+               runner.CrunchLog = NewThrottledLogger(&ArvLogWriter{ArvClient: runner.ArvClient,
+                       UUID: runner.Container.UUID, loggingStream: "crunch-run", writeCloser: nil})
+               runner.CrunchLog.Immediate = log.New(os.Stderr, runner.Container.UUID+" ", 0)
+       }()
 
        if runner.LogsPDH != nil {
                // If we have already assigned something to LogsPDH,
@@ -1470,7 +1572,6 @@ func (runner *ContainerRunner) Run() (err error) {
        runner.finalState = "Queued"
 
        defer func() {
-               runner.stopSignals()
                runner.CleanupDirs()
 
                runner.CrunchLog.Printf("crunch-run finished")
@@ -1512,6 +1613,7 @@ func (runner *ContainerRunner) Run() (err error) {
                }
 
                checkErr(runner.CaptureOutput())
+               checkErr(runner.stopHoststat())
                checkErr(runner.CommitLogs())
                checkErr(runner.UpdateContainerFinal())
        }()
@@ -1520,9 +1622,8 @@ func (runner *ContainerRunner) Run() (err error) {
        if err != nil {
                return
        }
-
-       // setup signal handling
        runner.setupSignals()
+       runner.startHoststat()
 
        // check for and/or load image
        err = runner.LoadImage()
@@ -1571,7 +1672,7 @@ func (runner *ContainerRunner) Run() (err error) {
        }
        runner.finalState = "Cancelled"
 
-       runner.StartCrunchstat()
+       runner.startCrunchstat()
 
        err = runner.StartContainer()
        if err != nil {
@@ -1681,6 +1782,12 @@ func main() {
                os.Exit(1)
        }
 
+       parentTemp, tmperr := cr.MkTempDir("", "crunch-run."+containerId+".")
+       if tmperr != nil {
+               log.Fatalf("%s: %v", containerId, tmperr)
+       }
+
+       cr.parentTemp = parentTemp
        cr.statInterval = *statInterval
        cr.cgroupRoot = *cgroupRoot
        cr.expectCgroupParent = *cgroupParent
index 22989bb2ece510e6f239151b267968fde25f1203..94b713355dd10a13e2563f701589f39874d8ac12 100644 (file)
@@ -16,7 +16,6 @@ import (
        "net"
        "os"
        "os/exec"
-       "path/filepath"
        "runtime/pprof"
        "sort"
        "strings"
@@ -1029,6 +1028,8 @@ func (s *TestSuite) TestSetupMounts(c *C) {
        c.Assert(err, IsNil)
        stubCertPath := stubCert(certTemp)
 
+       cr.parentTemp = realTemp
+
        defer os.RemoveAll(realTemp)
        defer os.RemoveAll(certTemp)
 
@@ -1045,11 +1046,12 @@ func (s *TestSuite) TestSetupMounts(c *C) {
        }
 
        checkEmpty := func() {
-               filepath.Walk(realTemp, func(path string, _ os.FileInfo, err error) error {
-                       c.Check(path, Equals, realTemp)
-                       c.Check(err, IsNil)
-                       return nil
-               })
+               // Should be deleted.
+               _, err := os.Stat(realTemp)
+               c.Assert(os.IsNotExist(err), Equals, true)
+
+               // Now recreate it for the next test.
+               c.Assert(os.Mkdir(realTemp, 0777), IsNil)
        }
 
        {
@@ -1064,7 +1066,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other",
                        "--read-write", "--crunchstat-interval=5",
                        "--mount-by-pdh", "by_id", realTemp + "/keep1"})
-               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/2:/tmp"})
+               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/tmp2:/tmp"})
                os.RemoveAll(cr.ArvMountPoint)
                cr.CleanupDirs()
                checkEmpty()
@@ -1083,7 +1085,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other",
                        "--read-write", "--crunchstat-interval=5",
                        "--mount-by-pdh", "by_id", realTemp + "/keep1"})
-               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/2:/out", realTemp + "/3:/tmp"})
+               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/tmp2:/out", realTemp + "/tmp3:/tmp"})
                os.RemoveAll(cr.ArvMountPoint)
                cr.CleanupDirs()
                checkEmpty()
@@ -1104,7 +1106,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other",
                        "--read-write", "--crunchstat-interval=5",
                        "--mount-by-pdh", "by_id", realTemp + "/keep1"})
-               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/2:/tmp", stubCertPath + ":/etc/arvados/ca-certificates.crt:ro"})
+               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/tmp2:/tmp", stubCertPath + ":/etc/arvados/ca-certificates.crt:ro"})
                os.RemoveAll(cr.ArvMountPoint)
                cr.CleanupDirs()
                checkEmpty()
@@ -1200,8 +1202,8 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                err := cr.SetupMounts()
                c.Check(err, IsNil)
                sort.StringSlice(cr.Binds).Sort()
-               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/2/mountdata.json:/mnt/test.json:ro"})
-               content, err := ioutil.ReadFile(realTemp + "/2/mountdata.json")
+               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/json2/mountdata.json:/mnt/test.json:ro"})
+               content, err := ioutil.ReadFile(realTemp + "/json2/mountdata.json")
                c.Check(err, IsNil)
                c.Check(content, DeepEquals, []byte(test.out))
                os.RemoveAll(cr.ArvMountPoint)
@@ -1227,26 +1229,42 @@ func (s *TestSuite) TestSetupMounts(c *C) {
                c.Check(am.Cmd, DeepEquals, []string{"--foreground", "--allow-other",
                        "--read-write", "--crunchstat-interval=5",
                        "--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", realTemp + "/keep1"})
-               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/2:/tmp", realTemp + "/keep1/tmp0:/tmp/foo:ro"})
+               c.Check(cr.Binds, DeepEquals, []string{realTemp + "/tmp2:/tmp", realTemp + "/keep1/tmp0:/tmp/foo:ro"})
                os.RemoveAll(cr.ArvMountPoint)
                cr.CleanupDirs()
                checkEmpty()
        }
 
-       // Writable mount points are not allowed underneath output_dir mount point
+       // Writable mount points copied to output_dir mount point
        {
                i = 0
                cr.ArvMountPoint = ""
                cr.Container.Mounts = make(map[string]arvados.Mount)
                cr.Container.Mounts = map[string]arvados.Mount{
-                       "/tmp":     {Kind: "tmp"},
-                       "/tmp/foo": {Kind: "collection", Writable: true},
+                       "/tmp": {Kind: "tmp"},
+                       "/tmp/foo": {Kind: "collection",
+                               PortableDataHash: "59389a8f9ee9d399be35462a0f92541c+53",
+                               Writable:         true},
+                       "/tmp/bar": {Kind: "collection",
+                               PortableDataHash: "59389a8f9ee9d399be35462a0f92541d+53",
+                               Path:             "baz",
+                               Writable:         true},
                }
                cr.OutputPath = "/tmp"
 
+               os.MkdirAll(realTemp+"/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53", os.ModePerm)
+               os.MkdirAll(realTemp+"/keep1/by_id/59389a8f9ee9d399be35462a0f92541d+53/baz", os.ModePerm)
+
+               rf, _ := os.Create(realTemp + "/keep1/by_id/59389a8f9ee9d399be35462a0f92541d+53/baz/quux")
+               rf.Write([]byte("bar"))
+               rf.Close()
+
                err := cr.SetupMounts()
-               c.Check(err, NotNil)
-               c.Check(err, ErrorMatches, `Writable mount points are not permitted underneath the output_path.*`)
+               c.Check(err, IsNil)
+               _, err = os.Stat(cr.HostOutputDir + "/foo")
+               c.Check(err, IsNil)
+               _, err = os.Stat(cr.HostOutputDir + "/bar/quux")
+               c.Check(err, IsNil)
                os.RemoveAll(cr.ArvMountPoint)
                cr.CleanupDirs()
                checkEmpty()
@@ -1523,7 +1541,7 @@ func (s *TestSuite) TestStdoutWithMultipleMountPointsUnderOutputDir(c *C) {
                t.logWriter.Close()
        })
 
-       c.Check(runner.Binds, DeepEquals, []string{realtemp + "/2:/tmp",
+       c.Check(runner.Binds, DeepEquals, []string{realtemp + "/tmp2:/tmp",
                realtemp + "/keep1/by_id/a0def87f80dd594d4675809e83bd4f15+367/file2_in_main.txt:/tmp/foo/bar:ro",
                realtemp + "/keep1/by_id/a0def87f80dd594d4675809e83bd4f15+367/subdir1/subdir2/file2_in_subdir2.txt:/tmp/foo/baz/sub2file2:ro",
                realtemp + "/keep1/by_id/a0def87f80dd594d4675809e83bd4f15+367/subdir1:/tmp/foo/sub1:ro",
@@ -1612,11 +1630,11 @@ func (s *TestSuite) TestOutputSymlinkToInput(c *C) {
        }
 
        api, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
-               os.Symlink("/keep/foo/sub1file2", t.realTemp+"/2/baz")
-               os.Symlink("/keep/foo2/subdir1/file2_in_subdir1.txt", t.realTemp+"/2/baz2")
-               os.Symlink("/keep/foo2/subdir1", t.realTemp+"/2/baz3")
-               os.Mkdir(t.realTemp+"/2/baz4", 0700)
-               os.Symlink("/keep/foo2/subdir1/file2_in_subdir1.txt", t.realTemp+"/2/baz4/baz5")
+               os.Symlink("/keep/foo/sub1file2", t.realTemp+"/tmp2/baz")
+               os.Symlink("/keep/foo2/subdir1/file2_in_subdir1.txt", t.realTemp+"/tmp2/baz2")
+               os.Symlink("/keep/foo2/subdir1", t.realTemp+"/tmp2/baz3")
+               os.Mkdir(t.realTemp+"/tmp2/baz4", 0700)
+               os.Symlink("/keep/foo2/subdir1/file2_in_subdir1.txt", t.realTemp+"/tmp2/baz4/baz5")
                t.logWriter.Close()
        })
 
@@ -1654,7 +1672,7 @@ func (s *TestSuite) TestOutputError(c *C) {
        extraMounts := []string{}
 
        api, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
-               os.Symlink("/etc/hosts", t.realTemp+"/2/baz")
+               os.Symlink("/etc/hosts", t.realTemp+"/tmp2/baz")
                t.logWriter.Close()
        })
 
@@ -1678,21 +1696,21 @@ func (s *TestSuite) TestOutputSymlinkToOutput(c *C) {
        extraMounts := []string{}
 
        api, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, 0, func(t *TestDockerClient) {
-               rf, _ := os.Create(t.realTemp + "/2/realfile")
+               rf, _ := os.Create(t.realTemp + "/tmp2/realfile")
                rf.Write([]byte("foo"))
                rf.Close()
 
-               os.Mkdir(t.realTemp+"/2/realdir", 0700)
-               rf, _ = os.Create(t.realTemp + "/2/realdir/subfile")
+               os.Mkdir(t.realTemp+"/tmp2/realdir", 0700)
+               rf, _ = os.Create(t.realTemp + "/tmp2/realdir/subfile")
                rf.Write([]byte("bar"))
                rf.Close()
 
-               os.Symlink("/tmp/realfile", t.realTemp+"/2/file1")
-               os.Symlink("realfile", t.realTemp+"/2/file2")
-               os.Symlink("/tmp/file1", t.realTemp+"/2/file3")
-               os.Symlink("file2", t.realTemp+"/2/file4")
-               os.Symlink("realdir", t.realTemp+"/2/dir1")
-               os.Symlink("/tmp/realdir", t.realTemp+"/2/dir2")
+               os.Symlink("/tmp/realfile", t.realTemp+"/tmp2/file1")
+               os.Symlink("realfile", t.realTemp+"/tmp2/file2")
+               os.Symlink("/tmp/file1", t.realTemp+"/tmp2/file3")
+               os.Symlink("file2", t.realTemp+"/tmp2/file4")
+               os.Symlink("realdir", t.realTemp+"/tmp2/dir1")
+               os.Symlink("/tmp/realdir", t.realTemp+"/tmp2/dir2")
                t.logWriter.Close()
        })
 
index c64ac7a8fe1e2645b11e0917c3c5b9bcaab54369..62c856da340183d0483503e0a2393816acaf0321 100644 (file)
@@ -402,7 +402,17 @@ func (v *AzureBlobVolume) Put(ctx context.Context, loc string, block []byte) err
        }()
        errChan := make(chan error)
        go func() {
-               errChan <- v.bsClient.CreateBlockBlobFromReader(v.ContainerName, loc, uint64(len(block)), bufr, nil)
+               var body io.Reader = bufr
+               if len(block) == 0 {
+                       // We must send a "Content-Length: 0" header,
+                       // but the http client interprets
+                       // ContentLength==0 as "unknown" unless it can
+                       // confirm by introspection that Body will
+                       // read 0 bytes.
+                       body = http.NoBody
+                       bufr.Close()
+               }
+               errChan <- v.bsClient.CreateBlockBlobFromReader(v.ContainerName, loc, uint64(len(block)), body, nil)
        }()
        select {
        case <-ctx.Done():
@@ -722,7 +732,9 @@ func (c *azureBlobClient) GetBlobRange(cname, bname, byterange string, hdrs map[
 
 func (c *azureBlobClient) CreateBlockBlobFromReader(cname, bname string, size uint64, rdr io.Reader, hdrs map[string]string) error {
        c.stats.Tick(&c.stats.Ops, &c.stats.CreateOps)
-       rdr = NewCountingReader(rdr, c.stats.TickOutBytes)
+       if size != 0 {
+               rdr = NewCountingReader(rdr, c.stats.TickOutBytes)
+       }
        err := c.client.CreateBlockBlobFromReader(cname, bname, size, rdr, hdrs)
        c.stats.TickErr(err)
        return err
index 4256ec0d0cb599e259ff7cabcc6f3407fd2e6dce..06216edcb82aeddc3617c121cecc4e1a6387be4a 100644 (file)
@@ -124,6 +124,11 @@ func (h *azStubHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
                return
        }
 
+       if (r.Method == "PUT" || r.Method == "POST") && r.Header.Get("Content-Length") == "" {
+               rw.WriteHeader(http.StatusLengthRequired)
+               return
+       }
+
        body, err := ioutil.ReadAll(r.Body)
        if err != nil {
                return
index 64545eb0f977a39338373291d2e271762cd61b7a..7b023570b3d221593b8973eb4e640bda87a29dcb 100644 (file)
@@ -44,14 +44,14 @@ setup(name='arvados-node-manager',
           'setuptools'
       ],
       dependency_links=[
-          "https://github.com/curoverse/libcloud/archive/apache-libcloud-2.2.2.dev3.zip"
+          "https://github.com/curoverse/libcloud/archive/apache-libcloud-2.2.2.dev4.zip"
       ],
       test_suite='tests',
       tests_require=[
           'requests',
           'pbr<1.7.0',
           'mock>=1.0',
-          'apache-libcloud==2.2.2.dev3',
+          'apache-libcloud==2.2.2.dev4',
       ],
       zip_safe=False,
       cmdclass={'egg_info': tagger},
index 234d15aecd03d42537b5f93f02a4dcd8b3f0340b..3429a1b65d0b13c500e3f109d017eaa6afcc60de 100755 (executable)
@@ -57,6 +57,7 @@ GEMS="$ARVBOX_DATA/gems"
 PIPCACHE="$ARVBOX_DATA/pip"
 NPMCACHE="$ARVBOX_DATA/npm"
 GOSTUFF="$ARVBOX_DATA/gopath"
+RLIBS="$ARVBOX_DATA/Rlibs"
 
 getip() {
     docker inspect $ARVBOX_CONTAINER | grep \"IPAddress\" | head -n1 | tr -d ' ":,\n' | cut -c10-
@@ -188,7 +189,7 @@ run() {
         updateconf
         wait_for_arvbox
     else
-        mkdir -p "$PG_DATA" "$VAR_DATA" "$PASSENGER" "$GEMS" "$PIPCACHE" "$NPMCACHE" "$GOSTUFF"
+        mkdir -p "$PG_DATA" "$VAR_DATA" "$PASSENGER" "$GEMS" "$PIPCACHE" "$NPMCACHE" "$GOSTUFF" "$RLIBS"
 
 
         if ! test -d "$ARVADOS_ROOT" ; then
@@ -220,6 +221,7 @@ run() {
                        "--volume=$PIPCACHE:/var/lib/pip:rw" \
                        "--volume=$NPMCACHE:/var/lib/npm:rw" \
                        "--volume=$GOSTUFF:/var/lib/gopath:rw" \
+                       "--volume=$RLIBS:/var/lib/Rlibs:rw" \
                       "--env=SVDIR=/etc/test-service" \
                        arvados/arvbox-dev$TAG
 
@@ -262,6 +264,7 @@ run() {
                    "--volume=$PIPCACHE:/var/lib/pip:rw" \
                    "--volume=$NPMCACHE:/var/lib/npm:rw" \
                    "--volume=$GOSTUFF:/var/lib/gopath:rw" \
+                   "--volume=$RLIBS:/var/lib/Rlibs:rw" \
                    $PUBLIC \
                    arvados/arvbox-dev$TAG
             updateconf
index 83d507b62b4931163b73dded52240705ebd2ae70..0cc36ebd2b92cb5ac2c3f35ec02d757adf11d4a9 100644 (file)
@@ -19,7 +19,7 @@ RUN apt-get update && \
     apt-transport-https ca-certificates slurm-wlm \
     linkchecker python3-virtualenv python-virtualenv xvfb iceweasel \
     libgnutls28-dev python3-dev vim cadaver cython gnupg dirmngr \
-    libsecret-1-dev && \
+    libsecret-1-dev r-base r-cran-testthat libxml2-dev pandoc && \
     apt-get clean
 
 ENV RUBYVERSION_MINOR 2.3
@@ -90,6 +90,7 @@ ADD crunch-setup.sh gitolite.rc \
     keep-setup.sh common.sh createusers.sh \
     logger runsu.sh waitforpostgres.sh \
     application_yml_override.py api-setup.sh \
+    go-setup.sh \
     /usr/local/lib/arvbox/
 
 ADD runit /etc/runit
index 80344c16f2ef9bfcf0f97bf14f07a8c1cb97ce73..7cb51edfdc61ab6c9b67b29353eeb2f79e8774eb 100644 (file)
@@ -24,13 +24,16 @@ RUN echo "production" > /var/lib/arvados/workbench_rails_env
 
 RUN chown -R 1000:1000 /usr/src && /usr/local/lib/arvbox/createusers.sh
 
+RUN sudo -u arvbox /var/lib/arvbox/service/composer/run-service --only-deps
+RUN sudo -u arvbox /var/lib/arvbox/service/keep-web/run-service --only-deps
 RUN sudo -u arvbox /var/lib/arvbox/service/sso/run-service --only-deps
 RUN sudo -u arvbox /var/lib/arvbox/service/api/run-service --only-deps
 RUN sudo -u arvbox /var/lib/arvbox/service/workbench/run-service --only-deps
 RUN sudo -u arvbox /var/lib/arvbox/service/doc/run-service --only-deps
 RUN sudo -u arvbox /var/lib/arvbox/service/vm/run-service --only-deps
-RUN sudo -u arvbox /var/lib/arvbox/service/keep-web/run-service --only-deps
 RUN sudo -u arvbox /var/lib/arvbox/service/keepproxy/run-service --only-deps
 RUN sudo -u arvbox /var/lib/arvbox/service/arv-git-httpd/run-service --only-deps
+RUN sudo -u arvbox /var/lib/arvbox/service/crunch-dispatch-local/run-service --only-deps
+RUN sudo -u arvbox /var/lib/arvbox/service/websockets/run-service --only-deps
 RUN sudo -u arvbox /usr/local/lib/arvbox/keep-setup.sh --only-deps
 RUN sudo -u arvbox /var/lib/arvbox/service/sdk/run-service
index 2cb8473657e7705bc0759bd054da0e0d57222754..319889baef28152a1a933bdc25cab7aa85491e1a 100644 (file)
@@ -8,6 +8,7 @@ export GEM_HOME=/var/lib/gems
 export GEM_PATH=/var/lib/gems
 export npm_config_cache=/var/lib/npm
 export npm_config_cache_min=Infinity
+export R_LIBS=/var/lib/Rlibs
 
 if test -s /var/run/localip_override ; then
     localip=$(cat /var/run/localip_override)
index 30ecafb889c7e38acb45380d68c5f8634bcb9c3c..b3ec5cd10441f695522c50500a2e64fd3f6d8f5d 100755 (executable)
@@ -7,16 +7,11 @@ exec 2>&1
 set -eux -o pipefail
 
 . /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
 
-mkdir -p /var/lib/gopath
-cd /var/lib/gopath
-
-export GOPATH=$PWD
-mkdir -p "$GOPATH/src/git.curoverse.com"
-ln -sfn "/usr/src/arvados" "$GOPATH/src/git.curoverse.com/arvados.git"
 flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/crunchstat"
 flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/sdk/go/crunchrunner"
-install bin/crunchstat bin/crunchrunner /usr/local/bin
+install $GOPATH/bin/crunchstat $GOPATH/bin/crunchrunner /usr/local/bin
 
 if test -s /var/lib/arvados/api_rails_env ; then
   RAILS_ENV=$(cat /var/lib/arvados/api_rails_env)
diff --git a/tools/arvbox/lib/arvbox/docker/go-setup.sh b/tools/arvbox/lib/arvbox/docker/go-setup.sh
new file mode 100644 (file)
index 0000000..f068ce6
--- /dev/null
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+mkdir -p /var/lib/gopath
+cd /var/lib/gopath
+
+export GOPATH=$PWD
+mkdir -p "$GOPATH/src/git.curoverse.com"
+ln -sfn "/usr/src/arvados" "$GOPATH/src/git.curoverse.com/arvados.git"
+
+flock /var/lib/gopath/gopath.lock go get -t github.com/kardianos/govendor
+cd "$GOPATH/src/git.curoverse.com/arvados.git"
+flock /var/lib/gopath/gopath.lock go get -v -d ...
+flock /var/lib/gopath/gopath.lock "$GOPATH/bin/govendor" sync
index 5da2cfac440c6a4aafa20359ea718478d12816b3..8ef66a60687ce817e46308311dbcd4d80c6691ad 100755 (executable)
@@ -8,15 +8,10 @@ sleep 2
 set -eux -o pipefail
 
 . /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
 
-mkdir -p /var/lib/gopath
-cd /var/lib/gopath
-
-export GOPATH=$PWD
-mkdir -p "$GOPATH/src/git.curoverse.com"
-ln -sfn "/usr/src/arvados" "$GOPATH/src/git.curoverse.com/arvados.git"
 flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/keepstore"
-install bin/keepstore /usr/local/bin
+install $GOPATH/bin/keepstore /usr/local/bin
 
 if test "$1" = "--only-deps" ; then
     exit
index 806f9cd37a375e17ffeacc0943d03beb8ded614e..1383f7140f4ed961637d8c8ef160bfb3b575d317 100755 (executable)
@@ -7,15 +7,10 @@ exec 2>&1
 set -ex -o pipefail
 
 . /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
 
-mkdir -p /var/lib/gopath
-cd /var/lib/gopath
-
-export GOPATH=$PWD
-mkdir -p "$GOPATH/src/git.curoverse.com"
-ln -sfn "/usr/src/arvados" "$GOPATH/src/git.curoverse.com/arvados.git"
 flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/arv-git-httpd"
-install bin/arv-git-httpd /usr/local/bin
+install $GOPATH/bin/arv-git-httpd /usr/local/bin
 
 if test "$1" = "--only-deps" ; then
     exit
index ac4441de099ab37e1a0a36109807cf9e127db507..abd350f073c0f449b37b25362185b9b24a963136 100755 (executable)
@@ -10,13 +10,13 @@ set -ex -o pipefail
 
 cd /usr/src/composer
 
-npm -d install yarn
-
-PATH=$PATH:/usr/src/composer/node_modules/.bin
+npm -d install --prefix /usr/local --global yarn
 
 yarn install
 
-if test "$1" != "--only-deps" ; then
-    echo "apiEndPoint: https://${localip}:${services[api]}" > /usr/src/composer/src/composer.yml
-    exec ng serve --host 0.0.0.0 --port 4200 --env=webdev
+if test "$1" = "--only-deps" ; then
+    exit
 fi
+
+echo "apiEndPoint: https://${localip}:${services[api]}" > /usr/src/composer/src/composer.yml
+exec node_modules/.bin/ng serve --host 0.0.0.0 --port 4200 --env=webdev
index e7a302682155b751fe46a1151b5673672327f3d7..decbccddeeecce662a0e353da0dd01c26ce91021 100755 (executable)
@@ -4,19 +4,18 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 exec 2>&1
-set -eux -o pipefail
+set -ex -o pipefail
 
 . /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
 
-mkdir -p /var/lib/gopath
-cd /var/lib/gopath
-
-export GOPATH=$PWD
-mkdir -p "$GOPATH/src/git.curoverse.com"
-ln -sfn "/usr/src/arvados" "$GOPATH/src/git.curoverse.com/arvados.git"
 flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/crunch-run"
 flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/crunch-dispatch-local"
-install bin/crunch-run bin/crunch-dispatch-local /usr/local/bin
+install $GOPATH/bin/crunch-run $GOPATH/bin/crunch-dispatch-local /usr/local/bin
+
+if test "$1" = "--only-deps" ; then
+    exit
+fi
 
 cat > /usr/local/bin/crunch-run.sh <<EOF
 #!/bin/sh
index b8bf740605d3848a79dbd885f75b1bdcb5f50e54..97cc79d32fd2d110f6bd879441316cdcfec6adc1 100755 (executable)
@@ -11,6 +11,21 @@ set -ex -o pipefail
 cd /usr/src/arvados/doc
 run_bundler --without=development
 
+cd /usr/src/arvados/sdk/R
+R --quiet --vanilla <<EOF
+options(repos=structure(c(CRAN="http://cran.wustl.edu/")))
+if (!requireNamespace("devtools")) {
+  install.packages("devtools")
+}
+if (!requireNamespace("roxygen2")) {
+  install.packages("roxygen2")
+}
+if (!requireNamespace("pkgdown")) {
+  devtools::install_github("hadley/pkgdown")
+}
+devtools::install_dev_deps()
+EOF
+
 if test "$1" = "--only-deps" ; then
     exit
 fi
@@ -42,6 +57,7 @@ http {
 }
 EOF
 
+cd /usr/src/arvados/doc
 bundle exec rake generate baseurl=http://$localip:${services[doc]} arvados_api_host=$localip:${services[api]} arvados_workbench_host=http://$localip
 
 exec nginx -c /var/lib/arvados/doc-nginx.conf
index ee985a0e51b512d3263dcf7d72b7b4cdc530bd9f..70f2470b9fe7decd8a03efdfb09d5da8ab52f372 100755 (executable)
@@ -7,15 +7,10 @@ exec 2>&1
 set -ex -o pipefail
 
 . /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
 
-mkdir -p /var/lib/gopath
-cd /var/lib/gopath
-
-export GOPATH=$PWD
-mkdir -p "$GOPATH/src/git.curoverse.com"
-ln -sfn "/usr/src/arvados" "$GOPATH/src/git.curoverse.com/arvados.git"
 flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/keep-web"
-install bin/keep-web /usr/local/bin
+install $GOPATH/bin/keep-web /usr/local/bin
 
 if test "$1" = "--only-deps" ; then
     exit
index dae2dfdd786589a3023e0143a5f6c7f8b3711c0a..199247b7a0e2bfc6dcabdd929dc5177275f730bc 100755 (executable)
@@ -8,15 +8,10 @@ sleep 2
 set -ex -o pipefail
 
 . /usr/local/lib/arvbox/common.sh
+. /usr/local/lib/arvbox/go-setup.sh
 
-mkdir -p /var/lib/gopath
-cd /var/lib/gopath
-
-export GOPATH=$PWD
-mkdir -p "$GOPATH/src/git.curoverse.com"
-ln -sfn "/usr/src/arvados" "$GOPATH/src/git.curoverse.com/arvados.git"
 flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/keepproxy"
-install bin/keepproxy /usr/local/bin
+install $GOPATH/bin/keepproxy /usr/local/bin
 
 if test "$1" = "--only-deps" ; then
     exit
index 45407cb9ab48d34c7baa7811aef92376ed759d0b..3ef78ee45575676dc881059efd60cc57bd64cbd9 100755 (executable)
@@ -5,6 +5,8 @@
 
 flock /var/lib/arvados/createusers.lock /usr/local/lib/arvbox/createusers.sh
 
+make-ssl-cert generate-default-snakeoil --force-overwrite
+
 . /usr/local/lib/arvbox/common.sh
 
 chown -R $PGUSER:$PGGROUP /var/lib/postgresql
index cb56ac7f4de5dbb7f3ad6d22c6b8166933a32f81..2d01d907985c0c9ca6e0cf1e39969e1b4ce2d7fd 100755 (executable)
@@ -14,14 +14,10 @@ else
   RAILS_ENV=development
 fi
 
-mkdir -p /var/lib/gopath
-cd /var/lib/gopath
+. /usr/local/lib/arvbox/go-setup.sh
 
-export GOPATH=$PWD
-mkdir -p "$GOPATH/src/git.curoverse.com"
-ln -sfn "/usr/src/arvados" "$GOPATH/src/git.curoverse.com/arvados.git"
 flock /var/lib/gopath/gopath.lock go get -t "git.curoverse.com/arvados.git/services/ws"
-install bin/ws /usr/local/bin/arvados-ws
+install $GOPATH/bin/ws /usr/local/bin/arvados-ws
 
 if test "$1" = "--only-deps" ; then
     exit
index ebc40b13cb8c2ad2ec8152df975d6a0863ffc45b..10569b2e139b89a2904820ab62dab6cbc3a747b5 100644 (file)
@@ -307,7 +307,7 @@ func doMain(cfg *ConfigParams) error {
                }
                userIDToUUID[uID] = u.UUID
                if cfg.Verbose {
-                       log.Printf("Seen user %q (%s)", u.Username, u.Email)
+                       log.Printf("Seen user %q (%s)", u.Username, u.UUID)
                }
        }
 
@@ -317,6 +317,11 @@ func doMain(cfg *ConfigParams) error {
                return err
        }
        log.Printf("Found %d remote groups", len(remoteGroups))
+       if cfg.Verbose {
+               for groupUUID := range remoteGroups {
+                       log.Printf("- Group %q: %d users", remoteGroups[groupUUID].Group.Name, len(remoteGroups[groupUUID].PreviousMembers))
+               }
+       }
 
        membershipsRemoved := 0
 
@@ -504,9 +509,9 @@ func GetRemoteGroups(cfg *ConfigParams, allUsers map[string]arvados.User) (remot
                                Operator: "=",
                                Operand:  group.UUID,
                        }, {
-                               Attr:     "head_kind",
-                               Operator: "=",
-                               Operand:  "arvados#user",
+                               Attr:     "head_uuid",
+                               Operator: "like",
+                               Operand:  "%-tpzed-%",
                        }},
                }
                // User -> Group filter
@@ -528,9 +533,9 @@ func GetRemoteGroups(cfg *ConfigParams, allUsers map[string]arvados.User) (remot
                                Operator: "=",
                                Operand:  group.UUID,
                        }, {
-                               Attr:     "tail_kind",
-                               Operator: "=",
-                               Operand:  "arvados#user",
+                               Attr:     "tail_uuid",
+                               Operator: "like",
+                               Operand:  "%-tpzed-%",
                        }},
                }
                g2uLinks, err := GetAll(cfg.Client, "links", g2uFilter, &LinkList{})
@@ -579,7 +584,7 @@ func GetRemoteGroups(cfg *ConfigParams, allUsers map[string]arvados.User) (remot
 // RemoveMemberFromGroup remove all links related to the membership
 func RemoveMemberFromGroup(cfg *ConfigParams, user arvados.User, group arvados.Group) error {
        if cfg.Verbose {
-               log.Printf("Getting group membership links for user %q (%s) on group %q (%s)", user.Email, user.UUID, group.Name, group.UUID)
+               log.Printf("Getting group membership links for user %q (%s) on group %q (%s)", user.Username, user.UUID, group.Name, group.UUID)
        }
        var links []interface{}
        // Search for all group<->user links (both ways)
index e776648a803736581dcb61724631ac844cdca68f..4a3e470c42f2b56f82199938d857bd295e564e9a 100644 (file)
@@ -83,7 +83,6 @@ func (s *TestSuite) SetUpTest(c *C) {
        c.Assert(len(s.users), Not(Equals), 0)
 }
 
-// Clean any membership link and remote group created by the test
 func (s *TestSuite) TearDownTest(c *C) {
        var dst interface{}
        // Reset database to fixture state after every test run.
@@ -93,7 +92,7 @@ func (s *TestSuite) TearDownTest(c *C) {
 
 var _ = Suite(&TestSuite{})
 
-// MakeTempCVSFile creates a temp file with data as comma separated values
+// MakeTempCSVFile creates a temp file with data as comma separated values
 func MakeTempCSVFile(data [][]string) (f *os.File, err error) {
        f, err = ioutil.TempFile("", "test_sync_remote_groups")
        if err != nil {
@@ -266,11 +265,15 @@ func (s *TestSuite) TestIgnoreSpaces(c *C) {
 
 // The absence of a user membership on the CSV file implies its removal
 func (s *TestSuite) TestMembershipRemoval(c *C) {
-       activeUserEmail := s.users[arvadostest.ActiveUserUUID].Email
-       activeUserUUID := s.users[arvadostest.ActiveUserUUID].UUID
+       localUserEmail := s.users[arvadostest.ActiveUserUUID].Email
+       localUserUUID := s.users[arvadostest.ActiveUserUUID].UUID
+       remoteUserEmail := s.users[arvadostest.FederatedActiveUserUUID].Email
+       remoteUserUUID := s.users[arvadostest.FederatedActiveUserUUID].UUID
        data := [][]string{
-               {"TestGroup1", activeUserEmail},
-               {"TestGroup2", activeUserEmail},
+               {"TestGroup1", localUserEmail},
+               {"TestGroup1", remoteUserEmail},
+               {"TestGroup2", localUserEmail},
+               {"TestGroup2", remoteUserEmail},
        }
        tmpfile, err := MakeTempCSVFile(data)
        c.Assert(err, IsNil)
@@ -283,11 +286,13 @@ func (s *TestSuite) TestMembershipRemoval(c *C) {
                groupUUID, err := RemoteGroupExists(s.cfg, groupName)
                c.Assert(err, IsNil)
                c.Assert(groupUUID, Not(Equals), "")
-               c.Assert(GroupMembershipExists(s.cfg.Client, activeUserUUID, groupUUID), Equals, true)
+               c.Assert(GroupMembershipExists(s.cfg.Client, localUserUUID, groupUUID), Equals, true)
+               c.Assert(GroupMembershipExists(s.cfg.Client, remoteUserUUID, groupUUID), Equals, true)
        }
-       // New CSV with one previous membership missing
+       // New CSV with some previous memberships missing
        data = [][]string{
-               {"TestGroup1", activeUserEmail},
+               {"TestGroup1", localUserEmail},
+               {"TestGroup2", remoteUserEmail},
        }
        tmpfile2, err := MakeTempCSVFile(data)
        c.Assert(err, IsNil)
@@ -295,16 +300,18 @@ func (s *TestSuite) TestMembershipRemoval(c *C) {
        s.cfg.Path = tmpfile2.Name()
        err = doMain(s.cfg)
        c.Assert(err, IsNil)
-       // Confirm TestGroup1 membership still exist
+       // Confirm TestGroup1 memberships
        groupUUID, err := RemoteGroupExists(s.cfg, "TestGroup1")
        c.Assert(err, IsNil)
        c.Assert(groupUUID, Not(Equals), "")
-       c.Assert(GroupMembershipExists(s.cfg.Client, activeUserUUID, groupUUID), Equals, true)
-       // Confirm TestGroup2 membership was removed
+       c.Assert(GroupMembershipExists(s.cfg.Client, localUserUUID, groupUUID), Equals, true)
+       c.Assert(GroupMembershipExists(s.cfg.Client, remoteUserUUID, groupUUID), Equals, false)
+       // Confirm TestGroup2 memberships
        groupUUID, err = RemoteGroupExists(s.cfg, "TestGroup2")
        c.Assert(err, IsNil)
        c.Assert(groupUUID, Not(Equals), "")
-       c.Assert(GroupMembershipExists(s.cfg.Client, activeUserUUID, groupUUID), Equals, false)
+       c.Assert(GroupMembershipExists(s.cfg.Client, localUserUUID, groupUUID), Equals, false)
+       c.Assert(GroupMembershipExists(s.cfg.Client, remoteUserUUID, groupUUID), Equals, true)
 }
 
 // If a group doesn't exist on the system, create it before adding users