Merge branch 'master' into 4232-slow-pipes-n-jobs
authorPhil Hodgson <bitbucket@philhodgson.net>
Sat, 21 Feb 2015 09:11:43 +0000 (10:11 +0100)
committerPhil Hodgson <bitbucket@philhodgson.net>
Sat, 21 Feb 2015 09:11:43 +0000 (10:11 +0100)
253 files changed:
.gitignore
COPYING
apps/workbench/.gitignore
apps/workbench/Gemfile
apps/workbench/Gemfile.lock
apps/workbench/app/assets/javascripts/ajax_error.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/event_log.js
apps/workbench/app/assets/javascripts/filterable.js
apps/workbench/app/assets/javascripts/job_log_graph.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/request_shell_access.js [new file with mode: 0644]
apps/workbench/app/controllers/application_controller.rb
apps/workbench/app/controllers/collections_controller.rb
apps/workbench/app/controllers/jobs_controller.rb
apps/workbench/app/controllers/pipeline_instances_controller.rb
apps/workbench/app/controllers/pipeline_templates_controller.rb
apps/workbench/app/controllers/projects_controller.rb
apps/workbench/app/controllers/repositories_controller.rb
apps/workbench/app/controllers/search_controller.rb
apps/workbench/app/controllers/users_controller.rb
apps/workbench/app/helpers/application_helper.rb
apps/workbench/app/mailers/request_shell_access_reporter.rb [new file with mode: 0644]
apps/workbench/app/models/arvados_api_client.rb
apps/workbench/app/models/arvados_base.rb
apps/workbench/app/models/authorized_key.rb
apps/workbench/app/models/collection.rb
apps/workbench/app/models/job.rb
apps/workbench/app/models/repository.rb
apps/workbench/app/views/application/404.html.erb
apps/workbench/app/views/application/_browser_unsupported.html [new file with mode: 0644]
apps/workbench/app/views/application/_choose.html.erb
apps/workbench/app/views/application/_create_new_object_button.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_sharing.html.erb
apps/workbench/app/views/application/_title_and_buttons.html.erb
apps/workbench/app/views/application/index.html.erb
apps/workbench/app/views/collections/_create_new_object_button.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_index_tbody.html.erb
apps/workbench/app/views/collections/_show_files.html.erb
apps/workbench/app/views/collections/show.html.erb
apps/workbench/app/views/jobs/_create_new_object_button.html.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/_rerun_job_with_options_popup.html.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/_show_job_buttons.html.erb
apps/workbench/app/views/jobs/_show_log.html.erb
apps/workbench/app/views/jobs/show.html.erb
apps/workbench/app/views/layouts/body.html.erb
apps/workbench/app/views/pipeline_instances/_running_component.html.erb
apps/workbench/app/views/pipeline_instances/_show_inputs.html.erb
apps/workbench/app/views/pipeline_instances/_show_tab_buttons.html.erb
apps/workbench/app/views/pipeline_instances/show.html.erb
apps/workbench/app/views/pipeline_templates/show.html.erb
apps/workbench/app/views/projects/_show_dashboard.html.erb
apps/workbench/app/views/projects/_show_tab_contents.html.erb
apps/workbench/app/views/projects/show.html.erb
apps/workbench/app/views/request_shell_access_reporter/send_request.text.erb [new file with mode: 0644]
apps/workbench/app/views/users/_add_ssh_key_popup.html.erb
apps/workbench/app/views/users/_create_new_object_button.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_manage_repositories.html.erb
apps/workbench/app/views/users/_manage_virtual_machines.html.erb
apps/workbench/app/views/users/_setup_popup.html.erb
apps/workbench/app/views/users/request_shell_access.js [new file with mode: 0644]
apps/workbench/app/views/users/welcome.html.erb
apps/workbench/config/application.default.yml
apps/workbench/config/routes.rb
apps/workbench/public/browser_unsupported.js [new file with mode: 0644]
apps/workbench/test/controllers/application_controller_test.rb
apps/workbench/test/controllers/collections_controller_test.rb
apps/workbench/test/controllers/projects_controller_test.rb
apps/workbench/test/controllers/repositories_controller_test.rb
apps/workbench/test/controllers/users_controller_test.rb
apps/workbench/test/diagnostics/pipeline_test.rb
apps/workbench/test/integration/anonymous_access_test.rb [new file with mode: 0644]
apps/workbench/test/integration/browser_unsupported_test.rb [new file with mode: 0644]
apps/workbench/test/integration/errors_test.rb
apps/workbench/test/integration/filterable_infinite_scroll_test.rb
apps/workbench/test/integration/jobs_test.rb
apps/workbench/test/integration/pipeline_instances_test.rb
apps/workbench/test/integration/projects_test.rb
apps/workbench/test/integration/repositories_test.rb
apps/workbench/test/integration/user_manage_account_test.rb
apps/workbench/test/integration/websockets_test.rb
apps/workbench/test/integration_helper.rb
apps/workbench/test/performance_test_helper.rb
apps/workbench/test/support/remove_file_api.js [new file with mode: 0644]
apps/workbench/test/test_helper.rb
apps/workbench/test/unit/repository_test.rb [new file with mode: 0644]
crunch_scripts/crunchutil/subst.py
doc/_config.yml
doc/_includes/_arv_run_redirection.liquid [new file with mode: 0644]
doc/_includes/_events_py.liquid [new file with mode: 0644]
doc/_includes/_example_sdk_go.liquid
doc/_includes/_navbar_top.liquid
doc/_layouts/default.html.liquid
doc/api/methods/collections.html.textile.liquid
doc/api/methods/groups.html.textile.liquid
doc/api/schema/Collection.html.textile.liquid
doc/images/keyfeatures/chooseinputs.png
doc/images/keyfeatures/collectionpage.png
doc/images/keyfeatures/dashboard2.png
doc/images/keyfeatures/graph.png
doc/images/keyfeatures/log.png
doc/images/keyfeatures/provenance.png
doc/images/keyfeatures/rerun.png
doc/images/keyfeatures/running2.png
doc/images/keyfeatures/shared.png
doc/images/keyfeatures/webupload.png
doc/images/quickstart/1.png
doc/images/quickstart/2.png
doc/images/quickstart/3.png
doc/images/quickstart/4.png
doc/images/quickstart/5.png
doc/images/quickstart/6.png
doc/images/quickstart/7.png
doc/images/uses/shared.png
doc/images/uses/sharing.png [moved from doc/images/uses/share.png with 100% similarity]
doc/index.html.liquid
doc/sdk/cli/index.html.textile.liquid
doc/sdk/cli/reference.html.textile.liquid
doc/sdk/cli/subcommands.html.textile.liquid
doc/sdk/python/events.html.textile.liquid [new file with mode: 0644]
doc/start/getting_started/nextsteps.html.textile.liquid [new file with mode: 0644]
doc/start/getting_started/sharedata.html.textile.liquid
doc/user/index.html.textile.liquid
doc/user/topics/arv-run.html.textile.liquid
doc/user/topics/arv-web.html.textile.liquid [new file with mode: 0644]
doc/user/topics/crunch-tools-overview.html.textile.liquid [new file with mode: 0644]
doc/user/topics/run-command.html.textile.liquid
docker/arv-web/Dockerfile [new file with mode: 0644]
docker/arv-web/apache2_foreground.sh [new file with mode: 0755]
docker/arv-web/apache2_vhost [new file with mode: 0644]
docker/build_tools/Makefile
sdk/cli/arvados-cli.gemspec
sdk/cli/bin/arv
sdk/cli/bin/crunch-job
sdk/go/arvadosclient/arvadosclient.go
sdk/go/arvadosclient/arvadosclient_test.go
sdk/go/arvadostest/run_servers.go [new file with mode: 0644]
sdk/go/blockdigest/blockdigest.go [new file with mode: 0644]
sdk/go/blockdigest/blockdigest_test.go [new file with mode: 0644]
sdk/go/keepclient/keepclient_test.go
sdk/go/keepclient/support.go
sdk/go/logger/logger.go [new file with mode: 0644]
sdk/go/logger/main/testlogger.go [new file with mode: 0644]
sdk/go/manifest/manifest.go [new file with mode: 0644]
sdk/go/manifest/manifest_test.go [new file with mode: 0644]
sdk/go/manifest/testdata/long_manifest [new file with mode: 0644]
sdk/go/manifest/testdata/short_manifest [new file with mode: 0644]
sdk/go/util/util.go [new file with mode: 0644]
sdk/python/arvados/api.py
sdk/python/arvados/collection.py
sdk/python/arvados/commands/arv_copy.py
sdk/python/arvados/commands/put.py
sdk/python/arvados/commands/ws.py
sdk/python/arvados/errors.py
sdk/python/arvados/events.py
sdk/python/arvados/keep.py
sdk/python/gittaggers.py [new file with mode: 0644]
sdk/python/setup.py
sdk/python/tests/arvados_testutil.py
sdk/python/tests/run_test_server.py
sdk/python/tests/test_api.py
sdk/python/tests/test_arv_put.py
sdk/python/tests/test_collections.py
sdk/python/tests/test_keep_client.py
sdk/python/tests/test_pipeline_template.py
sdk/python/tests/test_websockets.py
sdk/ruby/arvados.gemspec
sdk/ruby/lib/arvados.rb
sdk/ruby/lib/arvados/google_api_client.rb [new file with mode: 0644]
sdk/ruby/lib/arvados/keep.rb
sdk/ruby/test/test_keep_manifest.rb
services/api/.gitignore
services/api/Gemfile
services/api/Gemfile.lock
services/api/app/controllers/application_controller.rb
services/api/app/controllers/arvados/v1/collections_controller.rb
services/api/app/controllers/arvados/v1/groups_controller.rb
services/api/app/controllers/arvados/v1/links_controller.rb
services/api/app/controllers/arvados/v1/schema_controller.rb
services/api/app/controllers/arvados/v1/users_controller.rb
services/api/app/models/api_client_authorization.rb
services/api/app/models/arvados_model.rb
services/api/app/models/collection.rb
services/api/app/models/database_seeds.rb
services/api/app/models/user.rb
services/api/config/application.default.yml
services/api/config/initializers/time_format.rb [new file with mode: 0644]
services/api/db/migrate/20150122175935_no_description_in_search_index.rb [new file with mode: 0644]
services/api/db/migrate/20150123142953_full_text_search.rb [new file with mode: 0644]
services/api/db/migrate/20150203180223_set_group_class_on_anonymous_group.rb [new file with mode: 0644]
services/api/db/migrate/20150206210804_all_users_can_read_anonymous_group.rb [new file with mode: 0644]
services/api/db/migrate/20150206230342_rename_replication_attributes.rb [new file with mode: 0644]
services/api/db/migrate/20150216193428_collection_name_owner_unique_only_non_expired.rb [new file with mode: 0644]
services/api/db/structure.sql
services/api/lib/current_api_client.rb
services/api/lib/load_param.rb
services/api/lib/record_filters.rb
services/api/test/fixtures/collections.yml
services/api/test/fixtures/groups.yml
services/api/test/fixtures/jobs.yml
services/api/test/fixtures/links.yml
services/api/test/fixtures/pipeline_instances.yml
services/api/test/fixtures/pipeline_templates.yml
services/api/test/fixtures/repositories.yml
services/api/test/functional/arvados/v1/collections_controller_test.rb
services/api/test/functional/arvados/v1/filters_test.rb
services/api/test/functional/arvados/v1/groups_controller_test.rb
services/api/test/integration/collections_api_test.rb
services/api/test/integration/groups_test.rb
services/api/test/unit/arvados_model_test.rb
services/api/test/unit/collection_test.rb
services/api/test/unit/link_test.rb
services/api/test/websocket_runner.rb
services/arv-web/README [new file with mode: 0644]
services/arv-web/arv-web.py [new file with mode: 0755]
services/arv-web/sample-cgi-app/docker_image [new file with mode: 0644]
services/arv-web/sample-cgi-app/public/.htaccess [new file with mode: 0644]
services/arv-web/sample-cgi-app/public/index.cgi [new file with mode: 0755]
services/arv-web/sample-cgi-app/tmp/.keepkeep [new file with mode: 0644]
services/arv-web/sample-rack-app/config.ru [new file with mode: 0644]
services/arv-web/sample-rack-app/docker_image [new file with mode: 0644]
services/arv-web/sample-rack-app/public/.keepkeep [new file with mode: 0644]
services/arv-web/sample-rack-app/tmp/.keepkeep [new file with mode: 0644]
services/arv-web/sample-static-page/docker_image [new file with mode: 0644]
services/arv-web/sample-static-page/public/index.html [new file with mode: 0644]
services/arv-web/sample-static-page/tmp/.keepkeep [new file with mode: 0644]
services/arv-web/sample-wsgi-app/docker_image [new file with mode: 0644]
services/arv-web/sample-wsgi-app/passenger_wsgi.py [new file with mode: 0644]
services/arv-web/sample-wsgi-app/public/.keepkeep [new file with mode: 0644]
services/arv-web/sample-wsgi-app/tmp/.keepkeep [new file with mode: 0644]
services/datamanager/collection/collection.go [new file with mode: 0644]
services/datamanager/datamanager.go [new file with mode: 0644]
services/datamanager/keep/keep.go [new file with mode: 0644]
services/datamanager/loggerutil/loggerutil.go [new file with mode: 0644]
services/fuse/arvados_fuse/__init__.py
services/fuse/gittaggers.py [new symlink]
services/fuse/setup.py
services/fuse/tests/test_mount.py
services/keepproxy/keepproxy_test.go
services/nodemanager/MANIFEST.in [new file with mode: 0644]
services/nodemanager/arvnodeman/computenode/__init__.py
services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
services/nodemanager/arvnodeman/computenode/driver/__init__.py
services/nodemanager/arvnodeman/computenode/driver/ec2.py
services/nodemanager/arvnodeman/computenode/driver/gce.py [new file with mode: 0644]
services/nodemanager/arvnodeman/config.py
services/nodemanager/doc/ec2.example.cfg
services/nodemanager/doc/gce.example.cfg [new file with mode: 0644]
services/nodemanager/gittaggers.py [new symlink]
services/nodemanager/setup.py
services/nodemanager/tests/test_computenode_dispatch.py
services/nodemanager/tests/test_computenode_driver_ec2.py
services/nodemanager/tests/test_computenode_driver_gce.py [new file with mode: 0644]
services/nodemanager/tests/test_daemon.py
services/nodemanager/tests/testutil.py

index 8cc6b89324311d0cd7db51f9c6a5e7ba400253dc..eec475862e6ec2a87554e0fca90697e87f441bf5 100644 (file)
@@ -10,10 +10,9 @@ sdk/perl/MYMETA.*
 sdk/perl/Makefile
 sdk/perl/blib
 sdk/perl/pm_to_blib
-*/vendor/bundle
+*/vendor
+*/*/vendor
 sdk/java/target
 *.class
-apps/workbench/vendor/bundle
-services/api/vendor/bundle
 sdk/java/log
-sdk/cli/vendor
+/tmp
diff --git a/COPYING b/COPYING
index 4006e686dad73c4053af9b11d312501de55b10b3..acbd7523ed49f01217874965aa3180cccec89d61 100644 (file)
--- a/COPYING
+++ b/COPYING
@@ -1,5 +1,5 @@
 Server-side components of Arvados contained in the apps/ and services/
-directories, including the API Server, Workbench, and Crunch, are licenced
+directories, including the API Server, Workbench, and Crunch, are licensed
 under the GNU Affero General Public License version 3 (see agpl-3.0.txt)
 
 The Arvados client Software Development Kits contained in the sdk/ directory,
index 24a7a84a31249c9c69894ce9dd3ecb5b7fe7446c..9bef02bbfda670595750fd99a4461005ce5b8f12 100644 (file)
@@ -3,6 +3,7 @@
 
 # Ignore all logfiles and tempfiles.
 /log/*.log
+/log/*.log.gz
 /tmp
 
 /config/.secret_token
index b24cb26c7dacb7d064702f6dad36c18213c56f17..d3746a830a6c32a4a0b94b6d9f85a5dbe94bca63 100644 (file)
@@ -1,7 +1,7 @@
 source 'https://rubygems.org'
 
 gem 'rails', '~> 4.1.0'
-gem 'arvados', '>= 0.1.20150116063758'
+gem 'arvados', '>= 0.1.20150210011250'
 
 gem 'sqlite3'
 
@@ -33,7 +33,7 @@ group :development do
   gem 'flamegraph', require: false
 end
 
-group :test, :diagnostics do
+group :test, :diagnostics, :performance do
   gem 'minitest', '>= 5.0.0'
   gem 'selenium-webdriver'
   gem 'capybara'
@@ -41,7 +41,7 @@ group :test, :diagnostics do
   gem 'headless'
 end
 
-group :test do
+group :test, :performance do
   gem 'rails-perftest'
   gem 'ruby-prof'
   gem 'rvm-capistrano'
index 1833a08d0b0ce3d8bc981e808c5a2891df2e42af..9a886972a1a256b1b09df90e841810d42d00bdd1 100644 (file)
@@ -40,7 +40,7 @@ GEM
     andand (1.3.3)
     angularjs-rails (1.3.8)
     arel (5.0.1.20140414130214)
-    arvados (0.1.20150116063758)
+    arvados (0.1.20150210011250)
       activesupport (>= 3.2.13)
       andand (~> 1.3, >= 1.3.3)
       google-api-client (~> 0.6.3, >= 0.6.3)
@@ -258,7 +258,7 @@ DEPENDENCIES
   RedCloth
   andand
   angularjs-rails
-  arvados (>= 0.1.20150116063758)
+  arvados (>= 0.1.20150210011250)
   bootstrap-sass (~> 3.1.0)
   bootstrap-tab-history-rails
   bootstrap-x-editable-rails
diff --git a/apps/workbench/app/assets/javascripts/ajax_error.js b/apps/workbench/app/assets/javascripts/ajax_error.js
new file mode 100644 (file)
index 0000000..9012844
--- /dev/null
@@ -0,0 +1,15 @@
+$(document).on('ajax:error', function(e, xhr, status, error) {
+    var errorMessage = '' + status + ': ' + error;
+    // $btn is the element (button/link) that initiated the failed request.
+    var $btn = $(e.target);
+    // Populate some elements with the error text (e.g., a <p> in an alert div)
+    $($btn.attr('data-on-error-write')).text(errorMessage);
+    // Show some elements (e.g., an alert div)
+    $($btn.attr('data-on-error-show')).show();
+    // Hide some elements (e.g., a success/normal div)
+    $($btn.attr('data-on-error-hide')).hide();
+}).on('ajax:success', function(e) {
+    var $btn = $(e.target);
+    $($btn.attr('data-on-success-show')).show();
+    $($btn.attr('data-on-success-hide')).hide();
+});
index 29ea74c417cb904f5b5da1cab364c1f1000f2018..36361a17d12e3f295910b87be2ff85a6e6077110 100644 (file)
@@ -56,315 +56,3 @@ $(document).on('ajax:complete ready', function() {
         subscribeToEventLog();
     }
 });
-
-/* Assumes existence of:
-  window.jobGraphData = [];
-  window.jobGraphSeries = [];
-  window.jobGraphSortedSeries = [];
-  window.jobGraphMaxima = {};
- */
-function processLogLineForChart( logLine ) {
-    try {
-        var match = logLine.match(/^(\S+) (\S+) (\S+) (\S+) stderr crunchstat: (\S+) (.*)/);
-        if( !match ) {
-            match = logLine.match(/^((?:Sun|Mon|Tue|Wed|Thu|Fri|Sat) (?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) \d{1,2} \d\d:\d\d:\d\d \d{4}) (\S+) (\S+) (\S+) stderr crunchstat: (\S+) (.*)/);
-            if( match ) {
-                match[1] = (new Date(match[1] + ' UTC')).toISOString().replace('Z','');
-            }
-        }
-        if( match ) {
-            var rawDetailData = '';
-            var datum = null;
-
-            // the timestamp comes first
-            var timestamp = match[1].replace('_','T') + 'Z';
-
-            // we are interested in "-- interval" recordings
-            var intervalMatch = match[6].match(/(.*) -- interval (.*)/);
-            if( intervalMatch ) {
-                var intervalData = intervalMatch[2].trim().split(' ');
-                var dt = parseFloat(intervalData[0]);
-                var dsum = 0.0;
-                for(var i=2; i < intervalData.length; i += 2 ) {
-                    dsum += parseFloat(intervalData[i]);
-                }
-                datum = dsum/dt;
-
-                if( datum < 0 ) {
-                    // not interested in negative deltas
-                    return;
-                }
-
-                rawDetailData = intervalMatch[2];
-
-                // for the series name use the task number (4th term) and then the first word after 'crunchstat:'
-                var series = 'T' + match[4] + '-' + match[5];
-
-                // special calculation for cpus
-                if( /-cpu$/.test(series) ) {
-                    // divide the stat by the number of cpus unless the time count is less than the interval length
-                    if( dsum.toFixed(1) > dt.toFixed(1) ) {
-                        var cpuCountMatch = intervalMatch[1].match(/(\d+) cpus/);
-                        if( cpuCountMatch ) {
-                            datum = datum / cpuCountMatch[1];
-                        }
-                    }
-                }
-
-                addJobGraphDatum( timestamp, datum, series, rawDetailData );
-            } else {
-                // we are also interested in memory ("mem") recordings
-                var memoryMatch = match[6].match(/(\d+) cache (\d+) swap (\d+) pgmajfault (\d+) rss/);
-                if( memoryMatch ) {
-                    rawDetailData = match[6];
-                    // one datapoint for rss and one for swap - only show the rawDetailData for rss
-                    addJobGraphDatum( timestamp, parseInt(memoryMatch[4]), 'T' + match[4] + "-rss", rawDetailData );
-                    addJobGraphDatum( timestamp, parseInt(memoryMatch[2]), 'T' + match[4] + "-swap", '' );
-                } else {
-                    // not interested
-                    return;
-                }
-            }
-
-            window.redraw = true;
-        }
-    } catch( err ) {
-        console.log( 'Ignoring error trying to process log line: ' + err);
-    }
-}
-
-function addJobGraphDatum(timestamp, datum, series, rawDetailData) {
-    // check for new series
-    if( $.inArray( series, jobGraphSeries ) < 0 ) {
-        var newIndex = jobGraphSeries.push(series) - 1;
-        jobGraphSortedSeries.push(newIndex);
-        jobGraphSortedSeries.sort( function(a,b) {
-            var matchA = jobGraphSeries[a].match(/^T(\d+)-(.*)/);
-            var matchB = jobGraphSeries[b].match(/^T(\d+)-(.*)/);
-            var termA = ('000000' + matchA[1]).slice(-6) + matchA[2];
-            var termB = ('000000' + matchB[1]).slice(-6) + matchB[2];
-            return termA > termB ? 1 : -1;
-        });
-        jobGraphMaxima[series] = null;
-        window.recreate = true;
-    }
-
-    if( datum !== 0 && ( jobGraphMaxima[series] === null || jobGraphMaxima[series] < datum ) ) {
-        if( isJobSeriesRescalable(series) ) {
-            // use old maximum to get a scale conversion
-            var scaleConversion = jobGraphMaxima[series]/datum;
-            // set new maximum and rescale the series
-            jobGraphMaxima[series] = datum;
-            rescaleJobGraphSeries( series, scaleConversion );
-        }
-    }
-
-    // scale
-    var scaledDatum = null;
-    if( isJobSeriesRescalable(series) && jobGraphMaxima[series] !== null && jobGraphMaxima[series] !== 0 ) {
-        scaledDatum = datum/jobGraphMaxima[series]
-    } else {
-        scaledDatum = datum;
-    }
-    // identify x axis point, searching from the end of the array (most recent)
-    var found = false;
-    for( var i = jobGraphData.length - 1; i >= 0; i-- ) {
-        if( jobGraphData[i]['t'] === timestamp ) {
-            found = true;
-            jobGraphData[i][series] = scaledDatum;
-            jobGraphData[i]['raw-'+series] = rawDetailData;
-            break;
-        } else if( jobGraphData[i]['t'] < timestamp  ) {
-            // we've gone far enough back in time and this data is supposed to be sorted
-            break;
-        }
-    }
-    // index counter from previous loop will have gone one too far, so add one
-    var insertAt = i+1;
-    if(!found) {
-        // create a new x point for this previously unrecorded timestamp
-        var entry = { 't': timestamp };
-        entry[series] = scaledDatum;
-        entry['raw-'+series] = rawDetailData;
-        jobGraphData.splice( insertAt, 0, entry );
-        var shifted = [];
-        // now let's see about "scrolling" the graph, dropping entries that are too old (>10 minutes)
-        while( jobGraphData.length > 0
-                 && (Date.parse( jobGraphData[0]['t'] ) + 10*60000 < Date.parse( jobGraphData[jobGraphData.length-1]['t'] )) ) {
-            shifted.push(jobGraphData.shift());
-        }
-        if( shifted.length > 0 ) {
-            // from those that we dropped, were any of them maxima? if so we need to rescale
-            jobGraphSeries.forEach( function(series) {
-                // test that every shifted entry in this series was either not a number (in which case we don't care)
-                // or else approximately (to 2 decimal places) smaller than the scaled maximum (i.e. 1),
-                // because otherwise we just scrolled off something that was a maximum point
-                // and so we need to recalculate a new maximum point by looking at all remaining displayed points in the series
-                if( isJobSeriesRescalable(series) && jobGraphMaxima[series] !== null
-                      && !shifted.every( function(e) { return( !$.isNumeric(e[series]) || e[series].toFixed(2) < 1.0 ) } ) ) {
-                    // check the remaining displayed points and find the new (scaled) maximum
-                    var seriesMax = null;
-                    jobGraphData.forEach( function(entry) {
-                        if( $.isNumeric(entry[series]) && (seriesMax === null || entry[series] > seriesMax)) {
-                            seriesMax = entry[series];
-                        }
-                    });
-                    if( seriesMax !== null && seriesMax !== 0 ) {
-                        // set new actual maximum using the new maximum as the conversion conversion and rescale the series
-                        jobGraphMaxima[series] *= seriesMax;
-                        var scaleConversion = 1/seriesMax;
-                        rescaleJobGraphSeries( series, scaleConversion );
-                    }
-                    else {
-                        // we no longer have any data points displaying for this series
-                        jobGraphMaxima[series] = null;
-                    }
-                }
-            });
-        }
-        // add a 10 minute old null data point to keep the chart honest if the oldest point is less than 9.9 minutes old
-        if( jobGraphData.length > 0 ) {
-            var earliestTimestamp = jobGraphData[0]['t'];
-            var mostRecentTimestamp = jobGraphData[jobGraphData.length-1]['t'];
-            if( (Date.parse( earliestTimestamp ) + 9.9*60000 > Date.parse( mostRecentTimestamp )) ) {
-                var tenMinutesBefore = (new Date(Date.parse( mostRecentTimestamp ) - 600*1000)).toISOString();
-                jobGraphData.unshift( { 't': tenMinutesBefore } );
-            }
-        }
-    }
-
-}
-
-function createJobGraph(elementName) {
-    delete jobGraph;
-    var emptyGraph = false;
-    if( jobGraphData.length === 0 ) {
-        // If there is no data we still want to show an empty graph,
-        // so add an empty datum and placeholder series to fool it into displaying itself.
-        // Note that when finally a new series is added, the graph will be recreated anyway.
-        jobGraphData.push( {} );
-        jobGraphSeries.push( '' );
-        emptyGraph = true;
-    }
-    var graphteristics = {
-        element: elementName,
-        data: jobGraphData,
-        ymax: 1.0,
-        yLabelFormat: function () { return ''; },
-        xkey: 't',
-        ykeys: jobGraphSeries,
-        labels: jobGraphSeries,
-        resize: true,
-        hideHover: 'auto',
-        parseTime: true,
-        hoverCallback: function(index, options, content) {
-            var s = "<div class='morris-hover-row-label'>";
-            s += options.data[index][options.xkey];
-            s += "</div> ";
-            for( i = 0; i < jobGraphSortedSeries.length; i++ ) {
-                var sortedIndex = jobGraphSortedSeries[i];
-                var series = options.ykeys[sortedIndex];
-                var datum = options.data[index][series];
-                var point = ''
-                point += "<div class='morris-hover-point' style='color: ";
-                point += options.lineColors[sortedIndex % options.lineColors.length];
-                point += "'>";
-                var labelMatch = options.labels[sortedIndex].match(/^T(\d+)-(.*)/);
-                point += 'Task ' + labelMatch[1] + ' ' + labelMatch[2];
-                point += ": ";
-                if ( datum !== undefined ) {
-                    if( isJobSeriesRescalable( series ) ) {
-                        datum *= jobGraphMaxima[series];
-                    }
-                    if( parseFloat(datum) !== 0 ) {
-                        if( /-cpu$/.test(series) ){
-                            datum = $.number(datum * 100, 1) + '%';
-                        } else if( datum < 10 ) {
-                            datum = $.number(datum, 2);
-                        } else {
-                            datum = $.number(datum);
-                        }
-                        if(options.data[index]['raw-'+series]) {
-                            datum += ' (' + options.data[index]['raw-'+series] + ')';
-                        }
-                    }
-                    point += datum;
-                } else {
-                    continue;
-                }
-                point += "</div> ";
-                s += point;
-            }
-            return s;
-        }
-    }
-    if( emptyGraph ) {
-        graphteristics['axes'] = false;
-        graphteristics['parseTime'] = false;
-        graphteristics['hideHover'] = 'always';
-    }
-    window.jobGraph = Morris.Line( graphteristics );
-    if( emptyGraph ) {
-        jobGraphData = [];
-        jobGraphSeries = [];
-    }
-}
-
-function rescaleJobGraphSeries( series, scaleConversion ) {
-    if( isJobSeriesRescalable() ) {
-        $.each( jobGraphData, function( i, entry ) {
-            if( entry[series] !== null && entry[series] !== undefined ) {
-                entry[series] *= scaleConversion;
-            }
-        });
-    }
-}
-
-// that's right - we never do this for the 'cpu' series, which will always be between 0 and 1 anyway
-function isJobSeriesRescalable( series ) {
-    return !/-cpu$/.test(series);
-}
-
-$(document).on('arv-log-event', '#log_graph_div', function(event, eventData) {
-    if( eventData.properties.text ) {
-        eventData.properties.text.split('\n').forEach( function( logLine ) {
-            processLogLineForChart( logLine );
-        } );
-    }
-} );
-
-$(document).on('ready ajax:complete', function() {
-    $('#log_graph_div').not('.graph-is-setup').addClass('graph-is-setup').each( function( index, graph_div ) {
-        window.jobGraphData = [];
-        window.jobGraphSeries = [];
-        window.jobGraphSortedSeries = [];
-        window.jobGraphMaxima = {};
-        window.recreate = false;
-        window.redraw = false;
-
-        createJobGraph($(graph_div).attr('id'));
-        var object_uuid = $(graph_div).data('object-uuid');
-        // if there are any listeners for this object uuid or "all", we will trigger the event
-        var matches = ".arv-log-event-listener[data-object-uuid=\"" + object_uuid + "\"],.arv-log-event-listener[data-object-uuids~=\"" + object_uuid + "\"]";
-
-        $(document).trigger('ajax:send');
-        $.get('/jobs/' + $(graph_div).data('object-uuid') + '/logs.json', function(data) {
-            data.forEach( function( entry ) {
-                $(matches).trigger('arv-log-event', entry);
-            });
-        });
-
-        setInterval( function() {
-            if( recreate ) {
-                window.recreate = false;
-                window.redraw = false;
-                // series have changed, draw entirely new graph
-                $(graph_div).html('');
-                createJobGraph($(graph_div).attr('id'));
-            } else if( redraw ) {
-                window.redraw = false;
-                jobGraph.setData( jobGraphData );
-            }
-        }, 5000);
-    });
-});
index 34075ca56c3c0f684a353f72b1bbbd9a480ced66..27473ad28585a7d44504465299bfbb4cc4656916 100644 (file)
 function updateFilterableQueryNow($target) {
     var newquery = $target.data('filterable-query-new');
     var params = $target.data('infinite-content-params-filterable') || {};
-    params.filters = [['any', 'ilike', '%' + newquery + '%']];
+    if (newquery == null || newquery == '') {
+      params.filters = [];
+    } else {
+      params.filters = [['any', '@@', newquery.concat(':*')]];
+    }
     $target.data('infinite-content-params-filterable', params);
     $target.data('filterable-query', newquery);
 }
diff --git a/apps/workbench/app/assets/javascripts/job_log_graph.js b/apps/workbench/app/assets/javascripts/job_log_graph.js
new file mode 100644 (file)
index 0000000..9daabe1
--- /dev/null
@@ -0,0 +1,313 @@
+/* Assumes existence of:
+  window.jobGraphData = [];
+  window.jobGraphSeries = [];
+  window.jobGraphSortedSeries = [];
+  window.jobGraphMaxima = {};
+ */
+function processLogLineForChart( logLine ) {
+    try {
+        var match = logLine.match(/^(\S+) (\S+) (\S+) (\S+) stderr crunchstat: (\S+) (.*)/);
+        if( !match ) {
+            match = logLine.match(/^((?:Sun|Mon|Tue|Wed|Thu|Fri|Sat) (?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) \d{1,2} \d\d:\d\d:\d\d \d{4}) (\S+) (\S+) (\S+) stderr crunchstat: (\S+) (.*)/);
+            if( match ) {
+                match[1] = (new Date(match[1] + ' UTC')).toISOString().replace('Z','');
+            }
+        }
+        if( match ) {
+            var rawDetailData = '';
+            var datum = null;
+
+            // the timestamp comes first
+            var timestamp = match[1].replace('_','T') + 'Z';
+
+            // we are interested in "-- interval" recordings
+            var intervalMatch = match[6].match(/(.*) -- interval (.*)/);
+            if( intervalMatch ) {
+                var intervalData = intervalMatch[2].trim().split(' ');
+                var dt = parseFloat(intervalData[0]);
+                var dsum = 0.0;
+                for(var i=2; i < intervalData.length; i += 2 ) {
+                    dsum += parseFloat(intervalData[i]);
+                }
+                datum = dsum/dt;
+
+                if( datum < 0 ) {
+                    // not interested in negative deltas
+                    return;
+                }
+
+                rawDetailData = intervalMatch[2];
+
+                // for the series name use the task number (4th term) and then the first word after 'crunchstat:'
+                var series = 'T' + match[4] + '-' + match[5];
+
+                // special calculation for cpus
+                if( /-cpu$/.test(series) ) {
+                    // divide the stat by the number of cpus unless the time count is less than the interval length
+                    if( dsum.toFixed(1) > dt.toFixed(1) ) {
+                        var cpuCountMatch = intervalMatch[1].match(/(\d+) cpus/);
+                        if( cpuCountMatch ) {
+                            datum = datum / cpuCountMatch[1];
+                        }
+                    }
+                }
+
+                addJobGraphDatum( timestamp, datum, series, rawDetailData );
+            } else {
+                // we are also interested in memory ("mem") recordings
+                var memoryMatch = match[6].match(/(\d+) cache (\d+) swap (\d+) pgmajfault (\d+) rss/);
+                if( memoryMatch ) {
+                    rawDetailData = match[6];
+                    // one datapoint for rss and one for swap - only show the rawDetailData for rss
+                    addJobGraphDatum( timestamp, parseInt(memoryMatch[4]), 'T' + match[4] + "-rss", rawDetailData );
+                    addJobGraphDatum( timestamp, parseInt(memoryMatch[2]), 'T' + match[4] + "-swap", '' );
+                } else {
+                    // not interested
+                    return;
+                }
+            }
+
+            window.redraw = true;
+        }
+    } catch( err ) {
+        console.log( 'Ignoring error trying to process log line: ' + err);
+    }
+}
+
+function addJobGraphDatum(timestamp, datum, series, rawDetailData) {
+    // check for new series
+    if( $.inArray( series, jobGraphSeries ) < 0 ) {
+        var newIndex = jobGraphSeries.push(series) - 1;
+        jobGraphSortedSeries.push(newIndex);
+        jobGraphSortedSeries.sort( function(a,b) {
+            var matchA = jobGraphSeries[a].match(/^T(\d+)-(.*)/);
+            var matchB = jobGraphSeries[b].match(/^T(\d+)-(.*)/);
+            var termA = ('000000' + matchA[1]).slice(-6) + matchA[2];
+            var termB = ('000000' + matchB[1]).slice(-6) + matchB[2];
+            return termA > termB ? 1 : -1;
+        });
+        jobGraphMaxima[series] = null;
+        window.recreate = true;
+    }
+
+    if( datum !== 0 && ( jobGraphMaxima[series] === null || jobGraphMaxima[series] < datum ) ) {
+        if( isJobSeriesRescalable(series) ) {
+            // use old maximum to get a scale conversion
+            var scaleConversion = jobGraphMaxima[series]/datum;
+            // set new maximum and rescale the series
+            jobGraphMaxima[series] = datum;
+            rescaleJobGraphSeries( series, scaleConversion );
+        }
+    }
+
+    // scale
+    var scaledDatum = null;
+    if( isJobSeriesRescalable(series) && jobGraphMaxima[series] !== null && jobGraphMaxima[series] !== 0 ) {
+        scaledDatum = datum/jobGraphMaxima[series]
+    } else {
+        scaledDatum = datum;
+    }
+    // identify x axis point, searching from the end of the array (most recent)
+    var found = false;
+    for( var i = jobGraphData.length - 1; i >= 0; i-- ) {
+        if( jobGraphData[i]['t'] === timestamp ) {
+            found = true;
+            jobGraphData[i][series] = scaledDatum;
+            jobGraphData[i]['raw-'+series] = rawDetailData;
+            break;
+        } else if( jobGraphData[i]['t'] < timestamp  ) {
+            // we've gone far enough back in time and this data is supposed to be sorted
+            break;
+        }
+    }
+    // index counter from previous loop will have gone one too far, so add one
+    var insertAt = i+1;
+    if(!found) {
+        // create a new x point for this previously unrecorded timestamp
+        var entry = { 't': timestamp };
+        entry[series] = scaledDatum;
+        entry['raw-'+series] = rawDetailData;
+        jobGraphData.splice( insertAt, 0, entry );
+        var shifted = [];
+        // now let's see about "scrolling" the graph, dropping entries that are too old (>10 minutes)
+        while( jobGraphData.length > 0
+                 && (Date.parse( jobGraphData[0]['t'] ) + 10*60000 < Date.parse( jobGraphData[jobGraphData.length-1]['t'] )) ) {
+            shifted.push(jobGraphData.shift());
+        }
+        if( shifted.length > 0 ) {
+            // from those that we dropped, were any of them maxima? if so we need to rescale
+            jobGraphSeries.forEach( function(series) {
+                // test that every shifted entry in this series was either not a number (in which case we don't care)
+                // or else approximately (to 2 decimal places) smaller than the scaled maximum (i.e. 1),
+                // because otherwise we just scrolled off something that was a maximum point
+                // and so we need to recalculate a new maximum point by looking at all remaining displayed points in the series
+                if( isJobSeriesRescalable(series) && jobGraphMaxima[series] !== null
+                      && !shifted.every( function(e) { return( !$.isNumeric(e[series]) || e[series].toFixed(2) < 1.0 ) } ) ) {
+                    // check the remaining displayed points and find the new (scaled) maximum
+                    var seriesMax = null;
+                    jobGraphData.forEach( function(entry) {
+                        if( $.isNumeric(entry[series]) && (seriesMax === null || entry[series] > seriesMax)) {
+                            seriesMax = entry[series];
+                        }
+                    });
+                    if( seriesMax !== null && seriesMax !== 0 ) {
+                        // set new actual maximum using the new maximum as the scale conversion and rescale the series
+                        jobGraphMaxima[series] *= seriesMax;
+                        var scaleConversion = 1/seriesMax;
+                        rescaleJobGraphSeries( series, scaleConversion );
+                    }
+                    else {
+                        // we no longer have any data points displaying for this series
+                        jobGraphMaxima[series] = null;
+                    }
+                }
+            });
+        }
+        // add a 10 minute old null data point to keep the chart honest if the oldest point is less than 9.9 minutes old
+        if( jobGraphData.length > 0 ) {
+            var earliestTimestamp = jobGraphData[0]['t'];
+            var mostRecentTimestamp = jobGraphData[jobGraphData.length-1]['t'];
+            if( (Date.parse( earliestTimestamp ) + 9.9*60000 > Date.parse( mostRecentTimestamp )) ) {
+                var tenMinutesBefore = (new Date(Date.parse( mostRecentTimestamp ) - 600*1000)).toISOString();
+                jobGraphData.unshift( { 't': tenMinutesBefore } );
+            }
+        }
+    }
+
+}
+
+function createJobGraph(elementName) {
+    delete jobGraph;
+    var emptyGraph = false;
+    if( jobGraphData.length === 0 ) {
+        // If there is no data we still want to show an empty graph,
+        // so add an empty datum and placeholder series to fool it
+        // into displaying itself.  Note that when finally a new
+        // series is added, the graph will be recreated anyway.
+        jobGraphData.push( {} );
+        jobGraphSeries.push( '' );
+        emptyGraph = true;
+    }
+    var graphteristics = {
+        element: elementName,
+        data: jobGraphData,
+        ymax: 1.0,
+        yLabelFormat: function () { return ''; },
+        xkey: 't',
+        ykeys: jobGraphSeries,
+        labels: jobGraphSeries,
+        resize: true,
+        hideHover: 'auto',
+        parseTime: true,
+        hoverCallback: function(index, options, content) {
+            var s = "<div class='morris-hover-row-label'>";
+            s += options.data[index][options.xkey];
+            s += "</div> ";
+            for( i = 0; i < jobGraphSortedSeries.length; i++ ) {
+                var sortedIndex = jobGraphSortedSeries[i];
+                var series = options.ykeys[sortedIndex];
+                var datum = options.data[index][series];
+                var point = ''
+                point += "<div class='morris-hover-point' style='color: ";
+                point += options.lineColors[sortedIndex % options.lineColors.length];
+                point += "'>";
+                var labelMatch = options.labels[sortedIndex].match(/^T(\d+)-(.*)/);
+                point += 'Task ' + labelMatch[1] + ' ' + labelMatch[2];
+                point += ": ";
+                if ( datum !== undefined ) {
+                    if( isJobSeriesRescalable( series ) ) {
+                        datum *= jobGraphMaxima[series];
+                    }
+                    if( parseFloat(datum) !== 0 ) {
+                        if( /-cpu$/.test(series) ){
+                            datum = $.number(datum * 100, 1) + '%';
+                        } else if( datum < 10 ) {
+                            datum = $.number(datum, 2);
+                        } else {
+                            datum = $.number(datum);
+                        }
+                        if(options.data[index]['raw-'+series]) {
+                            datum += ' (' + options.data[index]['raw-'+series] + ')';
+                        }
+                    }
+                    point += datum;
+                } else {
+                    continue;
+                }
+                point += "</div> ";
+                s += point;
+            }
+            return s;
+        }
+    }
+    if( emptyGraph ) {
+        graphteristics['axes'] = false;
+        graphteristics['parseTime'] = false;
+        graphteristics['hideHover'] = 'always';
+    }
+    window.jobGraph = Morris.Line( graphteristics );
+    if( emptyGraph ) {
+        jobGraphData = [];
+        jobGraphSeries = [];
+    }
+}
+
+function rescaleJobGraphSeries( series, scaleConversion ) {
+    if( isJobSeriesRescalable() ) {
+        $.each( jobGraphData, function( i, entry ) {
+            if( entry[series] !== null && entry[series] !== undefined ) {
+                entry[series] *= scaleConversion;
+            }
+        });
+    }
+}
+
+// that's right - we never do this for the 'cpu' series, which will always be between 0 and 1 anyway
+function isJobSeriesRescalable( series ) {
+    return !/-cpu$/.test(series);
+}
+
+$(document).on('arv-log-event', '#log_graph_div', function(event, eventData) {
+    if( eventData.properties.text ) {
+        eventData.properties.text.split('\n').forEach( function( logLine ) {
+            processLogLineForChart( logLine );
+        } );
+    }
+} );
+
+$(document).on('ready ajax:complete', function() {
+    $('#log_graph_div').not('.graph-is-setup').addClass('graph-is-setup').each( function( index, graph_div ) {
+        window.jobGraphData = [];
+        window.jobGraphSeries = [];
+        window.jobGraphSortedSeries = [];
+        window.jobGraphMaxima = {};
+        window.recreate = false;
+        window.redraw = false;
+
+        createJobGraph($(graph_div).attr('id'));
+        var object_uuid = $(graph_div).data('object-uuid');
+        // if there are any listeners for this object uuid or "all", we will trigger the event
+        var matches = ".arv-log-event-listener[data-object-uuid=\"" + object_uuid + "\"],.arv-log-event-listener[data-object-uuids~=\"" + object_uuid + "\"]";
+
+        $(document).trigger('ajax:send');
+        $.get('/jobs/' + $(graph_div).data('object-uuid') + '/logs.json', function(data) {
+            data.forEach( function( entry ) {
+                $(matches).trigger('arv-log-event', entry);
+            });
+        });
+
+        setInterval( function() {
+            if (window.recreate || window.redraw) {
+                if (window.recreate) {
+                    // series have changed, draw entirely new graph.
+                    $(graph_div).html('').show(500);
+                    createJobGraph($(graph_div).attr('id'));
+                } else {
+                    jobGraph.setData(jobGraphData);
+                }
+                window.recreate = false;
+                window.redraw = false;
+            }
+        }, 5000);
+    });
+});
diff --git a/apps/workbench/app/assets/javascripts/request_shell_access.js b/apps/workbench/app/assets/javascripts/request_shell_access.js
new file mode 100644 (file)
index 0000000..792ab31
--- /dev/null
@@ -0,0 +1,10 @@
+$(document).on('ready ajax:success storage', function() {
+    // Update the "shell access requested" info box according to the
+    // current state of localStorage.
+    var msg = localStorage.getItem('request_shell_access');
+    var $noShellAccessDiv = $('#no_shell_access');
+    if ($noShellAccessDiv.length > 0) {
+        $('.alert-success p', $noShellAccessDiv).text(msg);
+        $('.alert-success', $noShellAccessDiv).toggle(!!msg);
+    }
+});
index a064d5399f7fce8be577f6df7c8a38107bf39398..b52591bc0caa5ff1b14da6ac7485a829eaeab1d8 100644 (file)
@@ -390,7 +390,7 @@ class ApplicationController < ActionController::Base
     @user_is_manager = false
     @share_links = []
 
-    if @object.uuid != current_user.uuid
+    if @object.uuid != current_user.andand.uuid
       begin
         @share_links = Link.permissions_for(@object)
         @user_is_manager = true
@@ -435,6 +435,7 @@ class ApplicationController < ActionController::Base
 
   protected
 
+  helper_method :strip_token_from_path
   def strip_token_from_path(path)
     path.sub(/([\?&;])api_token=[^&;]*[&;]?/, '\1')
   end
@@ -497,7 +498,7 @@ class ApplicationController < ActionController::Base
       else
         @object = model_class.find(params[:uuid])
       end
-    rescue ArvadosApiClient::NotFoundException, RuntimeError => error
+    rescue ArvadosApiClient::NotFoundException, ArvadosApiClient::NotLoggedInException, RuntimeError => error
       if error.is_a?(RuntimeError) and (error.message !~ /^argument to find\(/)
         raise
       end
@@ -646,6 +647,7 @@ class ApplicationController < ActionController::Base
   end
 
   def check_user_profile
+    return true if !current_user
     if request.method.downcase != 'get' || params[:partial] ||
        params[:tab_pane] || params[:action_method] ||
        params[:action] == 'setup_popup'
index f4aa0395f3d273a2948f3415be6ad4bfaf539673..e883017070d20ccdc7613e8f6c88ea9acaf1930e 100644 (file)
@@ -3,6 +3,10 @@ require "arvados/keep"
 class CollectionsController < ApplicationController
   include ActionController::Live
 
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    'show' == ctrl.action_name
+  }
   skip_around_filter(:require_thread_api_token,
                      only: [:show_file, :show_file_links])
   skip_before_filter(:find_object_by_uuid,
@@ -116,7 +120,9 @@ class CollectionsController < ApplicationController
     # purposes: it lets us return a useful status code for common errors, and
     # helps us figure out which token to provide to arv-get.
     coll = nil
-    tokens = [Thread.current[:arvados_api_token], params[:reader_token]].compact
+    tokens = [Thread.current[:arvados_api_token],
+              params[:reader_token],
+              (Rails.configuration.anonymous_user_token || nil)].compact
     usable_token = find_usable_token(tokens) do
       coll = Collection.find(params[:uuid])
     end
@@ -180,6 +186,16 @@ class CollectionsController < ApplicationController
 
   def show
     return super if !@object
+
+    @logs = []
+
+    if params["tab_pane"] == "Provenance_graph"
+      @prov_svg = ProvenanceHelper::create_provenance_graph(@object.provenance, "provenance_svg",
+                                                            {:request => request,
+                                                             :direction => :bottom_up,
+                                                             :combine_jobs => :script_only}) rescue nil
+    end
+
     if current_user
       if Keep::Locator.parse params["uuid"]
         @same_pdh = Collection.filter([["portable_data_hash", "=", @object.portable_data_hash]])
@@ -215,12 +231,6 @@ class CollectionsController < ApplicationController
           .results.any?
         @search_sharing = search_scopes
 
-        if params["tab_pane"] == "Provenance_graph"
-          @prov_svg = ProvenanceHelper::create_provenance_graph(@object.provenance, "provenance_svg",
-                                                                {:request => request,
-                                                                  :direction => :bottom_up,
-                                                                  :combine_jobs => :script_only}) rescue nil
-        end
         if params["tab_pane"] == "Used_by"
           @used_by_svg = ProvenanceHelper::create_provenance_graph(@object.used_by, "used_by_svg",
                                                                    {:request => request,
@@ -256,6 +266,15 @@ class CollectionsController < ApplicationController
     sharing_popup
   end
 
+  def update
+    @updates ||= params[@object.resource_param_name.to_sym]
+    if @updates && (@updates.keys - ["name", "description"]).empty?
+      # exclude manifest_text since only name or description is being updated
+      @object.manifest_text = nil
+    end
+    super
+  end
+
   protected
 
   def find_usable_token(token_list)
@@ -288,7 +307,9 @@ class CollectionsController < ApplicationController
     return nil
   end
 
-  def file_enumerator(opts)
+  # Note: several controller and integration tests rely on stubbing
+  # file_enumerator to return fake file content.
+  def file_enumerator opts
     FileStreamer.new opts
   end
 
index 08fb94d2f085d4d7eb777f81cea43d84f9f2dbcf..b90210f6a9b7be4c7bbea8a1193000ac9e8a7a56 100644 (file)
@@ -1,4 +1,9 @@
 class JobsController < ApplicationController
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    'show' == ctrl.action_name
+  }
+
   include JobsHelper
 
   def generate_provenance(jobs)
@@ -80,6 +85,8 @@ class JobsController < ApplicationController
   end
 
   def show_pane_list
-    %w(Status Log Details Provenance Advanced)
+    panes = %w(Status Log Details Provenance Advanced)
+    panes.delete 'Log' if !current_user
+    panes
   end
 end
index 25f5ee421c58dc860806b9a0f6b726e2e8816406..b4cce9be03e42bd2899590101a671717deb6295b 100644 (file)
@@ -1,6 +1,11 @@
 class PipelineInstancesController < ApplicationController
   skip_before_filter :find_object_by_uuid, only: :compare
   before_filter :find_objects_by_uuid, only: :compare
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    'show' == ctrl.action_name
+  }
+
   include PipelineInstancesHelper
   include PipelineComponentsHelper
 
index 2b2e9a4e33925da7899007910b213ce130811303..83ab88f6122dd1e5234bb12e4978fbabe89b263f 100644 (file)
@@ -1,4 +1,9 @@
 class PipelineTemplatesController < ApplicationController
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    'show' == ctrl.action_name
+  }
+
   include PipelineComponentsHelper
 
   def show
index a0bf262c960384a58e11d34d5949be5b7dc60a38..8c2f72e6689a40127cb66c2dbefbe7ffe60c50c0 100644 (file)
@@ -1,5 +1,9 @@
 class ProjectsController < ApplicationController
   before_filter :set_share_links, if: -> { defined? @object }
+  skip_around_filter :require_thread_api_token, if: proc { |ctrl|
+    Rails.configuration.anonymous_user_token and
+    %w(show tab_counts).include? ctrl.action_name
+  }
 
   def model_class
     Group
@@ -38,27 +42,32 @@ class ProjectsController < ApplicationController
   # us to tell the interface to get counts for each pane (using :filters).
   # It also seems to me that something like these could be used to configure the contents of the panes.
   def show_pane_list
-    pane_list = [
+    pane_list = []
+    pane_list <<
       {
         :name => 'Data_collections',
         :filters => [%w(uuid is_a arvados#collection)]
-      },
+      }
+    pane_list <<
       {
         :name => 'Jobs_and_pipelines',
         :filters => [%w(uuid is_a) + [%w(arvados#job arvados#pipelineInstance)]]
-      },
+      }
+    pane_list <<
       {
         :name => 'Pipeline_templates',
         :filters => [%w(uuid is_a arvados#pipelineTemplate)]
-      },
+      }
+    pane_list <<
       {
         :name => 'Subprojects',
         :filters => [%w(uuid is_a arvados#group)]
-      },
-      { :name => 'Other_objects',
+      } if current_user
+    pane_list <<
+      {
+        :name => 'Other_objects',
         :filters => [%w(uuid is_a) + [%w(arvados#human arvados#specimen arvados#trait)]]
-      }
-    ]
+      } if current_user
     pane_list << { :name => 'Sharing',
                    :count => @share_links.count } if @user_is_manager
     pane_list << { :name => 'Advanced' }
@@ -111,7 +120,13 @@ class ProjectsController < ApplicationController
         @removed_uuids << link.uuid
         link.destroy
       end
-      if item.owner_uuid == @object.uuid
+
+      # If this object has the 'expires_at' attribute, then simply mark it
+      # expired.
+      if item.attributes.include?("expires_at")
+        item.update_attributes expires_at: Time.now
+        @removed_uuids << item.uuid
+      elsif item.owner_uuid == @object.uuid
         # Object is owned by this project. Remove it from the project by
         # changing owner to the current user.
         begin
@@ -142,7 +157,7 @@ class ProjectsController < ApplicationController
         object.destroy
       end
     end
-    while (objects = @object.contents(include_linked: false)).any?
+    while (objects = @object.contents).any?
       objects.each do |object|
         object.update_attributes! owner_uuid: current_user.uuid
       end
@@ -183,7 +198,6 @@ class ProjectsController < ApplicationController
         (val.is_a?(Array) ? val : [val]).each do |type|
           objects = @object.contents(order: @order,
                                      limit: @limit,
-                                     include_linked: true,
                                      filters: (@filters - kind_filters + [['uuid', 'is_a', type]]),
                                     )
           objects.each do |object|
@@ -221,7 +235,6 @@ class ProjectsController < ApplicationController
     else
       @objects = @object.contents(order: @order,
                                   limit: @limit,
-                                  include_linked: true,
                                   filters: @filters,
                                   offset: @offset)
       @next_page_href = next_page_href(partial: :contents_rows,
index 3678ceb4fe265d7d42242f7c36a22d2942b312fa..d32c92a1e71fde336c99b52b990b86f019662af8 100644 (file)
@@ -11,7 +11,9 @@ class RepositoriesController < ApplicationController
       panes.insert(panes.length-1, panes.delete_at(panes.index('Advanced'))) if panes.index('Advanced')
       panes
     else
-      super
+      panes = super
     end
+    panes.delete('Attributes') if !current_user.is_admin
+    panes
   end
 end
index 9e2ff1b00b7e38c31129a3694833023aaebbc825..447f416aa25e438ab8fd5cdba3cf0d7d9827ebb7 100644 (file)
@@ -15,8 +15,7 @@ class SearchController < ApplicationController
     end
     @objects = search_what.contents(limit: @limit,
                                     offset: @offset,
-                                    filters: @filters,
-                                    include_linked: true)
+                                    filters: @filters)
     super
   end
 
index f3f36e6b29e123b063f31920e66e53527778e957..0ca5a85f018af48187865efe030195bbdeeebdbf 100644 (file)
@@ -240,14 +240,25 @@ class UsersController < ApplicationController
               ['tail_uuid', '=', current_user.uuid],
               ['link_class', '=', 'permission'],
              ])
-    @my_repositories = Repository.where uuid: repo_links.collect(&:head_uuid)
+
+    owned_repositories = Repository.where(owner_uuid: current_user.uuid)
+
+    @my_repositories = (Repository.where(uuid: repo_links.collect(&:head_uuid)) |
+                        owned_repositories).
+                       uniq { |repo| repo.uuid }
+
+
     @repo_writable = {}
     repo_links.each do |link|
       if link.name.in? ['can_write', 'can_manage']
-        @repo_writable[link.head_uuid] = true
+        @repo_writable[link.head_uuid] = link.name
       end
     end
 
+    owned_repositories.each do |repo|
+      @repo_writable[repo.uuid] = 'can_manage'
+    end
+
     # virtual machines the current user can login into
     @my_vm_logins = {}
     Link.where(tail_uuid: current_user.uuid,
@@ -302,6 +313,12 @@ class UsersController < ApplicationController
     end
   end
 
+  def request_shell_access
+    logger.warn "request_access: #{params.inspect}"
+    params['request_url'] = request.url
+    RequestShellAccessReporter.send_request(current_user, params).deliver
+  end
+
   protected
 
   def find_current_links user
index 11c55c3f77bed497566b124c34d6bb69c6d0ee0a..ef2830cd7e3fcc667719e0b442019e87de83715b 100644 (file)
@@ -133,7 +133,7 @@ module ApplicationHelper
         end
       end
       style_opts[:class] = (style_opts[:class] || '') + ' nowrap'
-      if opts[:no_link]
+      if opts[:no_link] or (resource_class == User && !current_user)
         raw(link_name)
       else
         (link_to raw(link_name), { controller: resource_class.to_s.tableize, action: 'show', id: ((opts[:name_link].andand.uuid) || link_uuid) }, style_opts) + raw(tags)
@@ -307,9 +307,7 @@ module ApplicationHelper
       end
     end
 
-    if dataclass == 'number' or attrvalue.is_a? Fixnum or attrvalue.is_a? Float
-      datatype = 'number'
-    elsif attrvalue.is_a? String
+    if attrvalue.is_a? String
       datatype = 'text'
     elsif attrvalue.is_a?(Array) or dataclass.andand.is_a?(Class)
       # TODO: find a way to edit with x-editable
diff --git a/apps/workbench/app/mailers/request_shell_access_reporter.rb b/apps/workbench/app/mailers/request_shell_access_reporter.rb
new file mode 100644 (file)
index 0000000..0195573
--- /dev/null
@@ -0,0 +1,11 @@
+class RequestShellAccessReporter < ActionMailer::Base
+  default from: Rails.configuration.email_from
+  default to: Rails.configuration.support_email_address
+
+  def send_request(user, params)
+    @user = user
+    @params = params
+    subject = "Shell account request from #{user.full_name} (#{user.email}, #{user.uuid})"
+    mail(subject: subject)
+  end
+end
index 5b2311dce98da44407a2f9dfc76436c2f1af1b29..992f8fd896989a408b9939c16dc893b85aa58964 100644 (file)
@@ -101,8 +101,13 @@ class ArvadosApiClient
     url.sub! '/arvados/v1/../../', '/'
 
     query = {
-      'api_token' => tokens[:arvados_api_token] || Thread.current[:arvados_api_token] || '',
-      'reader_tokens' => (tokens[:reader_tokens] || Thread.current[:reader_tokens] || []).to_json,
+      'api_token' => (tokens[:arvados_api_token] ||
+                      Thread.current[:arvados_api_token] ||
+                      ''),
+      'reader_tokens' => ((tokens[:reader_tokens] ||
+                           Thread.current[:reader_tokens] ||
+                           []) +
+                          [Rails.configuration.anonymous_user_token]).to_json,
     }
     if !data.nil?
       data.each do |k,v|
@@ -119,6 +124,7 @@ class ArvadosApiClient
     else
       query["_method"] = "GET"
     end
+
     if @@profiling_enabled
       query["_profile"] = "true"
     end
@@ -140,6 +146,7 @@ class ArvadosApiClient
     rescue Oj::ParseError
       resp = nil
     end
+
     if not resp.is_a? Hash
       raise InvalidApiResponseException.new(url, msg)
     elsif msg.status_code != 200
index bc5a9a37ddc351754feaef68c8d01916f6ca052c..f19d47435ae979564c94cf9117109ff44fe5acdf 100644 (file)
@@ -311,7 +311,7 @@ class ArvadosBase < ActiveRecord::Base
   end
 
   def self.creatable?
-    current_user
+    current_user.andand.is_active
   end
 
   def self.goes_in_projects?
index 2d804e1a5345743957b71aa7389eb15ab223312c..d84adea44f1832360b03ac5edc09a5cc10371a08 100644 (file)
@@ -6,4 +6,8 @@ class AuthorizedKey < ArvadosBase
       super
     end
   end
+
+  def self.creatable?
+    current_user
+  end
 end
index 686b816c08e2343419ea6e44f0f983a05cb0d6ce..13f5357faadba842e57bbc24b1bdc883509ed7f6 100644 (file)
@@ -35,7 +35,11 @@ class Collection < ArvadosBase
   end
 
   def content_summary
-    ApplicationController.helpers.human_readable_bytes_html(total_bytes) + " " + super
+    if total_bytes > 0
+      ApplicationController.helpers.human_readable_bytes_html(total_bytes) + " " + super
+    else
+      super + " modified at " + modified_at.to_s
+    end
   end
 
   def total_bytes
@@ -70,10 +74,6 @@ class Collection < ArvadosBase
     %w(name description manifest_text)
   end
 
-  def self.creatable?
-    false
-  end
-
   def provenance
     arvados_api_client.api "collections/#{self.uuid}/", "provenance"
   end
index c59bb89fe851306c80278b4b96ce192c9e064ea7..3ece865959f10aabf70acfab6cde1dd9145cf6d5 100644 (file)
@@ -11,10 +11,6 @@ class Job < ArvadosBase
     %w(description)
   end
 
-  def self.creatable?
-    false
-  end
-
   def default_name
     if script
       x = "\"#{script}\" job"
index bed7edc6e7ae44a078d7befc3bda1fafe17a72ff..b062dda8610ae7feed3494befbcd42acea37ec4e 100644 (file)
@@ -5,4 +5,11 @@ class Repository < ArvadosBase
   def attributes_for_display
     super.reject { |x| x[0] == 'fetch_url' }
   end
+  def editable_attributes
+    if current_user.is_admin
+      super
+    else
+      []
+    end
+  end
 end
index 8a0482274ad0cd18fcbde0d1cb770991ac4f995f..aa1ffda017120bb085997e130074e1bd799dace9 100644 (file)
 
 <h2>Not Found</h2>
 
-<p>The <%= req_item %> was not found.
+<p>The <%= req_item %> was not found.</p>
 
-<% if class_name %>
-Perhaps you'd like to
-<%= link_to("browse all #{class_name_h.pluralize}", action: :index, controller: class_name.tableize) %>?
-<% end %>
+<% if !current_user %>
+
+  <p>
+    (I notice you are not logged in. If you're looking for a private
+    page, you'll need to <%=link_to 'log in', arvados_api_client.arvados_login_url(return_to: strip_token_from_path(request.url))%> first.)
+  </p>
+
+<% elsif class_name %>
 
-</p>
+  <p>
+    Perhaps you'd like to <%= link_to("browse all
+    #{class_name_h.pluralize}", action: :index, controller:
+    class_name.tableize) %>?
+  </p>
+
+<% end %>
 
 <% error_message = "The #{req_item_plain_text} was not found." %>
 <%= render :partial => "report_error", :locals => {error_message: error_message, error_type: '404'} %>
diff --git a/apps/workbench/app/views/application/_browser_unsupported.html b/apps/workbench/app/views/application/_browser_unsupported.html
new file mode 100644 (file)
index 0000000..5af565c
--- /dev/null
@@ -0,0 +1,24 @@
+<!-- googleoff: all -->
+<style type="text/css">
+  #browser-unsupported .alert {
+    margin-left: -100px;
+    margin-right: -100px;
+    padding-left: 120px;
+    padding-right: 120px;
+  }
+</style>
+<div id="browser-unsupported" class="hidden">
+  <div class="alert alert-danger">
+    <p>
+      <b>Hey!</b> Your web browser is missing some of the features we
+      rely on.  Usually this means you are running an old version.
+      Updating your system, or switching to a current version
+      of <a class="alert-link"
+      href="//google.com/search?q=download+Mozilla+Firefox">Firefox</a>
+      or <a class="alert-link"
+      href="//google.com/search?q=download+Google+Chrome">Chrome</a>,
+      should fix this.
+    </p>
+  </div>
+</div>
+<!-- googleon: all -->
index 4e1503bde59738b90eb99b8b42f2d430a8987657..3233d8d0062d71f6aa97538b046bd53ab2db74e8 100644 (file)
         </div>
         <div style="height: 1em" />
 
-        <% preview_pane = (params[:preview_pane].to_s != "false")
-           pane_col_class = preview_pane ? "col-md-6" : "col-md-12" %>
+        <% preview_pane = (params[:preview_pane].to_s != "false") %>
         <div class="row" style="height: 20em">
-          <div class="<%= pane_col_class %> arv-filterable-list selectable-container <%= 'multiple' if multiple %>"
+          <div class="<%= 'col-sm-6' if preview_pane %> col-xs-12 arv-filterable-list selectable-container <%= 'multiple' if multiple %>"
                style="height: 100%; overflow-y: scroll"
                data-infinite-scroller="#choose-scroll"
                id="choose-scroll"
@@ -69,7 +68,7 @@
                                                        use_preview_selection: use_preview_sel %>">
           </div>
           <% if preview_pane %>
-            <div class="col-md-6 hidden-xs hidden-sm modal-dialog-preview-pane" style="height: 100%; overflow-y: scroll">
+            <div class="col-sm-6 col-xs-12 modal-dialog-preview-pane" style="height: 100%; overflow-y: scroll">
             </div>
           <% end %>
         </div>
diff --git a/apps/workbench/app/views/application/_create_new_object_button.html.erb b/apps/workbench/app/views/application/_create_new_object_button.html.erb
new file mode 100644 (file)
index 0000000..7e01f57
--- /dev/null
@@ -0,0 +1,7 @@
+<div style="display:inline-block">
+  <%= button_to({action: 'create'}, {class: 'btn btn-sm btn-primary'}) do %>
+    <i class="fa fa-fw fa-plus"></i>
+    Add a new
+    <%= controller.controller_name.singularize.humanize.downcase %>
+  <% end %>
+</div>
index 23795d3f04a3a56ec99f30be5d4e438fb7d33875..4b7beb30592832261722e32be0dca21af35044ca 100644 (file)
    choose_filters = {
      "groups" => [["group_class", "=", "role"]],
    }
+   if not Rails.configuration.anonymous_user_token
+     # It would be ideal to filter out the anonymous group by UUID,
+     # but that's not readily doable.  Workbench can't generate the
+     # UUID for a != filter, because it can't introspect the API
+     # server's UUID prefix.  And we can't say "uuid not like
+     # %-anonymouspublic", because the API server doesn't support a
+     # "not like" filter.
+     choose_filters["groups"] << ["name", "!=", "Anonymous users"]
+   end
    choose_filters.default = []
    owner_icon = fa_icon_class_for_uuid(@object.owner_uuid)
    if owner_icon == "fa-users"
index 887529b10d5c35e435608f33f1ae61631afc0209..31ff2e6e21a6e659ad8f75d14d2630e020b4d62b 100644 (file)
@@ -11,9 +11,9 @@
   <% end %>
 <% end %>
 
-<% if @object.class.goes_in_projects? && @object.uuid != current_user.uuid # Not the "Home" project %>
+<% if @object.class.goes_in_projects? && @object.uuid != current_user.andand.uuid # Not the "Home" project %>
   <% content_for :tab_line_buttons do %>
-    <% if @object.class.copies_to_projects? %>
+    <% if current_user.andand.is_active && @object.class.copies_to_projects? %>
       <%= link_to(
           choose_projects_path(
            title: "Copy this #{object_class} to:",
@@ -31,7 +31,7 @@
         <i class="fa fa-fw fa-copy"></i> Copy to project...
       <% end %>
     <% end %>
-    <% if (ArvadosBase.find(@object.owner_uuid).writable_by.include?(current_user.uuid) rescue nil) %>
+    <% if (ArvadosBase.find(@object.owner_uuid).writable_by.include?(current_user.andand.uuid) rescue nil) %>
       <%= link_to(
           choose_projects_path(
            title: "Move this #{object_class} to:",
index 3e2a608ed719cef61c52f17a07865bfdd6ca9dbd..4c7896f728883a0de6b46218d3dc4eae1100acc7 100644 (file)
@@ -5,24 +5,7 @@
 <% content_for :tab_line_buttons do %>
 
   <% if controller.model_class.creatable? %>
-
-    <% if controller.model_class.name == 'User' %>
-      <%= link_to setup_user_popup_path,
-        {class: 'btn btn-sm btn-primary', :remote => true, 'data-toggle' =>  "modal",
-          'data-target' => '#user-setup-modal-window', return_to: request.url} do %>
-        <i class="fa fa-fw fa-plus"></i> Add a new user
-      <% end %>
-      <div id="user-setup-modal-window" class="modal fade" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true"></div>
-    <% elsif controller.controller_name == 'manage_account' %>
-      <%# No add button is needed %>
-    <% else %>
-      <%= button_to({action: 'create'}, {class: 'btn btn-sm btn-primary'}) do %>
-        <i class="fa fa-fw fa-plus"></i>
-        Add a new
-        <%= controller.controller_name.singularize.humanize.downcase %>
-      <% end %>
-    <% end %>
-
+    <%= render partial: 'create_new_object_button' %>
   <% end %>
 
 <% end %>
diff --git a/apps/workbench/app/views/collections/_create_new_object_button.html.erb b/apps/workbench/app/views/collections/_create_new_object_button.html.erb
new file mode 100644 (file)
index 0000000..1d8f1c4
--- /dev/null
@@ -0,0 +1 @@
+<%# "Create a new collection" would work, but the search filter on collections#index breaks the tab_line_buttons layout. %>
index 5d2fe2cbd0667b20d7e740db4c0d3c6fa49b6409..dd077aa2a4fd25bda3c63f151dc93108ca0f6013 100644 (file)
@@ -39,7 +39,7 @@
     <% if @collection_info[c.uuid] %>
       <% @collection_info[c.uuid][:tag_links].each do |tag_link| %>
         <span class="label label-info removable-tag" data-tag-link-uuid="<%= tag_link.uuid %>"><%= tag_link.name %>
-          <% if tag_link.owner_uuid == current_user.uuid %>
+          <% if tag_link.owner_uuid == current_user.andand.uuid %>
           &nbsp;<a title="Delete tag"><i class="glyphicon glyphicon-trash"></i></a>
           <% end %>
         </span>&nbsp;
index 603dc34f4fc6ab038ad727b95688ee8e9a6831fe..383ec64c0ab2697b781660644790f35a0e7a7524 100644 (file)
@@ -20,100 +20,99 @@ function unselect_all_files() {
 %>
 
 <div class="selection-action-container" style="padding-left: <%=padding_left%>">
-  <% if !defined? no_checkboxes or !no_checkboxes %>
-  <div class="row">
-    <div class="pull-left">
-      <div class="btn-group btn-group-sm">
-        <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">Selection... <span class="caret"></span></button>
-        <ul class="dropdown-menu" role="menu">
-          <li><%= link_to "Create new collection with selected files", '#',
-                  method: :post,
-                  'data-href' => combine_selected_path(
-                    action_data: {current_project_uuid: @object.owner_uuid}.to_json
-                  ),
-                  'data-selection-param-name' => 'selection[]',
-                  'data-selection-action' => 'combine-collections',
-                  'data-toggle' => 'dropdown'
-            %></li>
-        </ul>
+  <% if Collection.creatable? and (!defined? no_checkboxes or !no_checkboxes) %>
+    <div class="row">
+      <div class="pull-left">
+        <div class="btn-group btn-group-sm">
+          <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">Selection... <span class="caret"></span></button>
+          <ul class="dropdown-menu" role="menu">
+            <li><%= link_to "Create new collection with selected files", '#',
+                    method: :post,
+                    'data-href' => combine_selected_path(
+                      action_data: {current_project_uuid: @object.owner_uuid}.to_json
+                    ),
+                    'data-selection-param-name' => 'selection[]',
+                    'data-selection-action' => 'combine-collections',
+                    'data-toggle' => 'dropdown'
+              %></li>
+          </ul>
+        </div>
+        <div class="btn-group btn-group-sm">
+          <button id="select-all" type="button" class="btn btn-default" onClick="select_all_files()">Select all</button>
+          <button id="unselect-all" type="button" class="btn btn-default" onClick="unselect_all_files()">Unselect all</button>
+        </div>
       </div>
-      <div class="btn-group btn-group-sm">
-       <button id="select-all" type="button" class="btn btn-default" onClick="select_all_files()">Select all</button>
-       <button id="unselect-all" type="button" class="btn btn-default" onClick="unselect_all_files()">Unselect all</button>
+      <div class="pull-right">
+        <input class="form-control filterable-control" data-filterable-target="ul#collection_files" id="file_regex" name="file_regex" placeholder="filename regex" type="text"/>
       </div>
     </div>
-    <div class="pull-right">
-      <input class="form-control filterable-control" data-filterable-target="ul#collection_files" id="file_regex" name="file_regex" placeholder="filename regex" type="text"/>
-    </div>
-  </div>
-  <p/>
+    <p/>
   <% end %>
 
-<% file_tree = @object.andand.files_tree %>
-<% if file_tree.nil? or file_tree.empty? %>
-  <p>This collection is empty.</p>
-<% else %>
-  <ul id="collection_files" class="collection_files <%=preview_selectable_container%>">
-  <% dirstack = [file_tree.first.first] %>
-  <% file_tree.take(10000).each_with_index do |(dirname, filename, size), index| %>
-    <% file_path = CollectionsHelper::file_path([dirname, filename]) %>
-    <% while dirstack.any? and (dirstack.last != dirname) %>
-      <% dirstack.pop %></ul></li>
-    <% end %>
-    <li>
-    <% if size.nil?  # This is a subdirectory. %>
-      <% dirstack.push(File.join(dirname, filename)) %>
-      <div class="collection_files_row">
-       <div class="collection_files_name"><i class="fa fa-fw fa-folder-open"></i> <%= filename %></div>
-      </div>
-      <ul class="collection_files">
-    <% else %>
-      <% link_params = {controller: 'collections', action: 'show_file',
-                        uuid: @object.portable_data_hash, file: file_path, size: size} %>
-       <div class="collection_files_row filterable <%=preview_selectable%>" href="<%=@object.uuid%>/<%=file_path%>">
-        <div class="collection_files_buttons pull-right">
-          <%= raw(human_readable_bytes_html(size)) %>
-          <% disable_search = (Rails.configuration.filename_suffixes_with_view_icon.include? file_path.split('.')[-1]) ? false : true %>
-          <%= link_to(raw('<i class="fa fa-search"></i>'),
-                      link_params.merge(disposition: 'inline'),
-                      {title: "View #{file_path}", class: "btn btn-info btn-sm", disabled: disable_search}) %>
-          <%= link_to(raw('<i class="fa fa-download"></i>'),
-                      link_params.merge(disposition: 'attachment'),
-                      {title: "Download #{file_path}", class: "btn btn-info btn-sm"}) %>
-        </div>
-
-        <div class="collection_files_name">
-          <% if !defined? no_checkboxes or !no_checkboxes %>
-          <%= check_box_tag 'uuids[]', "#{@object.uuid}/#{file_path}", false, {
-                :class => "persistent-selection",
-                :friendly_type => "File",
-                :friendly_name => "#{@object.uuid}/#{file_path}",
-                :href => url_for(controller: 'collections', action: 'show_file',
-                                 uuid: @object.portable_data_hash, file: file_path),
-                :title => "Include #{file_path} in your selections",
-              } %>
-          <span>&nbsp;</span>
-          <% end %>
-      <% if CollectionsHelper::is_image(filename) %>
-          <i class="fa fa-fw fa-bar-chart-o"></i> <%= filename %></div>
-        <div class="collection_files_inline">
-          <%= link_to(image_tag("#{url_for @object}/#{file_path}"),
-                      link_params.merge(disposition: 'inline'),
-                      {title: file_path}) %>
+  <% file_tree = @object.andand.files_tree %>
+  <% if file_tree.nil? or file_tree.empty? %>
+    <p>This collection is empty.</p>
+  <% else %>
+    <ul id="collection_files" class="collection_files <%=preview_selectable_container%>">
+    <% dirstack = [file_tree.first.first] %>
+    <% file_tree.take(10000).each_with_index do |(dirname, filename, size), index| %>
+      <% file_path = CollectionsHelper::file_path([dirname, filename]) %>
+      <% while dirstack.any? and (dirstack.last != dirname) %>
+        <% dirstack.pop %></ul></li>
+      <% end %>
+      <li>
+      <% if size.nil?  # This is a subdirectory. %>
+        <% dirstack.push(File.join(dirname, filename)) %>
+        <div class="collection_files_row">
+         <div class="collection_files_name"><i class="fa fa-fw fa-folder-open"></i> <%= filename %></div>
         </div>
-       </div>
+        <ul class="collection_files">
       <% else %>
-          <i class="fa fa-fw fa-file" href="<%=@object.uuid%>/<%=file_path%>" ></i> <%= filename %></div>
-       </div>
-      <% end %>
-      </li>
-    <% end  # if file or directory %>
-  <% end  # file_tree.each %>
-  <%= raw(dirstack.map { |_| "</ul>" }.join("</li>")) %>
-<% end  # if file_tree %>
+        <% link_params = {controller: 'collections', action: 'show_file',
+                          uuid: @object.portable_data_hash, file: file_path, size: size} %>
+         <div class="collection_files_row filterable <%=preview_selectable%>" href="<%=@object.uuid%>/<%=file_path%>">
+          <div class="collection_files_buttons pull-right">
+            <%= raw(human_readable_bytes_html(size)) %>
+            <% disable_search = (Rails.configuration.filename_suffixes_with_view_icon.include? file_path.split('.')[-1]) ? false : true %>
+            <%= link_to(raw('<i class="fa fa-search"></i>'),
+                        link_params.merge(disposition: 'inline'),
+                        {title: "View #{file_path}", class: "btn btn-info btn-sm", disabled: disable_search}) %>
+            <%= link_to(raw('<i class="fa fa-download"></i>'),
+                        link_params.merge(disposition: 'attachment'),
+                        {title: "Download #{file_path}", class: "btn btn-info btn-sm"}) %>
+          </div>
+
+          <div class="collection_files_name">
+            <% if !defined? no_checkboxes or !no_checkboxes %>
+            <%= check_box_tag 'uuids[]', "#{@object.uuid}/#{file_path}", false, {
+                  :class => "persistent-selection",
+                  :friendly_type => "File",
+                  :friendly_name => "#{@object.uuid}/#{file_path}",
+                  :href => url_for(controller: 'collections', action: 'show_file',
+                                   uuid: @object.portable_data_hash, file: file_path),
+                  :title => "Include #{file_path} in your selections",
+                } %>
+            <span>&nbsp;</span>
+            <% end %>
+        <% if CollectionsHelper::is_image(filename) %>
+            <i class="fa fa-fw fa-bar-chart-o"></i> <%= filename %></div>
+          <div class="collection_files_inline">
+            <%= link_to(image_tag("#{url_for @object}/#{file_path}"),
+                        link_params.merge(disposition: 'inline'),
+                        {title: file_path}) %>
+          </div>
+         </div>
+        <% else %>
+            <i class="fa fa-fw fa-file" href="<%=@object.uuid%>/<%=file_path%>" ></i> <%= filename %></div>
+         </div>
+        <% end %>
+        </li>
+      <% end  # if file or directory %>
+    <% end  # file_tree.each %>
+    <%= raw(dirstack.map { |_| "</ul>" }.join("</li>")) %>
+  <% end  # if file_tree %>
+</div>
 
 <% content_for :footer_html do %>
 <div id="collection-sharing-modal-window" class="modal fade" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true"></div>
 <% end %>
-
-</div>
index c3e0b7cb2eb1aa64fc210dc90daca92b1443d107..75a70868caec34917db04f316885a4ff6f4694e2 100644 (file)
@@ -2,25 +2,25 @@
   <div class="col-md-6">
     <div class="panel panel-info">
       <div class="panel-heading">
-       <h3 class="panel-title">
+        <h3 class="panel-title">
           <% if @name_link.nil? and @object.uuid.match /[0-9a-f]{32}/ %>
             Content hash <%= @object.portable_data_hash %>
           <% else %>
-           <%= if @object.respond_to? :name
+            <%= if @object.respond_to? :name
                   render_editable_attribute @object, :name
                 else
                   @name_link.andand.name || @object.uuid
                 end %>
             <% end %>
-       </h3>
+        </h3>
       </div>
       <div class="panel-body">
         <div class="arv-description-as-subtitle">
           <%= render_editable_attribute @object, 'description', nil, { 'data-emptytext' => "(No description provided)", 'data-toggle' => 'manual' } %>
         </div>
         <img src="/favicon.ico" class="pull-right" alt="" style="opacity: 0.3"/>
-       <p><i>Content hash:</i><br />
-         <span class="arvados-uuid"><%= link_to @object.portable_data_hash, collection_path(@object.portable_data_hash) %></span>
+        <p><i>Content hash:</i><br />
+          <span class="arvados-uuid"><%= link_to @object.portable_data_hash, collection_path(@object.portable_data_hash) %></span>
         </p>
         <%= render partial: "show_source_summary" %>
       </div>
   <div class="col-md-3">
     <div class="panel panel-default">
       <div class="panel-heading">
-       <h3 class="panel-title">
-         Activity
-       </h3>
+        <h3 class="panel-title">
+          Activity
+        </h3>
       </div>
       <div class="panel-body smaller-text">
         <!--
-       <input type="text" class="form-control" placeholder="Search"/>
+        <input type="text" class="form-control" placeholder="Search"/>
         -->
-       <div style="height:0.5em;"></div>
+        <div style="height:0.5em;"></div>
         <% name_or_object = @name_link.andand.uuid ? @name_link : @object %>
         <% if name_or_object.created_at and not @logs.andand.any? %>
           <p>
       </div>
     </div>
   </div>
+  <% if current_user %>
   <div class="col-md-3">
     <div class="panel panel-default">
       <div class="panel-heading">
-       <h3 class="panel-title">
-         Sharing and permissions
-       </h3>
+        <h3 class="panel-title">
+          Sharing and permissions
+        </h3>
       </div>
       <div class="panel-body">
         <!--
-       <input type="text" class="form-control" placeholder="Search"/>
+        <input type="text" class="form-control" placeholder="Search"/>
         -->
 
         <div id="sharing-button">
           <%= render partial: 'sharing_button' %>
         </div>
 
-       <div style="height:0.5em;"></div>
+        <div style="height:0.5em;"></div>
         <% if @projects.andand.any? %>
           <p>Included in projects:<br />
           <%= render_arvados_object_list_start(@projects, 'Show all projects',
       </div>
     </div>
   </div>
+  <% else %>
+  <div class="col-md-3">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        <h3 class="panel-title">
+          Welcome to Arvados
+        </h3>
+      </div>
+      <div class="panel-body">
+        You are accessing public data.
+      </div>
+    </div>
+  </div>
+  <% end %>
 </div>
 
 <%= render file: 'application/show.html.erb', locals: local_assigns %>
diff --git a/apps/workbench/app/views/jobs/_create_new_object_button.html.erb b/apps/workbench/app/views/jobs/_create_new_object_button.html.erb
new file mode 100644 (file)
index 0000000..c77451e
--- /dev/null
@@ -0,0 +1 @@
+<%# There is no UI for context-free "create a new job" %>
diff --git a/apps/workbench/app/views/jobs/_rerun_job_with_options_popup.html.erb b/apps/workbench/app/views/jobs/_rerun_job_with_options_popup.html.erb
new file mode 100644 (file)
index 0000000..44254e1
--- /dev/null
@@ -0,0 +1,48 @@
+<% @job = @object %>
+<div id="jobRerunModal" class="modal" role="dialog" aria-labelledby="jobRerunTitle" aria-hidden="true">
+  <div class="modal-dialog">
+    <div class="modal-content">
+      <%= form_for(@job, method: :post, url: {controller: 'jobs', action: 'create'}) do |f| %>
+        <% [:script, :repository, :supplied_script_version, :nondeterministic].each do |field_sym| %>
+          <%= f.hidden_field(field_sym) %>
+        <% end %>
+        <% [:script_parameters, :runtime_constraints].each do |field_sym| %>
+          <%= f.hidden_field(field_sym, value: @job.send(field_sym).to_json) %>
+        <% end %>
+        <div class="modal-header">
+          <button type="button" class="close" data-dismiss="modal" aria-hidden="true">&times;</button>
+          <div id="jobRerunTitle">
+            <div class="col-sm-6"> <h4 class="modal-title">Re-run job</h4> </div>
+          </div>
+          <br/>
+        </div>
+
+        <div class="modal-body">
+          <p> If this job is part of a pipeline, that pipeline would not know about the new job you are running.
+              If you want to update your pipeline results, please re-run the pipeline instead.
+          <p> The inputs and parameters will be the same as the current job.
+              Thus, the new job will not reflect any changes made to the pipeline that initiated this job. </p>
+          <div style="padding-left: 1em">
+            <% if (@job.supplied_script_version.blank? or
+                   (@job.supplied_script_version == @job.script_version)) %>
+              <%= f.hidden_field(:script_version) %>
+            <% else %>
+              <%= f.radio_button("script_version", @job.script_version) %>
+              <%= f.label(:script_version, "Use same script version as this run", value: @job.script_version) %>
+              <p style="padding-left: 1em"> Use the same script version as the current job.</p>
+
+              <%= f.radio_button(:script_version, @job.supplied_script_version) %>
+              <%= f.label(:script_version, "Use latest script version", value: @job.supplied_script_version) %>
+              <p style="padding-left: 1em"> Use the current commit indicated by '<%= @job.supplied_script_version %>' in the '<%= @job.repository %>' repository.</p>
+            <% end %>
+          </div>
+        </div>
+
+        <div class="modal-footer">
+          <button class="btn btn-default" data-dismiss="modal" aria-hidden="true">Cancel</button>
+          <%= f.submit(value: "Run now", class: "btn btn-primary") %>
+        </div>
+      <% end %>
+    </div>
+  </div>
+</div>
index 644da77425fe90bdc3c634c78d97e51099857cf1..b6c39df9a50c1e85ccc644a9f7de4e2a3d48d7f3 100644 (file)
@@ -1,29 +1,5 @@
-<% if @object.state != "Running" %>
-    <%= form_tag '/jobs', style: "display:inline; padding-left: 1em" do |f| %>
-      <% [:script, :script_version, :repository, :supplied_script_version, :nondeterministic].each do |d| %>
-        <%= hidden_field :job, d, :value => @object[d] %>
-      <% end %>
-      <% [:script_parameters, :runtime_constraints].each do |d| %>
-        <%= hidden_field :job, d, :value => JSON.dump(@object[d]) %>
-      <% end %>
-      <%= button_tag ({class: 'btn btn-sm btn-primary', id: "re-run-same-job-button",
-                       title: 'Re-run job using the same script version as this run'}) do %>
-        <i class="fa fa-fw fa-gear"></i> Re-run same version
-      <% end %>
-    <% end %>
-  <% if @object.respond_to? :supplied_script_version and !@object.supplied_script_version.nil? and !@object.supplied_script_version.empty? and @object.script_version != @object.supplied_script_version%>
-      <%= form_tag '/jobs', style: "display:inline" do |f| %>
-      <% [:script, :repository, :supplied_script_version, :nondeterministic].each do |d| %>
-        <%= hidden_field :job, d, :value => @object[d] %>
-      <% end %>
-      <%= hidden_field :job, :script_version, :value => @object[:supplied_script_version] %>
-      <% [:script_parameters, :runtime_constraints].each do |d| %>
-        <%= hidden_field :job, d, :value => JSON.dump(@object[d]) %>
-      <% end %>
-      <%= button_tag ({class: 'btn btn-sm btn-primary', id: "re-run-latest-job-button",
-                       title: 'Re-run job using the latest script version'}) do%>
-        <i class="fa fa-fw fa-gear"></i> Re-run latest version
-      <% end %>
-    <% end %>
-  <% end %>
+<% if @object.state != "Running" and Job.creatable? %>
+  <button type="button" class="btn btn-sm btn-primary" data-toggle="modal" data-target="#jobRerunModal">
+    <i class="fa fa-fw fa-gear"></i> Re-run job...
+  </button>
 <% end %>
index 315c8c1831242faedef0c0a1a10b1991f88e06c7..18021349e2d5e052fe1870bdb35d834f39c6e897 100644 (file)
@@ -2,6 +2,7 @@
 
 <div id="log_graph_div"
      class="arv-log-event-listener"
+     style="display:none"
      data-object-uuid="<%= @object.uuid %>"></div>
 
 <div id="event_log_div"
index 566014e4f328e256e81a8ba33d31794b41a6a29f..0c83e9445e192d8decd8e6f46dc64991418bb5c8 100644 (file)
@@ -3,9 +3,10 @@
        data-pane-content-url="<%= url_for(params.merge(tab_pane: "job_buttons")) %>"
        data-object-uuid="<%= @object.uuid %>"
        style="display: inline">
-    <%= render partial: 'show_job_buttons', locals: {object: @object}%>
+  <%= render partial: 'show_job_buttons', locals: {object: @object}%>
   </div>
 <% end %>
 
 <%= render partial: 'title_and_buttons' %>
 <%= render partial: 'content', layout: 'content_layout', locals: {pane_list: controller.show_pane_list }%>
+<%= render partial: 'rerun_job_with_options_popup' %>
index fb28efe1d40cb8cdf14b48f926b318c7bc016674..67eba944508df47839bb3fafb0ac4524e7f2e082 100644 (file)
       </nav>
     <% end %>
 
+    <%= render partial: 'browser_unsupported' %><%# requires JS support below %>
+
     <div id="page-wrapper">
       <%= yield %>
     </div>
 
 <div class="modal-container"></div>
 <div id="report-issue-modal-window"></div>
+<script src="/browser_unsupported.js"></script>
index dc9f00de162de44d872a57c109789bf897bcb967..4359860c6e5529c487dd8b4a672e9058e31e1cef 100644 (file)
@@ -75,7 +75,7 @@
             </div>
           <% end %>
 
-          <% if current_job[:state].in? ["Queued", "Running"] %>
+          <% if current_job[:state].in? ["Queued", "Running"] and @object.editable? %>
             <%# column offset 11 %>
             <div class="col-md-1 pipeline-instance-spacing">
               <%= form_tag "/jobs/#{current_job[:uuid]}/cancel", remote: true, style: "display:inline; padding-left: 1em" do |f| %>
index 65d458bb787ea5dad8d5a8b99dc6bd17aec30893..e6b7ef20347cf2a5c25af1b0304da4554a981b7a 100644 (file)
   <p>This pipeline does not need any further inputs specified. You can start it by clicking the "Run" button whenever you're ready. (It's not too late to change existing settings, though.)</p>
 <% else %>
   <p><i>Provide <%= n_inputs > 1 ? 'values' : 'a value' %> for the following <%= n_inputs > 1 ? 'parameters' : 'parameter' %>, then click the "Run" button to start the pipeline.</i></p>
-  <%= content_for :pi_input_form %>
-
-  <%= link_to(url_for('pipeline_instance[state]' => 'RunningOnServer'),
-      class: 'btn btn-primary run-pipeline-button',
-      method: :patch
-      ) do %>
-    Run <i class="fa fa-fw fa-play"></i>
+  <% if @object.editable? %>
+    <%= content_for :pi_input_form %>
+      <%= link_to(url_for('pipeline_instance[state]' => 'RunningOnServer'),
+          class: 'btn btn-primary run-pipeline-button',
+          method: :patch
+          ) do %>
+        Run <i class="fa fa-fw fa-play"></i>
+    <% end %>
   <% end %>
 
 <% end %>
index 38a7c913d421965de5da099ec9fe5c2b62a64165..4ed27346b4e68b75106482adddd1104073fc5003 100644 (file)
@@ -1,3 +1,4 @@
+<% if current_user.andand.is_active %>
   <% if @object.state.in? ['Complete', 'Failed', 'Cancelled', 'Paused'] %>
 
   <%= link_to(copy_pipeline_instance_path('id' => @object.uuid, 'script' => "use_latest", "components" => "use_latest", "pipeline_instance[state]" => "RunningOnServer"),
@@ -43,3 +44,4 @@
       <% end %>
     <% end %>
   <% end %>
+<% end %>
index 860e8091b26dd2780974748c145d265ef174cc16..e23e49c05ba3f883440e5c73112ce5204fcf1a1e 100644 (file)
@@ -62,9 +62,9 @@
       </div>
 
       <div class="modal-footer">
-        <button type="submit" class="btn btn-primary" name="pipeline_instance[state]" value="New">Copy and edit inputs</button>
-        <button type="submit" class="btn btn-primary" name="pipeline_instance[state]" value="RunningOnServer">Run now</button>
         <button class="btn btn-default" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">Cancel</button>
+        <button type="submit" class="btn btn-primary" name="pipeline_instance[state]" value="RunningOnServer">Run now</button>
+        <button type="submit" class="btn btn-primary" name="pipeline_instance[state]" value="New">Copy and edit inputs</button>
       </div>
 
     </div>
index 0faa48fed3091ce825e993989b25d62e2a63e619..b4264e5ef269d069508ba6fc415ecda5fa1ebd75 100644 (file)
@@ -1,19 +1,25 @@
-<% content_for :tab_line_buttons do %>
-  <%= link_to(choose_projects_path(id: "run-pipeline-button",
-                                     title: 'Choose project',
-                                     editable: true,
-                                     action_name: 'Choose',
-                                     action_href: pipeline_instances_path,
-                                     action_method: 'post',
-                                     action_data: {selection_param: 'pipeline_instance[owner_uuid]',
-                                                   'pipeline_instance[pipeline_template_uuid]' => @object.uuid,
-                                                   'pipeline_instance[description]' => "Created at #{Time.now.localtime}" + (@object.name.andand.size.andand>0 ? " using the pipeline template *#{@object.name}*" : ""),
-                                                   'success' => 'redirect-to-created-object'
-                                                  }.to_json),
-                { class: "btn btn-primary btn-sm", remote: true, title: 'Run this pipeline' }
-               ) do %>
-                   <i class="fa fa-gear"></i> Run this pipeline
-                 <% end %>
+<% if @object.editable? %>
+  <% content_for :tab_line_buttons do %>
+    <%= link_to(choose_projects_path(
+        id: "run-pipeline-button",
+        title: 'Choose project',
+        editable: true,
+        action_name: 'Choose',
+        action_href: pipeline_instances_path,
+        action_method: 'post',
+        action_data: {
+          'selection_param' => 'pipeline_instance[owner_uuid]',
+          'pipeline_instance[pipeline_template_uuid]' => @object.uuid,
+          'pipeline_instance[description]' => "Created at #{Time.now.localtime}" + (@object.name.andand.size.andand>0 ? " using the pipeline template *#{@object.name}*" : ""),
+          'success' => 'redirect-to-created-object',
+        }.to_json), {
+          class: "btn btn-primary btn-sm",
+          remote: true,
+          title: 'Run this pipeline'
+        }) do %>
+      <i class="fa fa-gear"></i> Run this pipeline
+    <% end %>
+  <% end %>
 <% end %>
 
 <%= render file: 'application/show.html.erb', locals: local_assigns %>
index 1fbe5057be0a9cf528553eda276297770d437010..437f05a4998f996c203cfca10a5b5cb4caeb3939 100644 (file)
@@ -3,8 +3,9 @@
     <div class="col-md-6">
       <div class="panel panel-default" style="min-height: 10.5em">
         <div class="panel-heading"><span class="panel-title">Active pipelines</span>
+          <% if current_user.andand.is_active %>
           <span class="pull-right">
-    <%= link_to(
+          <%= link_to(
           choose_pipeline_templates_path(
             title: 'Choose a pipeline to run:',
             action_name: 'Next: choose inputs <i class="fa fa-fw fa-arrow-circle-right"></i>',
             action_method: 'post',
             action_data: {'selection_param' => 'pipeline_instance[pipeline_template_uuid]', 'pipeline_instance[owner_uuid]' => current_user.uuid, 'success' => 'redirect-to-created-object'}.to_json),
           { class: "btn btn-primary btn-xs", remote: true }) do %>
-      <i class="fa fa-fw fa-gear"></i> Run a pipeline...
-    <% end %>
-    </span>
+            <i class="fa fa-fw fa-gear"></i> Run a pipeline...
+          <% end %>
+          </span>
+          <% end %>
         </div>
 
         <% _running_pipelines = running_pipelines %>
index bf8e4667cf644dc8576432a9ffca107117cd3938..0b308db6ff072e1f22c833831a586707cfbaa652 100644 (file)
@@ -5,65 +5,69 @@
       <div class="btn-group btn-group-sm">
         <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">Selection <span class="caret"></span></button>
         <ul class="dropdown-menu" role="menu">
-          <li><%= link_to "Create new collection with selected collections", '#',
-                  'data-href' => combine_selected_path(
-                    action_data: {current_project_uuid: @object.uuid}.to_json
-                  ),
-                  'id' => 'combine_selections_button',
-                  method: :post,
-                  'data-selection-param-name' => 'selection[]',
-                  'data-selection-action' => 'combine-project-contents',
-                  'data-toggle' => 'dropdown'
-            %></li>
+          <% if Collection.creatable? %>
+            <li><%= link_to "Create new collection with selected collections", '#',
+                    'data-href' => combine_selected_path(
+                      action_data: {current_project_uuid: @object.uuid}.to_json
+                    ),
+                    'id' => 'combine_selections_button',
+                    method: :post,
+                    'data-selection-param-name' => 'selection[]',
+                    'data-selection-action' => 'combine-project-contents',
+                    'data-toggle' => 'dropdown'
+              %></li>
+          <% end %>
           <li><%= link_to "Compare selected", '#',
                   'data-href' => compare_pipeline_instances_path,
                   'data-selection-param-name' => 'uuids[]',
                   'data-selection-action' => 'compare',
                   'data-toggle' => 'dropdown'
             %></li>
-          <li><%= link_to "Copy selected...", '#',
-                  'data-href' => choose_projects_path(
-                    title: 'Copy selected items to...',
-                    editable: true,
-                    action_name: 'Copy',
-                    action_href: actions_path,
-                    action_method: 'post',
-                    action_data_from_params: ['selection'],
-                    action_data: {
-                      copy_selections_into_project: true,
-                      selection_param: 'uuid',
-                      success: 'page-refresh'}.to_json),
-                  'data-remote' => true,
-                  'data-selection-param-name' => 'selection[]',
-                  'data-selection-action' => 'copy',
-                  'data-toggle' => 'dropdown'
-            %></li>
+          <% if Collection.creatable? %>
+            <li><%= link_to "Copy selected...", '#',
+                    'data-href' => choose_projects_path(
+                      title: 'Copy selected items to...',
+                      editable: true,
+                      action_name: 'Copy',
+                      action_href: actions_path,
+                      action_method: 'post',
+                      action_data_from_params: ['selection'],
+                      action_data: {
+                        copy_selections_into_project: true,
+                        selection_param: 'uuid',
+                        success: 'page-refresh'}.to_json),
+                    'data-remote' => true,
+                    'data-selection-param-name' => 'selection[]',
+                    'data-selection-action' => 'copy',
+                    'data-toggle' => 'dropdown'
+              %></li>
+          <% end %>
           <% if @object.editable? %>
-          <li><%= link_to "Move selected...", '#',
-                  'data-href' => choose_projects_path(
-                    title: 'Move selected items to...',
-                    editable: true,
-                    action_name: 'Move',
-                    action_href: actions_path,
-                    action_method: 'post',
-                    action_data_from_params: ['selection'],
-                    action_data: {
-                      move_selections_into_project: true,
-                      selection_param: 'uuid',
-                      success: 'page-refresh'}.to_json),
-                  'data-remote' => true,
-                  'data-selection-param-name' => 'selection[]',
-                  'data-selection-action' => 'move',
-                  'data-toggle' => 'dropdown'
-            %></li>
-          <li><%= link_to "Remove selected", '#',
-                  method: :delete,
-                  'data-href' => url_for(action: :remove_items),
-                  'data-selection-param-name' => 'item_uuids[]',
-                  'data-selection-action' => 'remove',
-                  'data-remote' => true,
-                  'data-toggle' => 'dropdown'
-            %></li>
+            <li><%= link_to "Move selected...", '#',
+                    'data-href' => choose_projects_path(
+                      title: 'Move selected items to...',
+                      editable: true,
+                      action_name: 'Move',
+                      action_href: actions_path,
+                      action_method: 'post',
+                      action_data_from_params: ['selection'],
+                      action_data: {
+                        move_selections_into_project: true,
+                        selection_param: 'uuid',
+                        success: 'page-refresh'}.to_json),
+                    'data-remote' => true,
+                    'data-selection-param-name' => 'selection[]',
+                    'data-selection-action' => 'move',
+                    'data-toggle' => 'dropdown'
+              %></li>
+            <li><%= link_to "Remove selected", '#',
+                    method: :delete,
+                    'data-href' => url_for(action: :remove_items),
+                    'data-selection-param-name' => 'item_uuids[]',
+                    'data-selection-action' => 'remove',
+                    'data-remote' => true,
+                    'data-toggle' => 'dropdown'
+              %></li>
           <% end %>
         </ul>
       </div>
index 0cab11797c72c978b06c5e742aa9c81abb48ba95..5dd1017c8d4724844bdd802a3328dd79cd9f6494 100644 (file)
@@ -1,4 +1,4 @@
-<% if @object.uuid != current_user.uuid # Not the "Home" project %>
+<% if @object.uuid != current_user.andand.uuid # Not the "Home" project %>
 <% content_for :content_top do %>
   <%= render partial: 'name_and_description' %>
 <% end %>
diff --git a/apps/workbench/app/views/request_shell_access_reporter/send_request.text.erb b/apps/workbench/app/views/request_shell_access_reporter/send_request.text.erb
new file mode 100644 (file)
index 0000000..58e80f7
--- /dev/null
@@ -0,0 +1,7 @@
+Shell account request from <%=@user.full_name%> (<%=@user.email%>, <%=@user.uuid%>)
+
+Details of the request:
+Full name: <%=@user.full_name%>
+Email address: <%=@user.email%>
+User's UUID: <%=@user.uuid%>
+User setup URL: <%= link_to('setup user', @params['request_url'].gsub('/request_shell_access', '#Admin')) %>
index 98f54ef252ed97b61ec5b9b66cf7e2449e2e4e14..25f4d75be19b0df6a450942bfd931f59aadc25c6 100644 (file)
@@ -29,8 +29,8 @@
       </div>
 
       <div class="modal-footer">
-        <button type="submit" class="btn btn-primary" autofocus>Submit</button>
         <button class="btn btn-default" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">Cancel</button>
+        <button type="submit" class="btn btn-primary" autofocus>Submit</button>
       </div>
 
     <% end #form %>
diff --git a/apps/workbench/app/views/users/_create_new_object_button.html.erb b/apps/workbench/app/views/users/_create_new_object_button.html.erb
new file mode 100644 (file)
index 0000000..5458b44
--- /dev/null
@@ -0,0 +1,6 @@
+<%= link_to setup_user_popup_path,
+  {class: 'btn btn-sm btn-primary', :remote => true, 'data-toggle' =>  "modal",
+    'data-target' => '#user-setup-modal-window', return_to: request.url} do %>
+  <i class="fa fa-fw fa-plus"></i> Add a new user
+<% end %>
+<div id="user-setup-modal-window" class="modal fade" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true"></div>
index d20498f8c02bc1a932286d9e2f7bb73e690c8fec..83ec30a8146f9311a03f817568ccd46f14d05075 100644 (file)
@@ -18,6 +18,7 @@
           <th> Name </th>
           <th> Read/Write </th>
           <th> URL </th>
+          <th> </th>
         </tr>
       </thead>
       <tbody>
             <td style="word-break:break-all;">
               <code><%= writable ? repo[:push_url] : repo[:fetch_url] %></code>
             </td>
+            <td>
+              <% if writable == 'can_manage' %>
+                <%= link_to "Share", "/repositories/#{repo[:uuid]}#Sharing" %>
+              <% end %>
+            </td>
           </tr>
         <% end %>
       </tbody>
index c6190ec5e142198aef4a4227d42027daee947841..43f2b7787c6f8e83bbf66bbd029bbd4e55fa5412 100644 (file)
@@ -2,12 +2,50 @@
   <p>
     For more information see <%= link_to raw('Arvados Docs &rarr; User Guide &rarr; SSH access'),
   "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html",
-  target: "_blank"%>.  A sample <i>~/.ssh/config</i> entry is provided below.
+  target: "_blank"%>.
+    <% if @my_virtual_machines.any? or true %>
+      A sample <code>~/.ssh/config</code> entry is provided below.
+    <% end %>
   </p>
 
   <% if !@my_virtual_machines.any? %>
-    You do not seem to have access to any virtual machines. If you would like to request access, please contact your system admin.
+    <div id="no_shell_access" class="no_shell_access">
+      <div class="alert alert-warning clearfix">
+        <p>
+          You do not have access to any virtual machines.  Some
+          Arvados features require using the command line.  You may
+          request access to a hosted virtual machine with the command
+          line shell.
+        </p>
+        <div class="pull-right">
+          <%= link_to({
+              action: 'request_shell_access',
+              controller: 'users',
+              id: current_user.uuid
+              },
+              method: :post,
+              remote: true,
+              class: 'btn btn-xs btn-primary',
+              data: {
+              disable_with: "Sending request...",
+              on_error_hide: '.no_shell_access .alert-success',
+              on_error_show: '.no_shell_access .alert-danger',
+              on_error_write: '.no_shell_access .alert-danger .error-text',
+              on_success_hide: '.no_shell_access .alert-danger',
+              }) do %>
+            Send request for shell access
+          <% end %>
+        </div>
+      </div>
+      <div class="alert alert-success" style="display:none">
+        <p class="contain-align-left"><%# (see javascripts/request_shell_access.js) %></p>
+      </div>
+      <div class="alert alert-danger" style="display:none">
+        <p class="contain-align-left">Sorry, something went wrong. Please try again. (<span class="error-text"></span>)</p>
+      </div>
+    </div>
   <% else %>
+    <script> localStorage.removeItem('request_shell_access'); </script>
     <table class="table virtual-machines-table">
       <colgroup>
         <col style="width: 25%" />
index e9429cf451d43b8c48e601be3d7cb97824fa8adf..a1a664ce838115b875c460cd988d5260490430f1 100644 (file)
@@ -64,8 +64,8 @@
     </div>
 
     <div class="modal-footer">
-      <button type="submit" id="register" class="btn btn-primary" autofocus>Submit</button>
       <button class="btn btn-default" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">Cancel</button>
+      <button type="submit" id="register" class="btn btn-primary" autofocus>Submit</button>
     </div>
 
     <% end #form %>
diff --git a/apps/workbench/app/views/users/request_shell_access.js b/apps/workbench/app/views/users/request_shell_access.js
new file mode 100644 (file)
index 0000000..b97e14d
--- /dev/null
@@ -0,0 +1,10 @@
+var timestamp = new Date();
+localStorage.setItem("request_shell_access",
+                     "A request for shell access was sent on " +
+                     timestamp.toLocaleDateString() +
+                     " at " +
+                     timestamp.toLocaleTimeString());
+// The storage event gets triggered automatically in _other_ windows
+// when we hit localStorage, but we also need to fire it manually in
+// _this_ window.
+$(document).trigger('storage');
index a810a8df6d7893f8ed82a8148fe452f48d233c09..f23daa7690fe7e615c6be18f9a20d5703dbbc426 100644 (file)
@@ -3,9 +3,9 @@
 <div class="row">
   <div class="col-sm-8 col-sm-push-4" style="margin-top: 1em">
     <div class="well clearfix">
-      <%= image_tag "dax.png", style: "width: 147px; height: 197px; max-width: 25%; margin-right: 2em", class: 'pull-left' %>
+      <%= image_tag "dax.png", style: "width: 112px; height: 150px; margin-right: 2em", class: 'pull-left' %>
 
-      <h3>Please log in.</h3>
+      <h3 style="margin-top:0">Please log in.</h3>
 
       <p>
 
index dd306414b4cf80ce03d2f187c70b9ae2ace6613c..8be89b854575cb788c6a3c89dfb24434b3a0281f 100644 (file)
@@ -185,8 +185,16 @@ common:
   issue_reporter_email_to: arvados@example.com
   support_email_address: arvados@example.com
 
+  # generic issue email from
+  email_from: arvados@example.com
+
   # filename suffixes for which view icon would be shown in collection show page
   filename_suffixes_with_view_icon: [txt, gif, jpeg, jpg, png, html, htm, pdf]
 
   # the maximum number of bytes to load in the log viewer
   log_viewer_max_bytes: 1000000
+
+  # Set anonymous_user_token to enable anonymous user access. You can get
+  # the token by running "bundle exec ./script/get_anonymous_user_token.rb"
+  # in the directory where your API server is running.
+  anonymous_user_token: false
index 6b29d0553d4f0903973f40f951fde82d99ab3144..7ed02e7dc9ba11aa8beb07cae5f9c934ce77fd9f 100644 (file)
@@ -39,6 +39,7 @@ ArvadosWorkbench::Application.routes.draw do
     post 'unsetup', :on => :member
     get 'setup_popup', :on => :member
     get 'profile', :on => :member
+    post 'request_shell_access', :on => :member
   end
   get '/manage_account' => 'users#manage_account'
   get "/add_ssh_key_popup" => 'users#add_ssh_key_popup', :as => :add_ssh_key_popup
diff --git a/apps/workbench/public/browser_unsupported.js b/apps/workbench/public/browser_unsupported.js
new file mode 100644 (file)
index 0000000..0608000
--- /dev/null
@@ -0,0 +1,15 @@
+(function() {
+    var ok = false;
+    try {
+        if (window.Blob &&
+            window.File &&
+            window.FileReader &&
+            window.localStorage &&
+            window.WebSocket) {
+            ok = true;
+        }
+    } catch(err) {}
+    if (!ok) {
+        document.getElementById('browser-unsupported').className='';
+    }
+})();
index d0d9c5dfd142357a7c024a57f0e0bce10cbd4efb..843cf88a3d8185d4f96ddd189c61ef48adf28ef0 100644 (file)
@@ -325,4 +325,41 @@ class ApplicationControllerTest < ActionController::TestCase
       Rails.configuration.arvados_v1_base = orig_api_server
     end
   end
+
+  [
+    [CollectionsController.new, api_fixture('collections')['user_agreement_in_anonymously_accessible_project']],
+    [CollectionsController.new, api_fixture('collections')['user_agreement_in_anonymously_accessible_project'], false],
+    [JobsController.new, api_fixture('jobs')['running_job_in_publicly_accessible_project']],
+    [JobsController.new, api_fixture('jobs')['running_job_in_publicly_accessible_project'], false],
+    [PipelineInstancesController.new, api_fixture('pipeline_instances')['pipeline_in_publicly_accessible_project']],
+    [PipelineInstancesController.new, api_fixture('pipeline_instances')['pipeline_in_publicly_accessible_project'], false],
+    [PipelineTemplatesController.new, api_fixture('pipeline_templates')['pipeline_template_in_publicly_accessible_project']],
+    [PipelineTemplatesController.new, api_fixture('pipeline_templates')['pipeline_template_in_publicly_accessible_project'], false],
+    [ProjectsController.new, api_fixture('groups')['anonymously_accessible_project']],
+    [ProjectsController.new, api_fixture('groups')['anonymously_accessible_project'], false],
+  ].each do |controller, fixture, anon_config=true|
+    test "#{controller} show method with anonymous config enabled" do
+      if anon_config
+        Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+      else
+        Rails.configuration.anonymous_user_token = false
+      end
+
+      @controller = controller
+
+      get(:show, {id: fixture['uuid']})
+
+      if anon_config
+        assert_response 200
+        if controller.class == JobsController
+          assert_includes @response.inspect, fixture['script']
+        else
+          assert_includes @response.inspect, fixture['name']
+        end
+      else
+        assert_response :redirect
+        assert_match /\/users\/welcome/, @response.redirect_url
+      end
+    end
+  end
 end
index 14db674292c666b1558f27a809b66f6569840174..dfbe69f987f25c75cf03f7fe5b5a3a7398fd919d 100644 (file)
@@ -56,6 +56,25 @@ class CollectionsControllerTest < ActionController::TestCase
     assert_equal([['.', 'foo', 3]], assigns(:object).files)
   end
 
+  test "viewing a collection with spaces in filename" do
+    show_collection(:w_a_z_file, :active)
+    assert_equal([['.', 'w a z', 5]], assigns(:object).files)
+  end
+
+  test "download a file with spaces in filename" do
+    collection = api_fixture('collections')['w_a_z_file']
+    fakepipe = IO.popen(['echo', '-n', 'w a z'], 'rb')
+    IO.expects(:popen).with { |cmd, mode|
+      cmd.include? "#{collection['uuid']}/w a z"
+    }.returns(fakepipe)
+    get :show_file, {
+      uuid: collection['uuid'],
+      file: 'w a z'
+    }, session_for(:active)
+    assert_response :success
+    assert_equal 'w a z', response.body
+  end
+
   test "viewing a collection fetches related projects" do
     show_collection({id: api_fixture('collections')["foo_file"]['portable_data_hash']}, :active)
     assert_includes(assigns(:same_pdh).map(&:owner_uuid),
@@ -147,6 +166,18 @@ class CollectionsControllerTest < ActionController::TestCase
                  "failed to get a correct file from Keep")
   end
 
+  test 'anonymous download' do
+    Rails.configuration.anonymous_user_token =
+      api_fixture('api_client_authorizations')['anonymous']['api_token']
+    expect_content = stub_file_content
+    get :show_file, {
+      uuid: api_fixture('collections')['user_agreement_in_anonymously_accessible_project']['uuid'],
+      file: 'GNU_General_Public_License,_version_3.pdf',
+    }
+    assert_response :success
+    assert_equal expect_content, response.body
+  end
+
   test "can't get a file from Keep without permission" do
     params = collection_params(:foo_file, 'foo')
     sess = session_for(:spectator)
@@ -174,12 +205,23 @@ class CollectionsControllerTest < ActionController::TestCase
                      "using a reader token set the session's API token")
   end
 
-  test "trying to get from Keep with an unscoped reader token prompts login" do
-    params = collection_params(:foo_file, 'foo')
-    params[:reader_token] =
-      api_fixture('api_client_authorizations')['active_noscope']['api_token']
-    get(:show_file, params)
-    assert_response :redirect
+  [false, api_fixture('api_client_authorizations')['anonymous']['api_token']].
+    each do |anon_conf|
+    test "download a file using a reader token with insufficient scope (anon_conf=#{!!anon_conf})" do
+      Rails.configuration.anonymous_user_token = anon_conf
+      params = collection_params(:foo_file, 'foo')
+      params[:reader_token] =
+        api_fixture('api_client_authorizations')['active_noscope']['api_token']
+      get(:show_file, params)
+      if anon_conf
+        # Some files can be shown without a valid token, but not this one.
+        assert_response 404
+      else
+        # No files will ever be shown without a valid token. You
+        # should log in and try again.
+        assert_response :redirect
+      end
+    end
   end
 
   test "can get a file with an unpermissioned auth but in-scope reader token" do
@@ -313,4 +355,52 @@ class CollectionsControllerTest < ActionController::TestCase
     assert /#{stage3_id}&#45;&gt;#{stage3_out}/.match(used_by_svg)
 
   end
+
+  test "view collection with empty properties" do
+    fixture_name = :collection_with_empty_properties
+    show_collection(fixture_name, :active)
+    assert_equal(api_fixture('collections')[fixture_name.to_s]['name'], assigns(:object).name)
+    assert_not_nil(assigns(:object).properties)
+    assert_empty(assigns(:object).properties)
+  end
+
+  test "view collection with one property" do
+    fixture_name = :collection_with_one_property
+    show_collection(fixture_name, :active)
+    fixture = api_fixture('collections')[fixture_name.to_s]
+    assert_equal(fixture['name'], assigns(:object).name)
+    assert_equal(fixture['properties'][0], assigns(:object).properties[0])
+  end
+
+  test "create collection with properties" do
+    post :create, {
+      collection: {
+        name: 'collection created with properties',
+        manifest_text: '',
+        properties: {
+          property_1: 'value_1'
+        },
+      },
+      format: :json
+    }, session_for(:active)
+    assert_response :success
+    assert_not_nil assigns(:object).uuid
+    assert_equal 'collection created with properties', assigns(:object).name
+    assert_equal 'value_1', assigns(:object).properties[:property_1]
+  end
+
+  test "update description and check manifest_text is not lost" do
+    collection = api_fixture("collections")["multilevel_collection_1"]
+    post :update, {
+      id: collection["uuid"],
+      collection: {
+        description: 'test description update'
+      },
+      format: :json
+    }, session_for(:active)
+    assert_response :success
+    assert_not_nil assigns(:object)
+    assert_equal 'test description update', assigns(:object).description
+    assert_equal collection['manifest_text'], assigns(:object).manifest_text
+  end
 end
index 8407dc324257518b51144ee9827a7de814f8b304..c2089ad18d816cd47087270b603a655ab22e2dff 100644 (file)
@@ -100,7 +100,9 @@ class ProjectsControllerTest < ActionController::TestCase
     assert_response :success
   end
 
-  test "project admin can remove items from the project" do
+  test "project admin can remove collections from the project" do
+    # Deleting an object that supports 'expires_at' should make it
+    # completely inaccessible to API queries, not simply moved out of the project.
     coll_key = "collection_to_remove_from_subproject"
     coll_uuid = api_fixture("collections")[coll_key]["uuid"]
     delete(:remove_item,
@@ -111,6 +113,29 @@ class ProjectsControllerTest < ActionController::TestCase
     assert_response :success
     assert_match(/\b#{coll_uuid}\b/, @response.body,
                  "removed object not named in response")
+
+    use_token :subproject_admin
+    assert_raise ArvadosApiClient::NotFoundException do
+      Collection.find(coll_uuid)
+    end
+  end
+
+  test "project admin can remove items from project other than collections" do
+    # An object which does not have an expired_at field (e.g. Specimen)
+    # should be implicitly moved to the user's Home project when removed.
+    specimen_uuid = api_fixture('specimens', 'in_asubproject')['uuid']
+    delete(:remove_item,
+           { id: api_fixture('groups', 'asubproject')['uuid'],
+             item_uuid: specimen_uuid,
+             format: 'js' },
+           session_for(:subproject_admin))
+    assert_response :success
+    assert_match(/\b#{specimen_uuid}\b/, @response.body,
+                 "removed object not named in response")
+
+    use_token :subproject_admin
+    new_specimen = Specimen.find(specimen_uuid)
+    assert_equal api_fixture('users', 'subproject_admin')['uuid'], new_specimen.owner_uuid
   end
 
   test 'projects#show tab infinite scroll partial obeys limit' do
@@ -182,4 +207,18 @@ class ProjectsControllerTest < ActionController::TestCase
                           }]
     get :show, encoded_params, session_for(:active)
   end
+
+  test "visit non-public project as anonymous when anonymous browsing is enabled and expect page not found" do
+    Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+    get(:show, {id: api_fixture('groups')['aproject']['uuid']})
+    assert_response 404
+    assert_includes @response.inspect, 'you are not logged in'
+  end
+
+  test "visit home page as anonymous when anonymous browsing is enabled and expect login" do
+    Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+    get(:index)
+    assert_response :redirect
+    assert_match /\/users\/welcome/, @response.redirect_url
+  end
 end
index e45095c64c2b7d4e865bf2b81bc5553aef854083..f95bb7731fab4bd86888d18ded3a6af0e2a6eb6b 100644 (file)
@@ -45,4 +45,21 @@ class RepositoriesControllerTest < ActionController::TestCase
   test "viewer cannot manage repository" do
     refute user_can_manage(:spectator, api_fixture("repositories")["arvados"])
   end
+
+  [
+    [:active, ['#Sharing', '#Advanced']],
+    [:admin,  ['#Attributes', '#Sharing', '#Advanced']],
+  ].each do |user, expected_panes|
+    test "#{user} sees panes #{expected_panes}" do
+      get :show, {
+        id: api_fixture('repositories')['foo']['uuid']
+      }, session_for(user)
+      assert_response :success
+
+      panes = css_select('[data-toggle=tab]').each do |pane|
+        pane_name = pane.attributes['href']
+        assert_includes expected_panes, pane_name
+      end
+    end
+  end
 end
index 213a2a53c1630db44f3da2e1d1568670ec84a6d2..c1436da4545e93197c95d2b850614cf55c95cafc 100644 (file)
@@ -40,4 +40,38 @@ class UsersControllerTest < ActionController::TestCase
     assert_includes editables, true, "should have a writable repository"
     assert_includes editables, false, "should have a readonly repository"
   end
+
+  test "show repositories lists linked as well as owned repositories" do
+    get :manage_account, {}, session_for(:active)
+    assert_response :success
+    repos = assigns(:my_repositories)
+    assert repos
+    repo_writables = assigns(:repo_writable)
+    assert_not_empty repo_writables, "repo_writables should not be empty"
+    assert_includes repo_writables, api_fixture('repositories')['repository4']['uuid']  # writable by active
+    assert_includes repo_writables, api_fixture('repositories')['repository2']['uuid']  # owned by active
+  end
+
+  test "request shell access" do
+    user = api_fixture('users')['spectator']
+
+    ActionMailer::Base.deliveries = []
+
+    post :request_shell_access, {
+      id: user['uuid'],
+      format: 'js'
+    }, session_for(:spectator)
+    assert_response :success
+
+    full_name = "#{user['first_name']} #{user['last_name']}"
+    expected = "Shell account request from #{full_name} (#{user['email']}, #{user['uuid']})"
+    found_email = 0
+    ActionMailer::Base.deliveries.each do |email|
+      if email.subject.include?(expected)
+        found_email += 1
+        break
+      end
+    end
+    assert_equal 1, found_email, "Expected 1 email after requesting shell access"
+  end
 end
index eb9cfe5ed8b62a421a89a20088ca9e27451f7f97..168656d4c3fd273a1fa3cd5e73824c5799e9b0b0 100644 (file)
@@ -39,6 +39,7 @@ class PipelineTest < DiagnosticsTest
       wait_for_ajax
 
       # All needed input are filled in. Run this pipeline now
+      click_link 'Components'
       find('a,button', text: 'Run').click
 
       # Pipeline is running. We have a "Stop" button instead now.
diff --git a/apps/workbench/test/integration/anonymous_access_test.rb b/apps/workbench/test/integration/anonymous_access_test.rb
new file mode 100644 (file)
index 0000000..6508879
--- /dev/null
@@ -0,0 +1,173 @@
+require 'integration_helper'
+
+class AnonymousAccessTest < ActionDispatch::IntegrationTest
+  # These tests don't do state-changing API calls. Save some time by
+  # skipping the database reset.
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, true
+
+  setup do
+    need_javascript
+    Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+  end
+
+  PUBLIC_PROJECT = "/projects/#{api_fixture('groups')['anonymously_accessible_project']['uuid']}"
+
+  def verify_site_navigation_anonymous_enabled user, is_active
+    if user
+      if user['is_active']
+        assert_text 'Unrestricted public data'
+        assert_selector 'a', text: 'Projects'
+      else
+        assert_text 'indicate that you have read and accepted the user agreement'
+      end
+      within('.navbar-fixed-top') do
+        assert_selector 'a', text: "#{user['email']}"
+        find('a', text: "#{user['email']}").click
+        within('.dropdown-menu') do
+          assert_selector 'a', text: 'Log out'
+        end
+      end
+    else  # anonymous
+      assert_text 'Unrestricted public data'
+      within('.navbar-fixed-top') do
+        assert_selector 'a', text: 'Log in'
+      end
+    end
+  end
+
+  [
+    [nil, nil, false, false],
+    ['inactive', api_fixture('users')['inactive'], false, false],
+    ['active', api_fixture('users')['active'], true, true],
+  ].each do |token, user, is_active|
+    test "visit public project as user #{token.inspect} when anonymous browsing is enabled" do
+      if !token
+        visit PUBLIC_PROJECT
+      else
+        visit page_with_token(token, PUBLIC_PROJECT)
+      end
+
+      verify_site_navigation_anonymous_enabled user, is_active
+    end
+  end
+
+  test "selection actions when anonymous user accesses shared project" do
+    visit PUBLIC_PROJECT
+
+    assert_selector 'a', text: 'Data collections'
+    assert_selector 'a', text: 'Jobs and pipelines'
+    assert_selector 'a', text: 'Pipeline templates'
+    assert_selector 'a', text: 'Advanced'
+    assert_no_selector 'a', text: 'Subprojects'
+    assert_no_selector 'a', text: 'Other objects'
+    assert_no_selector 'button', text: 'Add data'
+
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li', text: 'Compare selected'
+      assert_no_selector 'li', text: 'Create new collection with selected collections'
+      assert_no_selector 'li', text: 'Copy selected'
+      assert_no_selector 'li', text: 'Move selected'
+      assert_no_selector 'li', text: 'Remove selected'
+    end
+  end
+
+  test "anonymous user accesses data collections tab in shared project" do
+    visit PUBLIC_PROJECT
+    collection = api_fixture('collections')['user_agreement_in_anonymously_accessible_project']
+    assert_text 'GNU General Public License'
+
+    assert_selector 'a', text: 'Data collections'
+
+    # click on show collection
+    within "tr[data-object-uuid=\"#{collection['uuid']}\"]" do
+      click_link 'Show'
+    end
+
+    # in collection page
+    assert_no_selector 'input', text: 'Create sharing link'
+    assert_no_text 'Sharing and permissions'
+    assert_no_selector 'a', text: 'Upload'
+    assert_no_selector 'button', 'Selection'
+
+    within '#collection_files tr,li', text: 'GNU_General_Public_License,_version_3.pdf' do
+      find 'a[title~=View]'
+      find 'a[title~=Download]'
+    end
+  end
+
+  test 'view file' do
+    magic = rand(2**512).to_s 36
+    CollectionsController.any_instance.stubs(:file_enumerator).returns([magic])
+    collection = api_fixture('collections')['public_text_file']
+    visit '/collections/' + collection['uuid']
+    find('tr,li', text: 'Hello world.txt').
+      find('a[title~=View]').click
+    assert_text magic
+  end
+
+  [
+    'running_job',
+    'completed_job',
+    'pipelineInstance'
+  ].each do |type|
+    test "anonymous user accesses jobs and pipelines tab in shared project and clicks on #{type}" do
+      visit PUBLIC_PROJECT
+      assert_text 'GNU General Public License'
+
+      click_link 'Jobs and pipelines'
+      assert_text 'Pipeline in publicly accessible project'
+
+      # click on the specified job
+      if type.include? 'job'
+        verify_job_row type
+      else
+        verify_pipeline_instance_row
+      end
+    end
+  end
+
+  def verify_job_row look_for
+    within first('tr', text: look_for) do
+      click_link 'Show'
+    end
+    assert_text 'script_version'
+
+    assert_text 'zzzzz-tpzed-xurymjxw79nv3jz' # modified by user
+    assert_no_selector 'a', text: 'zzzzz-tpzed-xurymjxw79nv3jz'
+    assert_no_selector 'a', text: 'Move job'
+    assert_no_selector 'button', text: 'Cancel'
+    assert_no_selector 'button', text: 'Re-run job'
+  end
+
+  def verify_pipeline_instance_row
+    within first('tr[data-kind="arvados#pipelineInstance"]') do
+      assert_text 'Pipeline in publicly accessible project'
+      click_link 'Show'
+    end
+
+    # in pipeline instance page
+    assert_text 'This pipeline is complete'
+    assert_no_selector 'a', text: 'Re-run with latest'
+    assert_no_selector 'a', text: 'Re-run options'
+  end
+
+  test "anonymous user accesses pipeline templates tab in shared project" do
+    visit PUBLIC_PROJECT
+    assert_text 'GNU General Public License'
+
+    assert_selector 'a', text: 'Pipeline templates'
+
+    click_link 'Pipeline templates'
+    assert_text 'Pipeline template in publicly accessible project'
+
+    within first('tr[data-kind="arvados#pipelineTemplate"]') do
+      click_link 'Show'
+    end
+
+    # in template page
+    assert_text 'script version'
+    assert_no_selector 'a', text: 'Run this pipeline'
+  end
+end
diff --git a/apps/workbench/test/integration/browser_unsupported_test.rb b/apps/workbench/test/integration/browser_unsupported_test.rb
new file mode 100644 (file)
index 0000000..788907d
--- /dev/null
@@ -0,0 +1,17 @@
+require 'integration_helper'
+
+class BrowserUnsupported < ActionDispatch::IntegrationTest
+  WARNING_FRAGMENT = 'Your web browser is missing some of the features'
+
+  test 'warning if no File API' do
+    Capybara.current_driver = :poltergeist_without_file_api
+    visit '/'
+    assert_text :visible, WARNING_FRAGMENT
+  end
+
+  test 'no warning if File API' do
+    need_javascript
+    visit '/'
+    assert_no_text :visible, WARNING_FRAGMENT
+  end
+end
index 03c359e089590f20f8ea12334b7d518923923479..1897a038589898d8b31514360ed952de64365f51 100644 (file)
@@ -125,5 +125,4 @@ class ErrorsTest < ActionDispatch::IntegrationTest
       Rails.configuration.arvados_v1_base = original_arvados_v1_base
     end
   end
-
 end
index b4dadcd13f853f2086cb62797d1e12a67da3105d..0a7baeb48afaa87439c16094a36e2b68dc28e321 100644 (file)
@@ -10,18 +10,18 @@ class FilterableInfiniteScrollTest < ActionDispatch::IntegrationTest
   # unused ?search=foo param to pre-populate the search field.
   test 'no double-load if text input has a value at page load time' do
     visit page_with_token('admin', '/pipeline_instances')
-    assert_text 'pipeline_2'
-    visit page_with_token('admin', '/pipeline_instances?search=pipeline_1')
+    assert_text 'pipeline_with_job'
+    visit page_with_token('admin', '/pipeline_instances?search=pipeline_with_tagged')
     # Horrible hack to ensure the search results can't load correctly
     # on the second attempt.
     assert_selector '#recent-pipeline-instances'
     assert page.evaluate_script('$("#recent-pipeline-instances[data-infinite-content-href0]").attr("data-infinite-content-href0","/give-me-an-error").length == 1')
     # Wait for the first page of results to appear.
-    assert_text 'pipeline_1'
+    assert_text 'pipeline_with_tagged_collection_input'
     # Make sure the results are filtered.
-    assert_no_text 'pipeline_2'
-    # Make sure pipeline_2 didn't disappear merely because the results
-    # were replaced with an error message.
-    assert_text 'pipeline_1'
+    assert_no_text 'pipeline_with_job'
+    # Make sure pipeline_with_job didn't disappear merely because
+    # the results were replaced with an error message.
+    assert_text 'pipeline_with_tagged_collection_input'
   end
 end
index 716e7319874d56189b145acae6fd4c137a590f23..29bccd9d76b20846cd0d6ce7519b2858a81bcd5a 100644 (file)
@@ -4,6 +4,9 @@ require 'tmpdir'
 require 'integration_helper'
 
 class JobsTest < ActionDispatch::IntegrationTest
+  setup do
+      need_javascript
+  end
 
   def fakepipe_with_log_data
     content =
@@ -14,13 +17,8 @@ class JobsTest < ActionDispatch::IntegrationTest
   end
 
   test "add job description" do
-    need_javascript
-    visit page_with_token("active", "/jobs")
-
-    # go to job running the script "doesnotexist"
-    within first('tr', text: 'doesnotexist') do
-      find("a").click
-    end
+    job = api_fixture('jobs')['nearly_finished_job']
+    visit page_with_token("active", "/jobs/#{job['uuid']}")
 
     # edit job description
     within('.arv-description-as-subtitle') do
@@ -28,18 +26,14 @@ class JobsTest < ActionDispatch::IntegrationTest
       find('.editable-input textarea').set('*Textile description for job* - "Go to dashboard":/')
       find('.editable-submit').click
     end
-    wait_for_ajax
 
     # Verify edited description
-    assert page.has_no_text? '*Textile description for job*'
-    assert page.has_text? 'Textile description for job'
-    assert page.has_link? 'Go to dashboard'
-    click_link 'Go to dashboard'
-    assert page.has_text? 'Active pipelines'
+    assert_no_text '*Textile description for job*'
+    assert_text 'Textile description for job'
+    assert_selector 'a[href="/"]', text: 'Go to dashboard'
   end
 
   test "view job log" do
-    need_javascript
     job = api_fixture('jobs')['job_with_real_log']
 
     IO.expects(:popen).returns(fakepipe_with_log_data)
@@ -58,7 +52,6 @@ class JobsTest < ActionDispatch::IntegrationTest
   end
 
   test 'view partial job log' do
-    need_javascript
     # This config will be restored during teardown by ../test_helper.rb:
     Rails.configuration.log_viewer_max_bytes = 100
 
@@ -72,4 +65,43 @@ class JobsTest < ActionDispatch::IntegrationTest
     wait_for_ajax
     assert page.has_text? 'Showing only 100 bytes of this log'
   end
+
+  [
+    ['foobar', false, false],
+    ['job_with_latest_version', true, false],
+    ['job_with_latest_version', true, true],
+  ].each do |job_name, expect_options, use_latest|
+    test "Rerun #{job_name} job, expect options #{expect_options},
+          and use latest version option #{use_latest}" do
+      job = api_fixture('jobs')[job_name]
+      visit page_with_token 'active', '/jobs/'+job['uuid']
+
+      if expect_options
+        assert_text 'supplied_script_version: master'
+      else
+        assert_text 'supplied_script_version: (none)'
+      end
+
+      assert_triggers_dom_event 'shown.bs.modal' do
+        find('a,button', text: 'Re-run job...').click
+      end
+      within('.modal-dialog') do
+        assert_selector 'a,button', text: 'Cancel'
+        if use_latest
+          page.choose("job_script_version_#{job['supplied_script_version']}")
+        end
+        click_on "Run now"
+      end
+
+      # Re-running jobs doesn't currently work because the test API
+      # server has no git repository to check against.  For now, check
+      # that the correct script version is mentioned in the
+      # Fiddlesticks error message.
+      if expect_options && use_latest
+        assert_text "Script version #{job['supplied_script_version']} does not resolve to a commit"
+      else
+        assert_text "Script version #{job['script_version']} does not resolve to a commit"
+      end
+    end
+  end
 end
index 9f4ce692e5c0b28c8661a5061af4d209140ec228..f012b0992deaf50705a600080c87a2f9b1a28e5d 100644 (file)
@@ -6,9 +6,7 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
   end
 
   test 'Create and run a pipeline' do
-    visit page_with_token('active_trustedclient')
-
-    visit '/pipeline_templates'
+    visit page_with_token('active_trustedclient', '/pipeline_templates')
     within('tr', text: 'Two Part Pipeline Template') do
       find('a,button', text: 'Run').click
     end
@@ -111,10 +109,9 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
 
   # Create a pipeline instance from within a project and run
   test 'Create pipeline inside a project and run' do
-    visit page_with_token('active_trustedclient')
+    visit page_with_token('active_trustedclient', '/projects')
 
-    # Add this collection to the project using collections menu from top nav
-    visit '/projects'
+    # Add collection to the project using Add data button
     find("#projects-menu").click
     find('.dropdown-menu a,button', text: 'A Project').click
     find('.btn', text: 'Add data').click
@@ -138,9 +135,7 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
   end
 
   test 'view pipeline with job and see graph' do
-    visit page_with_token('active_trustedclient')
-
-    visit '/pipeline_instances'
+    visit page_with_token('active_trustedclient', '/pipeline_instances')
     assert page.has_text? 'pipeline_with_job'
 
     find('a', text: 'pipeline_with_job').click
@@ -152,9 +147,7 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
   end
 
   test 'pipeline description' do
-    visit page_with_token('active_trustedclient')
-
-    visit '/pipeline_instances'
+    visit page_with_token('active_trustedclient', '/pipeline_instances')
     assert page.has_text? 'pipeline_with_job'
 
     find('a', text: 'pipeline_with_job').click
@@ -182,17 +175,27 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
            "components JSON not found")
   end
 
-  PROJECT_WITH_SEARCH_COLLECTION = "A Subproject"
-  def check_parameter_search(proj_name)
-    template = api_fixture("pipeline_templates")["parameter_with_search"]
-    search_text = template["components"]["with-search"]["script_parameters"]["input"]["search_for"]
-    visit page_with_token("active", "/pipeline_templates/#{template['uuid']}")
+  def create_pipeline_from(template_name, project_name="Home")
+    # Visit the named pipeline template and create a pipeline instance from it.
+    # The instance will be created under the named project.
+    template_uuid = api_fixture("pipeline_templates", template_name, "uuid")
+    visit page_with_token("active", "/pipeline_templates/#{template_uuid}")
     click_on "Run this pipeline"
-    within(".modal-dialog") do  # Set project for the new pipeline instance
-      find(".selectable", text: proj_name).click
+    within(".modal-dialog") do
+      # Set project for the new pipeline instance
+      find(".selectable", text: project_name).click
       click_on "Choose"
     end
-    assert(has_text?("This pipeline was created from the template"), "did not land on pipeline instance page")
+    assert(has_text?("This pipeline was created from the template"),
+           "did not land on pipeline instance page")
+  end
+
+  PROJECT_WITH_SEARCH_COLLECTION = "A Subproject"
+  def check_parameter_search(proj_name)
+    create_pipeline_from("parameter_with_search", proj_name)
+    search_text = api_fixture("pipeline_templates", "parameter_with_search",
+                              "components", "with-search",
+                              "script_parameters", "input", "search_for")
     first("a.btn,button", text: "Choose").click
     within(".modal-body") do
       if (proj_name != PROJECT_WITH_SEARCH_COLLECTION)
@@ -215,6 +218,23 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
     check_parameter_search("A Project")
   end
 
+  test "enter a float for a number pipeline input" do
+    # Poltergeist either does not support the HTML 5 <input
+    # type="number">, or interferes with the associated X-Editable
+    # validation code.  If the input field has type=number (forcing an
+    # integer), this test will yield a false positive under
+    # Poltergeist.  --Brett, 2015-02-05
+    need_selenium "for strict X-Editable input validation"
+    create_pipeline_from("template_with_dataclass_number")
+    INPUT_SELECTOR =
+      ".editable[data-name='[components][work][script_parameters][input][value]']"
+    find(INPUT_SELECTOR).click
+    find(".editable-input input").set("12.34")
+    find("#editable-submit").click
+    assert_no_selector(".editable-popup")
+    assert_selector(INPUT_SELECTOR, text: "12.34")
+  end
+
   [
     [true, 'Two Part Pipeline Template', 'foo_collection_in_aproject', false],
     [false, 'Two Part Pipeline Template', 'foo_collection_in_aproject', false],
@@ -267,34 +287,18 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
   ].each do |user, with_options, choose_options, in_aproject|
     test "Rerun pipeline instance as #{user} using options #{with_options} #{choose_options} in #{in_aproject}" do
       if in_aproject
-        visit page_with_token 'active', \
-        '/projects/'+api_fixture('groups')['aproject']['uuid']
+        path = '/pipeline_instances/'+api_fixture('pipeline_instances')['pipeline_owned_by_active_in_aproject']['uuid']
       else
-        visit page_with_token 'active', '/'
+        path = '/pipeline_instances/'+api_fixture('pipeline_instances')['pipeline_owned_by_active_in_home']['uuid']
       end
 
-      # need bigger modal size when choosing a file from collection
-      if Capybara.current_driver == :selenium
-        Capybara.current_session.driver.browser.manage.window.resize_to(1200, 800)
-      end
+      visit page_with_token(user, path)
 
-      create_and_run_pipeline_in_aproject in_aproject, 'Two Part Pipeline Template', 'foo_collection_in_aproject'
-      instance_path = current_path
-
-      # Pause the pipeline
-      find('a,button', text: 'Pause').click
-      assert page.has_text? 'Paused'
-      page.assert_no_selector 'a.disabled,button.disabled', text: 'Resume'
       page.assert_selector 'a,button', text: 'Re-run with latest'
       page.assert_selector 'a,button', text: 'Re-run options'
 
-      # Pipeline can be re-run now. Access it as the specified user, and re-run
-      if user == 'project_viewer'
-        visit page_with_token(user, instance_path)
+      if user == 'project_viewer' && in_aproject
         assert page.has_text? 'A Project'
-        page.assert_no_selector 'a.disabled,button.disabled', text: 'Resume'
-        page.assert_selector 'a,button', text: 'Re-run with latest'
-        page.assert_selector 'a,button', text: 'Re-run options'
       end
 
       # Now re-run the pipeline
@@ -319,7 +323,7 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
       # project. In case of project_viewer user, since the user cannot
       # write to the project, the pipeline should have been created in
       # the user's Home project.
-      assert_not_equal instance_path, current_path, 'Rerun instance path expected to be different'
+      assert_not_equal path, current_path, 'Rerun instance path expected to be different'
       assert_text 'Home'
       if in_aproject && (user != 'project_viewer')
         assert_text 'A Project'
@@ -435,16 +439,8 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
 
   [
     ['fuse', nil, 2, 20],                           # has 2 as of 11-07-2014
-    ['fuse', 'FUSE project', 1, 1],                 # 1 with this name
-    ['user1_with_load', nil, 30, 100],              # has 37 as of 11-07-2014
-    ['user1_with_load', 'pipeline_10', 2, 2],       # 2 with this name
-    ['user1_with_load', '000010pipelines', 10, 10], # owned_by the project zzzzz-j7d0g-000010pipelines
     ['user1_with_load', '000025pipelines', 25, 25], # owned_by the project zzzzz-j7d0g-000025pipelines, two pages
-    ['admin', nil, 40, 200],
-    ['admin', 'FUSE project', 1, 1],
-    ['admin', 'pipeline_10', 2, 2],
-    ['active', 'containing at least two', 2, 100],
-    ['active', nil, 10, 100],
+    ['admin', 'pipeline_20', 1, 1],
     ['active', 'no such match', 0, 0],
   ].each do |user, search_filter, expected_min, expected_max|
     test "scroll pipeline instances page for #{user} with search filter #{search_filter}
@@ -484,5 +480,4 @@ class PipelineInstancesTest < ActionDispatch::IntegrationTest
       end
     end
   end
-
 end
index 77d5722947abb24e0d02bc452819960c85b5ccfc..88972e50a73225391a17b723e12f77140914b1db 100644 (file)
@@ -172,6 +172,34 @@ class ProjectsTest < ActionDispatch::IntegrationTest
            "Project 5678 should now be inside project 1234")
   end
 
+  def open_groups_sharing(project_name="aproject", token_name="active")
+    project = api_fixture("groups", project_name)
+    visit(page_with_token(token_name, "/projects/#{project['uuid']}"))
+    click_on "Sharing"
+    click_on "Share with groups"
+  end
+
+  def group_name(group_key)
+    api_fixture("groups", group_key, "name")
+  end
+
+  test "projects not publicly sharable when anonymous browsing disabled" do
+    Rails.configuration.anonymous_user_token = false
+    open_groups_sharing
+    # Check for a group we do expect first, to make sure the modal's loaded.
+    assert_selector(".modal-container .selectable",
+                    text: group_name("all_users"))
+    assert_no_selector(".modal-container .selectable",
+                       text: group_name("anonymous_group"))
+  end
+
+  test "projects publicly sharable when anonymous browsing enabled" do
+    Rails.configuration.anonymous_user_token = "testonlytoken"
+    open_groups_sharing
+    assert_selector(".modal-container .selectable",
+                    text: group_name("anonymous_group"))
+  end
+
   test "project viewer can't see project sharing tab" do
     show_object_using('project_viewer', 'groups', 'aproject', 'A Project')
     assert(page.has_no_link?("Sharing"),
@@ -239,14 +267,6 @@ class ProjectsTest < ActionDispatch::IntegrationTest
 
       when 'Remove'
         assert page.has_no_text?(my_collection['name']), 'Collection still found in src project after remove'
-        visit page_with_token 'active', '/'
-        find("#projects-menu").click
-        find(".dropdown-menu a", text: "Home").click
-        assert page.has_text?(my_collection['name']), 'Collection not found in home project after remove'
-        if expect_name_change
-          assert page.has_text?(my_collection['name']+' removed from ' + src['name']),
-            'Collection with update name is not found in home project after remove'
-        end
       end
     end
   end
@@ -692,6 +712,6 @@ class ProjectsTest < ActionDispatch::IntegrationTest
                           "/projects/#{project['uuid']}#Advanced"))
     assert_text("API response")
     find("#page-wrapper .nav-tabs :first-child a").click
-    assert_text("bytes Collection")
+    assert_text("Collection modified at")
   end
 end
index 1e1a118a7a5ec6d8e9610c3d244ddd9a7bafd0dd..49654a86887fa033cb342a844cf35b385002f438 100644 (file)
@@ -15,7 +15,8 @@ class RepositoriesTest < ActionDispatch::IntegrationTest
     test "#{user} can manage sharing for another user" do
       add_user = api_fixture('users')['future_project_user']
       new_name = ["first_name", "last_name"].map { |k| add_user[k] }.join(" ")
-      show_object_using(user, 'repositories', 'foo', 'push_url')
+      show_object_using(user, 'repositories', 'foo',
+                        api_fixture('repositories')['foo']['name'])
       click_on "Sharing"
       add_share_and_check("users", new_name, add_user)
       modify_share_and_check(new_name)
@@ -28,7 +29,8 @@ class RepositoriesTest < ActionDispatch::IntegrationTest
   ].each do |user|
     test "#{user} can manage sharing for another group" do
       new_name = api_fixture('groups')['future_project_viewing_group']['name']
-      show_object_using(user, 'repositories', 'foo', 'push_url')
+      show_object_using(user, 'repositories', 'foo',
+                        api_fixture('repositories')['foo']['name'])
       click_on "Sharing"
       add_share_and_check("groups", new_name)
       modify_share_and_check(new_name)
@@ -36,7 +38,8 @@ class RepositoriesTest < ActionDispatch::IntegrationTest
   end
 
   test "spectator does not see repository sharing tab" do
-    show_object_using("spectator", 'repositories', 'arvados', 'push_url')
+    show_object_using('spectator', 'repositories', 'arvados',
+                      api_fixture('repositories')['arvados']['name'])
     assert(page.has_no_link?("Sharing"),
            "read-only repository user sees sharing tab")
   end
index fae7e62e728d12212dd4f2c3cb69dcbe420f83d2..9b5e5d61e17ec0495cc1dc3dffc19e5395db8604 100644 (file)
@@ -72,16 +72,24 @@ class UserManageAccountTest < ActionDispatch::IntegrationTest
     end
   end
 
+  test "pipeline notification shown even though public pipelines exist" do
+    skip "created_by doesn't work that way"
+    Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+    visit page_with_token 'job_reader'
+    click_link 'notifications-menu'
+    assert_selector 'a', text: 'Click here to learn how to run an Arvados Crunch pipeline'
+  end
+
   [
-    ['inactive_but_signed_user_agreement', true],
-    ['active', false],
-  ].each do |user, notifications|
-    test "test manage account for #{user} with notifications #{notifications}" do
+    ['job_reader', :ssh, :pipeline],
+    ['active'],
+  ].each do |user, *expect|
+    test "manage account for #{user} with notifications #{expect.inspect}" do
+      Rails.configuration.anonymous_user_token = false
       visit page_with_token(user)
       click_link 'notifications-menu'
-      if notifications
+      if expect.include? :ssh
         assert_selector('a', text: 'Click here to set up an SSH public key for use with Arvados')
-        assert_selector('a', text: 'Click here to learn how to run an Arvados Crunch pipeline')
         click_link('Click here to set up an SSH public key for use with Arvados')
         assert_selector('a', text: 'Add new SSH key')
 
@@ -90,11 +98,77 @@ class UserManageAccountTest < ActionDispatch::IntegrationTest
         # No more SSH notification
         click_link 'notifications-menu'
         assert_no_selector('a', text: 'Click here to set up an SSH public key for use with Arvados')
-        assert_selector('a', text: 'Click here to learn how to run an Arvados Crunch pipeline')
       else
         assert_no_selector('a', text: 'Click here to set up an SSH public key for use with Arvados')
         assert_no_selector('a', text: 'Click here to learn how to run an Arvados Crunch pipeline')
       end
+
+      if expect.include? :pipeline
+        assert_selector('a', text: 'Click here to learn how to run an Arvados Crunch pipeline')
+      end
+    end
+  end
+
+  test "verify repositories for active user" do
+    visit page_with_token('active', '/manage_account')
+
+    repos = [[api_fixture('repositories')['foo'], true, true],
+             [api_fixture('repositories')['repository3'], false, false],
+             [api_fixture('repositories')['repository4'], true, false]]
+
+    repos.each do |(repo, writable, sharable)|
+      within('tr', text: repo['name']+'.git') do
+        if sharable
+          assert_selector 'a', text:'Share'
+          assert_text 'writable'
+        else
+          assert_text repo['name']
+          assert_no_selector 'a', text:'Share'
+          if writable
+            assert_text 'writable'
+          else
+            assert_text 'read-only'
+          end
+        end
+      end
+    end
+  end
+
+  test "request shell access" do
+    ActionMailer::Base.deliveries = []
+    visit page_with_token('spectator', '/manage_account')
+    assert_text 'You do not have access to any virtual machines'
+    click_link 'Send request for shell access'
+
+    # Button text changes to "sending...", then back to normal. In the
+    # test suite we can't depend on confirming the "sending..." state
+    # before it goes back to normal, though.
+    ## assert_selector 'a', text: 'Sending request...'
+    assert_selector 'a', text: 'Send request for shell access'
+    assert_text 'A request for shell access was sent'
+
+    # verify that the email was sent
+    user = api_fixture('users')['spectator']
+    full_name = "#{user['first_name']} #{user['last_name']}"
+    expected = "Shell account request from #{full_name} (#{user['email']}, #{user['uuid']})"
+    found_email = 0
+    ActionMailer::Base.deliveries.each do |email|
+      if email.subject.include?(expected)
+        found_email += 1
+      end
+    end
+    assert_equal 1, found_email, "Expected email after requesting shell access"
+
+    # Revisit the page and verify the request sent message along with
+    # the request button.
+    within('.navbar-fixed-top') do
+      find('a', text: 'spectator').click
+      within('.dropdown-menu') do
+        find('a', text: 'Manage account').click
+      end
     end
+    assert_text 'You do not have access to any virtual machines.'
+    assert_text 'A request for shell access was sent on '
+    assert_selector 'a', text: 'Send request for shell access'
   end
 end
index efc2539e7070d14efbd1c25c79a8838398bdd518..4885129286ab5d4c66d9c3cf6a7bf94bb6215856 100644 (file)
@@ -65,7 +65,8 @@ class WebsocketTest < ActionDispatch::IntegrationTest
       assert_text '1001 hello'
 
       # Check that new value of scrollTop is greater than the old one
-      assert page.evaluate_script("$('#event_log_div').scrollTop()") > old_top
+      new_top = page.evaluate_script("$('#event_log_div').scrollTop()")
+      assert_operator new_top, :>, old_top
 
       # Now scroll to 30 pixels from the top
       page.execute_script "$('#event_log_div').scrollTop(30)"
@@ -122,13 +123,13 @@ class WebsocketTest < ActionDispatch::IntegrationTest
     visit(page_with_token("admin", "/jobs/#{p.uuid}"))
 
     assert_no_text 'complete'
-    assert_no_text 'Re-run same version'
+    assert_no_text 'Re-run job'
 
     p.state = "Complete"
     p.save!
 
     assert_text 'complete'
-    assert_text 'Re-run same version'
+    assert_text 'Re-run job'
 
     Thread.current[:arvados_api_token] = nil
   end
@@ -158,6 +159,10 @@ class WebsocketTest < ActionDispatch::IntegrationTest
     visit page_with_token "admin", "/jobs/#{uuid}"
     click_link "Log"
 
+    # Until graphable data arrives, we should see the text log but not the graph.
+    assert_selector '#event_log_div', visible: true
+    assert_no_selector '#log_graph_div', visible: true
+
     api = ArvadosApiClient.new
 
     # should give 45.3% or (((36.39+0.86)/10.0002)/8)*100 rounded to 1 decimal place
@@ -168,9 +173,15 @@ class WebsocketTest < ActionDispatch::IntegrationTest
                 object_uuid: uuid,
                 event_type: "stderr",
                 properties: {"text" => text}}})
-    wait_for_ajax
 
-    # using datapoint 1 instead of datapoint 0 because there will be a "dummy" datapoint with no actual stats 10 minutes previous to the one we're looking for, for the sake of making the x-axis of the graph show a full 10 minutes of time even though there is only a single real datapoint
+    # Log div should appear when the first data point arrives by websocket.
+    assert_selector '#log_graph_div', visible: true
+
+    # Using datapoint 1 instead of datapoint 0 because there will be a
+    # "dummy" datapoint with no actual stats 10 minutes previous to
+    # the one we're looking for, for the sake of making the x-axis of
+    # the graph show a full 10 minutes of time even though there is
+    # only a single real datapoint.
     cpu_stat = page.evaluate_script("jobGraphData[1]['T1-cpu']")
 
     assert_equal 45.3, (cpu_stat.to_f*100).round(1)
index cb07725350c99748efc8bb86cae47fc1c98b3dcf..39fdf4b260abd68be05f252158f29629aa199199 100644 (file)
@@ -4,12 +4,19 @@ require 'capybara/poltergeist'
 require 'uri'
 require 'yaml'
 
+POLTERGEIST_OPTS = {
+  window_size: [1200, 800],
+  phantomjs_options: ['--ignore-ssl-errors=true'],
+  inspector: true,
+}
+
 Capybara.register_driver :poltergeist do |app|
-  Capybara::Poltergeist::Driver.new app, {
-    window_size: [1200, 800],
-    phantomjs_options: ['--ignore-ssl-errors=true'],
-    inspector: true,
-  }
+  Capybara::Poltergeist::Driver.new app, POLTERGEIST_OPTS
+end
+
+Capybara.register_driver :poltergeist_without_file_api do |app|
+  js = File.expand_path '../support/remove_file_api.js', __FILE__
+  Capybara::Poltergeist::Driver.new app, POLTERGEIST_OPTS.merge(extensions: [js])
 end
 
 module WaitForAjax
index 7d335d819ad62bf33133b07a7fff18b875ff16b9..1b6a1a61dd52881dd4e98f82ebe434f0cd9fc1e0 100644 (file)
@@ -1,6 +1,6 @@
 require 'integration_helper'
 
-# Performance test can run in two two different ways:
+# Performance test can run in two different ways:
 #
 # 1. Similar to other integration tests using the command:
 #     RAILS_ENV=test bundle exec rake test:benchmark
diff --git a/apps/workbench/test/support/remove_file_api.js b/apps/workbench/test/support/remove_file_api.js
new file mode 100644 (file)
index 0000000..0c64df2
--- /dev/null
@@ -0,0 +1 @@
+window.FileReader = null;
index 7c454c9877b2bbb4c97c417cf5406cf433924ca3..078190bd32d0a02af56b59df5d28ad1556a511d8 100644 (file)
@@ -36,13 +36,17 @@ class ActiveSupport::TestCase
     Thread.current[:arvados_api_token] = auth['api_token']
   end
 
-  teardown do
+  setup do
     Thread.current[:arvados_api_token] = nil
     Thread.current[:user] = nil
     Thread.current[:reader_tokens] = nil
     # Diagnostics suite doesn't run a server, so there's no cache to clear.
     Rails.cache.clear unless (Rails.env == "diagnostics")
     # Restore configuration settings changed during tests
+    self.class.reset_application_config
+  end
+
+  def self.reset_application_config
     $application_config.each do |k,v|
       if k.match /^[^.]*$/
         Rails.configuration.send (k + '='), v
@@ -95,99 +99,73 @@ class ActiveSupport::TestCase
 end
 
 class ApiServerForTests
+  PYTHON_TESTS_DIR = File.expand_path('../../../../sdk/python/tests', __FILE__)
   ARV_API_SERVER_DIR = File.expand_path('../../../../services/api', __FILE__)
-  SERVER_PID_PATH = File.expand_path('tmp/pids/wbtest-server.pid', ARV_API_SERVER_DIR)
-  WEBSOCKET_PID_PATH = File.expand_path('tmp/pids/wstest-server.pid', ARV_API_SERVER_DIR)
+  SERVER_PID_PATH = File.expand_path('tmp/pids/test-server.pid', ARV_API_SERVER_DIR)
+  WEBSOCKET_PID_PATH = File.expand_path('tmp/pids/test-server.pid', ARV_API_SERVER_DIR)
   @main_process_pid = $$
+  @@server_is_running = false
 
-  def _system(*cmd)
-    $stderr.puts "_system #{cmd.inspect}"
+  def check_call *args
+    output = nil
     Bundler.with_clean_env do
-      if not system({'RAILS_ENV' => 'test', "ARVADOS_WEBSOCKETS" => (if @websocket then "ws-only" end)}, *cmd)
-        raise RuntimeError, "#{cmd[0]} returned exit code #{$?.exitstatus}"
+      output = IO.popen *args do |io|
+        io.read
+      end
+      if not $?.success?
+        raise RuntimeError, "Command failed (#{$?}): #{args.inspect}"
       end
     end
+    output
   end
 
-  def make_ssl_cert
-    unless File.exists? './self-signed.key'
-      _system('openssl', 'req', '-new', '-x509', '-nodes',
-              '-out', './self-signed.pem',
-              '-keyout', './self-signed.key',
-              '-days', '3650',
-              '-subj', '/CN=localhost')
+  def run_test_server
+    env_script = nil
+    Dir.chdir PYTHON_TESTS_DIR do
+      env_script = check_call %w(python ./run_test_server.py start --auth admin)
     end
-  end
-
-  def kill_server
-    if (pid = find_server_pid)
-      $stderr.puts "Sending TERM to API server, pid #{pid}"
-      Process.kill 'TERM', pid
+    test_env = {}
+    env_script.each_line do |line|
+      line = line.chomp
+      if 0 == line.index('export ')
+        toks = line.sub('export ', '').split '=', 2
+        $stderr.puts "run_test_server.py: #{toks[0]}=#{toks[1]}"
+        test_env[toks[0]] = toks[1]
+      end
     end
+    test_env
   end
 
-  def find_server_pid
-    pid = nil
-    begin
-      pid = IO.read(@pidfile).to_i
-      $stderr.puts "API server is running, pid #{pid.inspect}"
-    rescue Errno::ENOENT
+  def stop_test_server
+    Dir.chdir PYTHON_TESTS_DIR do
+      # This is a no-op if we're running within run-tests.sh
+      check_call %w(python ./run_test_server.py stop)
     end
-    return pid
+    @@server_is_running = false
   end
 
-  def run(args=[])
+  def run args=[]
+    return if @@server_is_running
+
+    # Stop server left over from interrupted previous run
+    stop_test_server
+
     ::MiniTest.after_run do
-      self.kill_server
+      stop_test_server
     end
 
-    @websocket = args.include?("--websockets")
-
-    @pidfile = if @websocket
-                 WEBSOCKET_PID_PATH
-               else
-                 SERVER_PID_PATH
-               end
-
-    # Kill server left over from previous test run
-    self.kill_server
-
-    Capybara.javascript_driver = :poltergeist
-    Dir.chdir(ARV_API_SERVER_DIR) do |apidir|
-      ENV["NO_COVERAGE_TEST"] = "1"
-      if @websocket
-        _system('bundle', 'exec', 'passenger', 'start', '-d', '-p3333',
-                '--pid-file', @pidfile)
-      else
-        make_ssl_cert
-        if ENV['ARVADOS_TEST_API_INSTALLED'].blank?
-          _system('bundle', 'exec', 'rake', 'db:test:load')
-          _system('bundle', 'exec', 'rake', 'db:fixtures:load')
-        end
-        _system('bundle', 'exec', 'passenger', 'start', '-d', '-p3000',
-                '--pid-file', @pidfile,
-                '--ssl',
-                '--ssl-certificate', 'self-signed.pem',
-                '--ssl-certificate-key', 'self-signed.key')
-      end
-      timeout = Time.now.tv_sec + 10
-      good_pid = false
-      while (not good_pid) and (Time.now.tv_sec < timeout)
-        sleep 0.2
-        server_pid = find_server_pid
-        good_pid = (server_pid and
-                    (server_pid > 0) and
-                    (Process.kill(0, server_pid) rescue false))
-      end
-      if not good_pid
-        raise RuntimeError, "could not find API server Rails pid"
-      end
-    end
+    test_env = run_test_server
+    $application_config['arvados_login_base'] = "https://#{test_env['ARVADOS_API_HOST']}/login"
+    $application_config['arvados_v1_base'] = "https://#{test_env['ARVADOS_API_HOST']}/arvados/v1"
+    $application_config['arvados_insecure_host'] = true
+    ActiveSupport::TestCase.reset_application_config
+
+    @@server_is_running = true
   end
 
-  def run_rake_task(task_name, arg_string)
-    Dir.chdir(ARV_API_SERVER_DIR) do
-      _system('bundle', 'exec', 'rake', "#{task_name}[#{arg_string}]")
+  def run_rake_task task_name, arg_string
+    Dir.chdir ARV_API_SERVER_DIR do
+      check_call ['bundle', 'exec', 'rake', "#{task_name}[#{arg_string}]"]
     end
   end
 end
diff --git a/apps/workbench/test/unit/repository_test.rb b/apps/workbench/test/unit/repository_test.rb
new file mode 100644 (file)
index 0000000..87eedb5
--- /dev/null
@@ -0,0 +1,18 @@
+require 'test_helper'
+
+class RepositoryTest < ActiveSupport::TestCase
+  [
+    ['admin', true],
+    ['active', false],
+  ].each do |user, can_edit|
+    test "#{user} can edit attributes #{can_edit}" do
+      use_token user
+      attrs = Repository.new.editable_attributes
+      if can_edit
+        refute_empty attrs
+      else
+        assert_empty attrs
+      end
+    end
+  end
+end
index 06ef6c1198ffad07d888040794a42dce08e8af16..fad9b060ee65ea574d6c8a3a1f04e528559a6277 100644 (file)
@@ -1,7 +1,10 @@
-import os
 import glob
+import os
+import re
 import stat
 
+BACKSLASH_ESCAPE_RE = re.compile(r'\\(.)')
+
 class SubstitutionError(Exception):
     pass
 
@@ -73,7 +76,7 @@ def do_substitution(p, c, subs=default_subs):
     while True:
         m = search(c)
         if m is None:
-            return c
+            return BACKSLASH_ESCAPE_RE.sub(r'\1', c)
 
         v = do_substitution(p, c[m[0]+2 : m[1]])
         var = True
index ab1eb9efabda2f79e967369d19e9cfa7f2bacb85..0a6c4ede5c071c7d283aaccd3a869b57b7eefc59 100644 (file)
@@ -20,9 +20,11 @@ navbar:
       - start/getting_started/firstpipeline.html.textile.liquid
     - Common Use Cases:
       - start/getting_started/sharedata.html.textile.liquid
+    - Next Steps:
+      - start/getting_started/nextsteps.html.textile.liquid
 
   userguide:
-    - Getting Started:
+    - Welcome:
       - user/index.html.textile.liquid
       - user/getting_started/community.html.textile.liquid
     - Run a pipeline using Workbench:
@@ -44,10 +46,13 @@ navbar:
     - Develop a new pipeline:
       - user/tutorials/intro-crunch.html.textile.liquid
       - user/tutorials/running-external-program.html.textile.liquid
+      - user/topics/crunch-tools-overview.html.textile.liquid
       - user/tutorials/tutorial-firstscript.html.textile.liquid
       - user/tutorials/tutorial-submit-job.html.textile.liquid
       - user/topics/tutorial-parallel.html.textile.liquid
       - user/topics/arv-docker.html.textile.liquid
+    - Develop a web service:
+      - user/topics/arv-web.html.textile.liquid
     - Reference:
       - user/topics/run-command.html.textile.liquid
       - user/reference/job-pipeline-ref.html.textile.liquid
@@ -66,6 +71,7 @@ navbar:
       - sdk/python/sdk-python.html.textile.liquid
       - sdk/python/python.html.textile.liquid
       - sdk/python/crunch-utility-libraries.html.textile.liquid
+      - sdk/python/events.html.textile.liquid
     - Perl:
       - sdk/perl/index.html.textile.liquid
     - Ruby:
diff --git a/doc/_includes/_arv_run_redirection.liquid b/doc/_includes/_arv_run_redirection.liquid
new file mode 100644 (file)
index 0000000..aa63366
--- /dev/null
@@ -0,0 +1,19 @@
+<notextile>
+<pre>
+$ <span class="userinput">arv-run grep -H -n ATTGGAGGAAAGATGAGTGAC \< *.fastq \> output.txt</span>
+[...]
+ 1 stderr run-command: grep -H -n ATTGGAGGAAAGATGAGTGAC < /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq > output.txt
+ 2 stderr run-command: grep -H -n ATTGGAGGAAAGATGAGTGAC < /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_2.fastq > output.txt
+ 2 stderr run-command: completed with exit code 0 (success)
+ 2 stderr run-command: the following output files will be saved to keep:
+ 2 stderr run-command: 121 ./output.txt
+ 2 stderr run-command: start writing output to keep
+ 1 stderr run-command: completed with exit code 0 (success)
+ 1 stderr run-command: the following output files will be saved to keep:
+ 1 stderr run-command: 363 ./output.txt
+ 1 stderr run-command: start writing output to keep
+ 2 stderr upload wrote 121 total 121
+ 1 stderr upload wrote 363 total 363
+[..]
+</pre>
+</notextile>
diff --git a/doc/_includes/_events_py.liquid b/doc/_includes/_events_py.liquid
new file mode 100644 (file)
index 0000000..5662440
--- /dev/null
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+
+import arvados
+import arvados.events
+
+# 'ev' is a dict containing the log table record describing the change.
+def on_message(ev):
+    if ev.get("event_type") == "create" and ev.get("object_kind") == "arvados#collection":
+        print "A new collection was created: %s" % ev["object_uuid"]
+
+api = arvados.api("v1")
+ws = arvados.events.subscribe(api, [], on_message)
+ws.run_forever()
index 08124e661066f43da6fb1637a55d2f73ecb4b4eb..e205c59670550edcbee4c64fa620248e9d390979 100644 (file)
@@ -31,6 +31,9 @@ func main() {
        //
 
        type user struct {
+               // Remember to start each field name with a capital letter,
+               // otherwise it won't get populated by the arvados client because
+               // the field will be invisible to it.
                Uuid     string `json:"uuid"`
                FullName string `json:"full_name"`
        }
index 73b7abb9a355f6647c78fb98f9ca38074ab1b056..c6c8b28bff7f5ff95a3076842a1c2015215eaea8 100644 (file)
@@ -7,7 +7,7 @@
         <span class="icon-bar"></span>
         <span class="icon-bar"></span>
       </button>
-      <a class="navbar-brand" href="{{ site.baseurl }}/">Arvados Docs</a>
+      <a class="navbar-brand" href="{{ site.baseurl }}/">Arvados</a>
     </div>
     <div class="collapse navbar-collapse" id="bs-navbar-collapse">
       <ul class="nav navbar-nav">
@@ -17,7 +17,7 @@
         <li {% if page.navsection == 'api' %} class="active" {% endif %}><a href="{{ site.baseurl }}/api/index.html">API&nbsp;Reference</a></li>
         <li {% if page.navsection == 'adminguide' %} class="active" {% endif %}><a href="{{ site.baseurl }}/admin/index.html">Admin Guide</a></li>
         <li {% if page.navsection == 'installguide' %} class="active" {% endif %}><a href="{{ site.baseurl }}/install/index.html">Install Guide</a></li>
-        <li><a href="https://arvados.org/" style="padding-left: 2em">arvados.org&nbsp;&raquo;</a></li>
+        <li><a href="https://arvados.org/projects/arvados/" style="padding-left: 2em">Developer Site&nbsp;&raquo;</a></li>
       </ul>
     </div>
   </div>
index 2a45dd5bd053b6507210b26856fa81cfa92a82e6..b0b210f0e7a6397167636f0465d477175441600a 100644 (file)
@@ -2,7 +2,7 @@
 <html>
   <head>
     <meta charset="utf-8">
-    <title>{% unless page.title == "Arvados | Documentation" %} Arvados | Documentation | {% endunless %}{{ page.title }}</title>
+    <title>{% unless page.title == "Arvados" %} Arvados | Documentation | {% endunless %}{{ page.title }}</title>
     <meta name="viewport" content="width=device-width, initial-scale=1.0">
     <meta name="description" content="">
     <meta name="author" content="">
 
     </script>
 
+{% if page.no_nav_left %}
+{% else %}
 <p style="text-align: center"><small>
 The content of this documentation is licensed under the
 <a href="{{ site.baseurl }}/user/copying/by-sa-3.0.html">Creative
@@ -107,6 +109,8 @@ The content of this documentation is licensed under the
 Code samples in this documentation are licensed under the
 <a href="{{ site.baseurl }}/user/copying/LICENSE-2.0.html">Apache License, Version 2.0.</a></small>
 </p>
+{% endif %}
+
 
   </body>
 </html>
index 8760fe88edce9c41ab21e8518edcd66064b371bf..f5e685e2e9be44a1d740defff0a1ffb0e60da80f 100644 (file)
@@ -55,7 +55,7 @@ table(table table-bordered table-condensed).
 |filters|array|Conditions for filtering collections.|query||
 |select|array|Data fields to return in the result list.|query|@["uuid", "manifest_text"]@|
 
-N.B.: Because adding access tokens to manifests can be computationally expensive, the @manifest_text@ field is not included in results by default.  If you need it, pass a @select@ parameter that includes @manifest_text@.
+Note: Because adding access tokens to manifests can be computationally expensive, the @manifest_text@ field is not included in results by default.  If you need it, pass a @select@ parameter that includes @manifest_text@.
 
 h2. update
 
index 478662eea80665a734260ae28f15e4eb3e10c738..9f20a88a9519d09eb5d7fe040c93706379bc089d 100644 (file)
@@ -16,17 +16,18 @@ Required arguments are displayed in %{background:#ccffcc}green%.
 
 h2. contents
 
-Retrieve a list of items which are associated with the given group by ownership (i.e., the group owns the item) or a "name" link (i.e., a "name" link referencing the item).
+Retrieve a list of items owned by the group.
 
 Arguments:
 
 table(table table-bordered table-condensed).
 |_. Argument |_. Type |_. Description |_. Location |_. Example |
 {background:#ccffcc}.|uuid|string|The UUID of the group in question.|path||
-|include_linked|boolean|If false, results will only include items whose @owner_uuid@ attribute is the specified group. If true, results will additionally include items for which a "name" link exists.|path|{white-space:nowrap}. @false@ (default)
-@true@|
+|limit|integer (default 100)|Maximum number of items to return.|query||
+|order|string|Order in which to return matching items.  Sort within a resource type by prefixing the attribute with the resource name and a dot.|query|@"collections.modified_at desc"@|
+|filters|array|Conditions for filtering items.|query|@[["uuid", "is_a", "arvados#job"]]@|
 
-If @include_linked@ is @true@, the @"links"@ field in the response will contain the "name" links referencing the objects in the @"items"@ field.
+Note: Because adding access tokens to manifests can be computationally expensive, the @manifest_text@ field is not included in listed collections.  If you need it, request a "list of collections":{{site.baseurl}}/api/methods/collections.html with the filter @["owner_uuid", "=", GROUP_UUID]@, and @"manifest_text"@ listed in the select parameter.
 
 h2. create
 
index 69a8dc3366b658e81069b3805c06141513621960..9aa783cbf5f6b10d3e93c72523d67301eccfb410 100644 (file)
@@ -30,11 +30,10 @@ Each collection has, in addition to the usual "attributes of Arvados resources":
 
 table(table table-bordered table-condensed).
 |_. Attribute|_. Type|_. Description|_. Example|
-|locator|string|||
-|portable_data_hash|string|||
 |name|string|||
-|redundancy|number|||
-|redundancy_confirmed_by_client_uuid|string|API client||
-|redundancy_confirmed_at|datetime|||
-|redundancy_confirmed_as|number|||
+|description|text|||
+|portable_data_hash|string|||
 |manifest_text|text|||
+|replication_desired|number|Minimum storage replication level desired for each data block referenced by this collection. A value of @null@ signifies that the site default replication level (typically 2) is desired.|@2@|
+|replication_confirmed|number|Replication level most recently confirmed by the storage system. This field is null when a collection is first created, and is reset to null when the manifest_text changes in a way that introduces a new data block. An integer value indicates the replication level of the _least replicated_ data block in the collection.|@2@, null|
+|replication_confirmed_at|datetime|When replication_confirmed was confirmed. If replication_confirmed is null, this field is also null.||
index 38d3766da73eda028ce329cfbce43aa7a20626f4..957daec4924efc3a2b6980b8403de3e1a8c330c1 100644 (file)
Binary files a/doc/images/keyfeatures/chooseinputs.png and b/doc/images/keyfeatures/chooseinputs.png differ
index c002073d5ef0150c26c90dbcc9ef86f136c8debf..64ecf2cc0be72a026eb7605146cc2a5082e85154 100644 (file)
Binary files a/doc/images/keyfeatures/collectionpage.png and b/doc/images/keyfeatures/collectionpage.png differ
index b14d49b119fe55777414488511ccce01a1da485b..9ee8b3b11119da983169060cb8a876b75c2ad028 100644 (file)
Binary files a/doc/images/keyfeatures/dashboard2.png and b/doc/images/keyfeatures/dashboard2.png differ
index df20ea57d2744e29c9e8f97fdf321418cae3db60..a30691ba45b2c456d7f66e7230b6ce4aabcf7ee3 100644 (file)
Binary files a/doc/images/keyfeatures/graph.png and b/doc/images/keyfeatures/graph.png differ
index d74c72ab92a21dc82c69eca2bc7c0dd41964f7c8..3cc6f6f8656b7224a4665b5b0fc7fce60649a4c2 100644 (file)
Binary files a/doc/images/keyfeatures/log.png and b/doc/images/keyfeatures/log.png differ
index 730c776d6b075ea630d6a2c4e6a877bcebbfb2ed..eb0a1f99140570bb6c161385ddec64b2ded8611d 100644 (file)
Binary files a/doc/images/keyfeatures/provenance.png and b/doc/images/keyfeatures/provenance.png differ
index ae2080423099bbea5b323b2b20d67b77bd868496..c11c111e5599a2fac79ce5bdbd2841ca481185cb 100644 (file)
Binary files a/doc/images/keyfeatures/rerun.png and b/doc/images/keyfeatures/rerun.png differ
index 0bac277d71b84780f4d3fbb8b32ba34d70864bd3..2c9185565529a4212c6964bfe59778665ec87dd6 100644 (file)
Binary files a/doc/images/keyfeatures/running2.png and b/doc/images/keyfeatures/running2.png differ
index 1199a4a30831a6604e7c5ad23ed4c91be27b8045..1412be9f657761a446c3d60da2f1469abf67d8de 100644 (file)
Binary files a/doc/images/keyfeatures/shared.png and b/doc/images/keyfeatures/shared.png differ
index 1475711f055cbf4783620f1df11403c1a3747a65..00f5f6e6ecdc0d51dfb367f1b6104e6e56d33838 100644 (file)
Binary files a/doc/images/keyfeatures/webupload.png and b/doc/images/keyfeatures/webupload.png differ
index 21bc3f905f8f7de8f9150d02116c9ec59b2c5a30..79bf5d66de7bd5e444d361ec1a688ea3939e6c3d 100644 (file)
Binary files a/doc/images/quickstart/1.png and b/doc/images/quickstart/1.png differ
index fac32c4c6c96d11625f8197269f81f222ede064f..ddeb6f8a8fdd668a1be3e18562333b97016a6d6c 100644 (file)
Binary files a/doc/images/quickstart/2.png and b/doc/images/quickstart/2.png differ
index a080b014ddac49f337eaa04643b6b1ed05a29c50..8440b6bff04a3de4621e959454c9c2a42e7d8f0a 100644 (file)
Binary files a/doc/images/quickstart/3.png and b/doc/images/quickstart/3.png differ
index a6345efad81fc296f5d5cccae0a41eb1306d7360..405501c71e65327d9397c77f26a33132d5bf6fb5 100644 (file)
Binary files a/doc/images/quickstart/4.png and b/doc/images/quickstart/4.png differ
index 754d0a3021618b5db1305463aff5335c84ede548..f455cd4e84ae93d9a85c6971b69c29745f560669 100644 (file)
Binary files a/doc/images/quickstart/5.png and b/doc/images/quickstart/5.png differ
index 29fb7c01c4fdb1ff33b44baf10377338fc354123..328cdacde4f3dcbff06dbab49e45015e046e8961 100644 (file)
Binary files a/doc/images/quickstart/6.png and b/doc/images/quickstart/6.png differ
index e7be5c251aeec8ea90e3f6ddc7fd9708a5b598f1..f6d9b3d9b8f160968e53e115e39eccfa73ad76ad 100644 (file)
Binary files a/doc/images/quickstart/7.png and b/doc/images/quickstart/7.png differ
index 86b7d542aefe66b654834c27945a58afec6035d2..26314193b34519b913afb496386b30bf46e9aa2c 100644 (file)
Binary files a/doc/images/uses/shared.png and b/doc/images/uses/shared.png differ
index 22595e3eff4f5f5479b8dbcbcc18424b1bedceed..da8dd66b6487d1b81499b6665b4bdfeea0cb90ae 100644 (file)
@@ -2,7 +2,7 @@
 layout: default
 no_nav_left: true
 navsection: top
-title: Arvados | Documentation
+title: Arvados
 ...
 
 <div class="jumbotron">
@@ -10,7 +10,7 @@ title: Arvados | Documentation
     <div class="row">
       <div class="col-sm-6">
         <h1>ARVADOS</h1>
-        <p>A free and open source bioinformatics platform for genomic and biomedical data</p>
+        <p>A free and open source platform for big data science</p>
       </div>
       <div class="col-sm-6">
         <img src="images/dax-reading-book.png" style="max-height: 10em" alt="Dax reading a book" />
@@ -22,14 +22,24 @@ title: Arvados | Documentation
 <div class="container-fluid">
   <div class="row">
     <div class="col-sm-5">
-      <p><a href="https://arvados.org/">Arvados</a>  enables you to quickly begin using cloud computing resources in your bioinformatics work. It allows you to track your methods and datasets, share them securely, and easily re-run analyses.
+      <p><a href="https://arvados.org/">Arvados</a> enables you to quickly begin using cloud computing resources in your data science work. It allows you to track your methods and datasets, share them securely, and easily re-run analyses.
       </p>
-      <p>Check out our <a href="{{ site.baseurl }}/start/index.html">key features</a>, complete with screenshots, and then follow our tutorial to <a href="{{ site.baseurl }}/start/getting_started/firstpipeline.html">run your first pipeline</a> using our <a href="http://lp.curoverse.com/beta-signup/">public beta</a>.
+      <p><strong>Quickstart</strong>: Check out our <a href="{{ site.baseurl }}/start/index.html">key features</a>, complete with screenshots, and then follow our tutorial to <a href="{{ site.baseurl }}/start/getting_started/firstpipeline.html">run your first pipeline</a> using our <a href="http://lp.curoverse.com/beta-signup/">public beta</a>.
       </p>
-      <p>On this page, you can also find more in-depth guides for using Arvados.
+      <p><strong>News</strong>: Read our <a href="https://arvados.org/projects/arvados/blogs">blog updates</a> or look through our <a href="https://arvados.org/projects/arvados/activity">recent developer activity</a>.
       </p>
+      <p><strong>Questions?</strong> Email <a href="http://lists.arvados.org/mailman/listinfo/arvados">the mailing list</a>, or chat with us on IRC: <a href="irc://irc.oftc.net:6667/#arvados">#arvados</a> @ OFTC (you can <a href="https://webchat.oftc.net/?channels=arvados">join in your browser</a>).
+      </p>
+      <p><strong>Want to contribute?</strong> Check out our <a href="https://arvados.org/projects/arvados">developer site</a>. We're open source, check out our code on <a href="https://github.com/curoverse/arvados">github</a>.
+      </p>
+      <p><strong>License</strong>: Arvados is under the copyleft <a href="{{ site.baseurl }}/user/copying/agpl-3.0.html">GNU AGPL v3</a>, with our SDKs under <a href="{{ site.baseurl }}/user/copying/LICENSE-2.0.html">Apache License 2.0</a> (so that you can incorporate proprietary toolchains into your pipelines).
+      </p>
+
     </div>
     <div class="col-sm-7" style="border-left: solid; border-width: 1px">
+      <p>Below you can also find more in-depth guides for using Arvados.
+      </p>
+      <br>
       <p>
         <a href="{{ site.baseurl }}/start/index.html">Getting Started</a> &mdash; Start here if you're new to Arvados.
       </p>
@@ -48,6 +58,14 @@ title: Arvados | Documentation
       <p>
         <a href="{{ site.baseurl }}/install/index.html">Install Guide</a> &mdash; How to install Arvados on a cloud platform.
       </p>
+      <br>
+      <p><em><small>
+      The content of the above documentation is licensed under the
+      <a href="{{ site.baseurl }}/user/copying/by-sa-3.0.html">Creative
+        Commons Attribution-Share Alike 3.0 United States</a> license. Code samples in the above documentation are licensed under the
+      <a href="{{ site.baseurl }}/user/copying/LICENSE-2.0.html">Apache License, Version 2.0.</a></small></em>
+      </p>
+
     </div>
   </div>
 </div>
index 3d44250541a406b3520ffb8b8c3fb54479f48426..d64fb23df26518b2467b88d2d518a6779ab515c3 100644 (file)
@@ -6,9 +6,49 @@ title: "Overview"
 
 ...
 
-The @arv@ CLI tool provides a set of wrappers to make API calls. Additionally, it provides access to a number of subcommands.
+The @arv@ CLI tool provides a convenient interface to manipulate API resources. Additionally, it provides access to a number of subcommands.
 
-h3. Wrappers for API calls
+h3. Syntax
+
+The @arv@ command takes the following arguments:
+
+<pre>
+Arvados command line client
+Usage: arv [--flags] subcommand|resource [method] [--parameters]
+
+Available flags:
+  -n, --dry-run       Don't actually do anything
+  -v, --verbose       Print some things on stderr
+  -f, --format=<s>    Set the output format. Must be one of json (default),
+                      yaml or uuid. (Default: json)
+  -s, --short         Return only UUIDs (equivalent to --format=uuid)
+
+Use 'arv subcommand|resource --help' to get more information about a particular
+command or resource.
+
+Available subcommands: copy, create, edit, keep, pipeline, run, tag, ws
+
+Available resources: api_client_authorization, api_client, authorized_key,
+collection, user_agreement, group, job_task, link, log, keep_disk,
+pipeline_instance, node, repository, specimen, pipeline_template, user,
+virtual_machine, trait, human, job, keep_service
+
+Additional options:
+  -e, --version       Print version and exit
+  -h, --help          Show this message
+</pre>
+
+h4. Flags: @--format@
+
+- @--format=json@ := Output response as JSON. This is the default format.
+
+- @--format=yaml@ := Output response as YAML
+
+- @--format=uuid@ := Output only the UUIDs of object(s) in the API response, one per line.
+
+
+
+h3. Resources
 
 See the "arv reference":{{site.baseurl}}/sdk/cli/reference.html page.
 
index bc5cf1e6c10ed7f055f9b2955d48c7737febc5cf..5e5f2387988f5570f90a9e278e65e4a7fc10a465 100644 (file)
@@ -9,21 +9,12 @@ _In order to use the @arv@ command, make sure that you have a "working environme
 
 h3. Usage
 
-@arv [global_options] resource_type resource_method [method_parameters]@
-
-h4. Global options
-
-- @--format=json@ := Output response as JSON. This is the default format.
-
-- @--format=yaml@ := Output response as YAML
-
-- @--format=uuid@ := Output only the UUIDs of object(s) in the API response, one per line.
-
+See the "CLI overview":{{site.baseurl}}/sdk/cli/index.html page.
 
 h3. Resource types and methods
 
 Get list of resource types
-@arv --resources@
+@arv --help@
 
 Get list of resource methods for the "user" resource type
 @arv user --help@
index 5d82f7ac01dcc859cf4a0edc7d4f061a554c90d4..c7655ba78e9a22b7068d02fba1ea9565df5f9d13 100644 (file)
@@ -243,7 +243,8 @@ $ <code class="userinput">arv keep put --help</code>
 usage: arv-put [-h] [--max-manifest-depth N | --normalize]
                [--as-stream | --stream | --as-manifest | --in-manifest | --manifest | --as-raw | --raw]
                [--use-filename FILENAME] [--filename FILENAME]
-               [--portable-data-hash] [--project-uuid UUID] [--name NAME]
+               [--portable-data-hash] [--replication N]
+               [--project-uuid UUID] [--name NAME]
                [--progress | --no-progress | --batch-progress]
                [--resume | --no-resume] [--retries RETRIES]
                [path [path ...]]
@@ -289,6 +290,10 @@ optional arguments:
                         a directory. Implies --manifest.
   --portable-data-hash  Print the portable data hash instead of the Arvados
                         UUID for the collection created by the upload.
+  --replication N       Set the replication level for the new collection: how
+                        many different physical storage devices (e.g., disks)
+                        should have a copy of each data block. Default is to
+                        use the server-provided default (if any) or 2.
   --project-uuid UUID   Store the collection in the specified project, instead
                         of your Home project.
   --name NAME           Save the collection with the specified name.
diff --git a/doc/sdk/python/events.html.textile.liquid b/doc/sdk/python/events.html.textile.liquid
new file mode 100644 (file)
index 0000000..6afb9b5
--- /dev/null
@@ -0,0 +1,12 @@
+---
+layout: default
+navsection: sdk
+navmenu: Python
+title: Subscribing to events
+...
+
+Arvados applications can subscribe to a live event stream from the database.  Events are described in the "Log record schema":{{site.baseurl}}/api/schema/Log.html page.
+
+<notextile>
+{% code 'events_py' as python %}
+</notextile>
diff --git a/doc/start/getting_started/nextsteps.html.textile.liquid b/doc/start/getting_started/nextsteps.html.textile.liquid
new file mode 100644 (file)
index 0000000..c2ebbfb
--- /dev/null
@@ -0,0 +1,7 @@
+---
+layout: default
+navsection: start
+title: Check out the User Guide
+...
+
+Now that you've finished the Getting Started guide, check out the "User Guide":{{site.baseurl}}/user/index.html. The User Guide goes into more depth than the Getting Started guide, covers how to develop your own pipelines in addition to using pre-existing pipelines, covers the Arvados command line tools in addition to the Workbench graphical interface to Arvados, and can be referenced in any order.
index 572ed344bd8f40048d624de45dbd89f96e31f8a1..d5d2fcd9ede50a0fd46d1ef30cd8ff881dae9df3 100644 (file)
@@ -61,7 +61,7 @@ Step-by-step instructions are shown below.
     </div>
 
     <div class="item">
-      <img src="{{ site.baseurl }}/images/uses/share.png" alt="Click 'Create sharing link'. You can click 'unshare' at any later point.">
+      <img src="{{ site.baseurl }}/images/uses/sharing.png" alt="Click 'Create sharing link'. You can click 'unshare' at any later point.">
       <div class="carousel-caption">
         Click 'Create sharing link'. You can click 'Unshare' at any later point.
       </div>
index 46a55aec89bdb0f8a23bb57dac62aef7d8806ca5..b108ff707708a35d3c95c97c1e153c306b753055 100644 (file)
@@ -4,15 +4,17 @@ navsection: userguide
 title: Welcome to Arvados!
 ...
 
-_If you are new to Arvados and want to get started quickly, go to "Accessing Arvados Workbench.":{{site.baseurl}}/user/getting_started/workbench.html_
+_If you are new to Arvados, please read the "Getting Started":{{site.baseurl}}/start/index.html guide for a quick introduction to working with Arvados._
 
-This guide provides an introduction to using Arvados to solve big data bioinformatics problems, including:
+This guide provides a reference for using Arvados to solve big data bioinformatics problems, including:
 
 * Robust storage of very large files, such as whole genome sequences, using the "Arvados Keep":{{site.baseurl}}/user/tutorials/tutorial-keep.html content-addressable cluster file system.
 * Running compute-intensive genomic analysis pipelines, such as alignment and variant calls using the "Arvados Crunch":{{site.baseurl}}/user/tutorials/intro-crunch.html cluster compute engine.
 * Storing and querying metadata about genome sequence files, such as human subjects and their phenotypic traits using the "Arvados Metadata Database.":{{site.baseurl}}/user/topics/tutorial-trait-search.html
 * Accessing, organizing, and sharing data, pipelines and results using the "Arvados Workbench":{{site.baseurl}}/user/getting_started/workbench.html web application.
 
+This User Guide goes into more depth than the "Getting Started guide":{{site.baseurl}}/start/index.html, covers how to develop your own pipelines in addition to using pre-existing pipelines, covers the Arvados command-line tools in addition to the Workbench graphical interface to Arvados, and can be referenced in any order.
+
 The examples in this guide use the Arvados instance located at <a href="https://{{ site.arvados_workbench_host }}/" target="_blank">https://{{ site.arvados_workbench_host }}</a>.  If you are using a different Arvados instance replace @{{ site.arvados_workbench_host }}@ with your private instance in all of the examples in this guide.
 
 Curoverse maintains a public Arvados instance located at <a href="https://workbench.qr1hi.arvadosapi.com/" target="_blank">https://workbench.qr1hi.arvadosapi.com/</a>.  You must have an account in order to use this service.  If you would like to request an account, please send an email to "arvados@curoverse.com":mailto:arvados@curoverse.com.
index 300ff2ff0431185cba11fa0d696c21f3dbe4c4db..862b19c2c9a8d4912818b517d98eb4b7e1b4099c 100644 (file)
@@ -69,25 +69,7 @@ You may use standard input (@<@) and standard output (@>@) redirection.  This wi
 
 (Note: because the syntax is designed to mimic standard shell syntax, it is necessary to quote the metacharacters @<@, @>@ and @|@ as either @\<@, @\>@ and @\|@ or @'<'@, @'>'@ and @'|'@.)
 
-<notextile>
-<pre>
-$ <span class="userinput">arv-run grep -H -n ATTGGAGGAAAGATGAGTGAC \< *.fastq \> output.txt</span>
-[...]
- 1 stderr run-command: grep -H -n ATTGGAGGAAAGATGAGTGAC < /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq > output.txt
- 2 stderr run-command: grep -H -n ATTGGAGGAAAGATGAGTGAC < /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_2.fastq > output.txt
- 2 stderr run-command: completed with exit code 0 (success)
- 2 stderr run-command: the following output files will be saved to keep:
- 2 stderr run-command: 121 ./output.txt
- 2 stderr run-command: start writing output to keep
- 1 stderr run-command: completed with exit code 0 (success)
- 1 stderr run-command: the following output files will be saved to keep:
- 1 stderr run-command: 363 ./output.txt
- 1 stderr run-command: start writing output to keep
- 2 stderr upload wrote 121 total 121
- 1 stderr upload wrote 363 total 363
-[..]
-</pre>
-</notextile>
+{% include 'arv_run_redirection' %}
 
 You may use "run-command":run-command.html parameter substitution in the output file name to generate different filenames for each task:
 
diff --git a/doc/user/topics/arv-web.html.textile.liquid b/doc/user/topics/arv-web.html.textile.liquid
new file mode 100644 (file)
index 0000000..cf3a3cc
--- /dev/null
@@ -0,0 +1,98 @@
+---
+layout: default
+navsection: userguide
+title: "Using arv-web"
+...
+
+@arv-web@ enables you to run a custom web service from the contents of an Arvados collection.
+
+h2. Usage
+
+@arv-web@ enables you to set up a web service based on the most recent collection in a project.  An arv-web application is a reproducible, immutable application bundle where the web app is packaged with both the code to run and the data to serve.  Because Arvados Collections can be updated with minimum duplication, it is efficient to produce a new application bundle when the code or data needs to be updated; retaining old application bundles makes it easy to go back and run older versions of your web app.
+
+<pre>
+usage: arv-web.py [-h] --project-uuid PROJECT_UUID [--port PORT]
+                  [--image IMAGE]
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --project-uuid PROJECT_UUID
+                        Project uuid to watch
+  --port PORT           Host port to listen on (default 8080)
+  --image IMAGE         Docker image to run
+</pre>
+
+At startup, @arv-web@ queries an Arvados project and mounts the most recently modified collection into a temporary directory.  It then runs a Docker image with the collection bound to @/mnt@ inside the container.  When a new collection is added to the project, or an existing project is updated, it will stop the running Docker container, unmount the old collection, mount the new most recently modified collection, and restart the Docker container with the new mount.
+
+h2. Docker container
+
+The @Dockerfile@ in @arvados/docker/arv-web@ builds a Docker image that runs Apache with @/mnt@ as the DocumentRoot.  It is configured to run web applications which use Python WSGI, Ruby Rack, or CGI; to serve static HTML; or browse the contents of the @public@ subdirectory of the collection using default Apache index pages.
+
+To build the Docker image:
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd arvados/docker</span>
+~/arvados/docker$ <span class="userinput">docker build -t arvados/arv-web arv-web</span>
+</code></pre>
+</notextile>
+
+h2. Running sample applications
+
+First, in Arvados Workbench, create a new project.  Copy the project UUID from the URL bar (this is the part of the URL after @projects/...@).
+
+Now upload a collection containing a "Python WSGI web app:":http://wsgi.readthedocs.org/en/latest/
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd arvados/services/arv-web</span>
+~/arvados/services/arv-web$ <span class="userinput">arv-put --project [zzzzz-j7d0g-yourprojectuuid] --name sample-wsgi-app sample-wsgi-app</span>
+0M / 0M 100.0%
+Collection saved as 'sample-wsgi-app'
+zzzzz-4zz18-ebohzfbzh82qmqy
+~/arvados/services/arv-web$ <span class="userinput">./arv-web.py --project [zzzzz-j7d0g-yourprojectuuid] --port 8888</span>
+2015-01-30 11:21:00 arvados.arv-web[4897] INFO: Mounting zzzzz-4zz18-ebohzfbzh82qmqy
+2015-01-30 11:21:01 arvados.arv-web[4897] INFO: Starting Docker container arvados/arv-web
+2015-01-30 11:21:02 arvados.arv-web[4897] INFO: Container id e79e70558d585a3e038e4bfbc97e5c511f21b6101443b29a8017bdf3d84689a3
+2015-01-30 11:21:03 arvados.arv-web[4897] INFO: Waiting for events
+</code></pre>
+</notextile>
+
+The sample application will be available at @http://localhost:8888@.
+
+h3. Updating the application
+
+If you upload a new collection to the same project, arv-web will restart the web service and serve the new collection.  For example, uploading a collection containing a "Ruby Rack web app:":https://github.com/rack/rack/wiki
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd arvados/services/arv-web</span>
+~/arvados/services/arv-web$ <span class="userinput">arv-put --project [zzzzz-j7d0g-yourprojectuuid] --name sample-rack-app sample-rack-app</span>
+0M / 0M 100.0%
+Collection saved as 'sample-rack-app'
+zzzzz-4zz18-dhhm0ay8k8cqkvg
+</code></pre>
+</notextile>
+
+@arv-web@ will automatically notice the change, load a new container, and send an update signal (SIGHUP) to the service:
+
+<pre>
+2015-01-30 11:21:03 arvados.arv-web[4897] INFO:Waiting for events
+2015-01-30 11:21:04 arvados.arv-web[4897] INFO:create zzzzz-4zz18-dhhm0ay8k8cqkvg
+2015-01-30 11:21:05 arvados.arv-web[4897] INFO:Mounting zzzzz-4zz18-dhhm0ay8k8cqkvg
+2015-01-30 11:21:06 arvados.arv-web[4897] INFO:Sending refresh signal to container
+2015-01-30 11:21:07 arvados.arv-web[4897] INFO:Waiting for events
+</pre>
+
+h2. Writing your own applications
+
+The @arvados/arv-web@ image serves Python and Ruby applications using Phusion Passenger and Apache @mod_passenger@.  See "Phusion Passenger users guide for Apache":https://www.phusionpassenger.com/documentation/Users%20guide%20Apache.html for details, and look at the sample apps @arvados/services/arv-web/sample-wsgi-app@ and @arvados/services/arv-web/sample-rack-app@.
+
+You can serve CGI applications using standard Apache CGI support.  See "Apache Tutorial: Dynamic Content with CGI":https://httpd.apache.org/docs/current/howto/cgi.html for details, and look at the sample app @arvados/services/arv-web/sample-cgi-app@.
+
+You can also serve static content from the @public@ directory of the collection.  Look at @arvados/services/arv-web/sample-static-page@ for an example.  If no @index.html@ is found in @public/@, it will render default Apache index pages, permitting simple browsing of the collection contents.
+
+h3. Custom images
+
+You can provide your own Docker image.  The Docker image that will be used to create the web application container is specified in the @docker_image@ file in the root of the collection.  You can also specify @--image@ on the @arv-web@ command line to choose the Docker image (this will override the contents of @docker_image@).
+
+h3. Reloading the web service
+
+Stopping the Docker container and starting it again can result in a small amount of downtime.  When the collection containing a new or updated web application uses the same Docker image as the currently running web application, it is possible to avoid this downtime by keeping the existing container and only reloading the web server.  This is accomplished by providing a file called @reload@ in the root of the collection, which should contain the commands necessary to reload the web server inside the container.
diff --git a/doc/user/topics/crunch-tools-overview.html.textile.liquid b/doc/user/topics/crunch-tools-overview.html.textile.liquid
new file mode 100644 (file)
index 0000000..994f437
--- /dev/null
@@ -0,0 +1,63 @@
+---
+layout: default
+navsection: userguide
+title: "Tools for writing Crunch pipelines"
+...
+
+Arvados includes a number of tools to help you develop pipelines and jobs for Crunch.  This overview explains each tool's intended use to help you choose the right one.
+
+h2. Use the "arv-run command-line utility":arv-run.html
+
+arv-run is an interactive command-line tool.  You run it as the first command of a traditional Unix shell command line, and it converts that work into an Arvados pipeline.  It automatically uploads any required data to Arvados, and dispatches work in parallel when possible.  This lets you easily migrate analysis work that you're doing on the command line to Arvados compute nodes.
+
+arv-run is best suited to complement work you already do on the command line.  If you write a shell one-liner that generates useful data, you can then call it with arv-run to parallelize it across a larger data set and save the results in Arvados.  For example, this run searches multiple FASTQ files in parallel, and saves the results to Keep through shell redirection:
+
+{% include 'arv_run_redirection' %}
+
+arv-run does not generate pipeline templates, or implement higher-level shell constructs like flow control.  If you want to make it easy to rerun your pipeline with different data later, or adapt to different inputs, it's best to write your own template.
+
+Refer to the "arv-run documentation":arv-run.html for details.
+
+h2. Write a "pipeline template":{{site.baseurl}}/user/tutorials/running-external-program.html
+
+Pipeline templates describe a set of analysis programs that should be run, and the inputs they require.  You can provide a high-level description of how data flows through the pipeline—for example, the outputs of programs A and B are provided as input to program C—and let Crunch take care of the details of starting the individual programs at the right time with the inputs you specified.
+
+Pipeline templates are written in JSON.  Once you save a pipeline template in Arvados, you run it by creating a pipeline instance that lists the specific inputs you'd like to use.  Arvados Workbench and the @arv pipeline run@ command-line tool both provide high-level interfaces to do this easily.  The pipeline's final output(s) will be saved in a project you specify.
+
+See the User Guide topic to learn how to "write and run your own pipelines":{{site.baseurl}}/user/tutorials/running-external-program.html.  The rest of this page suggests specific tools to use in your templates.
+
+h3. The "run-command Crunch script":run-command.html
+
+run-command is a Crunch script that is included with Arvados.  It builds a command line from its input parameters.  It runs that command on files in Collections using the Keep mount provided by Crunch.  Output files created by the command are saved in a new collection, which is considered the program's final output.  It can run the command in parallel on a list of inputs, and introspect arguments so you can, for example, generate output filenames based on input filenames.
+
+run-command is a great way to use an existing analysis tool inside an Arvados pipeline.  You might use one or two tools in a larger pipeline, or convert a simple series of tool invocations into a pipeline to benefit from Arvados' provenance tracking and job reuse.  For example, here's a one-step pipeline that uses run-command with bwa to align a single paired-end read FASTQ sample:
+
+<notextile>{% code 'run_command_simple_example' as javascript %}</notextile>
+
+run-command is limited to manipulating the tool's command-line arguments, and can only parallelize on simple lists of inputs.  If you need to preprocess input, or dispatch work differently based on those inputs, consider writing your own Crunch script.
+
+Refer to the "run-command reference":run-command.html for details.
+
+h3. Writing "your own Crunch script":{{site.baseurl}}/user/tutorials/tutorial-firstscript.html with the Python SDK
+
+Arvados includes a Python SDK designed to help you write your own Crunch scripts.  It provides a native Arvados API client; Collection classes that provide file-like objects to interact with data in Keep; and utility functions to work within Crunch's execution environment.  Using the Python SDK, you can efficiently dispatch work with however much sophistication you require.
+
+Writing your own Crunch script is the best way to do analysis in Arvados when an existing tool does not meet your needs.  By interacting directly with Arvados objects, you'll have full power to introspect and adapt to your input, introduce minimal overhead, and get very direct error messages in case there's any trouble.  As a simple example, here's a Crunch script that checksums each file in a collection in parallel, saving the results in Keep:
+
+<notextile>{% code 'tutorial_hash_script_py' as python %}</notextile>
+
+There's no limit to what you can do with your own Crunch script.  The downside is the amount of time and effort you're required to invest to write and debug new code.  If you have to do that anyway, writing a Crunch script will give you the most benefit from using Arvados.
+
+Refer to the "User Guide topic on writing Crunch scripts":{{site.baseurl}}/user/tutorials/tutorial-firstscript.html and the "Python SDK reference":{{site.baseurl}}/sdk/python/python.html for details.
+
+h3. Combining run-command and custom Crunch scripts in a pipeline
+
+Just because you need to write some new code to do some work doesn't mean that you have to do all the work in your own Crunch script.  You can combine your custom steps with existing tools in a pipeline, passing data between them.  For example, maybe there's a third-party tool that does most of the analysis work you need, but you often need to massage the tool's data.  You could write your own preprocessing script that creates a new collection to use as the input of a run-command job, or a postprocessing script to create a final output after the tool is done, and tie them all together in a pipeline.  Just like Unix pipes, Arvados pipelines let you combine smaller tools to maximize utility.
+
+h3. Using run-command with your legacy scripts
+
+Perhaps you've already written your own analysis program that you want to run inside Arvados.  Currently, the easiest way to do that is to copy run-command from the Arvados source code to your own Arvados git repository, along with your internal tool.  Then your pipeline can call run-command from your own repository to execute the internal tool alongside it.
+
+This approach has the downside that you'll have to copy and push run-command again any time there's an update you'd like to use.  Future Arvados development will make it possible to get code from multiple git repositories, so your job can use the latest run-command in the Arvados source, as well as the latest tool in your own git repository.  Follow "Arvados issue #4561":https://arvados.org/issues/4561 for updates.
+
+Alternatively, you can "build a Docker image that includes your program, add it to Arvados":arv-docker.html, then run the Arvados run-command script inside that Docker image.
index ca0045b3139d426e3b91280aec25e791453073e3..f1d42adceb9aa9d69a8d0529f2d1013ca92b7ec5 100644 (file)
@@ -74,6 +74,16 @@ table(table table-bordered table-condensed).
 |$(basename&nbsp;...)   | Strip leading directory and trailing file extension from the path provided.  For example, $(basename /foo/bar.baz.txt) will evaluate to "bar.baz".|
 |$(glob ...)       | Take a Unix shell path pattern (supports @*@ @?@ and @[]@) and search the local filesystem, returning the first match found.  Use together with $(dir ...) to get a local filesystem path for Arvados collections.  For example: $(glob $(dir $(mycollection)/*.bam)) will find the first .bam file in the collection specified by the user parameter "mycollection".  If there is more than one match, which one is returned is undefined.  Will raise an error if no matches are found.|
 
+h3. Escape sequences
+
+If your command includes a @$()@ sequence that shouldn't be interpreted by run-command&mdash;for example, because you're writing shell code that calls a subcommand&mdash;you can prevent run-command from interpreting it by placing a backslash in front of the @$@ character.  Note that JSON also uses backslash to escape characters, so you'll need to write two backslashes for run-command to see one after parsing the parameter.  This example uppercases all alphabetic characters in the "pattern" parameter before using it as a regular expression in grep:
+
+<pre>{"command": ["bash", "-c", "grep \\$(echo '$(pattern)' | tr a-z A-Z) '$(input)'"]}</pre>
+
+You can put a literal backslash in your command by escaping it with another backslash.  Ultimately this means that where the primary Unix command includes a single backslash, you'll need to write four backslashes: double the backslashes for run-command escaping, then double them again for JSON escaping.
+
+<pre>{"command": ["grep", "\\\\bword\\\\b", "$(input)"]}</pre>
+
 h2. List context
 
 Where specified by the documentation, parameters may be evaluated in a "list context".  That means the value will evaluate to a list instead of a string.  Parameter values can be a static list, a path to a file, a path to a directory, or a JSON object describing a list context function.
diff --git a/docker/arv-web/Dockerfile b/docker/arv-web/Dockerfile
new file mode 100644 (file)
index 0000000..11a9c17
--- /dev/null
@@ -0,0 +1,15 @@
+FROM arvados/passenger
+MAINTAINER Peter Amstutz <peter.amstutz@curoverse.com>
+
+ADD apache2_foreground.sh /etc/apache2/foreground.sh
+
+ADD apache2_vhost /etc/apache2/sites-available/arv-web
+RUN \
+  mkdir /var/run/apache2 && \
+  a2dissite default && \
+  a2ensite arv-web && \
+  a2enmod rewrite
+
+EXPOSE 80
+
+CMD ["/etc/apache2/foreground.sh"]
\ No newline at end of file
diff --git a/docker/arv-web/apache2_foreground.sh b/docker/arv-web/apache2_foreground.sh
new file mode 100755 (executable)
index 0000000..76766a6
--- /dev/null
@@ -0,0 +1,8 @@
+#! /bin/bash
+
+read pid cmd state ppid pgrp session tty_nr tpgid rest < /proc/self/stat
+trap "kill -HUP -$pgrp" HUP
+trap "kill -TERM -$pgrp; exit" EXIT TERM QUIT
+
+source /etc/apache2/envvars
+/usr/sbin/apache2 -D FOREGROUND
diff --git a/docker/arv-web/apache2_vhost b/docker/arv-web/apache2_vhost
new file mode 100644 (file)
index 0000000..5268201
--- /dev/null
@@ -0,0 +1,19 @@
+<VirtualHost *:80>
+  # Index file and Document Root (where the public files are located)
+  DirectoryIndex index.html
+  DocumentRoot /mnt/public
+  RackBaseURI /
+
+  LogLevel warn
+  ErrorLog  ${APACHE_LOG_DIR}/error.log
+  CustomLog ${APACHE_LOG_DIR}/access.log combined
+
+  <Directory /mnt/public>
+    Options Indexes IncludesNoExec
+    Options -MultiViews
+    AllowOverride All
+    Order allow,deny
+    Allow from all
+  </Directory>
+
+</VirtualHost>
index d92349c50be7f387faeaa798f91867d0c8b6bf5f..9d93d2bdef7099413e3ca32a34b70ee2e0141163 100644 (file)
@@ -60,6 +60,8 @@ SLURM_DEPS = slurm/Dockerfile config.yml $(SLURM_GENERATED)
 
 JOBS_DEPS = jobs/Dockerfile
 
+ARV_WEB_DEPS = arv-web/Dockerfile arv-web/apache2_foreground.sh arv-web/apache2_vhost
+
 JAVA_BWA_SAMTOOLS_DEPS = java-bwa-samtools/Dockerfile
 
 API_DEPS = api/* config.yml $(API_GENERATED)
@@ -209,6 +211,10 @@ sso-image: passenger-image $(SSO_DEPS)
        $(DOCKER_BUILD) -t arvados/sso sso
        date >sso-image
 
+arv-web-image: passenger-image $(ARV_WEB_DEPS)
+       $(DOCKER_BUILD) -t arvados/arv-web arv-web
+       date >arv-web-image
+
 # ============================================================
 # The arvados/base image is the base Debian image plus packages
 # that are dependencies for every Arvados service.
index 5fcf5465f3f3a1f4978cfbff3c8a4a89bc02e47e..8491aeec137b3353dc8115f24665da7181fa4294 100644 (file)
@@ -22,7 +22,7 @@ Gem::Specification.new do |s|
   s.executables << "arv-crunch-job"
   s.executables << "arv-tag"
   s.required_ruby_version = '>= 2.1.0'
-  s.add_runtime_dependency 'arvados', '~> 0.1', '>= 0.1.0'
+  s.add_runtime_dependency 'arvados', '~> 0.1', '>= 0.1.20150128223554'
   s.add_runtime_dependency 'google-api-client', '~> 0.6.3', '>= 0.6.3'
   s.add_runtime_dependency 'activesupport', '~> 3.2', '>= 3.2.13'
   s.add_runtime_dependency 'json', '~> 1.7', '>= 1.7.7'
index a142dba10f0816d7ee9b2ce323c7fff96ec360ac..36ec037bd80702b27137cb07824ca21cda641d99 100755 (executable)
@@ -15,7 +15,7 @@ end
 begin
   require 'curb'
   require 'rubygems'
-  require 'google/api_client'
+  require 'arvados/google_api_client'
   require 'json'
   require 'pp'
   require 'trollop'
@@ -52,36 +52,6 @@ module Kernel
   end
 end
 
-class Google::APIClient
- def discovery_document(api, version)
-   api = api.to_s
-   discovery_uri = self.discovery_uri(api, version)
-   discovery_uri_hash = Digest::MD5.hexdigest(discovery_uri)
-   return @discovery_documents[discovery_uri_hash] ||=
-     begin
-       # fetch new API discovery doc if stale
-       cached_doc = File.expand_path "~/.cache/arvados/discovery-#{discovery_uri_hash}.json" rescue nil
-
-       if cached_doc.nil? or not File.exist?(cached_doc) or (Time.now - File.mtime(cached_doc)) > 86400
-         response = self.execute!(:http_method => :get,
-                                  :uri => discovery_uri,
-                                  :authenticated => false)
-
-         begin
-           FileUtils.makedirs(File.dirname cached_doc)
-           File.open(cached_doc, 'w') do |f|
-             f.puts response.body
-           end
-         rescue
-           return JSON.load response.body
-         end
-       end
-
-       File.open(cached_doc) { |f| JSON.load f }
-     end
- end
-end
-
 class ArvadosClient < Google::APIClient
   def execute(*args)
     if args.last.is_a? Hash
index 2415217a21bd4dbf6c7e2ae54cbca8d915870d3f..c312f5d169fb77b7b853b113ce97195418dd2f56 100755 (executable)
@@ -1274,10 +1274,10 @@ sub fetch_block
   return $output_block;
 }
 
-# create_output_collections generates a new collection containing the
-# output of each successfully completed task, and returns the
-# portable_data_hash for the new collection.
-#
+# Create a collection by concatenating the output of all tasks (each
+# task's output is either a manifest fragment, a locator for a
+# manifest fragment stored in Keep, or nothing at all). Return the
+# portable_data_hash of the new collection.
 sub create_output_collection
 {
   Log (undef, "collate");
@@ -1292,10 +1292,11 @@ sub create_output_collection
                   '.execute()["portable_data_hash"]'
       );
 
+  my $task_idx = -1;
   for (@jobstep)
   {
-    next if (!exists $_->{'arvados_task'}->{'output'} ||
-             !$_->{'arvados_task'}->{'success'});
+    ++$task_idx;
+    next unless exists $_->{'arvados_task'}->{'output'};
     my $output = $_->{'arvados_task'}->{output};
     if ($output !~ /^[0-9a-f]{32}(\+\S+)*$/)
     {
@@ -1307,7 +1308,8 @@ sub create_output_collection
     }
     else
     {
-      Log (undef, "XXX fetch_block($output) failed XXX");
+      my $uuid = $_->{'arvados_task'}->{'uuid'};
+      Log (undef, "Error retrieving '$output' output by task $task_idx ($uuid)");
       $main::success = 0;
     }
   }
@@ -1907,10 +1909,15 @@ if (readlink ("$destdir.commit") eq $commit && -d $destdir) {
 
 unlink "$destdir.commit";
 mkdir $destdir;
-open TARX, "|-", "tar", "-xC", $destdir;
-{
-  local $/ = undef;
-  print TARX <DATA>;
+
+if (!open(TARX, "|-", "tar", "-xC", $destdir)) {
+  die "Error launching 'tar -xC $destdir': $!";
+}
+# If we send too much data to tar in one write (> 4-5 MiB), it stops, and we
+# get SIGPIPE.  We must feed it data incrementally.
+my $tar_input;
+while (read(DATA, $tar_input, 65536)) {
+  print TARX $tar_input;
 }
 if(!close(TARX)) {
   die "'tar -xC $destdir' exited $?: $!";
index 5ea2524aa63c730632c567edb10132224b919b3a..7c2442653c71494edcb6f8e06cbff533d9890fd1 100644 (file)
@@ -64,10 +64,10 @@ func MakeArvadosClient() (kc ArvadosClient, err error) {
                        TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure}}},
                External: external}
 
-       if os.Getenv("ARVADOS_API_HOST") == "" {
+       if kc.ApiServer == "" {
                return kc, MissingArvadosApiHost
        }
-       if os.Getenv("ARVADOS_API_TOKEN") == "" {
+       if kc.ApiToken == "" {
                return kc, MissingArvadosApiToken
        }
 
index bf9b4e31c41dbb5ba974e1da3b4d1dbb99cda7e3..1af964d0a045ad2b4bb0a6dd9610fcf11d8027d3 100644 (file)
@@ -1,11 +1,10 @@
 package arvadosclient
 
 import (
-       "fmt"
        . "gopkg.in/check.v1"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
        "net/http"
        "os"
-       "os/exec"
        "testing"
 )
 
@@ -19,47 +18,35 @@ var _ = Suite(&ServerRequiredSuite{})
 // Tests that require the Keep server running
 type ServerRequiredSuite struct{}
 
-func pythonDir() string {
-       cwd, _ := os.Getwd()
-       return fmt.Sprintf("%s/../../python/tests", cwd)
+func (s *ServerRequiredSuite) SetUpSuite(c *C) {
+       arvadostest.StartAPI()
+       arvadostest.StartKeep()
 }
 
-func (s *ServerRequiredSuite) SetUpSuite(c *C) {
-       os.Chdir(pythonDir())
-       if err := exec.Command("python", "run_test_server.py", "start").Run(); err != nil {
-               panic("'python run_test_server.py start' returned error")
-       }
-       if err := exec.Command("python", "run_test_server.py", "start_keep").Run(); err != nil {
-               panic("'python run_test_server.py start_keep' returned error")
-       }
+func (s *ServerRequiredSuite) SetUpTest(c *C) {
+       arvadostest.ResetEnv()
 }
 
-func (s *ServerRequiredSuite) TestMakeArvadosClient(c *C) {
-       os.Setenv("ARVADOS_API_HOST", "localhost:3000")
-       os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+func (s *ServerRequiredSuite) TestMakeArvadosClientSecure(c *C) {
        os.Setenv("ARVADOS_API_HOST_INSECURE", "")
-
        kc, err := MakeArvadosClient()
-       c.Check(kc.ApiServer, Equals, "localhost:3000")
-       c.Check(kc.ApiToken, Equals, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+       c.Assert(err, Equals, nil)
+       c.Check(kc.ApiServer, Equals, os.Getenv("ARVADOS_API_HOST"))
+       c.Check(kc.ApiToken, Equals, os.Getenv("ARVADOS_API_TOKEN"))
        c.Check(kc.ApiInsecure, Equals, false)
+}
 
+func (s *ServerRequiredSuite) TestMakeArvadosClientInsecure(c *C) {
        os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
-
-       kc, err = MakeArvadosClient()
-       c.Check(kc.ApiServer, Equals, "localhost:3000")
-       c.Check(kc.ApiToken, Equals, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+       kc, err := MakeArvadosClient()
+       c.Assert(err, Equals, nil)
        c.Check(kc.ApiInsecure, Equals, true)
+       c.Check(kc.ApiServer, Equals, os.Getenv("ARVADOS_API_HOST"))
+       c.Check(kc.ApiToken, Equals, os.Getenv("ARVADOS_API_TOKEN"))
        c.Check(kc.Client.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify, Equals, true)
-
-       c.Assert(err, Equals, nil)
 }
 
 func (s *ServerRequiredSuite) TestCreatePipelineTemplate(c *C) {
-       os.Setenv("ARVADOS_API_HOST", "localhost:3000")
-       os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
-       os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
-
        arv, err := MakeArvadosClient()
 
        getback := make(Dict)
@@ -91,10 +78,6 @@ func (s *ServerRequiredSuite) TestCreatePipelineTemplate(c *C) {
 }
 
 func (s *ServerRequiredSuite) TestErrorResponse(c *C) {
-       os.Setenv("ARVADOS_API_HOST", "localhost:3000")
-       os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
-       os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
-
        arv, _ := MakeArvadosClient()
 
        getback := make(Dict)
diff --git a/sdk/go/arvadostest/run_servers.go b/sdk/go/arvadostest/run_servers.go
new file mode 100644 (file)
index 0000000..cad1691
--- /dev/null
@@ -0,0 +1,123 @@
+package arvadostest
+
+import (
+       "bufio"
+       "bytes"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "os"
+       "os/exec"
+       "strings"
+)
+
+var authSettings = make(map[string]string)
+
+func ResetEnv() {
+       for k, v := range authSettings {
+               os.Setenv(k, v)
+       }
+}
+
+func ParseAuthSettings(authScript []byte) {
+       scanner := bufio.NewScanner(bytes.NewReader(authScript))
+       for scanner.Scan() {
+               line := scanner.Text()
+               if 0 != strings.Index(line, "export ") {
+                       log.Printf("Ignoring: %v", line)
+                       continue
+               }
+               toks := strings.SplitN(strings.Replace(line, "export ", "", 1), "=", 2)
+               if len(toks) == 2 {
+                       authSettings[toks[0]] = toks[1]
+               } else {
+                       log.Fatalf("Could not parse: %v", line)
+               }
+       }
+       log.Printf("authSettings: %v", authSettings)
+}
+
// pythonTestDir caches the absolute path of sdk/python/tests once it
// has been located, so later calls can chdir straight there.
var pythonTestDir string = ""

// chdirToPythonTests changes the working directory to sdk/python/tests,
// searching upward from the current directory until that relative path
// exists. Dies (log.Fatal) if it is not found in any ancestor. Callers
// are responsible for restoring the working directory afterwards.
func chdirToPythonTests() {
	if pythonTestDir != "" {
		if err := os.Chdir(pythonTestDir); err != nil {
			log.Fatalf("chdir %s: %s", pythonTestDir, err)
		}
		return
	}
	for {
		if err := os.Chdir("sdk/python/tests"); err == nil {
			// Found it; remember the absolute path for next time.
			pythonTestDir, err = os.Getwd()
			return
		}
		// Stop when we reach the filesystem root without finding it.
		if parent, err := os.Getwd(); err != nil || parent == "/" {
			log.Fatalf("sdk/python/tests/ not found in any ancestor")
		}
		if err := os.Chdir(".."); err != nil {
			log.Fatal(err)
		}
	}
}
+
// StartAPI launches the test Arvados API server via
// sdk/python/tests/run_test_server.py with an admin auth token, reads
// the "export ..." settings the script prints on stdout, and installs
// them into this process's environment (ParseAuthSettings + ResetEnv).
// Any failure is fatal. The caller's working directory is restored
// before returning.
func StartAPI() {
	cwd, _ := os.Getwd()
	defer os.Chdir(cwd)
	chdirToPythonTests()

	cmd := exec.Command("python", "run_test_server.py", "start", "--auth", "admin")
	stderr, err := cmd.StderrPipe()
	if err != nil {
		log.Fatal(err)
	}
	// Relay the server's diagnostics while we wait for it to start.
	go io.Copy(os.Stderr, stderr)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		log.Fatal(err)
	}
	if err = cmd.Start(); err != nil {
		log.Fatal(err)
	}
	// stdout carries the auth settings; read it all before Wait().
	var authScript []byte
	if authScript, err = ioutil.ReadAll(stdout); err != nil {
		log.Fatal(err)
	}
	if err = cmd.Wait(); err != nil {
		log.Fatal(err)
	}
	ParseAuthSettings(authScript)
	ResetEnv()
}
+
// StopAPI stops the test API server started by StartAPI. Errors from
// the stop command are deliberately ignored (best effort; the server
// may already be down). The caller's working directory is restored
// before returning.
func StopAPI() {
	cwd, _ := os.Getwd()
	defer os.Chdir(cwd)
	chdirToPythonTests()

	exec.Command("python", "run_test_server.py", "stop").Run()
}
+
// StartKeep launches the test Keep servers via run_test_server.py.
// The script's stderr is relayed to this process's stderr; a non-zero
// exit panics. The caller's working directory is restored before
// returning.
func StartKeep() {
	cwd, _ := os.Getwd()
	defer os.Chdir(cwd)
	chdirToPythonTests()

	cmd := exec.Command("python", "run_test_server.py", "start_keep")
	stderr, err := cmd.StderrPipe()
	if err != nil {
		log.Fatalf("Setting up stderr pipe: %s", err)
	}
	// NOTE(review): copying from StderrPipe concurrently with cmd.Run()
	// races with Wait() closing the pipe; harmless for diagnostics but
	// worth confirming.
	go io.Copy(os.Stderr, stderr)
	if err := cmd.Run(); err != nil {
		panic(fmt.Sprintf("'python run_test_server.py start_keep' returned error %s", err))
	}
}
+
// StopKeep stops the test Keep servers started by StartKeep. Errors
// are deliberately ignored (best effort). The caller's working
// directory is restored before returning.
func StopKeep() {
	cwd, _ := os.Getwd()
	defer os.Chdir(cwd)
	chdirToPythonTests()

	exec.Command("python", "run_test_server.py", "stop_keep").Run()
}
diff --git a/sdk/go/blockdigest/blockdigest.go b/sdk/go/blockdigest/blockdigest.go
new file mode 100644 (file)
index 0000000..9b818d3
--- /dev/null
@@ -0,0 +1,49 @@
+/* Stores a Block Locator Digest compactly. Can be used as a map key. */
+
+package blockdigest
+
+import (
+       "fmt"
+       "log"
+       "strconv"
+)
+
// BlockDigest stores a Block Locator Digest compactly, up to 128 bits,
// as two 64-bit halves. Being a small value type with no pointers or
// slices, it can be used as a map key.
type BlockDigest struct {
	h uint64 // high-order 64 bits of the digest
	l uint64 // low-order 64 bits of the digest
}

// String renders the digest as 32 lowercase hexadecimal characters.
func (d BlockDigest) String() string {
	return fmt.Sprintf("%016x%016x", d.h, d.l)
}
+
+// Will create a new BlockDigest unless an error is encountered.
+func FromString(s string) (dig BlockDigest, err error) {
+       if len(s) != 32 {
+               err = fmt.Errorf("Block digest should be exactly 32 characters but this one is %d: %s", len(s), s)
+               return
+       }
+
+       var d BlockDigest
+       d.h, err = strconv.ParseUint(s[:16], 16, 64)
+       if err != nil {
+               return
+       }
+       d.l, err = strconv.ParseUint(s[16:], 16, 64)
+       if err != nil {
+               return
+       }
+       dig = d
+       return
+}
+
+// Will fatal with the error if an error is encountered
+func AssertFromString(s string) BlockDigest {
+       d, err := FromString(s)
+       if err != nil {
+               log.Fatalf("Error creating BlockDigest from %s: %v", s, err)
+       }
+       return d
+}
diff --git a/sdk/go/blockdigest/blockdigest_test.go b/sdk/go/blockdigest/blockdigest_test.go
new file mode 100644 (file)
index 0000000..068a138
--- /dev/null
@@ -0,0 +1,79 @@
+package blockdigest
+
+import (
+       "fmt"
+       "strings"
+       "testing"
+)
+
+func expectValidDigestString(t *testing.T, s string) {
+       bd, err := FromString(s)
+       if err != nil {
+               t.Fatalf("Expected %s to produce a valid BlockDigest but instead got error: %v", s, err)
+       }
+
+       expected := strings.ToLower(s)
+               
+       if expected != bd.String() {
+               t.Fatalf("Expected %s to be returned by FromString(%s).String() but instead we received %s", expected, s, bd.String())
+       }
+}
+
// expectInvalidDigestString checks that FromString rejects s.
func expectInvalidDigestString(t *testing.T, s string) {
	_, err := FromString(s)
	if err == nil {
		t.Fatalf("Expected %s to be an invalid BlockDigest, but did not receive an error", s)
	}
}
+
// Valid digests: hex digits are accepted in any mix of cases.
func TestValidDigestStrings(t *testing.T) {
	expectValidDigestString(t, "01234567890123456789abcdefabcdef")
	expectValidDigestString(t, "01234567890123456789ABCDEFABCDEF")
	expectValidDigestString(t, "01234567890123456789AbCdEfaBcDeF")
}
+
// Invalid digests: non-hex character, too short, too long, bad lead.
func TestInvalidDigestStrings(t *testing.T) {
	expectInvalidDigestString(t, "01234567890123456789abcdefabcdeg")
	expectInvalidDigestString(t, "01234567890123456789abcdefabcde")
	expectInvalidDigestString(t, "01234567890123456789abcdefabcdefa")
	expectInvalidDigestString(t, "g1234567890123456789abcdefabcdef")
}
+
+func TestBlockDigestWorksAsMapKey(t *testing.T) {
+       m := make(map[BlockDigest]int)
+       bd := AssertFromString("01234567890123456789abcdefabcdef")
+       m[bd] = 5
+}
+
// BlockDigest implements fmt.Stringer, so %v prints the hex digest.
func TestBlockDigestGetsPrettyPrintedByPrintf(t *testing.T) {
	input := "01234567890123456789abcdefabcdef"
	prettyPrinted := fmt.Sprintf("%v", AssertFromString(input))
	if prettyPrinted != input {
		t.Fatalf("Expected blockDigest produced from \"%s\" to be printed as "+
			"\"%s\", but instead it was printed as %s",
			input, input, prettyPrinted)
	}
}
+
// %+v on a struct containing a BlockDigest should use String() for
// the exported digest field.
func TestBlockDigestGetsPrettyPrintedByPrintfInNestedStructs(t *testing.T) {
	input := "01234567890123456789abcdefabcdef"
	value := 42
	nested := struct {
		// Fun trivia fact: If this field was called "digest" instead of
		// "Digest", then it would not be exported and String() would
		// never get called on it and our output would look very
		// different.
		Digest BlockDigest
		value  int
	}{
		AssertFromString(input),
		value,
	}
	prettyPrinted := fmt.Sprintf("%+v", nested)
	expected := fmt.Sprintf("{Digest:%s value:%d}", input, value)
	if prettyPrinted != expected {
		t.Fatalf("Expected blockDigest produced from \"%s\" to be printed as "+
			"\"%s\", but instead it was printed as %s",
			input, expected, prettyPrinted)
	}
}
index 8487e00786d93d4acece1fcf83c065629e499944..cbd27d72e7c7e9310de1ed027e47912b7a187baa 100644 (file)
@@ -5,6 +5,7 @@ import (
        "flag"
        "fmt"
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
        "git.curoverse.com/arvados.git/sdk/go/streamer"
        . "gopkg.in/check.v1"
        "io"
@@ -13,7 +14,6 @@ import (
        "net"
        "net/http"
        "os"
-       "os/exec"
        "testing"
 )
 
@@ -44,42 +44,19 @@ func (s *ServerRequiredSuite) SetUpSuite(c *C) {
                c.Skip("Skipping tests that require server")
                return
        }
-       os.Chdir(pythonDir())
-       {
-               cmd := exec.Command("python", "run_test_server.py", "start")
-               stderr, err := cmd.StderrPipe()
-               if err != nil {
-                       log.Fatalf("Setting up stderr pipe: %s", err)
-               }
-               go io.Copy(os.Stderr, stderr)
-               if err := cmd.Run(); err != nil {
-                       panic(fmt.Sprintf("'python run_test_server.py start' returned error %s", err))
-               }
-       }
-       {
-               cmd := exec.Command("python", "run_test_server.py", "start_keep")
-               stderr, err := cmd.StderrPipe()
-               if err != nil {
-                       log.Fatalf("Setting up stderr pipe: %s", err)
-               }
-               go io.Copy(os.Stderr, stderr)
-               if err := cmd.Run(); err != nil {
-                       panic(fmt.Sprintf("'python run_test_server.py start_keep' returned error %s", err))
-               }
-       }
+       arvadostest.StartAPI()
+       arvadostest.StartKeep()
 }
 
 func (s *ServerRequiredSuite) TearDownSuite(c *C) {
-       os.Chdir(pythonDir())
-       exec.Command("python", "run_test_server.py", "stop_keep").Run()
-       exec.Command("python", "run_test_server.py", "stop").Run()
+       if *no_server {
+               return
+       }
+       arvadostest.StopKeep()
+       arvadostest.StopAPI()
 }
 
 func (s *ServerRequiredSuite) TestMakeKeepClient(c *C) {
-       os.Setenv("ARVADOS_API_HOST", "localhost:3000")
-       os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
-       os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
-
        arv, err := arvadosclient.MakeArvadosClient()
        c.Assert(err, Equals, nil)
 
@@ -88,7 +65,7 @@ func (s *ServerRequiredSuite) TestMakeKeepClient(c *C) {
        c.Assert(err, Equals, nil)
        c.Check(len(kc.ServiceRoots()), Equals, 2)
        for _, root := range kc.ServiceRoots() {
-               c.Check(root, Matches, "http://localhost:2510[\\d]")
+               c.Check(root, Matches, "http://localhost:\\d+")
        }
 }
 
@@ -600,9 +577,6 @@ func (s *StandaloneSuite) TestGetWithFailures(c *C) {
 }
 
 func (s *ServerRequiredSuite) TestPutGetHead(c *C) {
-       os.Setenv("ARVADOS_API_HOST", "localhost:3000")
-       os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
-       os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
        content := []byte("TestPutGetHead")
 
        arv, err := arvadosclient.MakeArvadosClient()
@@ -626,7 +600,7 @@ func (s *ServerRequiredSuite) TestPutGetHead(c *C) {
                r, n, url2, err := kc.Get(hash)
                c.Check(err, Equals, nil)
                c.Check(n, Equals, int64(len(content)))
-               c.Check(url2, Equals, fmt.Sprintf("http://localhost:25108/%s", hash))
+               c.Check(url2, Matches, fmt.Sprintf("http://localhost:\\d+/%s", hash))
 
                read_content, err2 := ioutil.ReadAll(r)
                c.Check(err2, Equals, nil)
@@ -636,7 +610,7 @@ func (s *ServerRequiredSuite) TestPutGetHead(c *C) {
                n, url2, err := kc.Ask(hash)
                c.Check(err, Equals, nil)
                c.Check(n, Equals, int64(len(content)))
-               c.Check(url2, Equals, fmt.Sprintf("http://localhost:25108/%s", hash))
+               c.Check(url2, Matches, fmt.Sprintf("http://localhost:\\d+/%s", hash))
        }
 }
 
index c24849e687a8d11cf2e5d2154fdd62d0e470ec83..9db6ebcbaa234a82778825e92baa8a7017a3f418 100644 (file)
@@ -11,7 +11,6 @@ import (
        "log"
        "net"
        "net/http"
-       "os"
        "strings"
        "time"
 )
@@ -78,14 +77,6 @@ func (this *KeepClient) setClientSettingsStore() {
 }
 
 func (this *KeepClient) DiscoverKeepServers() error {
-       if prx := os.Getenv("ARVADOS_KEEP_PROXY"); prx != "" {
-               sr := map[string]string{"proxy": prx}
-               this.SetServiceRoots(sr)
-               this.Using_proxy = true
-               this.setClientSettingsProxy()
-               return nil
-       }
-
        type svcList struct {
                Items []keepDisk `json:"items"`
        }
diff --git a/sdk/go/logger/logger.go b/sdk/go/logger/logger.go
new file mode 100644 (file)
index 0000000..ce18e90
--- /dev/null
@@ -0,0 +1,199 @@
+// Logger periodically writes a log to the Arvados SDK.
+//
+// This package is useful for maintaining a log object that is updated
+// over time. This log object will be periodically written to the log,
+// as specified by WriteInterval in the Params.
+//
+// This package is safe for concurrent use as long as:
+// The maps passed to a LogMutator are not accessed outside of the
+// LogMutator
+//
+// Usage:
+// arvLogger := logger.NewLogger(params)
+// arvLogger.Update(func(properties map[string]interface{},
+//     entry map[string]interface{}) {
+//   // Modify properties and entry however you want
+//   // properties is a shortcut for entry["properties"].(map[string]interface{})
+//   // properties can take any values you want to give it,
+//   // entry will only take the fields listed at http://doc.arvados.org/api/schema/Log.html
+// })
+package logger
+
+import (
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "log"
+       "time"
+)
+
const (
	startSuffix              = "-start"   // event type suffix for the first write
	partialSuffix            = "-partial" // event type suffix for intermediate writes
	finalSuffix              = "-final"   // event type suffix for the FinalUpdate write
	numberNoMoreWorkMessages = 2          // To return from FinalUpdate() & Work().
)

// LoggerParams configures a new Logger (see NewLogger).
type LoggerParams struct {
	Client          arvadosclient.ArvadosClient // The client we use to write log entries
	EventTypePrefix string                      // The prefix we use for the event type in the log entry
	WriteInterval   time.Duration               // Wait at least this long between log writes
}

// A LogMutator is a function which modifies the log entry.
// It takes two maps as arguments, properties is the first and entry
// is the second.
// properties is a shortcut for entry["properties"].(map[string]interface{})
// properties can take any values you want to give it.
// entry will only take the fields listed at http://doc.arvados.org/api/schema/Log.html
// properties and entry are only safe to access inside the LogMutator,
// they should not be stored anywhere, otherwise you'll risk
// concurrent access.
type LogMutator func(map[string]interface{}, map[string]interface{})
+
// A Logger is used to build up a log entry over time and write every
// version of it. All map access and writing happens on one worker
// goroutine, so the exported methods only enqueue work.
type Logger struct {
	// The data we write
	data       map[string]interface{} // The entire map that we give to the api
	entry      map[string]interface{} // Convenience shortcut into data ("log")
	properties map[string]interface{} // Convenience shortcut into data ("log"->"properties")

	params LoggerParams // Parameters we were given

	// Variables to coordinate updating and writing.
	modified    bool            // Has this data been modified since the last write?
	workToDo    chan LogMutator // Work to do in the worker thread.
	writeTicker *time.Ticker    // On each tick we write the log data to arvados, if it has been modified.
	hasWritten  bool            // Whether we've written at all yet.
	noMoreWork  chan bool       // Signals that we're done writing.

	writeHooks []LogMutator // Mutators we call before each write.
}
+
+// Create a new logger based on the specified parameters.
+func NewLogger(params LoggerParams) *Logger {
+       // sanity check parameters
+       if &params.Client == nil {
+               log.Fatal("Nil arvados client in LoggerParams passed in to NewLogger()")
+       }
+       if params.EventTypePrefix == "" {
+               log.Fatal("Empty event type prefix in LoggerParams passed in to NewLogger()")
+       }
+
+       l := &Logger{
+               data:        make(map[string]interface{}),
+               entry:       make(map[string]interface{}),
+               properties:  make(map[string]interface{}),
+               params:      params,
+               workToDo:    make(chan LogMutator, 10),
+               writeTicker: time.NewTicker(params.WriteInterval),
+               noMoreWork:  make(chan bool, numberNoMoreWorkMessages)}
+
+       l.data["log"] = l.entry
+       l.entry["properties"] = l.properties
+
+       // Start the worker goroutine.
+       go l.work()
+
+       return l
+}
+
// Exported functions will be called from other goroutines, therefore
// all they are allowed to do is enqueue work to be done in the worker
// goroutine.

// Update enqueues a mutation of the log entry. The mutator runs in
// the worker goroutine some time after this method returns.
func (l *Logger) Update(mutator LogMutator) {
	l.workToDo <- mutator
}
+
+// Similar to Update(), but writes the log entry as soon as possible
+// (ignoring MinimumWriteInterval) and blocks until the entry has been
+// written. This is useful if you know that you're about to quit
+// (e.g. if you discovered a fatal error, or you're finished), since
+// go will not wait for timers (including the pending write timer) to
+// go off before exiting.
+func (l *Logger) FinalUpdate(mutator LogMutator) {
+       // TODO(misha): Consider not accepting any future updates somehow,
+       // since they won't get written if they come in after this.
+
+       // Stop the periodic write ticker. We'll perform the final write
+       // before returning from this function.
+       l.workToDo <- func(p map[string]interface{}, e map[string]interface{}) {
+               l.writeTicker.Stop()
+       }
+
+       // Apply the final update
+       l.workToDo <- mutator
+
+       // Perform the final write and signal that we can return.
+       l.workToDo <- func(p map[string]interface{}, e map[string]interface{}) {
+               l.write(true)
+               for i := 0; i < numberNoMoreWorkMessages; {
+                       l.noMoreWork <- true
+               }
+       }
+
+       // Wait until we've performed the write.
+       <-l.noMoreWork
+}
+
// AddWriteHook adds a hook which will be called every time this logger
// writes an entry. Registration itself runs in the worker goroutine to
// avoid concurrent access to writeHooks.
func (l *Logger) AddWriteHook(hook LogMutator) {
	// We do the work in a LogMutator so that it happens in the worker
	// goroutine.
	l.workToDo <- func(p map[string]interface{}, e map[string]interface{}) {
		l.writeHooks = append(l.writeHooks, hook)
	}
}
+
// work is the worker loop: it owns all access to the logger's maps.
// It applies queued mutators, writes on ticker ticks when the data has
// been modified, and exits when noMoreWork is signalled (FinalUpdate).
func (l *Logger) work() {
	for {
		select {
		case <-l.writeTicker.C:
			if l.modified {
				l.write(false)
				l.modified = false
			}
		case mutator := <-l.workToDo:
			mutator(l.properties, l.entry)
			// NOTE(review): every mutator marks the entry modified,
			// even ones (like AddWriteHook's) that don't touch the
			// data, which can trigger a spurious write on the next
			// tick.
			l.modified = true
		case <-l.noMoreWork:
			return
		}
	}
}
+
// write performs the actual write of the log entry: it runs the
// registered write hooks, stamps the event type
// (start/partial/final), and creates a "logs" record via the API
// client. Runs only on the worker goroutine. Dies (log.Fatal) if the
// API call fails.
func (l *Logger) write(isFinal bool) {

	// Run all our hooks
	for _, hook := range l.writeHooks {
		hook(l.properties, l.entry)
	}

	// Update the event type.
	if isFinal {
		l.entry["event_type"] = l.params.EventTypePrefix + finalSuffix
	} else if l.hasWritten {
		l.entry["event_type"] = l.params.EventTypePrefix + partialSuffix
	} else {
		l.entry["event_type"] = l.params.EventTypePrefix + startSuffix
	}
	l.hasWritten = true

	// Write the log entry.
	// This is a network write and will take a while, which is bad
	// because we're blocking all the other work on this goroutine.
	//
	// TODO(misha): Consider rewriting this so that we can encode l.data
	// into a string, and then perform the actual write in another
	// routine. This will be tricky and will require support in the
	// client.
	err := l.params.Client.Create("logs", l.data, nil)
	if err != nil {
		log.Printf("Attempted to log: %v", l.data)
		log.Fatalf("Received error writing log: %v", err)
	}
}
diff --git a/sdk/go/logger/main/testlogger.go b/sdk/go/logger/main/testlogger.go
new file mode 100644 (file)
index 0000000..6cd7dfb
--- /dev/null
@@ -0,0 +1,29 @@
+// This binary tests the logger package.
+// It's not a standard unit test. Instead it writes to the actual log
+// and you have to clean up after it.
+
+package main
+
import (
	"log"
	"time"

	"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
	"git.curoverse.com/arvados.git/sdk/go/logger"
)
+
+func main() {
+       arv, err := arvadosclient.MakeArvadosClient()
+       if err != nil {
+               log.Fatalf("Error setting up arvados client %v", err)
+       }
+
+       l := logger.NewLogger(logger.LoggerParams{Client: arv,
+               EventType: "experimental-logger-testing",
+               // No minimum write interval
+       })
+
+       {
+               properties, _ := l.Edit()
+               properties["Ninja"] = "Misha"
+       }
+       l.Record()
+}
diff --git a/sdk/go/manifest/manifest.go b/sdk/go/manifest/manifest.go
new file mode 100644 (file)
index 0000000..f6698c6
--- /dev/null
@@ -0,0 +1,118 @@
+/* Deals with parsing Manifest Text. */
+
+// Inspired by the Manifest class in arvados/sdk/ruby/lib/arvados/keep.rb
+
+package manifest
+
+import (
+       "fmt"
+       "git.curoverse.com/arvados.git/sdk/go/blockdigest"
+       "log"
+       "regexp"
+       "strconv"
+       "strings"
+)
+
// LocatorPattern matches a full block locator: a 32-hex-digit digest,
// a "+size" component, and zero or more "+Hint" components (each hint
// starting with an uppercase letter).
var LocatorPattern = regexp.MustCompile(
	"^[0-9a-fA-F]{32}\\+[0-9]+(\\+[A-Z][A-Za-z0-9@_-]+)*$")

// Manifest wraps raw manifest text.
type Manifest struct {
	Text string
}

// BlockLocator is one parsed block locator: digest, size in bytes,
// and any trailing hint tokens.
type BlockLocator struct {
	Digest blockdigest.BlockDigest
	Size   int
	Hints  []string
}

// ManifestStream represents a single line from a manifest.
type ManifestStream struct {
	StreamName string
	Blocks     []string
	Files      []string
}
+
+func ParseBlockLocator(s string) (b BlockLocator, err error) {
+       if !LocatorPattern.MatchString(s) {
+               err = fmt.Errorf("String \"%s\" does not match BlockLocator pattern "+
+                       "\"%s\".",
+                       s,
+                       LocatorPattern.String())
+       } else {
+               tokens := strings.Split(s, "+")
+               var blockSize int64
+               var blockDigest blockdigest.BlockDigest
+               // We expect both of the following to succeed since LocatorPattern
+               // restricts the strings appropriately.
+               blockDigest, err = blockdigest.FromString(tokens[0])
+               if err != nil {
+                       return
+               }
+               blockSize, err = strconv.ParseInt(tokens[1], 10, 0)
+               if err != nil {
+                       return
+               }
+               b.Digest = blockDigest
+               b.Size = int(blockSize)
+               b.Hints = tokens[2:]
+       }
+       return
+}
+
+func parseManifestStream(s string) (m ManifestStream) {
+       tokens := strings.Split(s, " ")
+       m.StreamName = tokens[0]
+       tokens = tokens[1:]
+       var i int
+       for i = range tokens {
+               if !LocatorPattern.MatchString(tokens[i]) {
+                       break
+               }
+       }
+       m.Blocks = tokens[:i]
+       m.Files = tokens[i:]
+       return
+}
+
// StreamIter returns a channel yielding one ManifestStream per
// non-blank manifest line, then closes it. Parsing is lazy: lines are
// split off one at a time, so a consumer that stops early saves the
// cost of parsing the rest of the text.
func (m *Manifest) StreamIter() <-chan ManifestStream {
	ch := make(chan ManifestStream)
	go func(input string) {
		// This slice holds the current line and the remainder of the
		// manifest.  We parse one line at a time, to save effort if we
		// only need the first few lines.
		lines := []string{"", input}
		for {
			lines = strings.SplitN(lines[1], "\n", 2)
			if len(lines[0]) > 0 {
				// Only parse non-blank lines
				ch <- parseManifestStream(lines[0])
			}
			if len(lines) == 1 {
				// No newline remained: that was the last line.
				break
			}
		}
		close(ch)
	}(m.Text)
	return ch
}
+
+// Blocks may appear mulitple times within the same manifest if they
+// are used by multiple files. In that case this Iterator will output
+// the same block multiple times.
+func (m *Manifest) BlockIterWithDuplicates() <-chan BlockLocator {
+       blockChannel := make(chan BlockLocator)
+       go func(streamChannel <-chan ManifestStream) {
+               for m := range streamChannel {
+                       for _, block := range m.Blocks {
+                               if b, err := ParseBlockLocator(block); err == nil {
+                                       blockChannel <- b
+                               } else {
+                                       log.Printf("ERROR: Failed to parse block: %v", err)
+                               }
+                       }
+               }
+               close(blockChannel)
+       }(m.StreamIter())
+       return blockChannel
+}
diff --git a/sdk/go/manifest/manifest_test.go b/sdk/go/manifest/manifest_test.go
new file mode 100644 (file)
index 0000000..c1bfb14
--- /dev/null
@@ -0,0 +1,178 @@
+package manifest
+
+import (
+       "git.curoverse.com/arvados.git/sdk/go/blockdigest"
+       "io/ioutil"
+       "runtime"
+       "testing"
+)
+
+func getStackTrace() (string) {
+       buf := make([]byte, 1000)
+       bytes_written := runtime.Stack(buf, false)
+       return "Stack Trace:\n" + string(buf[:bytes_written])
+}
+
// expectFromChannel asserts that the next receive on c yields
// `expected` while the channel is still open.
func expectFromChannel(t *testing.T, c <-chan string, expected string) {
	actual, ok := <-c
	if !ok {
		t.Fatalf("Expected to receive %s but channel was closed. %s",
			expected,
			getStackTrace())
	}
	if actual != expected {
		t.Fatalf("Expected %s but got %s instead. %s",
			expected,
			actual,
			getStackTrace())
	}
}
+
// expectChannelClosed asserts that the next receive on c reports a
// closed channel.
func expectChannelClosed(t *testing.T, c <-chan interface{}) {
	received, ok := <-c
	if ok {
		t.Fatalf("Expected channel to be closed, but received %v instead. %s",
			received,
			getStackTrace())
	}
}
+
// expectEqual asserts actual == expected (interface comparison).
func expectEqual(t *testing.T, actual interface{}, expected interface{}) {
	if actual != expected {
		t.Fatalf("Expected %v but received %v instead. %s",
			expected,
			actual,
			getStackTrace())
	}
}
+
+func expectStringSlicesEqual(t *testing.T, actual []string, expected []string) {
+       if len(actual) != len(expected) {
+               t.Fatalf("Expected %v (length %d), but received %v (length %d) instead. %s", expected, len(expected), actual, len(actual), getStackTrace())
+       }
+       for i := range actual {
+               if actual[i] != expected[i] {
+                       t.Fatalf("Expected %v but received %v instead (first disagreement at position %d). %s", expected, actual, i, getStackTrace())
+               }
+       }
+}
+
// expectManifestStream compares two ManifestStreams field by field.
func expectManifestStream(t *testing.T, actual ManifestStream, expected ManifestStream) {
	expectEqual(t, actual.StreamName, expected.StreamName)
	expectStringSlicesEqual(t, actual.Blocks, expected.Blocks)
	expectStringSlicesEqual(t, actual.Files, expected.Files)
}
+
// expectBlockLocator compares two BlockLocators field by field.
func expectBlockLocator(t *testing.T, actual BlockLocator, expected BlockLocator) {
	expectEqual(t, actual.Digest, expected.Digest)
	expectEqual(t, actual.Size, expected.Size)
	expectStringSlicesEqual(t, actual.Hints, expected.Hints)
}
+
// expectLocatorPatternMatch asserts that s matches LocatorPattern.
func expectLocatorPatternMatch(t *testing.T, s string) {
	if !LocatorPattern.MatchString(s) {
		t.Fatalf("Expected \"%s\" to match locator pattern but it did not.",
			s)
	}
}
+
// expectLocatorPatternFail asserts that s does NOT match LocatorPattern.
func expectLocatorPatternFail(t *testing.T, s string) {
	if LocatorPattern.MatchString(s) {
		t.Fatalf("Expected \"%s\" to fail locator pattern but it passed.",
			s)
	}
}
+
// TestLocatorPatternBasic covers accepted forms (digest+size with
// optional hints) and rejected forms (wrong digest length, non-hex
// digits, trailing "+", lowercase/digit-leading hints, empty hints).
func TestLocatorPatternBasic(t *testing.T) {
	expectLocatorPatternMatch(t, "12345678901234567890123456789012+12345")
	expectLocatorPatternMatch(t, "A2345678901234abcdefababdeffdfdf+12345")
	expectLocatorPatternMatch(t, "12345678901234567890123456789012+12345+A1")
	expectLocatorPatternMatch(t,
		"12345678901234567890123456789012+12345+A1+B123wxyz@_-")
	expectLocatorPatternMatch(t,
		"12345678901234567890123456789012+12345+A1+B123wxyz@_-+C@")

	expectLocatorPatternFail(t, "12345678901234567890123456789012")
	expectLocatorPatternFail(t, "12345678901234567890123456789012+")
	expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+")
	expectLocatorPatternFail(t, "1234567890123456789012345678901+12345")
	expectLocatorPatternFail(t, "123456789012345678901234567890123+12345")
	expectLocatorPatternFail(t, "g2345678901234abcdefababdeffdfdf+12345")
	expectLocatorPatternFail(t, "12345678901234567890123456789012+12345 ")
	expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+1")
	expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+1A")
	expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+A")
	expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+a1")
	expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+A1+")
	expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+A1+B")
	expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+A+B2")
}
+
// A single stream line with one block and one file token parses into
// the expected ManifestStream.
func TestParseManifestStreamSimple(t *testing.T) {
	m := parseManifestStream(". 365f83f5f808896ec834c8b595288735+2310+K@qr1hi+Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf 0:2310:qr1hi-8i9sb-ienvmpve1a0vpoi.log.txt")
	expectManifestStream(t, m, ManifestStream{StreamName: ".",
		Blocks: []string{"365f83f5f808896ec834c8b595288735+2310+K@qr1hi+Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf"},
		Files:  []string{"0:2310:qr1hi-8i9sb-ienvmpve1a0vpoi.log.txt"}})
}
+
// A locator with digest, size, and two hints parses into the expected
// BlockLocator fields.
func TestParseBlockLocatorSimple(t *testing.T) {
	b, err := ParseBlockLocator("365f83f5f808896ec834c8b595288735+2310+K@qr1hi+Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf")
	if err != nil {
		t.Fatalf("Unexpected error parsing block locator: %v", err)
	}
	expectBlockLocator(t, b, BlockLocator{Digest: blockdigest.AssertFromString("365f83f5f808896ec834c8b595288735"),
		Size: 2310,
		Hints: []string{"K@qr1hi",
			"Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf"}})
}
+
// StreamIter on a one-line manifest (from testdata/short_manifest)
// yields exactly one stream and then a closed channel; blank lines are
// skipped.
func TestStreamIterShortManifestWithBlankStreams(t *testing.T) {
	content, err := ioutil.ReadFile("testdata/short_manifest")
	if err != nil {
		t.Fatalf("Unexpected error reading manifest from file: %v", err)
	}
	manifest := Manifest{string(content)}
	streamIter := manifest.StreamIter()

	firstStream := <-streamIter
	expectManifestStream(t,
		firstStream,
		ManifestStream{StreamName: ".",
			Blocks: []string{"b746e3d2104645f2f64cd3cc69dd895d+15693477+E2866e643690156651c03d876e638e674dcd79475@5441920c"},
			Files:  []string{"0:15893477:chr10_band0_s0_e3000000.fj"}})

	received, ok := <-streamIter
	if ok {
		t.Fatalf("Expected streamIter to be closed, but received %v instead.",
			received)
	}
}
+
// BlockIterWithDuplicates on testdata/long_manifest yields 853 blocks;
// we spot-check the first and last against their known values.
func TestBlockIterLongManifest(t *testing.T) {
	content, err := ioutil.ReadFile("testdata/long_manifest")
	if err != nil {
		t.Fatalf("Unexpected error reading manifest from file: %v", err)
	}
	manifest := Manifest{string(content)}
	blockChannel := manifest.BlockIterWithDuplicates()

	firstBlock := <-blockChannel
	expectBlockLocator(t,
		firstBlock,
		BlockLocator{Digest: blockdigest.AssertFromString("b746e3d2104645f2f64cd3cc69dd895d"),
			Size:  15693477,
			Hints: []string{"E2866e643690156651c03d876e638e674dcd79475@5441920c"}})
	blocksRead := 1
	var lastBlock BlockLocator
	for lastBlock = range blockChannel {
		//log.Printf("Blocks Read: %d", blocksRead)
		blocksRead++
	}
	expectEqual(t, blocksRead, 853)

	expectBlockLocator(t,
		lastBlock,
		BlockLocator{Digest: blockdigest.AssertFromString("f9ce82f59e5908d2d70e18df9679b469"),
			Size:  31367794,
			Hints: []string{"E53f903684239bcc114f7bf8ff9bd6089f33058db@5441920c"}})
}
diff --git a/sdk/go/manifest/testdata/long_manifest b/sdk/go/manifest/testdata/long_manifest
new file mode 100644 (file)
index 0000000..5f4a2b4
--- /dev/null
@@ -0,0 +1,7 @@
+. b746e3d2104645f2f64cd3cc69dd895d+15693477+E2866e643690156651c03d876e638e674dcd79475@5441920c 109cd35b4d3f83266b63fb46c6943454+6770629+Ed0c0561b669237162996223b813b811d248ff9b0@5441920c 1455890e7b56831edff40738856e4194+15962669+Ec298b770d14205b5185d0e2b016ddd940c745446@5441920c 8c87f1c69c6f302c8c05e7d0e740d233+16342794+Ec432f4c24e63b840c1f12976b9edf396d70b8f67@5441920c 451cfce8c67bf92b67b5c6190d45d4f5+5067634+E406821d6ceb1d16ec638e66b7603c69f3482d895@5441920c f963d174978dc966910be6240e8602c7+4264756+E00241238e18635fdb583dd0c6d6561b672996467@5441920c 33be2d8cdd100eec6e842f644556d031+16665404+E6c773004b8296523014b9d23ed066ec72387485e@5441920c 6db13c2df6342b52d72df469c065b675+13536792+E6011e6057857f68d9b1b486571f239614b0707be@5441920c fb7ccc93e86187c519f6716c26474cb3+13714429+Ec4677bfcbe8689621d1b2d4f1bdce5b52f379f98@5441920c 972f24d216684646dfb9e266b7166f63+44743112+E1706fe89133bcd3625cc88de1035681c2d179770@5441920c 16f8df1595811cf9823c30254e6d58e6+17555223+E0febd567bf630b656dcfef01e90d3878c66eed36@5441920c d25b29289e6632728bf485eff6dde9c5+4366647+E7071644d29dd00be350e2e6fb7496346555fb4e9@5441920c 11dffe40608763462b5d89d5ccf33779+32161952+E7f110261b4b0d628396ff782f86966c17569c249@5441920c 0d36936536e85c28c233c6dfb856863b+22400265+Eee3966f1088f96d4fde6e4ec6b9b85cd65ff0c56@5441920c 03f293686e7c22b852b1f94b3490d781+14026139+Ef27fdfb40d6f9bd7bf8f639bcb2608365e002761@5441920c 185863e4c8fb666bc67b5b6666067094+22042495+Ee1164ffe4bffb0c2f29e1767688fbc468b326007@5441920c 4c7368ed41d2266df698176d0483e0be+31053569+E527d607c348f45ede4d8d6340f6079dd044c554d@5441920c ef75be5688e570564269596833866420+7357223+Eb27e68b0dc1674c515646c79280269779f2fb9ed@5441920c cc178064be26076266896d7b9bd91363+17709624+Ed64b0f5e023578cc2d23de434de9ec95debf6c4c@5441920c 5721f0964f9fb339066c176ce6d819c4+6146416+E5df3e33404b589fd4f2f827b86200fe3507c669b@5441920c 53df2cf91db94f57e7d67e4bc568d102+14669912+E64ddcf065630c72e281d0760fe11475b11614382@5441920c 
3b045d987f9e1d03d9f3764223394f7f+11964610+E667868e60686bb6fc49609f2d61cb5b4e990dc4c@5441920c 1b83050279df8c6bfd2d7f675ecc6cc0+14904735+E91b1576015021d4debb5465dc449037bed0efc60@5441920c 16c366b5e44bd6d3f01600776e65076b+13400037+E6ded42f36469b5996e60c3415094d93b98d58d17@5441920c 6e7c59c345714f8d20176086c53d128f+5665774+Ef4c5716bb8c535d1335886f4ed8792e28829f531@5441920c 47c20b212e917be6923d6040053b6199+9646905+E875b5786fe08f40d5655ec0731368085d2059fe7@5441920c 6d56fc2964ee717fb3d168e0433652e5+4640161+E59be5ce3d0188761859f8f723bdfbf6f6cfc58b6@5441920c b62899c71fbf5ee6b3c59777480393b1+32455363+E2bfbdc56d6b66b7709f99466e733c1389cd8c952@5441920c 5c0390fc6f76631ec906792975d36d09+15940309+E0671c8fd6b2d8e05827cf400b6e6f7be76955dbf@5441920c 19be066d6bb9de09cb171c92efb62613+22466671+E2230614c0ccc69fd2669ce65738de68dbff3c867@5441920c 4c8396101d3fc596400d63121db853d0+13741614+Ecf2839221feb3d070b074fb1500544572dc5256b@5441920c cd29406297ffb7f637c058efbf305236+7619567+Ec063b1c180b6dfef7462c65dc2c7fc34b5756598@5441920c f68b644c6c02d36658e6f006f07b8ff0+23222064+E67594b67317452786c664f26808697d343d3316c@5441920c 42f58fb009502ec82e1d5cc076e79e4c+29666907+E2e27c6bef691333b19269570bc175be262e7b2ec@5441920c 384e1e7642d928660bc90950570071b7+16511641+E44951c3c7b111f06d566b686fc78dc430744549e@5441920c e200de735365bd89d42e70b469023076+26095352+Ef9566086c4526e88e4694b55cbeb2ed3d229198d@5441920c e809638508b9c667f7fbd2fde654c4b7+26536426+Eedb7bd609b7d22df73bc5b6031663824ff106f5f@5441920c c6e13cc51e2354c0346d4564c1b22138+5595242+Ef4eb609230d6644f1d8626e186f95f9b784186e3@5441920c fc6e075d862372e6dd4d438f0c339647+524636+E28e5d58c5feed7ef5e11869e16b00666424f3963@5441920c 654066ef6cd1b9ec3010d864800dd1c8+20166756+E655b286e729e5cb164646314031f45628c914761@5441920c dfe8df7f1f6d8f37667f275fb0f16fe4+10195576+Ec7b5272532230b29ce176629dbe6c9098f482062@5441920c 0b3e18ed791e551bbde5653487cd9e0c+26057104+E95309d4ec6c56d6490946103224e8e6d35622e12@5441920c 
9f453ed53b8be18d3538b9564c9d6e2f+14129943+Ede61011c6d265c59417889db12301c712ef6e375@5441920c fd919cb4313d5c4d3e6d36ddecb39d9f+27262406+Ee7dcc78b62b26b179f6cd05bb6c56b6d932f01f8@5441920c 2371986d9b195513d56d7d8b6888fd13+11366564+E487076c1c0dbbfe05439e9b7506b3d79dff8e3d7@5441920c 19cc39fb80e4cf65dd9c36888261bf6c+4264756+E5d56331cc97d68d9cd7d1f942b04be3fd808c640@5441920c 622c38578f1913e0d1ce5db993821c89+6746610+E95f98718306714835df471b43393f45e27ddd9b9@5441920c 3836977b216b56d36b456fc07bd53664+21620366+Ed358c40e313e1cc97d3692eec180e45684dc21e5@5441920c 738636b97bc221e7d028bdb06347dc16+9166469+E76e010db792235b2fe1f56f26037638570191f5d@5441920c 56605f61b621650d3df04831649d2588+6326193+E1d9d0567e8fcb93990f7c4365f92742983e6f69c@5441920c 2125e15df79813c69497ef6c0f0f3c6c+12757371+E30cbe534f649db7301496eb203711dd9eb3e9ee9@5441920c c61de805f19928e6561c96f511fedbb4+12157116+E756df376e5bcc65319d062bd10685df117957004@5441920c e32dc879179c2d507bb75ebd015d4d26+10261919+E2250d07188228888c8052e774d68e2918f6c4c2e@5441920c 6d2d0e3b6984940858e36864d571eb96+40669605+E2bd8434ddf794691166b1556e47ef8f7b636c920@5441920c 65603431e7ded48b401b866d4c8d1d93+24190274+Ed2c84b40dde45d8b4df9c696651c4d8cbe02e019@5441920c 1228e02f7cbf807d8ed8b1823fe779b3+10020619+Eef06c59626f88b5dc9b741f777841845549d956d@5441920c 7367b338b16c64312146e65701605876+44636330+Ee6d463f6d719b0f684b7c8911f9cdcf6c272fec5@5441920c cd8d61ee8e4e2ce0717396093b6f39eb+13920977+Eb6c4f61e78b10c045b0dfd82d9635e45b6b01b5f@5441920c 28079dc5488123e5f9f3dcd323b7b560+22369141+E077f18b49d62e4d88ccc78dcc0008e4021d7342b@5441920c 56bf3c8e6c6064f6cb91600d29155b2b+22616366+E920d258e698cd2e7e66d9f78de12c87f62d472d1@5441920c 49f686994d4cb5967d19641e284733c6+26439412+E9dcd733412c06841ded126efdb30542c4f932587@5441920c 1ef6646ce8917186e1752eb65d26856c+4173314+Ed60dc1dc4b9ed74166619d66109f6eb546c86342@5441920c b24076cf2d292b60e6f8634e92b95db9+39664156+Edf615c5203845de38c846c2620560664ee6cb083@5441920c 
576e06066d91f6ecb6f9b135926e271c+11123032+E9d147b4b89c947956f0c99b36c98f7026c2d6b05@5441920c 7642676de1dccb14cc2617522f27eb4e+10756630+E55cb4ed690976381c9f60e2666641c16f7cf5dc2@5441920c 77580fe91cd86342165fb0b3115ecc66+10560316+E99463b8815868992449668e59e41644b33c00244@5441920c 1c506d050783c30b8cd6b3e80668e468+35565426+E67c9d75c946c5c6e603867c66ccfcdb45266fc34@5441920c b0d8e3bf2d6fc9c9d067467749639c31+14197061+Ecdbb94e40090d099c847952d2f21de89803f3169@5441920c 01605bdb27b06992636d635b584c5c2f+20756432+E36de4fe4eb01fdd1b9226810d21c8f62f1d65643@5441920c 0c27885b49cf5589619bd6ff07d02fb2+15792191+E23bd16d3bd20d3bed3660d6fd035086d6d5146d7@5441920c b0149371ff6e4b097561cb6de4b5018d+22249239+E4f207f62d04d6d847c27e2463f69b847676344ed@5441920c d6fb819c6039468f36141e1344675379+16449706+Ecfb1156101edfeb2e7f62d074f52686d215def86@5441920c 09d34633511ddbcc6646d275d6f8446d+29052525+E6bd7fe2d67cec4ed4e303e5f75343e4b45656699@5441920c ed798723d587058615b6940434924f17+23966312+E97c78dcf692c99b1432839029c311b9e66ec51e9@5441920c 29f64c166e005e21d9ff612d6345886d+5944461+E004b7cdd000e8b6b82cde77f618d416953ef5f76@5441920c 8610cd2d6fb638467035fdf43f6c056d+20155513+E76b2453644c8624f5352098d3976bd41ccd81152@5441920c 64fbf1f692c85396dffd0497048ff655+26292374+E3d479e00158992e9d770632ed7fe613b801c536d@5441920c e7db466023228e000877117bf40898d5+37776620+E8268e86cf6d614e31b3f89dfcb73cfd1f7b4472d@5441920c 26f844c3000746d76150e474e838876c+16720695+Ecd248063ec976663774bb5102068672f6db25dc8@5441920c d631188d8c5318efbb5966d96567162b+13059459+Ee8e8b625c936d9ed4e5bfdd5031e99d60ec606e6@5441920c 75e196c3ff8c902f0357406573c27969+7673046+E3fde8dc65682eccb43637129dbb2efb2122f6677@5441920c 90d0f062f153d749dc548f5f924e16c7+5625767+Eecd6284d567555146616cf6dc6cc596e76e30e62@5441920c cc3f072f71cc6b1366f8406c613361f6+42976743+E55561d73068c4816945df0039e80863880128997@5441920c e74b79c0cbd84059178c60e8016d113d+13609906+E74850d9197693f46e640df4c7bf631f5cd6fe7db@5441920c 
186706b6c31f83b07e7c60eb358e93bf+11966262+Ee4e0e578278e9288bcfc546355e16dd07c71854b@5441920c f85c6bc762c46d2b6245637bfe3f3144+17595626+E780515682f0279edf3bc7638e69dde8d5c87eb5f@5441920c 80fb6eed15dbf3f3d88fb45f2d1e70bb+6567336+E61709663412711e6bcccd1e82e02c207d65083e6@5441920c 55d586d9b4e661654d46201c77047949+7406969+Ef65e6ef6de723634d7ebc04b8e8c787760940948@5441920c 6fc45eb907446762169d58fb66dfc806+26345033+Ebf58596e6096dd76c9ec7579e5803e82ec7ccf66@5441920c e398725534cbe4b9875f184d383fc73e+11140026+E54668ebd22937e69e288657134242770c1fdc699@5441920c 69b586521b967c388b1f6ecf3727f274+9977002+E6eb4b63de4d17b50866bc5d38b0ec26df48be564@5441920c 2e293570b864703f5f1320426762c24e+13651023+Ef6640563ec496df42bcfc696986b6e4f6edccc68@5441920c 462b1eb00f462e161f4e5ce2bbf23515+19646309+E47ec8fb615747c6104f7463ffe65d1f6738c2e67@5441920c 7f8eb265855e458e6bfc13789dd696b7+22406679+Ef3cf31dbb3fefef455f62d6b5c2486500f327398@5441920c 36659b0e79c69296927b418919561e89+24370117+E66e94cf0be13046deb186302cd666d5300908029@5441920c bf6dd822cfbc6b90f986e5f43d500c6c+34354522+Edff8be044ebd69391cf282451659660d5dc6dc12@5441920c 2267fb579f99df6b23290bd2e939bcd6+12153797+Ed3de8875c91d6f346fe320b20670c410f46e7ede@5441920c dd66288e4f7ef394f6ed7e9b73ff5178+19120741+E3860d5c83e021eb3646e5884018ec3dd59d806b7@5441920c 7f86957074e677328be7538ccbcc747f+16676462+Ef6492f2cb4dbf9d73c1e58e2d0d85b0dd2f18402@5441920c d7363e073e178f502b92e053369f40fb+26125462+Ecf329f93efd1ec34f17edb991de264b9590c88f6@5441920c 6d64dde62f62d6febdf6f2c66c0220d8+23263164+Ecc22f32322cd039cce602e155bb530ebedce7b49@5441920c 7b70bebe42067024d360b7216c55d7e6+11436933+E7b70998697b46b0840836219c8e37e6d74906656@5441920c 3e6201706ff76745189f1636d5827578+27434607+E5204e6cf46e581b019661ed794674b877f7d3c26@5441920c 1b1968d7d8bb0d850e15bf8c122b1185+13431932+E28e98b072607648f73c5f09616c0be88d68111dc@5441920c f8ddc22888e3fff1491fdfc81327d8cf+2633555+E1b55c1417c2c0bb2fff5e77dbd6ce09e7f5d68bd@5441920c 
9f200cd59000566dd3c5b606c8bd4899+10166739+E88797b1c2d44d6c6b6c16b6e2dfe76812494df2c@5441920c 65f26cbde744d142d8561b715f5dffc7+13335963+E13e86ebb6b426b1f4b6546320f95b63d558678f9@5441920c c89cbf812dd061873fdbeefcbb7bf344+6763176+E13b1765c5d3f3709605ef703c5c41bc46f25ffb4@5441920c 99f663066b7d0dc6f6e355eefbc64726+13444650+E8f607654b8d1fb72109b2e3eb64645202111ef2e@5441920c 6804c29fd6b3ec351dc36bf66146610c+26266416+E106283d64058d0c8b15061eee6d2059095767f7d@5441920c c23c67b4d1123fee2d8ed636c4817fd5+16376964+E392625bf396b887186e8200d94d8c7e392352618@5441920c 3f7640ed561971609025b37696c38236+14116164+E55239788883085d7f854058e090177fd10436258@5441920c 4f4014cf7cf09694c6bc5050d08d6861+23692725+Eb40f77014747eb8756606581bb6cef6665bc1e92@5441920c 0f46b1e0e8e69d0ec0546666b21f1c23+10507763+E173fc49b601c3c699d7cfce8c8871e44b371e6cf@5441920c 24385b164f3913fb234c6e3d8cbf6e55+27625276+Ed26e6d9e6eb59b6cf51c01d4b8909dc648338906@5441920c 0ec3f2ecf85f63886962b33d4785dd19+7026139+E43ec8f5ee2bf4f3b639ed66313c2363965702052@5441920c 674e2b084199c6be0566c29f512ce264+27711533+E1752f5c20c69cd33e669012632cfb2b93e1febf8@5441920c 8de5446ce99c95842b63dd62f2836e35+6793207+E808e94501ce9cf2f0b694f16ff261d42792dfc34@5441920c ecc3b274850405ec6531982414c634c2+15405916+E3c45d5ec865de3c34bb7e14e5577b7ec99d50268@5441920c 4c3b28e830f55707601378f6b314bb36+9160724+E6c42dd49736833326cfeb59003340d99d336b85c@5441920c f217e6338e5be409b309bc05768cd692+9467601+E33296cb0476d39648eb3518265241d2e58667c69@5441920c 1c33d278e00d838960c35365e8b211f3+7969532+E976bbcb318e35b425276d16640687cd30c0f6513@5441920c 45fdc6257f4601f5e6ddf2c3f3249453+24739014+E37fc9116462386d43647d43b1f24301fc2b3d2ff@5441920c 42c619bd934e4ee7876e6e62bb013c8d+26941562+E22061d93633689db860c97d09c2d428e0bc26318@5441920c cef567d31d5e889fc38f0b1c8e10603c+3036311+Eff049d2e8b04646603c7307d8427ec384dd5636e@5441920c 6d919324cfd4489696661b0c3bd2046e+7761096+E3d0ccb506d66c4621d1563e7f301d9de5e306ed0@5441920c 
4631f15b56631ddf066623240ef60ecf+16709476+E125d603e61f05573e9bc6d15d64038548be25646@5441920c 6c897d794f5e90b15ee08634c3bfbef1+22602265+E65c0d239fe02411d4e688b0ff35b54b5fbf861e6@5441920c 26e1e7c8d16d0ec9335c8edb01556e74+23405696+Ed77c8c87b739992b6e2f4f0bd813e3877c029646@5441920c de5607856bc6965b3d689d9f6c739dc6+14457362+E16b373fe771865bec4e26e0c5b86e3241be55416@5441920c 9c96247f87d27cdf351d10424fb65154+11220750+E5666f47b25b3667bf32b17cf06202016edd96078@5441920c 6bb96d31bb0766150fbc94ff08ec1e50+16561466+Ef617977d6fc4b3b7606056e7744f61508e1f6dfd@5441920c 290806849f83631376637e012d63c055+15634314+Ef56d98c07c837800ef7653b9e74b1c868911c512@5441920c 917ff996f786819bc13747d05796db8d+26147265+Ebd9eb6985b39beb62d7cee1675dc88bc469786be@5441920c e3c8b5f953857082274364d3867fb56c+11193151+E39798993b68bcde100412e41e046f716cb576fd4@5441920c b0ce9f0bf1db246f83f961be4789b2db+9599462+E9d8bd12dc40e9e4665e4f33206ce9d4144b5c48e@5441920c 77d5f68866703cc369796f6d56c4d564+9625154+E6076126e1811c6e7b05c8959558fd35be4d9336e@5441920c 7b861b04ecef1e4260f42febc076dd48+46677445+E979196bd9bbd7456963e8f55564ecbe16ff3745f@5441920c ffb4f46254cfc652517e153438489038+12795653+E43e6ec68c5276d6422c66b077266230772849035@5441920c 7699462d29f00f611f35891127e16031+27123199+E09eeec5c1612c40246b21e26b65766ecc59bcc9b@5441920c df706e0400506e210565939e04539eb8+16632721+E3d404cd76de417682560ecf97b5c7f821c18148f@5441920c 1c9d96048b663c625fd02658f6f75c7f+12652756+E97cb664d41f2b9c69f9fe5667c12bcc266b6d492@5441920c ed360b6b945be71391e803353132c5fb+5706666+E7e4162c6cc3862322792cf91d76c719c84896c74@5441920c 24b7bf83c6b60fe6cf9746c8d16b86d6+12566075+E0d0b95ee04f865f5db70e2c80d35ed7742d20619@5441920c 9deef070820c1ecff87d109852443e97+16946677+E288515ff55d2b49754bffbde646d6b9f08981b66@5441920c 5e57630e60dd29658e61165360404fb5+12209370+E0762d4cee56b876c85ee0d2fd468649640561070@5441920c 61c7e19f7e96bcf59bff036887e5e755+17916606+E92d286ed713f8cb36d44f6b0346db71b5156648d@5441920c 
878e7f227305c5c89ddc057bdc56ede5+24643337+E214637662b794717e65860d89ef5bc35f3f43d10@5441920c ef1514658c8f004fe640b59d376fdb06+3264756+E2b6eb6625c08c54758676006f634f9d09d9218b6@5441920c 485e4d6249b959b57226eec66268d074+4102134+E1118dbb1517f7323387bf970ddd5457c852353ef@5441920c 06d4b5ce44510d68dd154ff45203448c+19703325+E65bff4376436dff5c5601120e7c7138cc78eee61@5441920c 6d6616d27e10b3d0b562d154b6934eb7+11554223+E814476dfc3d4839453633b5538f76e11d365cdf2@5441920c f81f6f1ee2b866edf1e866c360c9decc+12130664+E3f3c05664668c4573244d3ce9ebb32356ec78d00@5441920c 66fb6db666667e6fe4b644d414643225+5642000+Ed3db35e5034c66e26323c3711b3bdd9e0c30b9e1@5441920c 5bedd5d1813136695b744e6696bd444b+17354621+Ed6c692158452b91b00e4f7065fb4d57945c6544f@5441920c 041391d37c47b66c064f216c76967c1d+7546724+E225d15c0700689d941be9216136d5159e57617bf@5441920c 0b3936e98635485dc5c39c091b1e141b+30306549+Ed8201dc4b2f19c6436b27200cc661160880f53e1@5441920c 87c955bc76e6dcd602074cd0b61ef669+19466657+Edce058995064b4c6d2ee4b5fd77634ef612fc4e2@5441920c 5863cf41b6d842606191f91266766ecf+19566732+E35547d8c39d6ddf6f0fd663ef6207d369121fd2c@5441920c 4b2cfe879bfdd4f5592b2948e1f12f80+16726166+E0c34f334513cfc42834f2f1b8bf3c2ec320bf9cc@5441920c 18fed9e859f59e23181668e4143c216d+7297044+E77384d2014fc7f1e460436175b45bb23678c0f70@5441920c dd1ee9df0750267ee5bc9ef6f29b0632+13453405+E45879d6d0f51bd868f7361809df00e383b2d83eb@5441920c f3e82d6578cc5172dd9f264f50d8bb42+20691242+E246dff090584102969751374c13e36510ef96feb@5441920c d68c62d920b706612d32f31727654479+13969727+E0428790ccc219305dd026886526fc5f41505ef67@5441920c 672f554d523e6939c88956610d8d66d9+15929956+Eb0468436beee5f8614d96765e75c628443d04832@5441920c 03690d1333904fdc508c57f33c715c3b+12006715+E3dfb288e160d2920cf92e3cef145d82d8636d807@5441920c d7d5d48c6ecbfff8edf63e21c8ee1680+6976746+Eee6cf6450806f2d68c7ff61d16ff0b9b09bee55b@5441920c b206cce6b38d71c626fc6260d60de055+16617309+E5bd96be2db6bc7692b8e7166fef6741635fe71c1@5441920c 
f82bc9fb241fc9bb1e9403660f31e963+26602130+E23677fb52377535f6f4d98371640701007467dd3@5441920c 60909d87315fc866ce54161907883f86+22761626+E222d02645d114b88836267760cc5599064dd8937@5441920c 5938d2c975658ed73f676cdf8e2df648+7096657+E6d5533fbcdc0f54dd094cf4de638c4cd3020bf04@5441920c 4b8c87889c09deee98b01bf9ec143964+26067196+Ebcb681616efd85c46893be63dd6663f5b45695c4@5441920c 4e7f06d06fd613f5d50dc3b9626d01de+10673992+E66fe9d65f3f18ef2fc74c6c766e04c6826060c21@5441920c e016be89b3607dc2c6d84703446096c6+14647560+E67d21749bf35c936546c2816e658c8ce4fd4863e@5441920c 65663576005d0735780d7783d27fd612+6567442+E3eeb256c414f59c671484666608019515b6d66e8@5441920c 8184bfb40466690c3c7bd33cf2001b7d+27369311+Ed3b2d4e52f16cf2c20b95e1650f0b69671b6767b@5441920c 28210e98e4bccfc0c8c881ee65dbccd7+9264693+E6780fef94c00c22364661b4df03db1894b65b279@5441920c 7d635728d6d3f0654491e73d06e2760b+16320752+E89b121f6c09e7f188397cedd9ce53064630e4197@5441920c c355555c484c0d41d31c1496bb0f88d4+4140293+Ed2ec40601643f992424e6042610ceeec4f926202@5441920c eee46de26c233081986fcc63036f6e87+17266099+E643f07bc7496eb97beb2bbdd74f78d9c7c40632e@5441920c 6bf27eb8b36619050c0246b26d541397+3060756+E9ed96e63725bb226e6717733062d92c38d0dd416@5441920c 17e7810c048bbbd3837c74253576c064+3260426+E660edf2b267bd1dfb1c70d25ce1173d99b572435@5441920c 633b2f33c40f13b691d59f8b64543ee9+26136225+E65975c79c76fedc2d8b92c2d8095845996c656c8@5441920c e5588b19938ee85458f1008b6155ff80+45662056+E5fe59f043d3b8e6f1ccc6d92e19ff6c6bd6e2d2c@5441920c 14b6ece5c233ed08c8343665bbc435fc+10447960+E6009d59e556cf6379ed6bc849f180d1cc33b3068@5441920c 1064ee1f9f687c0461c5bd686b612ce4+6564566+E7cbf7c65eb90855372605b5452b6265366e64841@5441920c c073866fd327e646c556d748027d6cc6+6396676+E8c404153f6d5010756968c6b9ff619bcddb1e1d7@5441920c 1dd987d82e5f8d23659cf23db99f6517+7956724+E18d666c504486712bddb5f8173658650c7708182@5441920c c4eb6d77298d6964f9e862e809463521+34269266+E1e466382fe93e2103395fedbb57bc5e2826f482f@5441920c 
5c621f017e2e17260b15e13d6d6102be+13762411+E5293993d8891eed812c1829096775c9129d66d86@5441920c 706beecbdb9f413d8456e05b6744f6eb+3947613+Ecce55b46196c75ccfb06eb9b392e53d9f1c71c18@5441920c d498f6f76978747843767963f5064309+5537714+E2885742de6412d62b47c33bec68d8d9f81f9c09c@5441920c 2266396b65b97e348973206358673f66+24305632+E2e0ec28566c629333dce5f41e47488f4d736f018@5441920c d91969572c86d6b14636f6e3460bcb24+17507515+E96fb6850f7fbb4d9c2e0954be44635896879976f@5441920c 11b46690ee6e9bfef0c4026d856f4670+32626524+E361d099f561efd303d2e24182ee09327ec51657f@5441920c 2361c32669d0564e52d336f85923b61e+1010299+E45038369c554e6b30b60f3ec580898792163d919@5441920c 858bd2ddeb56d69038b78d289ddfde15+23454636+Ebb767b2668b5f9f61c4de733265595f1c074e606@5441920c 91618b31768711ec4b19dbfcfc7bb98c+16017355+E876f5f62b67613de0f79e60f245cb0f02f017220@5441920c 1bb9feb4c6ecd90cf1d8e061fe9967b1+9792746+Ebee666de05c3811c76620f4d9f49cc7103f0690f@5441920c f76ed53563936eb324feb4fcf9d2e44d+533647+E59361b31266d7566c00ce339629b5d1d86863cb6@5441920c 47f61e664eb4d68364d94971f0446206+1064656+Ef226fc40f66666690e640c125f636b37c6e75682@5441920c 155b75f465771d25168cc2f723426908+27465637+Ef6d455ccdd7350f6d8eb036675b046bd531f694b@5441920c 189e6923d3e6810634475b6563ff42d0+12707353+E218987c1f65753c694feecf176253ccc353268e6@5441920c 345957000ebe671b86130e51896d8694+6632970+E76eb72461dffd0b03ebd0287b4bd4df60fff6019@5441920c bb8830d56f6e8b0463c1897f9c6c9b68+6746794+Ee569093960e68f65b8bfcf0660c0d51d8e316507@5441920c c1c82dbc3246d4994c7110536532bd3f+17732191+Efb0bdf49337261801bd36e7f961cc766bb258d6c@5441920c 3469b89f618cf43d6964c89cb7557360+15491375+Efb4f84bd36776264d5b66193cbe06700c9c36986@5441920c 1c6c8cdd2b55b59763484fc736fcb2cb+20295749+Efd1b1e16c26825e6be2f0086e5956ffc2cb86186@5441920c 425eeb625e0e6f78640cd646b81ff96c+27117670+E6c651bc6fbf0911c5f0cfb13cf46643234cfd962@5441920c 467b40e186cbe66e68e27b497c146989+14464752+E6661978e64f282c9673fbf76c8c28d447de95571@5441920c 
215e9957c31b9786166166d3066dc8c1+22592925+E24ec6bec163688076c95e6d575cc43c4d2185d25@5441920c 8e6d9566f2e6b368629c336c9fd6e0c1+21043993+E60f9744737815de11b5cbbf7d2b9bc26197710c6@5441920c 6903b3ef7b72b5c437121c8d75ee5f00+6526756+Eed896e26d13830cd0de7271e986638655bf936f6@5441920c e99d862823e5647d428cf03c85510dff+4646274+E7f7e0d272568f9d8353432e1d1284c6e99179ee1@5441920c de8752933c71e8e4912367c396286d59+19571326+Ed6eb12d8d1ec809bc6636806c89f0fc31b76e49b@5441920c 42b9673e467681dd1b75622d5871022d+12923669+E6638266df36f80ccee9b177392378fe0174654ed@5441920c 6738766901e6522d122065eb602706f8+9921926+Ee0506f3116684358651481b6f6766b6d61e4df36@5441920c 25ed8c9f9b7fc61b3f66936f6d22e966+2695507+E24986eb797bd7e2ce75f8cd7fd13502bd1db0900@5441920c 5f63716d6964f6346be68e50eb3477fd+11292446+E6d40765c1ee54fd31d239e1e96c25d6d964e6e33@5441920c 646ed63541be7c4b197e74200fc58563+40629656+E3228f646ef6d86dfb63090bc1f4540534fb12809@5441920c 2bc96d464c08c774950465b994786463+4060756+Ef6418662f5bf612877bc0334972769d5c364bbbe@5441920c 074f412860c7143944662f3579e8cc96+16610667+E7d989e4216744576f348473d58cb5102cd3b57cb@5441920c fdf162c24e1b743db60644c910bfcf26+29170320+Ec6c6b955e0fe664690d2364446326c2f16279321@5441920c d1e6d9e6512687494cb66788d97d6b76+21574362+E9e9f63bb64f611c623604e6f6f0222e0c8105236@5441920c debdb22c0be9d5cf661539bfdd628421+3619563+Eb95f6d2052bbc63bb931d21fb518f89531168e2d@5441920c 1b3b785b6f585c9f46c8b932ce5ceb26+49161531+E2f15232081e450fd4efe9368bfd8bf8162046667@5441920c e336b53894f0543d59963105e9678367+19746144+Ebf3c79b229c275ee7e1201257605016278153d7d@5441920c 782f48c017169e53d2c726d046dcc6ec+10946735+E9e78046511c67ebe2b39f5b21622bddfb87069c5@5441920c fdeb6225b7463435cebe00e7f86df276+6376465+Ef4599c2d6e757f7f66579b373e9e6ef0ed74b62d@5441920c 32d626f756c4cdf566533c6b2df652f2+26661567+Ed4671f20388d6576565fd26bc00d53f0e38b6c51@5441920c 14c4e60bd3fbded9dc8d11d6e970f666+13661669+E0d589b83806594837ed672319ddfd74f3cc39ff9@5441920c 
77886771777c50587e02dd08866b75eb+13501427+E01866f494dcd7dd4fbe7541df16529447e52ef6c@5441920c 8b3bf3e5f6b6be1d667f36d1784367eb+13677551+E6b241697c8d0c97c142fb695936589c1945e9ebe@5441920c e12686bd46818f07614c0143b68802fe+15666076+E24458761c577527694bb99ff659b96c954dbc3e4@5441920c c710454601fb0f6e4d03d6461fce5f17+7996490+E8e9cc9e865e420e3e0cb0987f106665e80e7184e@5441920c 316eb301c1ee9cd9b38c6544cb7bf941+6053236+E04118416885186189d00220842078fdd82b105bc@5441920c 1946863de487f91790e10ce2d63deb4f+10726254+E1613e538b89d50e662650196b2bb46060e46b325@5441920c 7e6debd8e9fe0f58f0c0ee19225e4664+11356746+E15749f35c8f636eb7666f8d62d32f179c7f2b443@5441920c 62d6d9202fc0cd2099157526b4977b6c+7600427+E5363fc1d6f6c9ec60576c454be6e0e026c638644@5441920c 80f767764063d69fb042e73741108330+20722736+E79223662b666f482c76c074de7c948d9b81e9eee@5441920c 7de230cb3c601ffdc306c656d729e766+13729019+Ed6839fff29b73d5b54c16855f0cb57ef1f0d5dee@5441920c 566eb88cf65d80f8def689999ef64367+20246913+E4868dc526d88506ced164b48b2cb6ce669820484@5441920c 27250e8f350f3b51c756d68e47e2c980+26945676+E8c606e26b483c6e93227776776b116e63c7b6607@5441920c bb9e9cd086ee769366229cd0b32b5c09+3364670+Ef63125e4676b66d764234e76f314863e7769e3f5@5441920c 50e50111ef9bfff37663d6932f9b72fd+16155754+E056360cc57665896b629cd38fe14715621363de6@5441920c 72e864cd512f786c54b9f07646e66e37+12762477+E6bd9bff5c2926b09dfd6b66c2e969dbce9f53669@5441920c c339c751cf7d5166c30b8b21dbefb69c+16572364+Ef279e41366b796bbfb333ee55631cd9dfb6e097f@5441920c cbb37c74cd1f688d1c9756cffbfee897+12456663+E523b778eb6355bb66c2f5d4773d775bc6df25dfb@5441920c 819066f13ed2c71947e3f647656b576f+14524669+E62b3c65fee64e372239593516c64d60fcb850d75@5441920c e3635e4290543563388e94e1e6109729+36661662+E767f7d2e1298f1ef565e967e6170f88f7d6ec9f1@5441920c 2ce76730ceec8d843946f809c16f6f46+3149045+E882e0ecf259166b860f68dc6fd844cdce3fe49f9@5441920c fb2814493d1c484625bee373d5369cb9+13700211+E6be1eee5409d867cf0327d762d7ede7fbc296f25@5441920c 
6ef39899b0ce52e83964c55f466f7021+7529724+E3095671946451ef2d9b129106c26f1e9515eb60b@5441920c 36e914556f2c8d21b82b63578764e811+7950542+E329cd0c0b244ff75d31782f2dbb7741619b24861@5441920c 895c6d874d1245d8e66455604fc45d3d+14756600+E764966661b47eb9946f1964e5ed060f623240695@5441920c b66ff865ef7d09bb19966902e62429e5+16443596+Ee56f4778f3b067103eb6bb8e0249fed5133749b0@5441920c 3e76f1361961466b0b95d3b6f8ece285+20106669+E01c84b2e28e91ebfe917067bb6671061c8db49e2@5441920c 63953f84933eef8bc8bd15c5d560c522+20056363+E4c6bd626c3b008116064f13694d49844e6e656ff@5441920c 5964964ef7c947f1c185073125669465+2567406+E064d861f4630b32521588b17290264c70f3cd71d@5441920c 379733627e446179436f327832659951+30547504+E76c3833c4d3698066d4eb966d179b85bc889e628@5441920c 3358c02673c23b84c37d83e469c72f66+21562054+E6008936e0c5343533bfc19f5c81ff58c3e2925b3@5441920c 46cb194289db37ee376f4f3346de0e04+27395356+E6db539216c1b433314f27bded4c6cf0078bfce37@5441920c 94310de101827648d6b3bc3c89708c59+26365676+Ee319940fb28fc2b11801e3019bd84937e2248074@5441920c 42220345631c336b5194ce9b573ed40b+269200+Efe4d5267e1d56103455663b90c06d54622e0641d@5441920c 2263e6126061fd7681b1d7e22b9f6e14+5237174+E51317e2730be6fb316f2b2b6e31d2913f4f37676@5441920c 07642351234b816b15e150bb6bd637ec+29727146+E66325bb50e67ef4de1d94737653dbb98761c1e66@5441920c 2fd5ccf86cbbd0e3c3f366d7bfee56de+30907674+Eedfe3e86d6243ddf6d5ede6c86604e7e310283d4@5441920c d6859cec4d9fb1c68e391840579b56de+1504656+E69c673e18f46659560ce19e24cd642d7ec4cb3b7@5441920c 49620b9c06ec234288fe02c59e694928+14943044+E4557ff4e2cd1800c94b296ee059f895660b0d38b@5441920c 1e7664d9f69c30178124676004c5622c+33721037+E16ec6ff518bc86565f4c9dcfc0656e38cf2d47fc@5441920c 17ebf9c6bc4ec665ce79750639272662+24605551+E636c8155632762d667d6c9004f6738f927dd5979@5441920c 342b663668c683fb488c62ce8568b618+29376907+E156b0293e6de6662cebb0703b9e2b37386fd116e@5441920c f0ff7321084e5fb26b047c29b787166f+12633635+Eb57428f2bbc765e1391c660e6592684e76f624f8@5441920c 
edfc2352776c326d1425c8f75206518b+14797426+E136b15d57166c3791c3cec25f2606868be3bbdc7@5441920c 19556e814b8696d174614d2635efce37+13760102+E8e64c18124f98b3f0d615b89b4bcc0db5345471c@5441920c 25764e17398bb530336f104fe1f16fe6+26794272+E6fc3ce18868166e546e46d72fe289455cfc70834@5441920c ed98ddfbf7181c16fc299ee261fbfc82+10201924+E2de330f0e91b386d0d779d21c3918e998cdde6ce@5441920c 77eecfed3522b3b96d26b645e3367fb1+24124636+E2332473f67efcc195ef87657368074fc7b600642@5441920c 9bc03661300986db109ef2626d3742c8+26615557+E68ed6cff0f9894c2ec3e940e0c676ccf99b6c0ff@5441920c 152316dbdb21124ed53e3eb985b94dc0+22145236+E658096d502e9136b69b1fcf50d5064613dcc7d0e@5441920c 561c751762166c7b8fb609601b9f2f48+7311346+E81d4d07984d6c5e974c15008d4f92d663c710388@5441920c 012c01572b943bb8466fb8116e57e60c+12577740+E1c98f4cd9f1760b062bbb20bcc0131eb9cbf5821@5441920c 4dd985e1e9728f9d676d9d526c0225cb+21506140+E7ef21dd62f372fbf66c17e6164064bc9c1283863@5441920c 626622416232e782cd0874f9fc41e170+52369+E8ec7e615f231dfe25b603f3c178460c06e624f6f@5441920c 6b7e084ec85bdb5633ee1355933517eb+5076969+E3251c561406ffccf6f6678054cc66308160672b9@5441920c d944332019b54e4213694d720652f837+31190176+E51c7c1b974617f8711d31f1ed3d554dd69708b92@5441920c bc35e4ec4f310481df053878c99e2028+41160366+E72e6fc6c8996446f8428889039d6382c3187ee57@5441920c 32b116162e37fe261fbf44699d161bdc+23615045+E7616236b140e626104830c0bf9b63c3632defc9c@5441920c 3260853d69d0f6b96ce5b079b1f1037c+34031699+E614c898376081ef614581fcf012196259b247f1c@5441920c 9024866876926291e291e983816cb080+13651503+E44d2c5f757e5ce51df4bed90d213e67280c08cbf@5441920c 7f7352234c5c86d70eed25447b6f6e51+1996046+E68d1c68b7d65e0697e6c47285061b36474bc9848@5441920c 0866e053769fee5e5eb4c9315d6bc5f2+22692591+E44f353b0622fe8378168c3cc6684ee351e0105cb@5441920c db74b6286949f3b1fc69be2083982e48+6672354+E2546bb731323d421439cd1c6e426dfdc0e6f3184@5441920c 1f6e9090bce4972b5371f66be3dbe365+13749361+E6276f45e81bdbc0eee34e591e76b38385ed87108@5441920c 
8495966c987b24d64e8f23261e40773c+16660930+E6b7e063904d76d68b68ce542095408b362230e93@5441920c 222648c113cd8d52179954bf684d5626+11036031+Eb563b11617cf4f44d7c31e51e50d17e0f398f063@5441920c d878ec2cecd3470c7dbf4291653e6c90+13412650+E0dbd46d19e8b6f8c66064196cdceccf5b762727d@5441920c 7658c35e0b91464508f7133dce6e60cb+14313555+E54e6f8e766224090ec6c74f776d30ceccc3de46b@5441920c e8789734411f44661e0fc74c1c0d36f6+33635703+E470928dfe26c643e0603f7630526232621ddc4c7@5441920c 8825382969eb6c5066fe78997e0c7bbf+469634+Ed6dec1e7f6886d2bd1efbd8c6edfb22edff74bc1@5441920c f5552117005f6c9d736496e2f9030f5d+11377056+E0c2be5653d1776957700311e5de86764c636bdc8@5441920c de62f65e30719327fdeb326c2f16d698+16346545+E5d6e6d619f640363f167467b2de9c64347e63768@5441920c 3cc990452997b05b51ed170d291f9f1d+21127772+E281769d6ed0579760f4f2342c1f9bc76618c8cc3@5441920c 13c43c4e049c7d067f0f1dde01648303+1059366+E3f19eee97b53b375756ef3367b86deb6077c593c@5441920c 7d62d6e36364e35252710e47b06c54ed+6964270+E8030563b53b8d7d6c4d127c2e527e6f2ef56e98f@5441920c 7255f3e557e3be60e6bd18054b360f99+20073973+E2d6e29ce1c66668b02f075d99194392b83bb67eb@5441920c 32289c50f7dee66d59260463d7b85c7c+15769669+Ecd9f070e6f3b0555848c8506610997600db07b15@5441920c 6603201c20e0c24b9602169b3547381c+9756229+E3268c74ff8f0f67d1cf1d10c01dd9e2332dcec21@5441920c 061c6b2528256682c7b205b0f0f9d69c+11469333+E960692b62d3d34902fc765048d36081bd58b0e75@5441920c 80b649545f654616348cf66f4dff90f0+11074951+E49c53fcd4deed6b62e3d292e66e2948716e7e1cc@5441920c e736c8b66c29160f42d7ee5bd649e636+26145091+E436426d265d3d4d65658e6b39405b82d308639fd@5441920c 961e212b3d7f9464c268692761090f6c+20545569+E4606f1cfe9cedef085404f8465b915190c8cce76@5441920c 761fe39e125f6f19585464b661706631+13562476+E33877770d62273e62e345f52b755f73fd56c59e7@5441920c 241691bc053966df9f226e308c46e36c+19737049+E86d7f2325737cbb78d6ff61b583ec96fc4c8d0f4@5441920c 6e367eef8cb34400d2b43368893c81b3+27529030+Edff31b6b50ccddb0954c28c8cf38ecbc86417510@5441920c 
05555c5fb49bedcf63ff878f1cfbe3c8+15452164+Eefb61e71fb4066dc56c247904be42015ef755861@5441920c 354e0c970b39c6956fc9660eb7367b61+12062565+E27cdb80616591bf8781d75f2349c12c7261338b6@5441920c b7c109d474fecd5b568fd8e460e81d02+39769591+E66648208f40b52f1822c01c3d61c374b1b656055@5441920c 6b6b334d6fc6ce94572fdcc96dbbb204+15604669+E690739f9742699cd09db1fb6b7c8f864916663d6@5441920c 796452beff88c6c0b46efc4b93f14ee2+4141622+E864cee574b2995464159f65fcb48768275ec1649@5441920c 61b6165606d625f9e2f5d22966e9f6c6+67106664+Ef0ce0c9615bf03becf58b76695fcfbf57596d5d9@5441920c c3c76c86ffbdb4331c4d29705f7bf508+13102561+Ecc8d369181f0836cfc5964c61e1e36945eb163cd@5441920c 40b30d29c63466c5e6239f6be673e456+16343642+E2c9d43453c0772ddd2619efbec822e08dcc33967@5441920c 386dc864e33f0436b915d5fe99e568fe+16664730+E20655b581566fdfcc78c0210d212eebddf4ee191@5441920c 6310b937f32c88e68c99d1065dd562ed+13661616+Ee8f93e9678226b32596883f5283d6271b57cee3d@5441920c 9dfc7371d62085d018c01f6e734d7666+26472421+E6b56e878337cbd25dffd733e1613722630682615@5441920c 2b26b973d557149726460d0c84dce8ee+13161766+Efdf846b7114c9dbe0f464dd7fe5226600d66decd@5441920c 4822d05c1f1061302f5e90ed3e33eb32+26136564+E293c1b58e1dc2eefd8ecd3dd99357b837c2d1165@5441920c 802f5e0e9957fb6fef23f77d1826e5b5+23561374+E05b6f3952dffd11ecf83c61f3eb2dff941cf0d48@5441920c 6f81c5950c9bc67d7bc82566e8735fe5+22349651+Ecf7818e4574828536cf5416cc67b87e5233b1586@5441920c 969c688f7267f6e313e4f0fe1c97d3d4+9400437+E697ddb147c825d4b09d2f56466ed5e61cccb10c7@5441920c 23d33d2b86f5be5e60626f213453696e+6696401+E5476e70ce697f686bb63f6927c758653123c7926@5441920c 01fb6544531e50d5dc982f54c6945839+14463365+E796ffc2fb3492cd5f70b9e46b09e7b904e86c186@5441920c 5e10cce37c60cc768ece04794589b362+2797932+Ec7bc4352c6c25f73fe54b62f671673701b676488@5441920c d27e35b3168f6fe30d6446d469cfb82e+7140760+E8e0e1d27865edf69d6f162f262f418267864b716@5441920c 79ce0bcc5f565e689c44df3b2f299690+7956760+E6b405440347634c4d780d9cd2f751b1b74801821@5441920c 
6429667e76cfd6c7049b9f2dc83d2e02+26100130+E1c67439fc75bbe8822c11f6be411228c75474346@5441920c f63d64c68edf1058f8042054d9e608c1+15570132+Ef59753bed1608c150b463db19e0b824c56180472@5441920c 3d76591c1fbc9b1cd43216b53037d3b8+12079936+E3659f239292e2cb4c86885b44c6669507140f5b9@5441920c f438cd1e753312868038166908b7746d+23646496+E2b187c62f3015562691904e717f0b766b1d119f4@5441920c 476b689f6d00f5d5c94d4bf89d2d6f26+7320072+E7e4d35700d55497f8cc8188559d256f046d0bf16@5441920c ccbf6b908e6d39954627695372c66646+10249929+Ec220ef724e48c90b31d0c396802df409203f44e6@5441920c e25bc8599399b2b9c174d0b866633d63+13622024+E736877b72407836e424889479e46e60506db8c6b@5441920c 573e08705ff70f79d328c60c0dfe1151+7329647+E978fffec456ecd2633409ee866f9bb9311d976be@5441920c 5f2772d86c6567de1c03fb9b1535e6b5+25915639+E26e094692d34cd8e6e51f964bb8f147be4825d0e@5441920c 6c5cc886928952bd46f1e0432e966c39+6902437+E22272c74f82664339e62651c6373fcd997684ebd@5441920c d7581c3bf65327e93bf6cb536650063c+19367309+Eb904b6e6c9337464e0bb3e3b1fbcd0bf4228726f@5441920c c4dd8646b372463c3ce23c3604418ebc+10334901+E680573c727b403b3c7d364e9076479e6c68ff635@5441920c 16fe696306debe5906c75fbfb4f35e82+15956391+E68cb974c31829f20f4381d605c396ddd9021502f@5441920c b457f19b1c560665968f580861bb5519+22361464+Ee0e5f7040fe15c3d1138046b4204e2d81ffb09ef@5441920c 6283c883239d206dc8d7bd20439ff2fe+26762910+Eb6598d1d22ec11840f06949940cd671e16f54d66@5441920c fec697c5e865cc4e4587d9e2bf4b1df4+27462517+E6cfee6c054636f17309efc8185cd86cd1d0f2f28@5441920c e369d98390996c5b6d124db79d188615+17696144+Eebb1069fe1f6f406c36e2bdd4ced45961d1f63fb@5441920c 6566d9d439d70e07e6590b7232bc6dc6+16115379+E4615bf36e6691866358c30874be71993dc04c491@5441920c 78ebc34f2f582b1e58e52b36cb9b9fd3+26603399+Eb868f96c8010eb08b8bf48fd6689d884962fe856@5441920c 7b306f84f006e652f346640314e565ef+42767332+E8696fcf20e694d7e3190d2263dd0013486d9e286@5441920c 6164704991bcf25741294e26fb6f1033+22519054+Ed30ed601783b6f824d96968157e0ff69d0199301@5441920c 
f7942548dd956c6c02c1eedfd2755947+15623994+E8dc622096c66e7e459680b0466c97c08e968247c@5441920c 55496f6870b58c20c61c69e329f86b18+50651137+E4d15666681f614d666ffc6033cb2564fd498b422@5441920c 511635872c2be5d2773ebd578167369b+2340763+Ee6f8d691449ec061efdb7db6e67e686446660060@5441920c 5e4db617b4b314863d3df7f5f7d40b46+12296366+Ec52619bfb7bed7e38283cec6c31c629f3b43609e@5441920c 2c1c155e211f8615f348f56cd4e2eb68+19160541+E4d61dff6db10bcdd89f30333c6f416c4dcb10050@5441920c 26793ecd9d648d83017188676d1e468f+21150112+Ebe666f5f9db78499070dd5cce17f1801f5856395@5441920c 5c19db5f2feb0ec6cc247077326132b7+15934102+Ef2c65268fb7556e5008c1ed147e6cb62fc23b8b6@5441920c 18924490df2fe7c8bf536710b6fd6766+9572247+E7606e9814ef7776e16c3661693f0490c94195225@5441920c 640e94c562bf36ddeb0dd226029eb0b9+37063925+E6ef54638f818d4fe8c3dd65c8f3366c7e5d74607@5441920c 14cd1cc7e24f6f166bb26dcfe4143ed9+26279656+E0e62c48482369497792441dd4672849654fb0616@5441920c 606fb1c0c699c7ccd315576b02e692bf+21312663+E05ee10d5f8cc07fcc3cc665d0efe3d1b297cc615@5441920c b42c6410199f3c4b0e54cbf94ee88980+17966553+E6b79e87c11e7fe96d5d960fd875261711e66f06c@5441920c c286916e594c40952556b7857d67e889+20502272+E1b528c0bb53c020dcd3581d629845bc1c25316d0@5441920c e285ce576d5090b707f24dd699667c27+10454346+E6384d44e091f0b6379d8523d6defc6cb6975eedc@5441920c d530986cfed06e608fecb1191df8c11e+26240932+Ee061638e4f42024ef17e01b02e67383f15c14593@5441920c 3536b5d45d919cb866d1569d96f9e939+11477343+E19f085e4dbd379e83c9856956386bccb26495d6b@5441920c 956fd18076397dd9602e5c01ef76623c+16121702+E976fc641f109ceb585672eb795e964c6b8f2f509@5441920c ef33ee876d98646e6fdcb3867518b6cf+21665969+Ebc9b108234b28642df30c976460016486d27f2c1@5441920c 7426c8c56917966f5e7d867133c104c4+2106601+E63d93fe162433e6744e8bf1f63613d3994d46615@5441920c d12b745d9bb4de069124635140d94e66+22234696+E39d77b9c6db4180d930e44d7e77594b7328cb8e6@5441920c 7cc94de0506c1800b23456081e828694+17466445+E3ee18b1031435b6c714cd132d53324f3ec004ee0@5441920c 
1984fed8feecb6697671f6c7629736c6+27353500+E69652dcf6edd66d6f7f223c87526ee683550ebed@5441920c f2ff43078422e101efb31546d513d917+7951115+E08c6d30fce60f953131ee9639397d6b9f361b6f3@5441920c 386b40bcf914276c970f642f66521be1+10132647+E69d7529f917e1874c44c21e3c1d391261690268f@5441920c e09636e09cd7d8c32b6663e95678f4b4+24122390+E115e9506efd385e3c03b51d274b136cc283cdb61@5441920c 5dcf989f58765256e745395de2c16d69+21750606+Efb7615104f94c7b4bf48ed8ec84e6ce1f884632e@5441920c 686e5915cd9858003f6822546d6b6d4d+15546705+Ee8d6933f60c51411f136b86962dd7b30c27f466b@5441920c 01486366fe6d0482971666c98fc70766+30792695+E49c08c45d856d386f968485e4505e36fc823ec2e@5441920c bed62e9e6bb42eb6006f7065e6990e18+17604912+E9cb886387c324b05c6be038882bd29434cc49e7c@5441920c e2859d677d7c237974c872784e13e6d3+5164960+Ec46db70c565633fc267dd6d133be6bb5891b6c4c@5441920c e757577865ddb690336d4cecc386c3e6+17296739+E7d1dc238f71762ccf46766627eb215be08b3d5b3@5441920c e2d2f7dd057617592cf9e4317535b11e+3301773+Ecd6413cb8c4e5b795dcc5680693d623b91744107@5441920c 0622825df321b6b36886ce672217feb9+3676756+E46d666c70e222477b3337606dc209e5f6cde7625@5441920c 23566113cc3c2891f84df717b6b5ceb5+13263209+E6f1d139cd24c47f6b5bdbe5636d49d2140745175@5441920c dee0ecf366b0e469126252646cd78667+5712724+E67405ebe84168df10534466699ff60c899055389@5441920c ebe97bb12f1656d3176bb8ce05dcf62d+10516666+E79769813ddcc30681b29180676df6666c06b5164@5441920c e16b6468eb876f7582d666b4538796d4+10144603+E6681f4fef94f71787c6bdf60f73ffc31dcecc444@5441920c 908963806d665f6692de6131cb688e3d+15620599+E19d63801835710d6fe726dfd3002226d59d1e6c2@5441920c e55d7c07c1351d8d52db166be6b8c09b+26940326+Ef7468138dee02cf621b8869c9b9e50476fec05bd@5441920c d4c83966648b99e8dcf6c6494d8d763c+6160746+E24c413dd9f4c938f3234023714c5dde6dde24d2e@5441920c 530f167ef4dc42e18714b4d6fc79bed3+11144267+Ebb553f3bd952cf396b654261465b55bbcf814826@5441920c 16c2c511d3066e6032f465bee26cb26c+1431977+Eb914580870bb2b6bd01dfdb14bd83331470484f2@5441920c 
0b71c2164058821f852fc4456876c7b6+2244756+E6c07eef683e14f34fe3f7f066e33c3333304e6d3@5441920c e65eb96f4c91605b01158d56374663dc+9266561+E0b5bc104933b16464fb9d3f15555b6eb321ff820@5441920c bf69063c2fc34526666771506f68bf5d+41245659+Eed1f87b918f56236ecff73bde704699ef23d9fe2@5441920c 6056068b9e9989c1c3260c3501865930+15344510+E04e7df1e225c11c83512b4029fbd2c018b256c45@5441920c 3e5b8e59d577b16b6e84786242521806+24932791+E74c4d89582d84340cdf5465fec29706076667669@5441920c 634d6b1d6146338e38344547047643bd+22442446+Ed776175e050fd858036380649d6482d49287d096@5441920c fb5b4283359e7e5e366c3606cd8894b6+7752724+E680f4054419d6fbe710970d65d33bcc466613cec@5441920c 9b2911bb7fd67f6cd4f64337664d831f+26224360+E20cf4b6c243f160fd6f86253cc6377cdf46873d6@5441920c 63f6692b0fcbe33870031f8687547dbb+17304639+E92ec56f25f729945fb30562bef77f6684645658b@5441920c d96d6835e084f2c1eff67c52f566f6cf+16113075+E454fc1c125573183c69bd5e5cbe26f4bd4412670@5441920c 1d2eb0963b1fdfc11f6ff534162728d6+22233411+E60b2eb26e8d067f3d7612bd3cd6fffc46de1fdd9@5441920c 694b1b84ebddbe61749d6c7744e2e2f3+5524922+Ed932398d61660693e39554b50b2212f8d4960971@5441920c 0b42c92d97c0877b04d33666f22509c5+9664262+E9ce27760d3e7e05b965366f712b5e5f349638f54@5441920c 2cfe498b5b41ff5586b3c18fbf175d68+9160746+Ed2db55d98c2efbef816f30972eefb7f366705618@5441920c 6607b727ee38d0151e22927e8432e2f3+7956752+E20e43f0628df779e08c742dc2651861e4644b161@5441920c d782d2966fe60ceb61760be1891ee909+5100756+E35634fe29d03c35d26f2dc6c03f60272f0674160@5441920c 606030c626d5e94c4062618c3f652b38+9937902+E5662749f1f2e19249023941e760f73fd6df66334@5441920c d62d663092ce6f4d50361f36c0232049+19546232+E6316b6c1b16bc310863d18e7e387e35e4e001d27@5441920c 748d7e6865bd463de20915f53be86056+6663394+E7609edc173c34c9e36112f163563762933d1d284@5441920c cd0b2e572966bee981f066b967c25558+11752445+E224d2e284600166f66b0dd65562f01e7f6bb495d@5441920c ce1806850f36d94b326506b6d9763515+19256022+Eb4b964d8cf18b298376b5b42e1745c925fb6b568@5441920c 
3496386d8279d2519c237764d914f862+12954653+E94e21b2f6c32195f270ec96567f6135e4c9d9f7f@5441920c 2eb661f4b584753660883614c14650e2+23233415+E22c6fb1b5e3d821378772485590ccf287b46153c@5441920c b9465b26065de0b6fb1fe66660060fc3+10667296+E95b5997d369dc93fc0bb8646217870cd50110f4b@5441920c 70485d53084d944674663bbe07336639+16966664+E5438d9ef89f2512b426cf230e9ed03461e490566@5441920c d29c2195fe6226f7cbc596264b1ecb9f+31566677+E9107c836bd1436d9d8f06e0fe58f74c36619eec8@5441920c 170d605b14e135f717cded781b3659c4+26966370+E2e596e0187c64ccdcdbb7ee379d59139eb84fe7b@5441920c 0836cfb1122130ecf01d5d2bf06def42+10993650+E4076b5402f7e21f1639dd22370286d76b7c67565@5441920c 851e496848b92598c551313836610426+33045521+Edb0688f6969e275c687f66bfc3be0318e674b13f@5441920c 6e4834f41842034f423626cbc8c2684f+14150927+Ec802066b016ec6ce60661b2b46c10e1663b0915d@5441920c 90e67d4209cbf05e9df318e63f645b54+23576635+E3b614e8f064e641468dc25c8340edf12d10cfe33@5441920c d933cbcedefd194be06338cd661d666b+14665552+Ebeb1c374bd74c56625c164d2e9660134d3623069@5441920c 76922bc107807f84b39fe3c763def0d0+10410131+E62f638476bffb582fd56696696ffddc344d55417@5441920c 7f523f4e5b7fe74b758f084c68f4cc3d+14156634+Eeeebe9b69ffb2424771669467f0c53ee18323294@5441920c bff64b2d1466d691b7967513cbd13dfb+7576172+E95bb1660e199d76d1fcc3bf756844d334bcb5ffb@5441920c 5346366f8e228259192b1fd25fb03174+44109465+E0cc4fc81f3e00e2626ecd5990388e38de4758611@5441920c 23c9d6b46c17b44f615072641d7f1ce4+31254935+E3b51142315f65f48b8e9cf29299712b55469bd9d@5441920c 18c9077006641766e2b0fe36b698011b+5169067+E083cf08445fb6c789417280f85938d6dff9ce4c3@5441920c db7d18f27edc3c6ddfee633731c2be53+10366921+E08619bee42652e512c63090e464049ec58f5502f@5441920c 6407825d48318f5626e310333b4b968e+29052271+Ec14b855cc4525b3d31542216d7b03c74d301068f@5441920c b96e133b4557374be68806e19132e43f+17612627+E775c6d28d896678b02f419e1215405db04db1dbe@5441920c 61f26b19764e01221b6f589b46f2589f+7641759+E4ebf4463513e7ec8326e77eef8b443b7deb6fd30@5441920c 
580506ccc464195d925e3bd2d37c2b89+13411716+E534f2d36ec36702153d2ffebc88bde6d16681314@5441920c 0ff594047bfc075755fb6f6d368d4446+17757245+E686ff1689039794f81c901ddceb5bf96b002f471@5441920c ce16005db1fdd8fb664184e4897ed848+26954567+Eb1925567366e0b994d07507992215736ef795c6d@5441920c 668b8606e229bf34b265b5889cc2555e+23246223+Ef8b4096de374b658d17c366314fb2b78d604055d@5441920c 606b63de08276f2fe96e97839f80c725+19074161+E8864426fc98765090568d788fb706bf66eb2025d@5441920c 6f8436bd2f31836f58cec1bb0ee05636+39449695+Ee8933dbb69c24d6d37c456817370dc907b5c95ce@5441920c ec05d19ce5eb8f336eb13e2557d63124+11696577+E964d26d1560036f27bf8776572362c03e4e9f7f6@5441920c 44385cb347d456b6c56885e8de160e6f+13249663+E46f5be980854e6bfc58c55668db28c4090cc627e@5441920c 706fc99f3855ffb94b6d81c5c62b069d+6706592+E1053643769f364b1dc5f766789b747f0bd383d4f@5441920c 6652574701024f5e2f18edc6fd036681+29162964+Ee1b80f61e7971c1f4b3e55260337cd33266c0f00@5441920c 14763654521f8d4f6bc427d3b52e1121+10264945+E69bf6eb86179653066b92c30d288d34e698fc996@5441920c 8d6f406ec665e80c3986fb2d0dc39ce9+24601643+Ec7326d895814e06dcd041c099e25f26cd4e3c214@5441920c cb32b5ed637e57de216603c266184249+5951761+E16690fdf9283ebfd35e13f67307432b23448fe63@5441920c 565e8136fe7ed38f038b4236b645375e+23795506+Ed68ece3056f828c9e5d7fde3f240c404d1c472f3@5441920c 710dd03c65252f2187e76e7666d7d120+3150007+E19fc4021fdd7040bef16d9760fcb94c312665319@5441920c 9bfdf090c2776f258c39e549391c1612+23077469+E7637186506886b0e54b1e3e32cc578ec49c6f3b0@5441920c 7792c138d461748e9e5627e9b0c76c09+25966072+E82c28076635440613cd55d6993fecb03580f4cb6@5441920c e8f20b6349d80e1107ebee500169b8c4+20640325+Eefd81f4895e4ed54edb560bc5586d8df43456678@5441920c 4b61f2ce70111127b33f6249665c3d47+31996632+E388eb8561b2724c225c873e14421df59ecf09fc6@5441920c 661fbce63c16de3520d44e033134c6bb+43632512+E6ebc3f333f55fd0f69c7c5bee2687d1228933e8c@5441920c 5e6df4d8cbb16dd620f373369bc8c9d8+13731959+E16052995d0bc657b3759b020207ff0e3e41369b2@5441920c 
24f521b431f2d770f1338700bc6c6917+12656172+E6e287eb610cc483fec0218d27632b69c546d01d0@5441920c ce0c0138e32619861616966d61be5915+34247127+Eebe1b981633c8728f6c69815fd3b88678266ee96@5441920c d870092429833d18585b6f4ec01dc640+13266016+E2d1df1258701d32d0d01f7613b88cb196f410262@5441920c d0728b8d923894b266361b90f06c047d+13161256+E8f4ec81d6944833c3dbc3cb1de240cdcd5f32ddc@5441920c 4c434f76f60d949088278498e5512652+29663052+E7bde3f3f63c9ddd47d466442e2c7116bdd26c9ce@5441920c 953b273715e8c9c1e89155300fc76183+30634366+Ecf3b53ed7d6bf5fb62e0861446615623596fe359@5441920c 432cd2317e2e713880e73660431c3648+6075493+E4b8e179f8c779951f5697e454dedc3b2d5dc0498@5441920c 3251f5bb90f3e37db864b31661297db2+21661204+E69bb7d26df5775651fc098e530697067ef65e343@5441920c 1c6168d1547d01601c45dc5485d6c8d0+33606107+Edf5663b3b04e139dd68bbe6960d6761d14e7d96f@5441920c 0482dc465426bd763b346e8474e3306e+32791910+E716d0c2d56e2b3ebe74922786f5526cd85650c05@5441920c cff6ce7521e98176cd89b1996cc6b2b4+19669112+E33760661c08ff206847f837f6629f2246eb24f90@5441920c 66b606bf24b864bc3e165cdec26cb2b6+4741605+E4175e96fec0c423932d88eb21ec0c63f077e9683@5441920c 63c8d322dc1998841b3b5461070969b5+25904705+Ebce6d95c6e12c00ed08efe4de69856ff194fbeb7@5441920c 4c672b0f4d22e5b6eb0fce791981b5c4+23619321+Ef803e6ce60d662931e60c1e7eb0e0307cf719639@5441920c 212c8b6286520d28024171b6316919f0+25423194+E15c722ef20ee1217c4d25e7be6382e10887c474c@5441920c e9477cfd645d0e20e85ddfbc827b65e2+9119290+Ebf9394269e1967cb3206c86d19f7806bbe48786c@5441920c 9cff6d241881d3fce55ed434be59b717+30796914+E7e404f9edddd288648e0b656bcc8c651e42eff34@5441920c 80666518360e529e9d92c201ff716b4b+19924674+Eb76685753d3cf6e3f358c407e48f038d7c351613@5441920c bef263464b2c50b53b2f6869099c0461+11135309+E6951358e80152e40712796312f6643ee63017b3e@5441920c fe8d5b36b6408eb475c4e1e446649058+10940177+Eb66e396c194c25e7e9fe16f92e25521db80e4036@5441920c 68d36b133c7dc81f5934d70c0617575c+23560116+E2cf474592b605bf861651e09cff2f62e166b351b@5441920c 
6332b32b6617659b3840e701167c8222+14661122+E5627d019f688cddeb6b8960629d88973232dddff@5441920c 4e4e6b6b40d792e7c70362b47c968b2e+22609615+E4db8cb76e66db83f0806649bc2101046d4e6b254@5441920c 94668c2206155f7949de7c58f61404b7+15046616+E0f9495622572b5ff0cfc988436bbdc76b6bec4f9@5441920c d69777eeec1b5cb599d6614629b5142e+22166262+E27b789796f685c194502ce65578e5b7149f65b1c@5441920c 95edb266c0f8249c9fd662bb2b15454b+2056060+E8e86762f71e0637c7ff02fe61ce0e4e75f07286c@5441920c 6262cd49c44b278f3dc3c26626811601+521252+E23d8b5757618266f67ce07c658f9b85832c2d162@5441920c 00d93614557937f44de6f05f32c52790+32234144+E0f9f4506067b8b03b2c341f52315fef5bdc7848f@5441920c 3e815000b466f885958480d629962711+6441932+E9736c4dcd0db18be05035b497394832e06c46263@5441920c bde54d38f7b66830c212f9fd8215dcbb+10946699+E1be619426673d73606614825b965d48c72335766@5441920c 6240c463730c510e1d4c78899ce6d1fb+21772696+E480d296eff2c33873c7636eb723e8731c6959ce2@5441920c 2643b11d023f4f0f7614363664ed94e7+27069700+E0634917365618b24d91c37c3297140430ccb2556@5441920c b8cc10de6847ec2063bdb661f54906ed+6545313+E4490384364cc607681f6977b50863dedc73607c6@5441920c e20c9288b641899dbf4cebfb56ccd9c8+31767795+E44cc68804efc112c269c26c4369c67ee6c3193c9@5441920c 3b20e469d187234df0465b86666599df+23275612+Ee611c6930c20756cb200b2cde45057c242457cb1@5441920c 255f4954bcdfcbd1d4e0defd92985d29+29739564+E63b62f61cbe87116d56397b35bcdc3b91114db38@5441920c 5e64c3dc3fbb136403f47c18b06d9cfd+20035093+Eddc50bf137ee36fbd1f1fe0546533e97536926d5@5441920c 600fc94c672bde2472e2447effc449ee+20162106+E47b5116de964dff76b87bdefd81e56666f020f98@5441920c 057e9f3925259bf349d351cd75e01146+3767564+Ec46b4b19673ec8e14b697f77fb76c9822e51b100@5441920c 6d3c76220e433618c237ef6dbee0b20e+13561503+Eb1d89b38199393599fc613ed23e359eecf5880e6@5441920c d227c3f6355fedec0507675b7103e86d+7002557+E461eb64220cc189b14f845c1c696e7020c63e1fc@5441920c dcd0ed86431d171f935c2f86d6102166+23576165+E381238e16b036dc6df7614b6ed2f87c4324c8fd7@5441920c 
2666135948e62048f269cf807ee5039d+6615671+E724406f1702c8b9d4b54865b7b197de08e68d057@5441920c 0bf0328bf9c3e9ccc6c386febe043cb7+24662143+E11355cc83b85db6d6d743f608c9db4e3777378e7@5441920c 186647736b93302dd61610f424c8b366+4697534+E40386c60866141c75cc84c3ec39772920b7e0196@5441920c e4ce7d1409ee024c1646e2bd9116d96f+9636940+E88d6ce1c9b866c666e86336e9d406b678efe9802@5441920c 463bb99ff72e344514e00683836dd4c4+16496116+E16301e6dc3b6d434214ebe82fed7333d2d201661@5441920c 3c8e07176c59686e683938069424ff92+9016631+Ef746d69e963c8380dc3b93f3cece96f202893f9f@5441920c e35874b80f896ef662c8ef10509d8612+17929166+E60132dcbf4fc5851d3c387420d13255457dfe9cb@5441920c 0f6973694123d553842f312ee1e7f9e0+11594711+E6ec26bffb5c736d335bd77d57d8c8e2f1be866dd@5441920c 9000642526f472c77960bdf873fd01c1+20306666+E8d23013d5065109c13fffdf723bd66fbb608b949@5441920c 7b237e32669f8636b96ddc6bd4bf63e7+9030401+E3c2be036b14dc4d7ebbb5e86e08eb8075cccb8e5@5441920c 36b36df49e07763417f98881786d8559+14696627+E339168621266131471b452731dc9f62f73bf056d@5441920c dc5694ddff5394218744de5398156668+10006611+Efef3e681202d6b2c713e659ecdd26df28e73867b@5441920c 06f25d6f4944c65bff12b41f65b9ef6e+15979710+Ef66d40d2473b250e733bce068b416ce526cb3f53@5441920c 3e333726fb6849b943b4484c1861599b+10116166+E3e7426053bff164299d4d397165b0fd6220b8dec@5441920c 12de047e2ccc029d5e058db8fc27468c+17606797+Ee906d267962f6f0836c36e376deec2676189060b@5441920c 14beb6bccd174c0d46c298320e76bdcb+23227207+E234249e7c4f2bc3e62fd6f6b0443f0cc543f1147@5441920c d18bb654f006323febde3039f09553bf+10556009+E8817cf8fe57546d649f5d4505d43f7e3f179e3fc@5441920c 83f045513b4f3f4021f65e845bb6ddb5+15162933+Ec1dd1156d1363c11f887cfc8ed001bf86835b2c5@5441920c 648336f5f4936526b645d64725621826+21307590+E6de931671f89b44940b760352e2d0ee530442267@5441920c 08f67de336c072c6d662b6cf2c583861+32759712+E8d66d86d62c0cb53f7b7b45d2f5b6e363622e2f1@5441920c c80193521c5914f978569b404b5d641c+16676434+Ee05b6346211c4f606521ce55234127eb763d5bf9@5441920c 
436322cbbf425f6618b10c6d369ede1f+15100163+E60064882f33e83e1c12473c20957cf176fb53e73@5441920c 0bc877878b00f4dc96e0b93be374d5c7+10434017+E1e5016dfe71330e966d163b325c738b126533ff6@5441920c e6b152369e78d29bf8251e0995bcbc17+7765476+E2e56fe08f21e08d1693903bd29722e3f679bb7e2@5441920c 4e6f2124dd087175cf26b53d70f2f816+10542610+E481819516d33f267c0811e007365904f907644e1@5441920c 61664890644031d2e269f642f2669466+6767004+E3ccc64d7245c916b473b8065418527f5def1209d@5441920c 358cf83e673e0b5c9b9ef4316367b369+37253134+Ed1329475fd3b9680fef7648dc3cd0b086b4fdbf7@5441920c 189308e56578d296d153b6d7cfc8ffd6+17163652+Ef2342c38299bd6c8033cd9066d94f9f0966216cc@5441920c 480cc0e63c7d3c15621bd664fd67b509+25746921+E0c75c64e567f7cc675633894b80d2c1f55534c9f@5441920c 6967c4f89e8b866d18e8015d7786fe54+17296262+E6ccb6666326b056be3b82ce978b61656c55e81b8@5441920c cb619e0608d05d333ee66dd60133c26b+19044617+E582576631693e5f9f6463c79285c6399f10dfebd@5441920c 5dbcff65061e1859129467d669376e57+9216326+E66b33696790ee67567724fd77bb6f081ceb0b1b6@5441920c b5cd4c03fd56ce74f47534305f41966d+7709647+E369d45c56d9120b7f54c926cd465b6508c061168@5441920c 021c56603b593fe7b0b9341d3e69dbd5+9992471+E5904c358669ebf85d9672d96b1f05562be4cc1fd@5441920c 25e6d8e21638046d71fdd9236b5c3bbc+16105743+E0b235dd4d6ecf49ee503194de09e83995bbb8b37@5441920c 1035df4f548660343340651661d54861+23723049+E860501658363d21944c46d2861dcf27375cdbe2e@5441920c 66c958453bc3c0536053228807554242+26740659+E6bf768f4c64b76980e71f3986f653dc17dff3fb4@5441920c ee27ffe36861e4d610769c1ef36e81bf+39101465+E07f66cf945d7636bbcf48500ed84f6fede43066c@5441920c 366f8f5b1b4ecd36c3d4bc6e28454223+13179037+E0ed961bc4edefbdd47c9659b746fc485361b3866@5441920c 4545c0483335c548e5454620e8087531+23659026+E16019b3165d481b46fedf5506606dce182507e93@5441920c 27df881c9c906dc3fd04c2ff68d7f69b+6320674+E6e2799fed96fb6f5f8ebf32e18680b691d9528df@5441920c 28b9b320074163fc02b034e862246754+22624620+Ec26668860c2e9b956b8bdb06b69536b65f34d974@5441920c 
5968de9783406e3cf1585824f3068095+19209706+Edc1ec66fc64dce19c967735840c19791e0c7b9d1@5441920c dd6b0299fe83e269b456540618bd4837+6364513+Ee18b3f6bfdc9e1ff8f927e61e1d3c9990bf259e4@5441920c 56cfe9b7296366840fbf3c9d0cb2bdb9+4766253+E5296479852716963f5749f7866dd919322e284c8@5441920c 368b66c5265db5c5613316566f7f9652+35016116+E8d446e66889974311f73c0b7cd682ed4dc4163be@5441920c cddb3f3bbf9d3c545fd68d76983b45bf+36549974+E6f26c14bf26fc4c57903f698475076cbdcdf1f07@5441920c 7cef19426bed80ff83f9d900c8178667+20373460+E5c6cb47d34cc6495fd887903d6d6666d9505d761@5441920c 8f32865c19f724438e2d9b648c6640ee+29919661+E16c07cb08b67feb34fb9c76b36206644f898976e@5441920c 09043bdd449b7946c4fec913e4217364+13493460+Eeefc5c2c41ceb676feb866380ef68062579196c6@5441920c b5d1be0d12754de15166479f927dd02f+16466490+E28ec14599eb0db52480483f68be739f4ec638686@5441920c f483fb8e54f6e1763799e9df42f08950+6660416+E962653dc7f63c1f1fbb6856633f1c2b857de4cf1@5441920c d4dbcb0d851764f4f94e4d62996d7261+7796021+E91f5483255146fb4f1eb66c2b797b6e924b8b108@5441920c 6691b09543c044060441936ee10221f5+14575657+E09fb9c6678805f0e7b29e290177f1d2f3916f0f8@5441920c b19b9c3869e39becd78c8053ff63c6fe+5634479+Ee35b3b397624685016de4586c9d96f57fec9fb4e@5441920c 94208330d58de63b7b603355845e2e9f+29716269+E0d811ef3d936670d06e74b9fe6fec5c86ed5004c@5441920c f024f736cff9618312339bd9847f08f0+7363995+E4b972de47eeff1038c3be68b28c652c6750cf1c1@5441920c 8b66166d236682687322174e707f5bf1+19715177+Effe9b54cf6b0b4156cc78334b71ed29cefe2fbce@5441920c 687925fb3f001f6eb17e262f7f3bf6c9+11922350+E6e45db64c158b168d9866928667ed8c5e400dc37@5441920c 811bc86929c6cbe690e68e712f81df76+34696356+E1d9dbd19b5b1f6d16697890d4136646e0b250567@5441920c f88343667b26669cebdb91160bde17e6+33645974+Ef0e4ccb520cdd1fd51f4008b596e370b8920fc63@5441920c b9e2c0b204645f0b5ee13776052de068+35567370+E78b5f0cc1d71b91ee13763613c715b5c0d946874@5441920c 516603449b0e68dbe1f10916626c66cf+15611642+E35b47868610850f2b866fbc566936872708ec8d0@5441920c 
5ee193db01448f87063d7b854d07986c+27146461+E8c3b7df08f26c457d654c4d90c956b75d3856660@5441920c 928de1604d0f709c62e23fb2f6c1d3f6+26736354+Ec954bcb4e6951f3fd82e89f4d675d0d6655b5ff5@5441920c 17c99db9e4d850c53408ff6593bf4e6d+12053649+E6418566222886b6e5003c6804f92327c66059e3e@5441920c b28d67d5c60e0165d639695ccce06c60+45621670+E39f873b0cc620266d04f5b32482c68e3ff3fc15c@5441920c f7d21c881b4ecdc6105befd96983d442+10457142+Eb98210f27687e94d9f8921502c56bfd5b0606e8f@5441920c 0bf6beeb6097903930dbfb6f397363f7+27163032+Ec0d77f7bc0182f422df918f597e01f9e6b7715be@5441920c e7170d8075f74f96bb230515214907c2+6901657+E9e0b89feb40e2267f5d94df2d1993ec640268b53@5441920c 0e59c8753f30cc7cc9fd19f8e11dc5f6+13650247+E0b663b68366d8df921269fb04bb7f72770352066@5441920c 29f864d900551cf85dc33c850f49061f+23602906+Ed6f0120d02dd26216c5510c1e46bd109bebf6681@5441920c c8f4528bd47bddb5b26e4006d9cc89f0+33300672+Ef1b055439022dcb8e5b60721226028cf60b9660e@5441920c b84644525493d6b827f166d0edb616de+14622270+E56972e6be0dfd68fc0362332ce43cdc55f9c30be@5441920c 04f6c936ef65edd854c2105b246c7d0b+20760162+E8410cc9b133b6082efe33c6f42996d304708984d@5441920c ce1c666fc933026cfcf39c0221987462+29757577+E8b297663f7f1b63191103790dd7060374535f380@5441920c d6369df6628f2d7c48ecc5726e544004+9439391+E4d6b43d61290d3383d50668b68ed1b1d3b86cb9e@5441920c 3092d05f3b8f55ec765c8c95b6b40622+23690991+E269654b67d14c41bdd9920303500003f0e930cd8@5441920c 50b159ef9c1213116d947c92285d4983+6504376+E8e941656effc51485f2f6419e6f76d6b0619cd65@5441920c c8543b693d01c8fe6cb3728ddcdbdb22+31429424+E96e17b3cf08bb6494f841556d6037b6df5cb4842@5441920c 4846c99e6bb5b179de4fd46edf46e31b+20667266+E89858f30656fe456b6b4c2271fb1f5fd98b4e9dd@5441920c df48275087655f67867539949f52cc01+21542259+E6d2796f768f4621eb6b7b74c3322d1bd2b3d981c@5441920c e07398c19366fc4f876b23bf79049f1f+13635330+E24336044e23d35569f51f466c47b1c0e3090666d@5441920c d27072861c18d3d61663bb359e61d1e4+16645443+E57c58c03f56666fded7e95596e6017f6458f5e8e@5441920c 
868b525fc41c185415fee9ede35c9b7f+33763672+Ed656e64e7f08cd2363648f29446f84f0013f3662@5441920c 634d4d1116c85199b4c8837667126628+43935944+E51dc654d2602ee26618e60c8842112225ec2bf48@5441920c 6e8cec6b84b340b746f63f7368339430+26173344+E7b16866c76fd11f50f6768172453cbe3c83385b6@5441920c 1c7746f9733e0ece7923ed3537dd2966+17960379+E2c7f5850549c1662d20c09e330fb173c243f4f47@5441920c 331557d6b124e16eb4de307655c40882+23669100+Ed8c6c496bef0c4fd0866922cf4d7762b2b9390e6@5441920c e200e83c9304eec0022b7521c3d8f256+21475297+Ee99dd49f41fec9566f6fb75301236673737db243@5441920c 9c76d58e16d65c05325d12318189b06d+16293653+E862e595b7100806e3036dd94df563646226bd766@5441920c 515e08e4b23d320644267cb4946d5e3e+1514377+E67d27dec368dd978e2fc48ee90f59711e5dbc2e0@5441920c 1d943f36d01f35f0f1bf9663b506e924+6509364+E59408bb9f6fd9f3c537000dd213e628f656ef976@5441920c 6d234083cee3e2efe81455d863cc5dc4+43017690+E6b1e7c6b5c44860e1fecbf135b7e9f662801cce6@5441920c d1922cc8dc266b6f6eb0c95cd8b2f417+20665117+E962b126b99c4c8cc216450937bc8c1dbd8e2d2dc@5441920c c3f2e5d04b3346545c2584cbcc9969f2+1591467+E6d7e5836b23ec1ff6336d62f6037e9d3cb92693d@5441920c 867de2fc66953e25bf15c61378ebb781+16146759+Ed18d2b753e63678546bce0bb1196b4ccc23207e6@5441920c d8c6f838e3d60b0f1f7dd7e9bc896cc2+6656200+E2db7f97705c6c32e14562c2776776bc80fc97d63@5441920c 0cdb1913f6fb98e1680d101dec9c07cc+20707621+E9178d6b652d68bc7f61dbbfc942673d523c7d86e@5441920c 413e0b5537cb3d5ce03f9e9cec4f62c6+9656450+E7f00c2344edbd7683c37d786c0c7cdb9168d1cec@5441920c f8083c6ec29669d7ee607223e3ed584d+16425621+E8083b2db35f09487c86c03c0165716144f68112d@5441920c d57b00fff01f31e839921b4109151f30+23196332+E663e94940799968e43e632cb56d75fee8b418677@5441920c c9d80bdb4b75c42ef1154bc13e11021e+7300691+E8681d5461b3d984ed09eed8fb41917b9e7bcbe17@5441920c e4d78db5894943cd403b6b3147c7321f+49692537+E12239bf4e933dfb24292001dcdb3b074969ded00@5441920c 4902623e0f182b4f31fbbf6c1b917ec6+30721960+Ec98bb316ded2bdc765967b66218227f45e4ecdcf@5441920c 
5f0238db354266526666793b0b228312+23666340+E0b679d71662d6387682430b11bbbc47737356e93@5441920c 8c0db777b2b8b95785766bc1b47733b6+9229611+E5869e889bf157f2612f20b6d765bdee03476e9c0@5441920c b53c6b59d74ec6dd58b56152519274e5+27421753+E42b4c33532ff2638983b21548b50f8d77b40cef9@5441920c 65bf786bf68c762e3fe62c2357896c7d+9699436+E8f2b22716ef79f09748948eec2c610118f7576eb@5441920c 894c39bec02f51f622e4b1bf202be8cd+6406659+E6b56fe4f277d784ce1d3d3c279763690f19d576b@5441920c 32dbd624968d15ccf65b3d26bcf3e0bb+16694996+E38435585ce3658c50de109653661fd661968fd2e@5441920c 5218162dc863d88d27c8088b7fe0db3e+11026527+E70b1e8d389f0b1b1e635bd5f0219635976f53586@5441920c 2664546541d516425ef812bd00e4e549+32909679+E56d0d0f602d8d2e240dc6ccc4d69d9353030ce9b@5441920c 24400617c269c4ddc9bef64256865245+30963436+E7eb3c301800f63d66ed0755b1858ebf488464166@5441920c 80e8650bf6f2101d6526e85cbf1669c1+17266919+Ec9e10be5668d905fcc4fed6e5856281c4e2d64b5@5441920c 659e40465fc1d4d93b9596d6902258b6+26996009+Ee6ebb57415fd1b8b668e276f34f9b5b891d3f526@5441920c 940b65f21799e622371662b8c543f280+16704607+E2eb498f04302367895cb3ec665eb7941bc62dd82@5441920c e5bdddf3051f3e66608008750c46f2d7+26045175+E6661b5d76f3253e1044d6b266174b6d27fd7b65b@5441920c c4810116de72ffbf10295ed9c07e7685+27916575+E6ef1302d90884fbf836712f1fd74d61f612f536e@5441920c 64fc98e6841b185fc0d82fb136b663f1+15050054+Ef795fd6e80365f9ff17767c6231327463433e9bd@5441920c 6456c7ef22d529c812b5622668f1f84d+15603577+E5577c12c3563684bb5600b4e9be014dec6b06c33@5441920c 687405d9d700bc374b30029cf8d4be59+27716393+Eb0b3748363bb867ddd6dc8c3c8c08105741864d4@5441920c d284d714348344645242506366129f16+22019757+Eee039cdcd2e2630126ed862ff4e697bb1b93637d@5441920c 69b48566c2663ef36086d9db2f990136+45797643+E1c80842642f746545dd1405229f35c3b3dc6b19d@5441920c e32e44b17e16fc2e2113466ff867e26c+22514360+E56d768865cbdc8b4c3c56965ed282e1fee305906@5441920c c8b2896f824744f6569b88fefdd216ce+19253951+E322903880e688b62d3bc146765c5c1750e43f45f@5441920c 
eb2576903cced0150e92eb028603f228+21229495+Ec266378d59606199c6e5294f1d400b196904859f@5441920c c9d128d476e5c463452d08d4ce0efe6f+17559372+E465f5926d3711b9b1dc8266666fb7ced402c9c78@5441920c 565cd1f686914644b63dfeb72e9d041f+25526673+E49e77665901ddf4f98fb5d61de73edd66b43fdcb@5441920c f293c29e91b111e9330209f3d94dec55+7096070+E3ef2322ce8517189616069206c266c66c16ce39c@5441920c c243edbd633d9795c9008457e7f64c24+23411651+E0fd1066e77be25015675fbf8e338364bd404d16e@5441920c 66959d9139f6de12ec00f9dd486fb30c+26119054+E04bd73dc60b645f68239df27e8707d342cc5be4f@5441920c 6b633b3567fcf12293e447f2f535f68e+23290349+E8e76c6686bbc756c2b966ed43e9fe1dd4f9bbbc8@5441920c 8c6b77dd767ff6e4e4bcb5645db9c267+11654057+Ec5e46e815801c11f9d0b13200539d5b8c05c6b90@5441920c 3377c8e76b7eb9f04d30063e468fe4e1+6496414+Eecd1131c353c78259036e2c36205d71e695ef6b5@5441920c 4026556790fdb1739541e13e97c58e9d+7220726+E01078c064ef4477876ee0d730ccb97c695f72d9b@5441920c 265fc5b76cbd9cbb3fb0ce49dd4ee2b6+15666346+E831444667901b15497b4b1850fb5df76f5098681@5441920c d6f158bdeec1c3cbe0df75466d5e0691+20565771+E66517714fb6121c25260c6d766080d107136b199@5441920c c99404c36f55be9285d6ce7f6c398728+29696076+E46bde61dd962c7659b6bf58c5f24f3c4b0295fc4@5441920c 3162d76defe7c44544707f52d67b4770+29661960+E88d05cc566b526ef6cb76626bd386ed468eccddf@5441920c ebcf6967d9e4c232e2876786f86e3cd8+5667660+E21dd3f48b8116e7824b2fd342e7d1300663ee33f@5441920c 7507d4647d3bf526be5e64f86fc24740+21602934+E6729c2617d186d5d1f828bf0126c67ce3c67534e@5441920c 64745b39622be74bc8c6bf8d566f7f64+6125690+E9e94137358c821d701932363b415c35511b41009@5441920c 8ed366614cf657d3d654f181b698d28d+345265+Ef9f24b9b4b39e13c0859033f00ded590e89e9eb5@5441920c 10b4f10368dd17556465f21dc66c9d62+10003929+E4b200cf2ee068279d431940f687d300e4741c76d@5441920c 748847d8c44cc3c6ec068161d13d8269+6997133+E117d6fb869e6138b8c9cef842f2dc2f60b9b8cb3@5441920c 332d07334b64462499c6fd664e3ce8e4+550060+Ef1062eb63d03656f3368fb088c6e152667662c00@5441920c 
386e6796b1756f14b906d6496667666b+35514556+E398d66e1be91928eb0f6725e40096f34c1566bc8@5441920c f699169634840b6c5c22032f04662885+30770003+E23066b0c9607b5c0c848716251bccfff57f57644@5441920c e3328568c69cb7833368c53660bf7778+41661599+E934b4b27473595d61ee61381b912dd1b15f69b3e@5441920c b68239278c162b35f59dd11b26c7bedb+5032660+E13f6f6039e1954d72bcfe87cf714144b71f3c9ec@5441920c 63b57755c3cd8069e5e2626dee6c93b7+15730167+Ee3c7d8676fc6461df3dd9b78c30931b29c569485@5441920c 18dc31ccb81062638626639d1c7bdf60+26696961+E61e3bf0e745b644443647e287252e84061f20838@5441920c 37ff6375ed894106e4365c5c6416d067+33670066+E6d626346726f50f7f186c6602f6cb1166dd7506f@5441920c 11365f54371b62654524fc65e34ee36f+5763371+E315283978f461c40e5641556f19e630c2256046b@5441920c d830c420616b1e0ceecd1361e07575fc+15201350+E8260833cce68c73846008c810d7e910821f6fffd@5441920c 93f16b759b1cb023344dcf15cc7b199c+26316506+E9fe3d33fc9c66091ddfd6eedf752442bb988254f@5441920c c23706e6fed66ded615945667300d388+47367411+E8b073cc5777d624909c6bd3e65b61ed303d5423e@5441920c e24b474d105f11b6573565fe54862860+19419515+E4cb466d568e663b386014080e11bdd9e22db32b6@5441920c 3632b61819853b163036e1f402638c44+1079105+E8db9e3652cc1780bebf800344e250feb52ff1f11@5441920c 22c064b8de458f72fb77e43f73cf3123+40594325+E672dbd4b2b496ef28b64ed6910948c68b607e491@5441920c 3c5710662d8d0e6f12b2071426d48644+5163249+Ec6517c5bd09f4b081dd95ebee4bd869f89b1441c@5441920c c3f52592b3f97416b23f689536e37693+17064012+Ecbcee67800458b6df98ff689187446966d821f39@5441920c 8456c6cfb316bf82c7934d3ced09b5b7+4703673+E916ece3d7117b6dce14e2e1621566cbd7766de3e@5441920c cbb9f866c562655729c4bf5f67666e46+20937649+E2d2396b6587f6f4ebf76295728070c835d55bfcb@5441920c 7eb39636607c3c8b726d67e928d0c950+19766577+E16676995d471f8d36806e8065063e9144e612d6c@5441920c 6873797eedef2fcd226686765b83cd84+16520799+E4de63c7082f1d2ccfc77457150423562d9346b62@5441920c 16c2450d3153b864de69e362c16ec6d4+20064956+E3b681fe29ee6c47e19ed1bc08947576d38c1b1c6@5441920c 
ce951e19024eb6ef606629527d03657f+14563555+Ebb1556509137131bec2c637946440d2e39f2dfeb@5441920c 968f09f24f240b0ee3de2615905d284c+17666235+Ec08966e756c198656d867620466617cd3d1021b7@5441920c 01264be9f7569fc6d446c6658c68c7ff+16200150+Efb257061168310d1334e51db7d064d13f053b7db@5441920c 3354d97c0c7368349cb167d463ffff3c+15699664+Ecdfe2e0b6c86dc12f8ed0c035056143dfdd16bc7@5441920c 685b0eb9860422e6c308197912f9cf89+26566964+Ee1c91b8b47f186dee67bc0bfc8581487f1734841@5441920c 10d6fde917d2f67974c342f2bdb99810+16666607+Ed9d736c3598219d786bccdc4480ccb6574fe65d4@5441920c 7436e9b3dc58d16c4f7fec9558e2e3f4+34299516+E84f366efbc7f687c37bd4006c80bf867606bbc2d@5441920c 0c7dc3f9be85bf7f02b6986369e15396+5167494+E3491f6f0157be9976e8f52f48f427068efef2041@5441920c 853860b5e3d7d68db870d64887eee036+9550459+E22296006714611dbf6ff2100006d14f7ee49274b@5441920c f4b3b1b8c22d36c1b2efdd626c3f7353+9425652+E502bd665962785f678f6e33e9b79ff5c09dbb892@5441920c 46dd6e718e7bc94962d460890d532d46+52257569+Ebe11c76489465f24695550366c829b11679d9f4c@5441920c b72f5e0756c4b5fd3ebdd71812f3ee56+6929925+E242c9f670807672f3b9cc681b47140529f436874@5441920c ed7f65db39984d581c595cd0e1e9d056+17556391+E8486351c5ec074e0b844c186d66fe701e44e3763@5441920c 63006056e077e0dc7716bf3425010ecc+14247713+E664b6f21cf6debc0095276164d3091f20b752597@5441920c d5116b69973d889d6f298f4738deb498+11066306+E3b4646b51c989f65567426664efde6e6c341f66c@5441920c 13e923c021e62ee066ef759d74e32d92+32067066+E3effb2cb94884161514b9cef413cc81b178be806@5441920c 65078f352fbbd13b8b56ebd0defb5cc1+10666222+E68f108063ebebded3649026b960f55f646c9b3f4@5441920c 3d4be63f60347bb56626be3969de967b+16626376+Ed8f748f6f073e373f71126f1c984815b26607dc3@5441920c f90b18dee1e00c275c2e238eb0393064+7956919+Ebd4f603319ed5e1f01c3e5875391688de2627899@5441920c b203671ec56f6d0bd72ff3f8415091e5+16713509+E5e929bb716e7dd51eef64530b23257d64dd06d64@5441920c 73b1ec2c6b3358b8190bf6c23e4569b0+16935900+E66185731649cf69f5d192b084b03dbef866ded63@5441920c 
18697cdc7111d7dcf188dc222dc236d8+261032+E4b9dd594df44bdf4eb8850bb2f7dd1154b2fc5c8@5441920c 466b81f8768877dbc09fced3669fe11b+4269160+Ed51deef9b87c6c468b8cee2f1c7354f15117df62@5441920c 56719bc3f4db387ee926e85f9c017bb2+22617045+E182150854d000ed3316429530534337731b1c888@5441920c b6560f6d5e974464461f5d996cc16160+6953071+E26ddf53265f8146ed70f620d46f56667fb6e6411@5441920c 61280c9751d006c822044302870516f6+26475163+Ebe9b8b46c367107632bdf064fb80566ec8175e10@5441920c cd36ebee7f20ebb43dbe61351c9e33d8+21260557+E84b512df1b769c965f796560616566d36ed612d9@5441920c eeb7dd91c17fe167d870676648891ee1+47650592+Ed857b3e69858537d67766568e6e43d8b6487108c@5441920c 62ed43cd619e5307f96ec7294634ff81+9264520+Ef6ebc37b8877018c46c43f31b322f46b8676096f@5441920c 637d2d6c28466b2fbcc3596e4e48e925+15247646+E69510297f56571313d71633767be496d6ee5bce7@5441920c 1955419f9b01bd0e663edc61ef23fb44+6560616+E8eb1266643ff694fb8b2b2062469736302211e84@5441920c ed3b97847e816871458df4097897f666+26610427+E91d463cbc8639e2982d83e5ef6d6676130112c29@5441920c cc560418ec87624942f357bd6e349f11+27671122+E6326b03674bd7db8bd951d008d8e8617c64b959b@5441920c 346d8222e5e662b9d52d535e6354b571+3665630+E971cd85296175789601c66de54420bc0b04e58dc@5441920c 974beb90b949dc76258e7d73b6505e86+14940321+Ed73ffbc16e630c20b681536c7c6446c6fb6253ef@5441920c 6f88628c91767558e376f10d6eefb559+12957633+Eee6b1166f4c4fd01b9b39d84f85fc8bc68c7fb52@5441920c 50016d6fe13d9dc340cc27b6c20d6040+36096753+E6027b3b3d25bcb41de50469c6d8f6576613e6cf7@5441920c 66c0d207c66e6fbc509f52b8bf20b664+14674016+E206b24966155d3ce169076f32c91ef17c3bf7c85@5441920c 467e1966e64f17f6c68be65561c1dd6d+19901201+E8f59b255569d94803750e9c98e29c335218bde60@5441920c 3669c6c63e8ec00fde480790eff80647+14314479+E857690ec233c9c4b6436ef04590e21ceb26e7606@5441920c f924d760468f7dd3d28940f57f77193c+17691663+Ef8d9fbb6446528ff1c84bfbd61bfb4e2e9074fcf@5441920c 437442184168c16035eee82204cdf366+10632652+E715f84e7ff6f6d9d6b456d8854ec6b78403205f9@5441920c 
4b2ebfccf47699f59192894db210d37e+5606647+Eb16336fe5036868c36db8cfbe0be660c4611676e@5441920c 9bd8ccfe078b86dc475656911667dc24+11677064+E1245c040830dc83b3e8ff5648296ff1e0bec36e7@5441920c 628384646666c65c0c67f9e671e67262+29615252+E3efcec0c36df19663d3c7240634740feb051d6cb@5441920c 04676d96e3b5dcf5e36f633c124b366c+24913006+E6461330125be80553808e043915b51c31567be17@5441920c 2e5cc96bf6d6c81674364bf74534d96d+35077014+E408515ceee93c3781b012517294560695b2d3ff8@5441920c 625d6dcc95bf40d68d42bc4f0d8c8135+23967236+Ef8f0869bdbfd658032d59d02692f6b4671d67b16@5441920c f7f176fd25d26d69057e4259f7164280+23454742+Eb05113506e03958136f1e77b659c805e481346b6@5441920c e0c006f2dfd4542e2b6b9d66c065975c+12476502+E6e617df6b90f7f1633b960cb710e5d639f507684@5441920c 65476f45e197d0cb38bcb0e5c3eb88c6+21624407+E75496e0662883f622ceb1166426172b11f066049@5441920c 46d6d4e5356fff3df952c006c60e605b+14556946+E2478fe7b5334c38c666519e26085f8f879f2e5ff@5441920c c06b49d30b4c58d6c407d0f01d8c9134+23503963+E6f100f01e53944593d14668b674e766eff14d626@5441920c 9ee892068c2c07664f13e519e2356f65+23959972+E5e5566d7092060b6e573d9b6058569967936f8f1@5441920c 8063f4732047fd6290456863c75355f8+13023330+Ef5e6fd0bd4d9e3e619769122400e6699ef42ffb6@5441920c 6133e2fd32713cd037fee9ee60193360+4304022+Eb9181ee3eb8d773d3929b1e58f605ef46158b668@5441920c 06cb6265f8ec862ee6c405b9c5185ff5+32133100+E2d828480e29d35725418721e6d969066fe6310c3@5441920c f9ed6b5c26c126de295bf1232ccedbdb+11110146+E8c90f9ddd3b6cd1620f48e34b7490534c43f7e74@5441920c 6bb1e5ccf8c4ef2583efe96bccf70491+17322291+E196662fc86919b65b4d92890cb6d758510fe0d6b@5441920c 32543c512ede456e6d526780b88b6b15+3523440+E4c9067c126d27c21558d66b676b9050c020feccd@5441920c 3884892318510d6e5e52e6194686d19d+6666009+Ebdb05b4e5c10f28d5507ce38c81c268cd3457084@5441920c 42bf30ee736e0e254b04e9e6913e06be+21279449+E1b145961d7c6637bfb1916eb42f503b4680636d8@5441920c 05f010fe0e3687f66986159dd1916699+2064644+Ed52318d3e26290f188266514dbd9602e779bb4d3@5441920c 
662c56b8642e26265093b826e0689288+14665421+E36cdfe7b5656d9b7158e8ee7883886d1d34d52f8@5441920c 6789d9df635d40dbdb42cf201796619c+31742006+E496c7611143e3960f1c1c6c82fb16d8620dc7cd3@5441920c dcfe9fd63c4f21c8eedcdf27d60477bb+3139055+E91e6d07fccb75cc809bf6d5bf06bb2585ff0e8eb@5441920c f69eb3f93d80bf4f7221794964081e46+20025220+Ee596640d0963153b29b2edf074fd8c431c4b3f90@5441920c 967d6d4ed233c610896fc1d98c3b28b0+10164650+E81e99e39e3557b64076e433547541397b1478685@5441920c 9039b674c5217c1c2cd912f6e5028439+22530516+Ebb526c19b663ce48e1c6016634928684b7374b63@5441920c 14d6d4915152e2ce983496774fcde566+13062626+E22f59b087fbbe5eb5de5f7f66176b6bdcecb4940@5441920c e7060435500d777946081cd8df43c78b+17612660+Ec32fdd296301d5673c5415ef79c25e7fcdf1dfde@5441920c 9e31b4720c6ed8d60c4757065491f965+23191965+E67b454334b6140627c3fe06fdbe6028499970f63@5441920c d26d94682c04132c5c42709463ccc26e+9532662+Eb2cc826866265f6b9d72606fd6f5b250c61e9c7e@5441920c 4077d037e9c6096d6388e967236fc9e3+23463610+E93c7660c4f0defb357f973e15663e50b016ed35c@5441920c 939efcf9d66d076f1f52dbee0b46d886+11316441+E193158f0e66c8369d87e5561c720f355ee77c987@5441920c 78edd8c79e4911de2266411947056c8d+33406606+E632b24bd669f2c39ef56540fdc4fd90f5460303c@5441920c 2ff0eb23186b6db3c3f296363de5180c+29555310+E0139db36506e657b6995fd4771019d393f82885c@5441920c 5b0fe6d6604951cd5f6c78f66f359c66+13315726+E295b6db03207f03143e00edc9964739cffc195ce@5441920c 22d56132b728471e2712561b5e683548+24175332+E426de3f93b2bcbde645b57970b0663c1b7fb665f@5441920c f5092e3541b32c9b8553c18fc75deb59+30526779+E5f26e773f219367c550c67c6902b7e929d65e7d3@5441920c d43ec61d1ff5e63b9076626671b0c038+22764166+E915f309339169f6e23c4336240c6e592e08313e6@5441920c e6f4908ec7ec6060c8d466beb076d87d+6501650+Ed2502bebd746dc46382c87cf196c4bb468939782@5441920c 3b0d1c2743570c67ef61cc5940e60bff+30459907+Ef4258964867743bcb19f8ec64e66d018e865f452@5441920c fd47e84763e40011d00b23eb19cfe0d6+7937153+Edfc4e6bf57c0f1df165661393f86b1d355bd4d44@5441920c 
34987c6857bce32f07bf4ee618772df9+17300095+Ed03738e8fdf1041662244f4270066862ff1fb197@5441920c b5356f66d692218e690bb94561c047fd+25176211+E0643cebe06c4ebe5fe1c5dbf746e2868e24b9c6f@5441920c cb9e15151f73d81f6864d55758496cc5+22053170+E6962e9ee716ce012e916f2cc93cc9624ce090225@5441920c 687f6d297b07065c613e6192963e82f0+26170716+Edd7828372c78ec4964d9363891068e13f339124f@5441920c b3e5368d95d1916d5653b90cbe4c5166+16915964+Ee39ebeb6e92096ee58648c66d373e5035b19fb96@5441920c 028f92d2328d0b82662e85c504389ebf+33011109+E56f2d7d585901e563e6223b63444806861778686@5441920c 8fee741b744b81dc001986562c76e2e3+34615593+E1e92ec6070bb4561382d33d7049dec660d71f87d@5441920c 0561c687debcbf5cd5bb661d576d88dd+22721666+E5fb1596528cbb8bc42d1d18cc8f0b46c5e0de6f5@5441920c b41703b42df6d3cd992599e9677b7c0c+21656716+E888d9325f666683427068d89bfc54806d296b684@5441920c e88dee4736fb1332fd0624d886984839+7279296+Ee91142f0e8f0fd9678e3e188effdc440e1d23408@5441920c 219dc65085f06379262dd3b4727b6efe+40036264+E9865f1497fd07c02915697e223727563686663ff@5441920c 8632264fbc7898588dc38e4639496636+9223066+E9c265e5654eccb4def668d87e0614465b4ec44bb@5441920c 10c33b498107392c30e1c5f3494e602d+7265467+E417be03290b37248c4c91fccd30b1b4692266255@5441920c c21959f5756d956456bf8f9eb955c4f0+11169673+E85e840f2b4870c83264233dc08fc3b396c9d1de2@5441920c 96db807ff3e36722740dddc5c1bf62e0+15396551+E1e8f4b9270dcf3638cb6182254c9df3d0e26221e@5441920c bbbf2e45768169c6699cccd60655b635+10659291+Ed47d66136074c5764fe655cb7d96b251ccbc4561@5441920c bfd61f58437b8b8021132efec2446665+6096913+E916f0429b8e8cc046dded7b8c07f4837ff021b5f@5441920c 37604e70419598750ec924d525267f3c+7660539+E6c4d798bf8eb3122157d31e0306c1e545611b259@5441920c de66fe2dcdb83b476990ecf1fbb27c26+11204600+Eb81c3d896180706f813d67882bd113fd69cf6528@5441920c 342eef125628740b9562673bfd2b4d96+54366+E70098b304b0d8975c36075254076223fb73f2eeb@5441920c 1b9215689eb58c256528bd2865c2d626+3466752+E127268472ff8bde9ee818669f4629298e8086bed@5441920c 
086034465e0316e2648f8e4802604f51+31370255+E6f34120d24c1f3665846204865b4bcceb47686fe@5441920c 2e234e3f091bec21456c9cff0bc761b6+6507254+E6fb62e6b9e0e734d152106d060ee6692b6c9ff16@5441920c d6c63e474cf6b5385104b0b78677ec67+13175993+E9809c18b776b605849b2c321928863c362988576@5441920c 4ddfb1ccc7611289d7264bd70cc93dcf+12766634+E3b762696bee4dd0569d0302f957868fc20f51652@5441920c 0bdf4063360d6b021e7bd3ccdef516b7+9460636+Ec07113cdb6264b0821504ccf3b0e2604c1870916@5441920c 491e0110808fffddc6b9d2576c19dc1c+27323515+E11f2c06e4f100f75448161462cb693e6debd5178@5441920c 4f16e91e756de766b9bc8ce99900623e+9640416+E06208cbec42bfc0d55661e91036cdb4cb5dce80b@5441920c 5605f670b81c38565483275336c3eb92+15604264+E7ec502fe6fb951904769d67621100686144d56de@5441920c 176d89f315f6d49fee66317f081634c6+14067052+Ec6345f6d94364e6e310655435b476c46087e6746@5441920c 89b45833d2c19fd864538c2ec1d39db0+17922209+E6679c164b6f598d5d0630b678297cd068c9c9262@5441920c b42f550fc4f7c3987413b19e69ec784d+6951532+E589684c52326864cd096ce66dc61e30ebe4130d8@5441920c 70462101bfe861f600cff25705738683+10179374+Ee8735f8d2e55946d6dbd3622bfd0b52ed4ff5645@5441920c e5639f1ce89f30d7647d43d92f3749c9+13769767+Ecd88f922b0db4102564b81fc91c7b74f66112656@5441920c 922c4f0efe4d505f216cf6b16e0c74f0+13596264+E521d6ccf9306e12e3976c9169c122220b1cd702d@5441920c f75c903f8d88d6969e7ff2c72e5b31b8+22691146+E0f3dfdb223b828723e3017ff77e3f66b493b86dd@5441920c 6788dc29696632e5f39b668e84337147+16625559+Eff07ef424de0e25ff25316c43ecb9620c8ff6cd6@5441920c 195872d8776fb7df2699106f22de52eb+29592637+E3796e413486c57e1671b9066cf91fb6f358e1b8c@5441920c 1872d8876c16d6c72b1915486c996f51+16101070+E299d2660721262061d20e5421d387c966595f396@5441920c 6d54122d77b2246369d35f699220bd41+16610443+E763f085d9b08d8333e5d95f295028d5b848fc7b7@5441920c 9987e8842471d306ff54b68333fc94bc+14696064+E66641522d69ffb1b990be462106b248c99506b55@5441920c 37ed73f77c77c6c8ec8666e753cbbf7b+25736556+E87781259d92d670966e1654d369ee46d86d5ce66@5441920c 
de0dbcf70d224c16dcf92905ec10e261+17151669+E4228816e8d6d28e8835d8dfb46e54dc1f63c7c67@5441920c 709e8ee867526b180b619b682159c277+23262243+E93f00c26b28e85d26e8cec6d916de796e6e3333f@5441920c 374f74d9f4f0409b19ef96d00b267868+15933520+Eb4f5933760625f77d172235bb2fd62b5d46c1b6c@5441920c 51202e99c801cfc3062bd9610c00f063+2539339+E26c714978b06906d7144158b6ebb1fbe36d56344@5441920c b6fd759cb167c94557649cb3f7482d49+26353605+Ee642b25b5c00040520fe3dddd988c146e632cc14@5441920c 94669028355369bfe0db926846bb56f2+9695904+E94bd0c5fbe063be26b5d37061e0d5e13666b67d5@5441920c 2350567d203eb82066ef6dd59351990f+7647526+E27c8f695b3d508984bb35cdb78f75b0b690e5078@5441920c ed1f536d97255d9b3287612ed4833026+19420966+E35cd0f0303cfc68077376266e3117c72b369b10c@5441920c bf202c423c2f658db116976b3866c622+12634376+E7086c00ef933ccf0f07f0c9d00377797f337fefe@5441920c 17055b910c95c42619109362966c8fbf+10157396+Efd6d11c193bd32c7c69df08d8217ec63cf8414e3@5441920c f11c6144838cb9b4d67351d6626d1802+7156443+E4d5dcdd8ed1c174076dfd46767996651f38c4903@5441920c 9cb47df53d1cff53cd5b4796d0bc23f3+29952199+Eed564950d188541356161227068fd9f40fb5933d@5441920c 2ede654beb747fe9ee17be9dd5d3949c+12640911+Ee99ffeb440dd729067c606762ec076e524d592f5@5441920c 9be26457c84576c7e66e3168fd979607+26005247+Ec94b90868305fb875497f3b687655fd096e95296@5441920c 38b141749fdcc96dc28f725593486bec+61264+Ebe8cc5cfd0bdd54732ff1c62f6620ce4c797cbc8@5441920c 853b659766fbc96f641bc6923d5694bb+14544713+Eb567982c333b291b2d72467b6c431cee1bfcb6de@5441920c c6ecf79b145527c6cd62b8b6cf6f51b8+24455427+Ebc7bd846b936b266f7985111223eb1fb73d99cd5@5441920c 753b0b93996c2970915290ebb7eebd27+19979357+E3c0604c2ec64edbbc8360e568601e1c6ecbeebc1@5441920c 8fcfbb2b43c14680bdff1e514210632d+15760934+Ed0265cebdd6c614709e8cb4295f353e36083b32f@5441920c 11e006c41883660d19e68df266fe4636+22066346+E08206865313e13d29662e02927f424c7c8ebf265@5441920c d55b9e552b90d8f54c84f620ecb73e2b+15463950+E31701420310310e677b648926644c3234d52f472@5441920c 
c6b76f0b30d2e1c48c345608961c6603+2245652+E1248cd543d2160eef37fe460402ff946e75d8d64@5441920c 4806216f9c2638b63e678d0d660d2409+6206011+E1e94089dbb7c14d892d7f6521fc36764f5c6d761@5441920c 47f36ce735ff98996762cb1245e2d97b+7456077+Ec7ddc386d614f46ce7bbd4db8fc8e2e0261f923d@5441920c 7e8e73b8655f80b3583507fb666c77e1+15544112+Edfdd6ce9b54bf6fd3d2e8116783289dd77532b9f@5441920c 675b0fd88758c546376314872801576e+19435511+E30b4e596f663f0b826b5208370246bd321bbd856@5441920c d69769253bb145bc162c6158e9675316+6640055+Ef9c9c2ccbb6e964c05fb79b1250b606d57e59164@5441920c 0e357377b64fe29c3806fbf96c946645+9325419+E1b8609b20f5fef67fc46ffe5046b9f86e883d6e7@5441920c 9f57b97c259fed92f637d5232dde6104+9611496+E7e2b4cd0562494cbec77f3f67eb55414266d8d50@5441920c 09f60e940e2b603004d6337b32665beb+42415433+E93636b065e97d59bbdb24bc7dff5145f618f64d9@5441920c 6276b65424d63984f8015782060647b6+6046575+Ecc7e42155e92667eb8499956d012fc67b674301e@5441920c c9ce65d27ed164502366f9f5ec6e3fdf+22647045+Ebbff16b79dd826b687464f496f630db769e4f267@5441920c c16f091009b6f237366d5554137509c0+7507452+E3099761fe738fd5ee6368dcb8f1871d9bc018673@5441920c e06b96b906460dc628310477ec136ed7+24532176+E467927670673306f4186e4298f594c2584625137@5441920c f4ff1289c81b231be38907b88e82e975+20702445+Eb06cd9434e0292e6650453656986dbee2e5517b6@5441920c 8ed4167cbc6998f76847f4504cc21655+5393310+E3216b6f606602517fc6102e663746762e348b261@5441920c ed96eee78bcd599609bccb890d19d1c0+25036697+E2855c621547f6508f06862739b1d3c98d502f60f@5441920c b10905f5fbde35f7764492472ef1296c+17526792+E2387540056d68b4f5370bf7cb01d8439c83fc571@5441920c 762ef6d6e967ef7de65eb2095005664c+39123936+E366b9e4e438991d75f6cbc63d66d4671b62dc13b@5441920c 58686918bf8226496969555356830d50+21530262+E08415f6366061839595597edf078cc42764ec929@5441920c 987cc9c5c66e600676ccb76827266b69+39763257+Eb8e06991c83ec041e86f2e563656c869b6237cd7@5441920c c5c010572d6fd5f3683b3f7452e88b2d+6637631+Efb665b8364468f891bf42622099c643c558534f1@5441920c 
076d7008f20864612f7f5132c66b84ce+16073436+Ec6cf748b16cc57f7168c989e661346495224f661@5441920c 81115023d44583e3dd80c630e9eb3b95+21766601+E4456d3c5e1cedc36461269e8c84fe32e8882f0b7@5441920c 26e15cef932e661c163d65c53f3d7596+11316659+Ed328777b54e6570d8fb1067f00847290be9642d7@5441920c 2e9c846ce77c8d62e58728d948f32301+6626151+E6742654b169c78c2636ee26bfbbbd246f86ec811@5441920c 86d19f8cc3be48b90501605017b36579+25421420+Edebc6387dd9f7fed0d4bcf6696220087381e5404@5441920c 27e6162bc2c14c183953fe682fdf1525+36360466+E7c6ece51c0fbd20f6647230bbdbdc66c66860beb@5441920c c03d55167fb6714d78880dc460574091+36766715+E140799f4146c60857050b56e4ffc66693b576ec2@5441920c 631c6b6f09985860c7fed6048e76b716+11066673+E5db6df91202e3100c4577f4bb665474382f8811c@5441920c d62dd2616f00f463681e15ec3647cd58+13126734+E609f8229cdf8c9e9642dfd6e3167ffd076dedbb8@5441920c 8749dd87c0d6b1377909c58fbc45dded+15236795+E461ee6611937f46654806754353bd32961666056@5441920c df7e5e5e1dd4d9dc09d8bf35b5fe3f24+22561443+E8fffe5863e071f5becb24e9c4de0569c1d864ec9@5441920c 4738611fe367691dd44e18f3c8857839+11364640+Ef171c946e87f52ec2877c74964d6c05115724fd6@5441920c f9ce82f59e5908d2d70e18df9679b469+31367794+E53f903684239bcc114f7bf8ff9bd6089f33058db@5441920c 0:15893477:chr10_band0_s0_e3000000.fj 15893477:8770829:chr10_band10_s29600000_e31300000.fj 24664306:15962689:chr10_band11_s31300000_e34400000.fj 40626995:18342794:chr10_band12_s34400000_e38000000.fj 58969789:5087834:chr10_band13_s38000000_e40200000.fj 64057623:4284756:chr10_band14_s40200000_e42300000.fj 68342379:18665404:chr10_band15_s42300000_e46100000.fj 87007783:13536792:chr10_band16_s46100000_e49900000.fj 100544575:13714429:chr10_band17_s49900000_e52900000.fj 114259004:44743112:chr10_band18_s52900000_e61200000.fj 159002116:17555223:chr10_band19_s61200000_e64500000.fj 176557339:4386647:chr10_band1_s3000000_e3800000.fj 180943986:32161952:chr10_band20_s64500000_e70600000.fj 213105938:22400285:chr10_band21_s70600000_e74900000.fj 
235506223:14028139:chr10_band22_s74900000_e77700000.fj 249534362:22042495:chr10_band23_s77700000_e82000000.fj 271576857:31053589:chr10_band24_s82000000_e87900000.fj 302630446:7357223:chr10_band25_s87900000_e89500000.fj 309987669:17709824:chr10_band26_s89500000_e92900000.fj 327697493:6148418:chr10_band27_s92900000_e94100000.fj 333845911:14689912:chr10_band28_s94100000_e97000000.fj 348535823:11964810:chr10_band29_s97000000_e99300000.fj 360500633:14904735:chr10_band2_s3800000_e6600000.fj 375405368:13400037:chr10_band30_s99300000_e101900000.fj 388805405:5685774:chr10_band31_s101900000_e103000000.fj 394491179:9646905:chr10_band32_s103000000_e104900000.fj 404138084:4640161:chr10_band33_s104900000_e105800000.fj 408778245:32455363:chr10_band34_s105800000_e111900000.fj 441233608:15940309:chr10_band35_s111900000_e114900000.fj 457173917:22488871:chr10_band36_s114900000_e119100000.fj 479662788:13741614:chr10_band37_s119100000_e121700000.fj 493404402:7619587:chr10_band38_s121700000_e123100000.fj 501023989:23222084:chr10_band39_s123100000_e127500000.fj 524246073:29868907:chr10_band3_s6600000_e12200000.fj 554114980:16511841:chr10_band40_s127500000_e130600000.fj 570626821:26095352:chr10_band41_s130600000_e135534747.fj 596722173:26538428:chr10_band4_s12200000_e17300000.fj 623260601:5595242:chr10_band5_s17300000_e18600000.fj 628855843:524638:chr10_band6_s18600000_e18700000.fj 629380481:20166758:chr10_band7_s18700000_e22600000.fj 649547239:10195576:chr10_band8_s22600000_e24600000.fj 659742815:26057104:chr10_band9_s24600000_e29600000.fj 685799919:14129943:chr11_band0_s0_e2800000.fj 699929862:27262406:chr11_band10_s43500000_e48800000.fj 727192268:11366584:chr11_band11_s48800000_e51600000.fj 738558852:4284756:chr11_band12_s51600000_e53700000.fj 742843608:6746810:chr11_band13_s53700000_e55700000.fj 749590418:21620368:chr11_band14_s55700000_e59900000.fj 771210786:9186489:chr11_band15_s59900000_e61700000.fj 780397275:8326193:chr11_band16_s61700000_e63400000.fj 
788723468:12757371:chr11_band17_s63400000_e65900000.fj 801480839:12157116:chr11_band18_s65900000_e68400000.fj 813637955:10261919:chr11_band19_s68400000_e70400000.fj 823899874:40669605:chr11_band1_s2800000_e10700000.fj 864569479:24190274:chr11_band20_s70400000_e75200000.fj 888759753:10020619:chr11_band21_s75200000_e77100000.fj 898780372:44638330:chr11_band22_s77100000_e85600000.fj 943418702:13920977:chr11_band23_s85600000_e88300000.fj 957339679:22389141:chr11_band24_s88300000_e92800000.fj 979728820:22616388:chr11_band25_s92800000_e97200000.fj 1002345208:26439412:chr11_band26_s97200000_e102100000.fj 1028784620:4173314:chr11_band27_s102100000_e102900000.fj 1032957934:39884156:chr11_band28_s102900000_e110400000.fj 1072842090:11123032:chr11_band29_s110400000_e112500000.fj 1083965122:10756630:chr11_band2_s10700000_e12700000.fj 1094721752:10580316:chr11_band30_s112500000_e114500000.fj 1105302068:35565428:chr11_band31_s114500000_e121200000.fj 1140867496:14197081:chr11_band32_s121200000_e123900000.fj 1155064577:20758432:chr11_band33_s123900000_e127800000.fj 1175823009:15792191:chr11_band34_s127800000_e130800000.fj 1191615200:22249239:chr11_band35_s130800000_e135006516.fj 1213864439:18449708:chr11_band3_s12700000_e16200000.fj 1232314147:29052525:chr11_band4_s16200000_e21700000.fj 1261366672:23968312:chr11_band5_s21700000_e26100000.fj 1285334984:5944481:chr11_band6_s26100000_e27200000.fj 1291279465:20155513:chr11_band7_s27200000_e31000000.fj 1311434978:28292374:chr11_band8_s31000000_e36400000.fj 1339727352:37778620:chr11_band9_s36400000_e43500000.fj 1377505972:16720695:chr12_band0_s0_e3300000.fj 1394226667:13059459:chr12_band10_s30700000_e33300000.fj 1407286126:7673046:chr12_band11_s33300000_e35800000.fj 1414959172:5825767:chr12_band12_s35800000_e38200000.fj 1420784939:42976743:chr12_band13_s38200000_e46400000.fj 1463761682:13809906:chr12_band14_s46400000_e49100000.fj 1477571588:11988262:chr12_band15_s49100000_e51500000.fj 
1489559850:17595626:chr12_band16_s51500000_e54900000.fj 1507155476:8587338:chr12_band17_s54900000_e56600000.fj 1515742814:7408989:chr12_band18_s56600000_e58100000.fj 1523151803:26345033:chr12_band19_s58100000_e63100000.fj 1549496836:11140028:chr12_band1_s3300000_e5400000.fj 1560636864:9977002:chr12_band20_s63100000_e65100000.fj 1570613866:13651023:chr12_band21_s65100000_e67700000.fj 1584264889:19846309:chr12_band22_s67700000_e71500000.fj 1604111198:22406679:chr12_band23_s71500000_e75700000.fj 1626517877:24370117:chr12_band24_s75700000_e80300000.fj 1650887994:34354522:chr12_band25_s80300000_e86700000.fj 1685242516:12153797:chr12_band26_s86700000_e89000000.fj 1697396313:19120741:chr12_band27_s89000000_e92600000.fj 1716517054:18678462:chr12_band28_s92600000_e96200000.fj 1735195516:28125462:chr12_band29_s96200000_e101600000.fj 1763320978:23263164:chr12_band2_s5400000_e10100000.fj 1786584142:11438933:chr12_band30_s101600000_e103800000.fj 1798023075:27434807:chr12_band31_s103800000_e109000000.fj 1825457882:13431932:chr12_band32_s109000000_e111700000.fj 1838889814:2833555:chr12_band33_s111700000_e112300000.fj 1841723369:10166739:chr12_band34_s112300000_e114300000.fj 1851890108:13335983:chr12_band35_s114300000_e116800000.fj 1865226091:6763178:chr12_band36_s116800000_e118100000.fj 1871989269:13444650:chr12_band37_s118100000_e120700000.fj 1885433919:26286416:chr12_band38_s120700000_e125900000.fj 1911720335:18376984:chr12_band39_s125900000_e129300000.fj 1930097319:14118184:chr12_band3_s10100000_e12800000.fj 1944215503:23892725:chr12_band40_s129300000_e133851895.fj 1968108228:10507783:chr12_band4_s12800000_e14800000.fj 1978616011:27625276:chr12_band5_s14800000_e20000000.fj 2006241287:7026139:chr12_band6_s20000000_e21300000.fj 2013267426:27711533:chr12_band7_s21300000_e26500000.fj 2040978959:6793207:chr12_band8_s26500000_e27800000.fj 2047772166:15405916:chr12_band9_s27800000_e30700000.fj 2063178082:9180724:chr13_band0_s0_e4500000.fj 
2072358806:9467601:chr13_band10_s32200000_e34000000.fj 2081826407:7989532:chr13_band11_s34000000_e35500000.fj 2089815939:24739014:chr13_band12_s35500000_e40100000.fj 2114554953:26941582:chr13_band13_s40100000_e45200000.fj 2141496535:3036311:chr13_band14_s45200000_e45800000.fj 2144532846:7761096:chr13_band15_s45800000_e47300000.fj 2152293942:18709476:chr13_band16_s47300000_e50900000.fj 2171003418:22602285:chr13_band17_s50900000_e55300000.fj 2193605703:23405896:chr13_band18_s55300000_e59600000.fj 2217011599:14457382:chr13_band19_s59600000_e62300000.fj 2231468981:11220750:chr13_band1_s4500000_e10000000.fj 2242689731:18581486:chr13_band20_s62300000_e65700000.fj 2261271217:15834314:chr13_band21_s65700000_e68600000.fj 2277105531:26147285:chr13_band22_s68600000_e73300000.fj 2303252816:11193151:chr13_band23_s73300000_e75400000.fj 2314445967:9599462:chr13_band24_s75400000_e77200000.fj 2324045429:9625154:chr13_band25_s77200000_e79000000.fj 2333670583:46677445:chr13_band26_s79000000_e87700000.fj 2380348028:12795853:chr13_band27_s87700000_e90000000.fj 2393143881:27123199:chr13_band28_s90000000_e95000000.fj 2420267080:16832721:chr13_band29_s95000000_e98200000.fj 2437099801:12852756:chr13_band2_s10000000_e16300000.fj 2449952557:5708668:chr13_band30_s98200000_e99300000.fj 2455661225:12588075:chr13_band31_s99300000_e101700000.fj 2468249300:16946677:chr13_band32_s101700000_e104800000.fj 2485195977:12209370:chr13_band33_s104800000_e107000000.fj 2497405347:17916606:chr13_band34_s107000000_e110300000.fj 2515321953:24643337:chr13_band35_s110300000_e115169878.fj 2539965290:3264756:chr13_band3_s16300000_e17900000.fj 2543230046:4102134:chr13_band4_s17900000_e19500000.fj 2547332180:19703325:chr13_band5_s19500000_e23300000.fj 2567035505:11554223:chr13_band6_s23300000_e25500000.fj 2578589728:12130664:chr13_band7_s25500000_e27800000.fj 2590720392:5842000:chr13_band8_s27800000_e28900000.fj 2596562392:17354821:chr13_band9_s28900000_e32200000.fj 2613917213:7548724:chr14_band0_s0_e3700000.fj 
2621465937:30306549:chr14_band10_s37800000_e43500000.fj 2651772486:19488657:chr14_band11_s43500000_e47200000.fj 2671261143:19588732:chr14_band12_s47200000_e50900000.fj 2690849875:16728188:chr14_band13_s50900000_e54100000.fj 2707578063:7297044:chr14_band14_s54100000_e55500000.fj 2714875107:13453405:chr14_band15_s55500000_e58100000.fj 2728328512:20891242:chr14_band16_s58100000_e62100000.fj 2749219754:13969727:chr14_band17_s62100000_e64800000.fj 2763189481:15929958:chr14_band18_s64800000_e67900000.fj 2779119439:12006715:chr14_band19_s67900000_e70200000.fj 2791126154:8976748:chr14_band1_s3700000_e8100000.fj 2800102902:18617309:chr14_band20_s70200000_e73800000.fj 2818720211:28602130:chr14_band21_s73800000_e79300000.fj 2847322341:22781826:chr14_band22_s79300000_e83600000.fj 2870104167:7096857:chr14_band23_s83600000_e84900000.fj 2877201024:26087198:chr14_band24_s84900000_e89800000.fj 2903288222:10873992:chr14_band25_s89800000_e91900000.fj 2914162214:14647560:chr14_band26_s91900000_e94700000.fj 2928809774:8587442:chr14_band27_s94700000_e96300000.fj 2937397216:27389311:chr14_band28_s96300000_e101400000.fj 2964786527:9264693:chr14_band29_s101400000_e103200000.fj 2974051220:16320752:chr14_band2_s8100000_e16100000.fj 2990371972:4140293:chr14_band30_s103200000_e104000000.fj 2994512265:17268099:chr14_band31_s104000000_e107349540.fj 3011780364:3060756:chr14_band3_s16100000_e17600000.fj 3014841120:3260428:chr14_band4_s17600000_e19100000.fj 3018101548:26138225:chr14_band5_s19100000_e24600000.fj 3044239773:45862056:chr14_band6_s24600000_e33300000.fj 3090101829:10447980:chr14_band7_s33300000_e35300000.fj 3100549809:6564588:chr14_band8_s35300000_e36600000.fj 3107114397:6398876:chr14_band9_s36600000_e37800000.fj 3113513273:7956724:chr15_band0_s0_e3900000.fj 3121469997:34269266:chr15_band10_s33600000_e40100000.fj 3155739263:13762411:chr15_band11_s40100000_e42800000.fj 3169501674:3947813:chr15_band12_s42800000_e43600000.fj 3173449487:5537714:chr15_band13_s43600000_e44800000.fj 
3178987201:24305832:chr15_band14_s44800000_e49500000.fj 3203293033:17507515:chr15_band15_s49500000_e52900000.fj 3220800548:32826524:chr15_band16_s52900000_e59100000.fj 3253627072:1010299:chr15_band17_s59100000_e59300000.fj 3254637371:23454838:chr15_band18_s59300000_e63700000.fj 3278092209:18017355:chr15_band19_s63700000_e67200000.fj 3296109564:9792748:chr15_band1_s3900000_e8700000.fj 3305902312:533847:chr15_band20_s67200000_e67300000.fj 3306436159:1084858:chr15_band21_s67300000_e67500000.fj 3307521017:27465637:chr15_band22_s67500000_e72700000.fj 3334986654:12707353:chr15_band23_s72700000_e75200000.fj 3347694007:6832970:chr15_band24_s75200000_e76600000.fj 3354526977:8748794:chr15_band25_s76600000_e78300000.fj 3363275771:17732191:chr15_band26_s78300000_e81700000.fj 3381007962:15491375:chr15_band27_s81700000_e85200000.fj 3396499337:20295749:chr15_band28_s85200000_e89100000.fj 3416795086:27117670:chr15_band29_s89100000_e94300000.fj 3443912756:14484752:chr15_band2_s8700000_e15800000.fj 3458397508:22592925:chr15_band30_s94300000_e98500000.fj 3480990433:21043993:chr15_band31_s98500000_e102531392.fj 3502034426:6528756:chr15_band3_s15800000_e19000000.fj 3508563182:4646274:chr15_band4_s19000000_e20700000.fj 3513209456:19571328:chr15_band5_s20700000_e25700000.fj 3532780784:12923689:chr15_band6_s25700000_e28100000.fj 3545704473:9921926:chr15_band7_s28100000_e30300000.fj 3555626399:2895507:chr15_band8_s30300000_e31200000.fj 3558521906:11292446:chr15_band9_s31200000_e33600000.fj 3569814352:40629656:chr16_band0_s0_e7900000.fj 3610444008:4080756:chr16_band10_s36600000_e38600000.fj 3614524764:18810667:chr16_band11_s38600000_e47000000.fj 3633335431:29170320:chr16_band12_s47000000_e52600000.fj 3662505751:21574362:chr16_band13_s52600000_e56700000.fj 3684080113:3619563:chr16_band14_s56700000_e57400000.fj 3687699676:49161531:chr16_band15_s57400000_e66700000.fj 3736861207:19748144:chr16_band16_s66700000_e70800000.fj 3756609351:10946735:chr16_band17_s70800000_e72900000.fj 
3767556086:6378485:chr16_band18_s72900000_e74100000.fj 3773934571:26881587:chr16_band19_s74100000_e79200000.fj 3800816158:13661669:chr16_band1_s7900000_e10500000.fj 3814477827:13501427:chr16_band20_s79200000_e81700000.fj 3827979254:13677551:chr16_band21_s81700000_e84200000.fj 3841656805:15666076:chr16_band22_s84200000_e87100000.fj 3857322881:7998490:chr16_band23_s87100000_e88700000.fj 3865321371:8053236:chr16_band24_s88700000_e90354753.fj 3873374607:10728254:chr16_band2_s10500000_e12600000.fj 3884102861:11356748:chr16_band3_s12600000_e14800000.fj 3895459609:7600427:chr16_band4_s14800000_e16800000.fj 3903060036:20722736:chr16_band5_s16800000_e21200000.fj 3923782772:13729019:chr16_band6_s21200000_e24200000.fj 3937511791:20246913:chr16_band7_s24200000_e28100000.fj 3957758704:26945678:chr16_band8_s28100000_e34600000.fj 3984704382:3384870:chr16_band9_s34600000_e36600000.fj 3988089252:16155754:chr17_band0_s0_e3300000.fj 4004245006:12762477:chr17_band10_s38400000_e40900000.fj 4017007483:18572384:chr17_band11_s40900000_e44900000.fj 4035579867:12458663:chr17_band12_s44900000_e47400000.fj 4048038530:14524689:chr17_band13_s47400000_e50200000.fj 4062563219:38661662:chr17_band14_s50200000_e57600000.fj 4101224881:3149045:chr17_band15_s57600000_e58300000.fj 4104373926:13700211:chr17_band16_s58300000_e61100000.fj 4118074137:7529724:chr17_band17_s61100000_e62600000.fj 4125603861:7950542:chr17_band18_s62600000_e64200000.fj 4133554403:14756800:chr17_band19_s64200000_e67100000.fj 4148311203:16443598:chr17_band1_s3300000_e6500000.fj 4164754801:20108889:chr17_band20_s67100000_e70900000.fj 4184863690:20058363:chr17_band21_s70900000_e74800000.fj 4204922053:2587408:chr17_band22_s74800000_e75300000.fj 4207509461:30547504:chr17_band23_s75300000_e81195210.fj 4238056965:21562054:chr17_band2_s6500000_e10700000.fj 4259619019:27395356:chr17_band3_s10700000_e16000000.fj 4287014375:28365678:chr17_band4_s16000000_e22200000.fj 4315380053:289200:chr17_band5_s22200000_e24000000.fj 
4315669253:5237174:chr17_band6_s24000000_e25800000.fj 4320906427:29727146:chr17_band7_s25800000_e31800000.fj 4350633573:30907874:chr17_band8_s31800000_e38100000.fj 4381541447:1504858:chr17_band9_s38100000_e38400000.fj 4383046305:14943044:chr18_band0_s0_e2900000.fj 4397989349:33721037:chr18_band10_s37200000_e43500000.fj 4431710386:24805551:chr18_band11_s43500000_e48200000.fj 4456515937:29378907:chr18_band12_s48200000_e53800000.fj 4485894844:12633635:chr18_band13_s53800000_e56200000.fj 4498528479:14797428:chr18_band14_s56200000_e59000000.fj 4513325907:13780102:chr18_band15_s59000000_e61600000.fj 4527106009:28794272:chr18_band16_s61600000_e66800000.fj 4555900281:10201924:chr18_band17_s66800000_e68700000.fj 4566102205:24124836:chr18_band18_s68700000_e73100000.fj 4590227041:26615557:chr18_band19_s73100000_e78077248.fj 4616842598:22145236:chr18_band1_s2900000_e7100000.fj 4638987834:7311348:chr18_band2_s7100000_e8500000.fj 4646299182:12577740:chr18_band3_s8500000_e10900000.fj 4658876922:21508140:chr18_band4_s10900000_e15400000.fj 4680385062:52389:chr18_band5_s15400000_e17200000.fj 4680437451:5076969:chr18_band6_s17200000_e19000000.fj 4685514420:31190178:chr18_band7_s19000000_e25000000.fj 4716704598:41160388:chr18_band8_s25000000_e32700000.fj 4757864986:23815045:chr18_band9_s32700000_e37200000.fj 4781680031:34031899:chr19_band0_s0_e6900000.fj 4815711930:13851503:chr19_band10_s35500000_e38300000.fj 4829563433:1998048:chr19_band11_s38300000_e38700000.fj 4831561481:22892591:chr19_band12_s38700000_e43400000.fj 4854454072:8872354:chr19_band13_s43400000_e45200000.fj 4863326426:13749381:chr19_band14_s45200000_e48000000.fj 4877075807:16660930:chr19_band15_s48000000_e51400000.fj 4893736737:11038031:chr19_band16_s51400000_e53600000.fj 4904774768:13412850:chr19_band17_s53600000_e56300000.fj 4918187618:14313555:chr19_band18_s56300000_e59128983.fj 4932501173:33635703:chr19_band1_s6900000_e13900000.fj 4966136876:489834:chr19_band2_s13900000_e14000000.fj 
4966626710:11377056:chr19_band3_s14000000_e16300000.fj 4978003766:18348545:chr19_band4_s16300000_e20000000.fj 4996352311:21127772:chr19_band5_s20000000_e24400000.fj 5017480083:1059388:chr19_band6_s24400000_e26500000.fj 5018539471:6984270:chr19_band7_s26500000_e28600000.fj 5025523741:20073973:chr19_band8_s28600000_e32400000.fj 5045597714:15769669:chr19_band9_s32400000_e35500000.fj 5061367383:9756229:chr1_band0_s0_e2300000.fj 5071123612:11489333:chr1_band10_s30200000_e32400000.fj 5082612945:11074951:chr1_band11_s32400000_e34600000.fj 5093687896:28145091:chr1_band12_s34600000_e40100000.fj 5121832987:20545569:chr1_band13_s40100000_e44100000.fj 5142378556:13582476:chr1_band14_s44100000_e46800000.fj 5155961032:19737049:chr1_band15_s46800000_e50700000.fj 5175698081:27529030:chr1_band16_s50700000_e56100000.fj 5203227111:15452164:chr1_band17_s56100000_e59000000.fj 5218679275:12082565:chr1_band18_s59000000_e61300000.fj 5230761840:39789591:chr1_band19_s61300000_e68900000.fj 5270551431:15804689:chr1_band1_s2300000_e5400000.fj 5286356120:4141822:chr1_band20_s68900000_e69700000.fj 5290497942:80211445:chr1_band21_s69700000_e84900000.fj 5370709387:18343642:chr1_band22_s84900000_e88400000.fj 5389053029:18664730:chr1_band23_s88400000_e92000000.fj 5407717759:13861818:chr1_band24_s92000000_e94700000.fj 5421579577:26472421:chr1_band25_s94700000_e99700000.fj 5448051998:13161786:chr1_band26_s99700000_e102200000.fj 5461213784:26136584:chr1_band27_s102200000_e107200000.fj 5487350368:23561374:chr1_band28_s107200000_e111800000.fj 5510911742:22349851:chr1_band29_s111800000_e116100000.fj 5533261593:9400437:chr1_band2_s5400000_e7200000.fj 5542662030:8898401:chr1_band30_s116100000_e117800000.fj 5551560431:14463385:chr1_band31_s117800000_e120600000.fj 5566023816:2797932:chr1_band32_s120600000_e121500000.fj 5568821748:7140760:chr1_band33_s121500000_e125000000.fj 5575962508:7956760:chr1_band34_s125000000_e128900000.fj 5583919268:28100130:chr1_band35_s128900000_e142600000.fj 
5612019398:15570132:chr1_band36_s142600000_e147000000.fj 5627589530:12079936:chr1_band37_s147000000_e150300000.fj 5639669466:23848498:chr1_band38_s150300000_e155000000.fj 5663517964:7320072:chr1_band39_s155000000_e156500000.fj 5670838036:10249929:chr1_band3_s7200000_e9200000.fj 5681087965:13622024:chr1_band40_s156500000_e159100000.fj 5694709989:7329847:chr1_band41_s159100000_e160500000.fj 5702039836:25915639:chr1_band42_s160500000_e165500000.fj 5727955475:8902437:chr1_band43_s165500000_e167200000.fj 5736857912:19387309:chr1_band44_s167200000_e170900000.fj 5756245221:10334901:chr1_band45_s170900000_e172900000.fj 5766580122:15956391:chr1_band46_s172900000_e176000000.fj 5782536513:22381464:chr1_band47_s176000000_e180300000.fj 5804917977:28762910:chr1_band48_s180300000_e185800000.fj 5833680887:27482517:chr1_band49_s185800000_e190800000.fj 5861163404:17698144:chr1_band4_s9200000_e12700000.fj 5878861548:16115379:chr1_band50_s190800000_e193800000.fj 5894976927:26603399:chr1_band51_s193800000_e198700000.fj 5921580326:42767332:chr1_band52_s198700000_e207200000.fj 5964347658:22519054:chr1_band53_s207200000_e211500000.fj 5986866712:15623994:chr1_band54_s211500000_e214500000.fj 6002490706:50651137:chr1_band55_s214500000_e224100000.fj 6053141843:2340783:chr1_band56_s224100000_e224600000.fj 6055482626:12296366:chr1_band57_s224600000_e227000000.fj 6067778992:19160541:chr1_band58_s227000000_e230700000.fj 6086939533:21150112:chr1_band59_s230700000_e234700000.fj 6108089645:15934102:chr1_band5_s12700000_e16200000.fj 6124023747:9572247:chr1_band60_s234700000_e236600000.fj 6133595994:37063925:chr1_band61_s236600000_e243700000.fj 6170659919:28279658:chr1_band62_s243700000_e249250621.fj 6198939577:21312883:chr1_band6_s16200000_e20400000.fj 6220252460:17968553:chr1_band7_s20400000_e23900000.fj 6238221013:20502272:chr1_band8_s23900000_e28000000.fj 6258723285:10454348:chr1_band9_s28000000_e30200000.fj 6269177633:26240932:chr20_band0_s0_e5100000.fj 
6295418565:11477343:chr20_band10_s32100000_e34400000.fj 6306895908:16121702:chr20_band11_s34400000_e37600000.fj 6323017610:21665969:chr20_band12_s37600000_e41700000.fj 6344683579:2106601:chr20_band13_s41700000_e42100000.fj 6346790180:22234896:chr20_band14_s42100000_e46400000.fj 6369025076:17466445:chr20_band15_s46400000_e49800000.fj 6386491521:27353500:chr20_band16_s49800000_e55000000.fj 6413845021:7951115:chr20_band17_s55000000_e56500000.fj 6421796136:10132647:chr20_band18_s56500000_e58400000.fj 6431928783:24122390:chr20_band19_s58400000_e63025520.fj 6456051173:21750808:chr20_band1_s5100000_e9200000.fj 6477801981:15548705:chr20_band2_s9200000_e12100000.fj 6493350686:30792695:chr20_band3_s12100000_e17900000.fj 6524143381:17804912:chr20_band4_s17900000_e21300000.fj 6541948293:5184960:chr20_band5_s21300000_e22300000.fj 6547133253:17298739:chr20_band6_s22300000_e25600000.fj 6564431992:3301773:chr20_band7_s25600000_e27500000.fj 6567733765:3876756:chr20_band8_s27500000_e29400000.fj 6571610521:13283209:chr20_band9_s29400000_e32100000.fj 6584893730:5712724:chr21_band0_s0_e2800000.fj 6590606454:10518888:chr21_band10_s35800000_e37800000.fj 6601125342:10144603:chr21_band11_s37800000_e39700000.fj 6611269945:15620599:chr21_band12_s39700000_e42600000.fj 6626890544:28940326:chr21_band13_s42600000_e48129895.fj 6655830870:8160748:chr21_band1_s2800000_e6800000.fj 6663991618:11144287:chr21_band2_s6800000_e10900000.fj 6675135905:1431977:chr21_band3_s10900000_e13200000.fj 6676567882:2244756:chr21_band4_s13200000_e14300000.fj 6678812638:9266581:chr21_band5_s14300000_e16400000.fj 6688079219:41245659:chr21_band6_s16400000_e24000000.fj 6729324878:15344510:chr21_band7_s24000000_e26800000.fj 6744669388:24932791:chr21_band8_s26800000_e31500000.fj 6769602179:22442446:chr21_band9_s31500000_e35800000.fj 6792044625:7752724:chr22_band0_s0_e3800000.fj 6799797349:28224380:chr22_band10_s32200000_e37600000.fj 6828021729:17304839:chr22_band11_s37600000_e41000000.fj 
6845326568:16113075:chr22_band12_s41000000_e44200000.fj 6861439643:22233411:chr22_band13_s44200000_e48400000.fj 6883673054:5524922:chr22_band14_s48400000_e49400000.fj 6889197976:9664262:chr22_band15_s49400000_e51304566.fj 6898862238:9180748:chr22_band1_s3800000_e8300000.fj 6908042986:7956752:chr22_band2_s8300000_e12200000.fj 6915999738:5100756:chr22_band3_s12200000_e14700000.fj 6921100494:9937902:chr22_band4_s14700000_e17900000.fj 6931038396:19548232:chr22_band5_s17900000_e22200000.fj 6950586628:6683394:chr22_band6_s22200000_e23500000.fj 6957270022:11752445:chr22_band7_s23500000_e25900000.fj 6969022467:19256022:chr22_band8_s25900000_e29600000.fj 6988278489:12954853:chr22_band9_s29600000_e32200000.fj 7001233342:23233415:chr2_band0_s0_e4400000.fj 7024466757:10667298:chr2_band10_s36600000_e38600000.fj 7035134055:16966684:chr2_band11_s38600000_e41800000.fj 7052100739:31586877:chr2_band12_s41800000_e47800000.fj 7083687616:26968370:chr2_band13_s47800000_e52900000.fj 7110655986:10993850:chr2_band14_s52900000_e55000000.fj 7121649836:33045521:chr2_band15_s55000000_e61300000.fj 7154695357:14150927:chr2_band16_s61300000_e64100000.fj 7168846284:23578835:chr2_band17_s64100000_e68600000.fj 7192425119:14885552:chr2_band18_s68600000_e71500000.fj 7207310671:10410131:chr2_band19_s71500000_e73500000.fj 7217720802:14156834:chr2_band1_s4400000_e7100000.fj 7231877636:7578172:chr2_band20_s73500000_e75000000.fj 7239455808:44109485:chr2_band21_s75000000_e83300000.fj 7283565293:31254935:chr2_band22_s83300000_e90500000.fj 7314820228:5169067:chr2_band23_s90500000_e93300000.fj 7319989295:10368921:chr2_band24_s93300000_e96800000.fj 7330358216:29052271:chr2_band25_s96800000_e102700000.fj 7359410487:17612827:chr2_band26_s102700000_e106000000.fj 7377023314:7641759:chr2_band27_s106000000_e107500000.fj 7384665073:13411716:chr2_band28_s107500000_e110200000.fj 7398076789:17757245:chr2_band29_s110200000_e114400000.fj 7415834034:26954567:chr2_band2_s7100000_e12200000.fj 
7442788601:23246223:chr2_band30_s114400000_e118800000.fj 7466034824:19074161:chr2_band31_s118800000_e122400000.fj 7485108985:39449695:chr2_band32_s122400000_e129900000.fj 7524558680:11696577:chr2_band33_s129900000_e132500000.fj 7536255257:13249863:chr2_band34_s132500000_e135100000.fj 7549505120:8708592:chr2_band35_s135100000_e136800000.fj 7558213712:29182964:chr2_band36_s136800000_e142200000.fj 7587396676:10264945:chr2_band37_s142200000_e144100000.fj 7597661621:24601843:chr2_band38_s144100000_e148700000.fj 7622263464:5951781:chr2_band39_s148700000_e149900000.fj 7628215245:23795508:chr2_band3_s12200000_e16700000.fj 7652010753:3150007:chr2_band40_s149900000_e150500000.fj 7655160760:23077469:chr2_band41_s150500000_e154900000.fj 7678238229:25968072:chr2_band42_s154900000_e159800000.fj 7704206301:20640325:chr2_band43_s159800000_e163700000.fj 7724846626:31998832:chr2_band44_s163700000_e169700000.fj 7756845458:43632512:chr2_band45_s169700000_e178000000.fj 7800477970:13731959:chr2_band46_s178000000_e180600000.fj 7814209929:12856172:chr2_band47_s180600000_e183000000.fj 7827066101:34247127:chr2_band48_s183000000_e189400000.fj 7861313228:13286018:chr2_band49_s189400000_e191900000.fj 7874599246:13181256:chr2_band4_s16700000_e19200000.fj 7887780502:29663052:chr2_band50_s191900000_e197400000.fj 7917443554:30634366:chr2_band51_s197400000_e203300000.fj 7948077920:8075493:chr2_band52_s203300000_e204900000.fj 7956153413:21661204:chr2_band53_s204900000_e209000000.fj 7977814617:33806107:chr2_band54_s209000000_e215300000.fj 8011620724:32791910:chr2_band55_s215300000_e221500000.fj 8044412634:19689112:chr2_band56_s221500000_e225200000.fj 8064101746:4741805:chr2_band57_s225200000_e226100000.fj 8068843551:25904705:chr2_band58_s226100000_e231000000.fj 8094748256:23619321:chr2_band59_s231000000_e235600000.fj 8118367577:25423194:chr2_band5_s19200000_e24000000.fj 8143790771:9119290:chr2_band60_s235600000_e237300000.fj 8152910061:30796914:chr2_band61_s237300000_e243199373.fj 
8183706975:19924674:chr2_band6_s24000000_e27900000.fj 8203631649:11135309:chr2_band7_s27900000_e30000000.fj 8214766958:10940177:chr2_band8_s30000000_e32100000.fj 8225707135:23560118:chr2_band9_s32100000_e36600000.fj 8249267253:14861122:chr3_band0_s0_e2800000.fj 8264128375:22809815:chr3_band10_s32100000_e36500000.fj 8286938190:15046818:chr3_band11_s36500000_e39400000.fj 8301985008:22186262:chr3_band12_s39400000_e43700000.fj 8324171270:2058080:chr3_band13_s43700000_e44100000.fj 8326229350:521252:chr3_band14_s44100000_e44200000.fj 8326750602:32234144:chr3_band15_s44200000_e50600000.fj 8358984746:8441932:chr3_band16_s50600000_e52300000.fj 8367426678:10948899:chr3_band17_s52300000_e54400000.fj 8378375577:21772898:chr3_band18_s54400000_e58600000.fj 8400148475:27069700:chr3_band19_s58600000_e63700000.fj 8427218175:6545313:chr3_band1_s2800000_e4000000.fj 8433763488:31787795:chr3_band20_s63700000_e69800000.fj 8465551283:23275812:chr3_band21_s69800000_e74200000.fj 8488827095:29739564:chr3_band22_s74200000_e79800000.fj 8518566659:20035093:chr3_band23_s79800000_e83500000.fj 8538601752:20162108:chr3_band24_s83500000_e87200000.fj 8558763860:3767584:chr3_band25_s87200000_e87900000.fj 8562531444:13581503:chr3_band26_s87900000_e91000000.fj 8576112947:7002557:chr3_band27_s91000000_e93900000.fj 8583115504:23576185:chr3_band28_s93900000_e98300000.fj 8606691689:8815871:chr3_band29_s98300000_e100000000.fj 8615507560:24882143:chr3_band2_s4000000_e8700000.fj 8640389703:4697534:chr3_band30_s100000000_e100900000.fj 8645087237:9838940:chr3_band31_s100900000_e102800000.fj 8654926177:18496118:chr3_band32_s102800000_e106200000.fj 8673422295:9018631:chr3_band33_s106200000_e107900000.fj 8682440926:17929166:chr3_band34_s107900000_e111300000.fj 8700370092:11594711:chr3_band35_s111300000_e113500000.fj 8711964803:20308668:chr3_band36_s113500000_e117300000.fj 8732273471:9030401:chr3_band37_s117300000_e119000000.fj 8741303872:14898827:chr3_band38_s119000000_e121900000.fj 
8756202699:10008811:chr3_band39_s121900000_e123800000.fj 8766211510:15979710:chr3_band3_s8700000_e11800000.fj 8782191220:10116188:chr3_band40_s123800000_e125800000.fj 8792307408:17806797:chr3_band41_s125800000_e129200000.fj 8810114205:23227207:chr3_band42_s129200000_e133700000.fj 8833341412:10556009:chr3_band43_s133700000_e135700000.fj 8843897421:15182933:chr3_band44_s135700000_e138700000.fj 8859080354:21307590:chr3_band45_s138700000_e142800000.fj 8880387944:32759712:chr3_band46_s142800000_e148900000.fj 8913147656:16878434:chr3_band47_s148900000_e152100000.fj 8930026090:15100163:chr3_band48_s152100000_e155000000.fj 8945126253:10434017:chr3_band49_s155000000_e157000000.fj 8955560270:7785476:chr3_band4_s11800000_e13300000.fj 8963345746:10542610:chr3_band50_s157000000_e159000000.fj 8973888356:8787004:chr3_band51_s159000000_e160700000.fj 8982675360:37253134:chr3_band52_s160700000_e167600000.fj 9019928494:17183652:chr3_band53_s167600000_e170900000.fj 9037112146:25746921:chr3_band54_s170900000_e175700000.fj 9062859067:17296262:chr3_band55_s175700000_e179000000.fj 9080155329:19044817:chr3_band56_s179000000_e182700000.fj 9099200146:9216326:chr3_band57_s182700000_e184500000.fj 9108416472:7709847:chr3_band58_s184500000_e186000000.fj 9116126319:9992471:chr3_band59_s186000000_e187900000.fj 9126118790:16105743:chr3_band5_s13300000_e16400000.fj 9142224533:23723049:chr3_band60_s187900000_e192300000.fj 9165947582:28740659:chr3_band61_s192300000_e198022430.fj 9194688241:39101485:chr3_band6_s16400000_e23900000.fj 9233789726:13179037:chr3_band7_s23900000_e26400000.fj 9246968763:23659026:chr3_band8_s26400000_e30900000.fj 9270627789:6320874:chr3_band9_s30900000_e32100000.fj 9276948663:22624820:chr4_band0_s0_e4500000.fj 9299573483:19209706:chr4_band10_s44600000_e48200000.fj 9318783189:6384513:chr4_band11_s48200000_e50400000.fj 9325167702:4766253:chr4_band12_s50400000_e52700000.fj 9329933955:35018116:chr4_band13_s52700000_e59500000.fj 
9364952071:38549974:chr4_band14_s59500000_e66600000.fj 9403502045:20373460:chr4_band15_s66600000_e70500000.fj 9423875505:29919881:chr4_band16_s70500000_e76300000.fj 9453795386:13493480:chr4_band17_s76300000_e78900000.fj 9467288866:18466490:chr4_band18_s78900000_e82400000.fj 9485755356:8860418:chr4_band19_s82400000_e84100000.fj 9494615774:7798021:chr4_band1_s4500000_e6000000.fj 9502413795:14575657:chr4_band20_s84100000_e86900000.fj 9516989452:5634479:chr4_band21_s86900000_e88000000.fj 9522623931:29718269:chr4_band22_s88000000_e93700000.fj 9552342200:7383995:chr4_band23_s93700000_e95100000.fj 9559726195:19715177:chr4_band24_s95100000_e98800000.fj 9579441372:11922350:chr4_band25_s98800000_e101100000.fj 9591363722:34698356:chr4_band26_s101100000_e107700000.fj 9626062078:33645974:chr4_band27_s107700000_e114100000.fj 9659708052:35587370:chr4_band28_s114100000_e120800000.fj 9695295422:15811642:chr4_band29_s120800000_e123800000.fj 9711107064:27146461:chr4_band2_s6000000_e11300000.fj 9738253525:26736354:chr4_band30_s123800000_e128800000.fj 9764989879:12053649:chr4_band31_s128800000_e131100000.fj 9777043528:45621870:chr4_band32_s131100000_e139500000.fj 9822665398:10457142:chr4_band33_s139500000_e141500000.fj 9833122540:27183032:chr4_band34_s141500000_e146800000.fj 9860305572:8901657:chr4_band35_s146800000_e148500000.fj 9869207229:13650247:chr4_band36_s148500000_e151100000.fj 9882857476:23802908:chr4_band37_s151100000_e155600000.fj 9906660384:33300872:chr4_band38_s155600000_e161800000.fj 9939961256:14822270:chr4_band39_s161800000_e164500000.fj 9954783526:20780182:chr4_band3_s11300000_e15200000.fj 9975563708:29757577:chr4_band40_s164500000_e170100000.fj 10005321285:9439391:chr4_band41_s170100000_e171900000.fj 10014760676:23890991:chr4_band42_s171900000_e176300000.fj 10038651667:6504378:chr4_band43_s176300000_e177500000.fj 10045156045:31429424:chr4_band44_s177500000_e183200000.fj 10076585469:20867286:chr4_band45_s183200000_e187100000.fj 
10097452755:21542259:chr4_band46_s187100000_e191154276.fj 10118995014:13635330:chr4_band4_s15200000_e17800000.fj 10132630344:18645443:chr4_band5_s17800000_e21300000.fj 10151275787:33763872:chr4_band6_s21300000_e27700000.fj 10185039659:43935944:chr4_band7_s27700000_e35800000.fj 10228975603:28173344:chr4_band8_s35800000_e41200000.fj 10257148947:17960379:chr4_band9_s41200000_e44600000.fj 10275109326:23869100:chr5_band0_s0_e4500000.fj 10298978426:21475297:chr5_band10_s38400000_e42500000.fj 10320453723:18293853:chr5_band11_s42500000_e46100000.fj 10338747576:1514377:chr5_band12_s46100000_e48400000.fj 10340261953:8509364:chr5_band13_s48400000_e50700000.fj 10348771317:43017890:chr5_band14_s50700000_e58900000.fj 10391789207:20665117:chr5_band15_s58900000_e62900000.fj 10412454324:1591467:chr5_band16_s62900000_e63200000.fj 10414045791:18148759:chr5_band17_s63200000_e66700000.fj 10432194550:8856200:chr5_band18_s66700000_e68400000.fj 10441050750:20707621:chr5_band19_s68400000_e73300000.fj 10461758371:9656450:chr5_band1_s4500000_e6300000.fj 10471414821:18425621:chr5_band20_s73300000_e76900000.fj 10489840442:23196332:chr5_band21_s76900000_e81400000.fj 10513036774:7300891:chr5_band22_s81400000_e82800000.fj 10520337665:49892537:chr5_band23_s82800000_e92300000.fj 10570230202:30721980:chr5_band24_s92300000_e98200000.fj 10600952182:23888340:chr5_band25_s98200000_e102800000.fj 10624840522:9229611:chr5_band26_s102800000_e104500000.fj 10634070133:27421753:chr5_band27_s104500000_e109600000.fj 10661491886:9899436:chr5_band28_s109600000_e111500000.fj 10671391322:8406659:chr5_band29_s111500000_e113100000.fj 10679797981:18694996:chr5_band2_s6300000_e9800000.fj 10698492977:11028527:chr5_band30_s113100000_e115200000.fj 10709521504:32909679:chr5_band31_s115200000_e121400000.fj 10742431183:30963436:chr5_band32_s121400000_e127300000.fj 10773394619:17266919:chr5_band33_s127300000_e130600000.fj 10790661538:28998009:chr5_band34_s130600000_e136200000.fj 
10819659547:16704607:chr5_band35_s136200000_e139500000.fj 10836364154:26045175:chr5_band36_s139500000_e144500000.fj 10862409329:27918575:chr5_band37_s144500000_e149800000.fj 10890327904:15050054:chr5_band38_s149800000_e152700000.fj 10905377958:15603577:chr5_band39_s152700000_e155700000.fj 10920981535:27716393:chr5_band3_s9800000_e15000000.fj 10948697928:22019757:chr5_band40_s155700000_e159900000.fj 10970717685:45797643:chr5_band41_s159900000_e168500000.fj 11016515328:22514380:chr5_band42_s168500000_e172800000.fj 11039029708:19253951:chr5_band43_s172800000_e176600000.fj 11058283659:21229495:chr5_band44_s176600000_e180915260.fj 11079513154:17559372:chr5_band4_s15000000_e18400000.fj 11097072526:25526673:chr5_band5_s18400000_e23300000.fj 11122599199:7096070:chr5_band6_s23300000_e24600000.fj 11129695269:23411851:chr5_band7_s24600000_e28900000.fj 11153107120:26119054:chr5_band8_s28900000_e33800000.fj 11179226174:23290349:chr5_band9_s33800000_e38400000.fj 11202516523:11854057:chr6_band0_s0_e2300000.fj 11214370580:8496414:chr6_band10_s30400000_e32100000.fj 11222866994:7220728:chr6_band11_s32100000_e33500000.fj 11230087722:15866348:chr6_band12_s33500000_e36600000.fj 11245954070:20565771:chr6_band13_s36600000_e40500000.fj 11266519841:29696078:chr6_band14_s40500000_e46200000.fj 11296215919:29661980:chr6_band15_s46200000_e51800000.fj 11325877899:5687860:chr6_band16_s51800000_e52900000.fj 11331565759:21802934:chr6_band17_s52900000_e57000000.fj 11353368693:8125890:chr6_band18_s57000000_e58700000.fj 11361494583:345265:chr6_band19_s58700000_e61000000.fj 11361839848:10003929:chr6_band1_s2300000_e4200000.fj 11371843777:8997133:chr6_band20_s61000000_e63300000.fj 11380840910:550060:chr6_band21_s63300000_e63400000.fj 11381390970:35514558:chr6_band22_s63400000_e70000000.fj 11416905528:30770003:chr6_band23_s70000000_e75900000.fj 11447675531:41661599:chr6_band24_s75900000_e83900000.fj 11489337130:5032680:chr6_band25_s83900000_e84900000.fj 
11494369810:15730167:chr6_band26_s84900000_e88000000.fj 11510099977:26698981:chr6_band27_s88000000_e93100000.fj 11536798958:33870086:chr6_band28_s93100000_e99500000.fj 11570669044:5783371:chr6_band29_s99500000_e100600000.fj 11576452415:15201350:chr6_band2_s4200000_e7100000.fj 11591653765:26318508:chr6_band30_s100600000_e105500000.fj 11617972273:47367411:chr6_band31_s105500000_e114600000.fj 11665339684:19419515:chr6_band32_s114600000_e118300000.fj 11684759199:1079105:chr6_band33_s118300000_e118500000.fj 11685838304:40594325:chr6_band34_s118500000_e126100000.fj 11726432629:5183249:chr6_band35_s126100000_e127100000.fj 11731615878:17064012:chr6_band36_s127100000_e130300000.fj 11748679890:4703673:chr6_band37_s130300000_e131200000.fj 11753383563:20937849:chr6_band38_s131200000_e135200000.fj 11774321412:19768577:chr6_band39_s135200000_e139000000.fj 11794089989:18520799:chr6_band3_s7100000_e10600000.fj 11812610788:20084958:chr6_band40_s139000000_e142800000.fj 11832695746:14583555:chr6_band41_s142800000_e145600000.fj 11847279301:17888235:chr6_band42_s145600000_e149000000.fj 11865167536:18200150:chr6_band43_s149000000_e152500000.fj 11883367686:15899684:chr6_band44_s152500000_e155500000.fj 11899267370:28588964:chr6_band45_s155500000_e161000000.fj 11927856334:18688807:chr6_band46_s161000000_e164500000.fj 11946545141:34299518:chr6_band47_s164500000_e171115067.fj 11980844659:5187494:chr6_band4_s10600000_e11600000.fj 11986032153:9550459:chr6_band5_s11600000_e13400000.fj 11995582612:9425852:chr6_band6_s13400000_e15200000.fj 12005008464:52257569:chr6_band7_s15200000_e25200000.fj 12057266033:8929925:chr6_band8_s25200000_e27000000.fj 12066195958:17556391:chr6_band9_s27000000_e30400000.fj 12083752349:14247713:chr7_band0_s0_e2800000.fj 12098000062:11066306:chr7_band10_s35000000_e37200000.fj 12109066368:32087088:chr7_band11_s37200000_e43300000.fj 12141153456:10668222:chr7_band12_s43300000_e45400000.fj 12151821678:18626376:chr7_band13_s45400000_e49000000.fj 
12170448054:7958919:chr7_band14_s49000000_e50500000.fj 12178406973:18713509:chr7_band15_s50500000_e54000000.fj 12197120482:18935900:chr7_band16_s54000000_e58000000.fj 12216056382:261032:chr7_band17_s58000000_e59900000.fj 12216317414:4289180:chr7_band18_s59900000_e61700000.fj 12220606594:22817045:chr7_band19_s61700000_e67000000.fj 12243423639:8953071:chr7_band1_s2800000_e4500000.fj 12252376710:26475183:chr7_band20_s67000000_e72200000.fj 12278851893:21260557:chr7_band21_s72200000_e77500000.fj 12300112450:47850592:chr7_band22_s77500000_e86400000.fj 12347963042:9284520:chr7_band23_s86400000_e88200000.fj 12357247562:15247848:chr7_band24_s88200000_e91100000.fj 12372495410:8580818:chr7_band25_s91100000_e92800000.fj 12381076228:26810427:chr7_band26_s92800000_e98000000.fj 12407886655:27671122:chr7_band27_s98000000_e103800000.fj 12435557777:3665630:chr7_band28_s103800000_e104500000.fj 12439223407:14940321:chr7_band29_s104500000_e107400000.fj 12454163728:12957633:chr7_band2_s4500000_e7300000.fj 12467121361:38098753:chr7_band30_s107400000_e114600000.fj 12505220114:14874016:chr7_band31_s114600000_e117400000.fj 12520094130:19901201:chr7_band32_s117400000_e121100000.fj 12539995331:14314479:chr7_band33_s121100000_e123800000.fj 12554309810:17691683:chr7_band34_s123800000_e127100000.fj 12572001493:10632852:chr7_band35_s127100000_e129200000.fj 12582634345:5806847:chr7_band36_s129200000_e130400000.fj 12588441192:11677084:chr7_band37_s130400000_e132600000.fj 12600118276:29615252:chr7_band38_s132600000_e138200000.fj 12629733528:24913008:chr7_band39_s138200000_e143100000.fj 12654646536:35077014:chr7_band3_s7300000_e13800000.fj 12689723550:23967238:chr7_band40_s143100000_e147900000.fj 12713690788:23454742:chr7_band41_s147900000_e152600000.fj 12737145530:12478502:chr7_band42_s152600000_e155100000.fj 12749624032:21624407:chr7_band43_s155100000_e159138663.fj 12771248439:14556948:chr7_band4_s13800000_e16500000.fj 12785805387:23503963:chr7_band5_s16500000_e20900000.fj 
12809309350:23959972:chr7_band6_s20900000_e25500000.fj 12833269322:13023330:chr7_band7_s25500000_e28000000.fj 12846292652:4304022:chr7_band8_s28000000_e28800000.fj 12850596674:32133100:chr7_band9_s28800000_e35000000.fj 12882729774:11110146:chr8_band0_s0_e2200000.fj 12893839920:17322291:chr8_band10_s39700000_e43100000.fj 12911162211:3523440:chr8_band11_s43100000_e45600000.fj 12914685651:8688009:chr8_band12_s45600000_e48100000.fj 12923373660:21279449:chr8_band13_s48100000_e52200000.fj 12944653109:2084644:chr8_band14_s52200000_e52600000.fj 12946737753:14865421:chr8_band15_s52600000_e55500000.fj 12961603174:31742008:chr8_band16_s55500000_e61600000.fj 12993345182:3139055:chr8_band17_s61600000_e62200000.fj 12996484237:20025220:chr8_band18_s62200000_e66000000.fj 13016509457:10184650:chr8_band19_s66000000_e68000000.fj 13026694107:22530516:chr8_band1_s2200000_e6200000.fj 13049224623:13062826:chr8_band20_s68000000_e70500000.fj 13062287449:17612880:chr8_band21_s70500000_e73900000.fj 13079900329:23191965:chr8_band22_s73900000_e78300000.fj 13103092294:9532882:chr8_band23_s78300000_e80100000.fj 13112625176:23483810:chr8_band24_s80100000_e84600000.fj 13136108986:11316441:chr8_band25_s84600000_e86900000.fj 13147425427:33406608:chr8_band26_s86900000_e93300000.fj 13180832035:29555310:chr8_band27_s93300000_e99000000.fj 13210387345:13315728:chr8_band28_s99000000_e101600000.fj 13223703073:24175332:chr8_band29_s101600000_e106200000.fj 13247878405:30528779:chr8_band2_s6200000_e12700000.fj 13278407184:22764188:chr8_band30_s106200000_e110500000.fj 13301171372:8501850:chr8_band31_s110500000_e112100000.fj 13309673222:30459907:chr8_band32_s112100000_e117700000.fj 13340133129:7937153:chr8_band33_s117700000_e119200000.fj 13348070282:17300095:chr8_band34_s119200000_e122500000.fj 13365370377:25178211:chr8_band35_s122500000_e127300000.fj 13390548588:22053170:chr8_band36_s127300000_e131500000.fj 13412601758:26170716:chr8_band37_s131500000_e136400000.fj 
13438772474:18915984:chr8_band38_s136400000_e139900000.fj 13457688458:33011109:chr8_band39_s139900000_e146364022.fj 13490699567:34615593:chr8_band3_s12700000_e19000000.fj 13525315160:22721686:chr8_band4_s19000000_e23300000.fj 13548036846:21858716:chr8_band5_s23300000_e27400000.fj 13569895562:7279298:chr8_band6_s27400000_e28800000.fj 13577174860:40036264:chr8_band7_s28800000_e36500000.fj 13617211124:9223086:chr8_band8_s36500000_e38300000.fj 13626434210:7285487:chr8_band9_s38300000_e39700000.fj 13633719697:11189873:chr9_band0_s0_e2200000.fj 13644909570:15398551:chr9_band10_s33200000_e36300000.fj 13660308121:10859291:chr9_band11_s36300000_e38400000.fj 13671167412:8098913:chr9_band12_s38400000_e41000000.fj 13679266325:7680539:chr9_band13_s41000000_e43600000.fj 13686946864:11204600:chr9_band14_s43600000_e47300000.fj 13698151464:54388:chr9_band15_s47300000_e49000000.fj 13698205852:3468752:chr9_band16_s49000000_e50700000.fj 13701674604:31370255:chr9_band17_s50700000_e65900000.fj 13733044859:8507254:chr9_band18_s65900000_e68700000.fj 13741552113:13175993:chr9_band19_s68700000_e72200000.fj 13754728106:12788834:chr9_band1_s2200000_e4600000.fj 13767516940:9460838:chr9_band20_s72200000_e74000000.fj 13776977778:27323515:chr9_band21_s74000000_e79200000.fj 13804301293:9840418:chr9_band22_s79200000_e81100000.fj 13814141711:15804284:chr9_band23_s81100000_e84100000.fj 13829945995:14087052:chr9_band24_s84100000_e86900000.fj 13844033047:17922209:chr9_band25_s86900000_e90400000.fj 13861955256:6951532:chr9_band26_s90400000_e91800000.fj 13868906788:10179374:chr9_band27_s91800000_e93900000.fj 13879086162:13769787:chr9_band28_s93900000_e96600000.fj 13892855949:13598284:chr9_band29_s96600000_e99300000.fj 13906454233:22891146:chr9_band2_s4600000_e9000000.fj 13929345379:16625559:chr9_band30_s99300000_e102600000.fj 13945970938:29592637:chr9_band31_s102600000_e108200000.fj 13975563575:16101070:chr9_band32_s108200000_e111300000.fj 13991664645:18810443:chr9_band33_s111300000_e114900000.fj 
14010475088:14696084:chr9_band34_s114900000_e117700000.fj 14025171172:25738558:chr9_band35_s117700000_e122500000.fj 14050909730:17151669:chr9_band36_s122500000_e125800000.fj 14068061399:23282243:chr9_band37_s125800000_e130300000.fj 14091343642:15933520:chr9_band38_s130300000_e133500000.fj 14107277162:2539339:chr9_band39_s133500000_e134000000.fj 14109816501:28353605:chr9_band3_s9000000_e14200000.fj 14138170106:9895904:chr9_band40_s134000000_e135900000.fj 14148066010:7847526:chr9_band41_s135900000_e137400000.fj 14155913536:19420968:chr9_band42_s137400000_e141213431.fj 14175334504:12634378:chr9_band4_s14200000_e16600000.fj 14187968882:10157396:chr9_band5_s16600000_e18500000.fj 14198126278:7156443:chr9_band6_s18500000_e19900000.fj 14205282721:29952199:chr9_band7_s19900000_e25600000.fj 14235234920:12640911:chr9_band8_s25600000_e28000000.fj 14247875831:28005247:chr9_band9_s28000000_e33200000.fj 14275881078:61284:chrM_band0_s0_e16571.fj 14275942362:14544713:chrX_band0_s0_e4300000.fj 14290487075:24455427:chrX_band10_s37600000_e42400000.fj 14314942502:19979357:chrX_band11_s42400000_e46400000.fj 14334921859:15780934:chrX_band12_s46400000_e49800000.fj 14350702793:22068346:chrX_band13_s49800000_e54800000.fj 14372771139:15483950:chrX_band14_s54800000_e58100000.fj 14388255089:2245852:chrX_band15_s58100000_e60600000.fj 14390500941:8206011:chrX_band16_s60600000_e63000000.fj 14398706952:7456077:chrX_band17_s63000000_e64600000.fj 14406163029:15544112:chrX_band18_s64600000_e67800000.fj 14421707141:19435511:chrX_band19_s67800000_e71800000.fj 14441142652:8840055:chrX_band1_s4300000_e6000000.fj 14449982707:9325419:chrX_band20_s71800000_e73900000.fj 14459308126:9811498:chrX_band21_s73900000_e76000000.fj 14469119624:42415433:chrX_band22_s76000000_e84600000.fj 14511535057:8048575:chrX_band23_s84600000_e86200000.fj 14519583632:22647045:chrX_band24_s86200000_e91800000.fj 14542230677:7507452:chrX_band25_s91800000_e93500000.fj 14549738129:24532176:chrX_band26_s93500000_e98300000.fj 
14574270305:20702445:chrX_band27_s98300000_e102600000.fj 14594972750:5393310:chrX_band28_s102600000_e103700000.fj 14600366060:25038697:chrX_band29_s103700000_e108700000.fj 14625404757:17528792:chrX_band2_s6000000_e9500000.fj 14642933549:39123936:chrX_band30_s108700000_e116500000.fj 14682057485:21530282:chrX_band31_s116500000_e120900000.fj 14703587767:39763257:chrX_band32_s120900000_e128700000.fj 14743351024:8637631:chrX_band33_s128700000_e130400000.fj 14751988655:16073438:chrX_band34_s130400000_e133600000.fj 14768062093:21768801:chrX_band35_s133600000_e138000000.fj 14789830894:11318859:chrX_band36_s138000000_e140300000.fj 14801149753:8828151:chrX_band37_s140300000_e142100000.fj 14809977904:25421420:chrX_band38_s142100000_e147100000.fj 14835399324:38360486:chrX_band39_s147100000_e155270560.fj 14873759810:38788715:chrX_band3_s9500000_e17100000.fj 14912548525:11066673:chrX_band4_s17100000_e19300000.fj 14923615198:13128734:chrX_band5_s19300000_e21900000.fj 14936743932:15238795:chrX_band6_s21900000_e24900000.fj 14951982727:22561443:chrX_band7_s24900000_e29300000.fj 14974544170:11364840:chrX_band8_s29300000_e31500000.fj 14985909010:31367794:chrX_band9_s31500000_e37600000.fj
+
+
+
+
+
+
diff --git a/sdk/go/manifest/testdata/short_manifest b/sdk/go/manifest/testdata/short_manifest
new file mode 100644 (file)
index 0000000..bb9336c
--- /dev/null
@@ -0,0 +1,6 @@
+. b746e3d2104645f2f64cd3cc69dd895d+15693477+E2866e643690156651c03d876e638e674dcd79475@5441920c 0:15893477:chr10_band0_s0_e3000000.fj
+
+
+
+
+
diff --git a/sdk/go/util/util.go b/sdk/go/util/util.go
new file mode 100644 (file)
index 0000000..6bc8625
--- /dev/null
@@ -0,0 +1,34 @@
+/* Helper methods for dealing with responses from API Server. */
+
+package util
+
+import (
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+)
+
+func UserIsAdmin(arv arvadosclient.ArvadosClient) (is_admin bool, err error) {
+       type user struct {
+               IsAdmin bool `json:"is_admin"`
+       }
+       var u user
+       err = arv.Call("GET", "users", "", "current", nil, &u)
+       return u.IsAdmin, err
+}
+
+// Returns the total count of a particular type of resource
+//
+//   resource - the arvados resource to count
+// return
+//   count - the number of items of type resource the api server reports, if no error
+//   err - error accessing the resource, or nil if no error
+func NumberItemsAvailable(client arvadosclient.ArvadosClient, resource string) (count int, err error) {
+       var response struct {
+               ItemsAvailable int `json:"items_available"`
+       }
+       sdkParams := arvadosclient.Dict{"limit": 0}
+       err = client.List(resource, sdkParams, &response)
+       if err == nil {
+               count = response.ItemsAvailable
+       }
+       return
+}
index c618fc3c6623ef18e8663609340b55bcce219f78..2f1f74044d78a7fee4ce6337f1d649b69a587df6 100644 (file)
@@ -13,7 +13,6 @@ import errors
 import util
 
 _logger = logging.getLogger('arvados.api')
-conncache = {}
 
 class CredentialsFromToken(object):
     def __init__(self, api_token):
@@ -80,11 +79,8 @@ def api(version=None, cache=True, host=None, token=None, insecure=False, **kwarg
     Arguments:
     * version: A string naming the version of the Arvados API to use (for
       example, 'v1').
-    * cache: If True (default), return an existing Resources object if
-      one already exists with the same endpoint and credentials. If
-      False, create a new one, and do not keep it in the cache (i.e.,
-      do not return it from subsequent api(cache=True) calls with
-      matching endpoint and credentials).
+    * cache: Use a cache (~/.cache/arvados/discovery) for the discovery
+      document.
     * host: The Arvados API server host (and optional :port) to connect to.
     * token: The authentication token to send with each API call.
     * insecure: If True, ignore SSL certificate validation errors.
@@ -132,12 +128,6 @@ def api(version=None, cache=True, host=None, token=None, insecure=False, **kwarg
         kwargs['discoveryServiceUrl'] = (
             'https://%s/discovery/v1/apis/{api}/{apiVersion}/rest' % (host,))
 
-    if cache:
-        connprofile = (version, host, token, insecure)
-        svc = conncache.get(connprofile)
-        if svc:
-            return svc
-
     if 'http' not in kwargs:
         http_kwargs = {}
         # Prefer system's CA certificates (if available) over httplib2's.
@@ -156,6 +146,4 @@ def api(version=None, cache=True, host=None, token=None, insecure=False, **kwarg
     svc = apiclient_discovery.build('arvados', version, **kwargs)
     svc.api_token = token
     kwargs['http'].cache = None
-    if cache:
-        conncache[connprofile] = svc
     return svc
index d530f58b03e70f2983280bf673c937df0653669f..7bfdf782f8d06b03d6ac482fa64872d1eb8ff9be 100644 (file)
@@ -304,7 +304,7 @@ class _WriterFile(ArvadosFileBase):
 class CollectionWriter(CollectionBase):
     KEEP_BLOCK_SIZE = 2**26
 
-    def __init__(self, api_client=None, num_retries=0):
+    def __init__(self, api_client=None, num_retries=0, replication=None):
         """Instantiate a CollectionWriter.
 
         CollectionWriter lets you build a new Arvados Collection from scratch.
@@ -320,9 +320,13 @@ class CollectionWriter(CollectionBase):
           service requests.  Default 0.  You may change this value
           after instantiation, but note those changes may not
           propagate to related objects like the Keep client.
+        * replication: The number of copies of each block to store.
+          If this argument is None or not supplied, replication is
+          the server-provided default if available, otherwise 2.
         """
         self._api_client = api_client
         self.num_retries = num_retries
+        self.replication = (2 if replication is None else replication)
         self._keep_client = None
         self._data_buffer = []
         self._data_buffer_len = 0
@@ -477,7 +481,9 @@ class CollectionWriter(CollectionBase):
         data_buffer = ''.join(self._data_buffer)
         if data_buffer:
             self._current_stream_locators.append(
-                self._my_keep().put(data_buffer[0:self.KEEP_BLOCK_SIZE]))
+                self._my_keep().put(
+                    data_buffer[0:self.KEEP_BLOCK_SIZE],
+                    copies=self.replication))
             self._data_buffer = [data_buffer[self.KEEP_BLOCK_SIZE:]]
             self._data_buffer_len = len(self._data_buffer[0])
 
@@ -552,8 +558,16 @@ class CollectionWriter(CollectionBase):
         self._current_file_name = None
 
     def finish(self):
-        # Store the manifest in Keep and return its locator.
-        return self._my_keep().put(self.manifest_text())
+        """Store the manifest in Keep and return its locator.
+
+        This is useful for storing manifest fragments (task outputs)
+        temporarily in Keep during a Crunch job.
+
+        In other cases you should make a collection instead, by
+        sending manifest_text() to the API server's "create
+        collection" endpoint.
+        """
+        return self._my_keep().put(self.manifest_text(), copies=self.replication)
 
     def portable_data_hash(self):
         stripped = self.stripped_manifest()
@@ -587,10 +601,9 @@ class ResumableCollectionWriter(CollectionWriter):
                    '_data_buffer', '_dependencies', '_finished_streams',
                    '_queued_dirents', '_queued_trees']
 
-    def __init__(self, api_client=None, num_retries=0):
+    def __init__(self, api_client=None, **kwargs):
         self._dependencies = {}
-        super(ResumableCollectionWriter, self).__init__(
-            api_client, num_retries=num_retries)
+        super(ResumableCollectionWriter, self).__init__(api_client, **kwargs)
 
     @classmethod
     def from_state(cls, state, *init_args, **init_kwargs):
index 7da23acb5074a217a8ba70d0000d7e519cf35139..b5280f4bb5fdfede8a7afffe9c058b2e095ea5ed 100755 (executable)
@@ -163,8 +163,7 @@ def api_for_instance(instance_name):
         client = arvados.api('v1',
                              host=cfg['ARVADOS_API_HOST'],
                              token=cfg['ARVADOS_API_TOKEN'],
-                             insecure=api_is_insecure,
-                             cache=False)
+                             insecure=api_is_insecure)
     else:
         abort('need ARVADOS_API_HOST and ARVADOS_API_TOKEN for {}'.format(instance_name))
     return client
index d070a8b9c79037b8ea22de911861a6544f773cb1..f556e7ecb598eb0381400709d7edd272c76c7be3 100644 (file)
@@ -110,6 +110,13 @@ Print the portable data hash instead of the Arvados UUID for the collection
 created by the upload.
 """)
 
+upload_opts.add_argument('--replication', type=int, metavar='N', default=None,
+                         help="""
+Set the replication level for the new collection: how many different
+physical storage devices (e.g., disks) should have a copy of each data
+block. Default is to use the server-provided default (if any) or 2.
+""")
+
 run_opts = argparse.ArgumentParser(add_help=False)
 
 run_opts.add_argument('--project-uuid', metavar='UUID', help="""
@@ -253,24 +260,23 @@ class ArvPutCollectionWriter(arvados.ResumableCollectionWriter):
     STATE_PROPS = (arvados.ResumableCollectionWriter.STATE_PROPS +
                    ['bytes_written', '_seen_inputs'])
 
-    def __init__(self, cache=None, reporter=None, bytes_expected=None,
-                 api_client=None, num_retries=0):
+    def __init__(self, cache=None, reporter=None, bytes_expected=None, **kwargs):
         self.bytes_written = 0
         self._seen_inputs = []
         self.cache = cache
         self.reporter = reporter
         self.bytes_expected = bytes_expected
-        super(ArvPutCollectionWriter, self).__init__(
-            api_client, num_retries=num_retries)
+        super(ArvPutCollectionWriter, self).__init__(**kwargs)
 
     @classmethod
     def from_cache(cls, cache, reporter=None, bytes_expected=None,
-                   num_retries=0):
+                   num_retries=0, replication=0):
         try:
             state = cache.load()
             state['_data_buffer'] = [base64.decodestring(state['_data_buffer'])]
             writer = cls.from_state(state, cache, reporter, bytes_expected,
-                                    num_retries=num_retries)
+                                    num_retries=num_retries,
+                                    replication=replication)
         except (TypeError, ValueError,
                 arvados.errors.StaleWriterStateError) as error:
             return cls(cache, reporter, bytes_expected, num_retries=num_retries)
@@ -402,6 +408,19 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
         print >>stderr, error
         sys.exit(1)
 
+    # write_copies diverges from args.replication here.
+    # args.replication is how many copies we will instruct Arvados to
+    # maintain (by passing it in collections().create()) after all
+    # data is written -- and if None was given, we'll use None there.
+    # Meanwhile, write_copies is how many copies of each data block we
+    # write to Keep, which has to be a number.
+    #
+    # If we simply changed args.replication from None to a default
+    # here, we'd end up erroneously passing the default replication
+    # level (instead of None) to collections().create().
+    write_copies = (args.replication or
+                    api_client._rootDesc.get('defaultCollectionReplication', 2))
+
     if args.progress:
         reporter = progress_writer(human_progress)
     elif args.batch_progress:
@@ -423,11 +442,15 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
             sys.exit(1)
 
     if resume_cache is None:
-        writer = ArvPutCollectionWriter(resume_cache, reporter, bytes_expected,
-                                        num_retries=args.retries)
+        writer = ArvPutCollectionWriter(
+            resume_cache, reporter, bytes_expected,
+            num_retries=args.retries,
+            replication=write_copies)
     else:
         writer = ArvPutCollectionWriter.from_cache(
-            resume_cache, reporter, bytes_expected, num_retries=args.retries)
+            resume_cache, reporter, bytes_expected,
+            num_retries=args.retries,
+            replication=write_copies)
 
     # Install our signal handler for each code in CAUGHT_SIGNALS, and save
     # the originals.
@@ -464,12 +487,17 @@ def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
             manifest_text = writer.manifest_text()
             if args.normalize:
                 manifest_text = CollectionReader(manifest_text).manifest_text(normalize=True)
+            replication_attr = 'replication_desired'
+            if api_client._schema.schemas['Collection']['properties'].get(replication_attr, None) is None:
+                # API called it 'redundancy' before #3410.
+                replication_attr = 'redundancy'
             # Register the resulting collection in Arvados.
             collection = api_client.collections().create(
                 body={
                     'owner_uuid': project_uuid,
                     'name': collection_name,
-                    'manifest_text': manifest_text
+                    'manifest_text': manifest_text,
+                    replication_attr: args.replication,
                     },
                 ensure_unique_name=True
                 ).execute(num_retries=args.retries)
index 04e3f6414b338f47ee88a228d84eca1ea5389344..9bce9971076372394fdf2e5c1f9523eda9d115ec 100644 (file)
@@ -46,7 +46,7 @@ def main(arguments=None):
             ws.subscribe([['object_uuid', 'in', [args.pipeline] + list(pipeline_jobs)]])
             known_component_jobs = pipeline_jobs
 
-    api = arvados.api('v1', cache=False)
+    api = arvados.api('v1')
 
     if args.uuid:
         filters += [ ['object_uuid', '=', args.uuid] ]
@@ -85,7 +85,7 @@ def main(arguments=None):
             print json.dumps(ev)
 
     try:
-        ws = subscribe(arvados.api('v1', cache=False), filters, on_message, poll_fallback=args.poll_interval)
+        ws = subscribe(arvados.api('v1'), filters, on_message, poll_fallback=args.poll_interval)
         if ws:
             if args.pipeline:
                 c = api.pipeline_instances().get(uuid=args.pipeline).execute()
index f70fa17149711d1d869af4efa104d638b8d64ebc..16f4096572c422cf37a93a7110b19c45aa8f1061 100644 (file)
@@ -74,3 +74,5 @@ class NoKeepServersError(Exception):
     pass
 class StaleWriterStateError(Exception):
     pass
+class FeatureNotEnabledError(Exception):
+    pass
index f65486a7f1775e7cedb2bc3e40b2b5d7e9095c9e..09f2a871a966522201f5808e09fb37a820b1baf5 100644 (file)
@@ -1,24 +1,34 @@
-from ws4py.client.threadedclient import WebSocketClient
-import threading
+import arvados
+import config
+import errors
+
+import logging
 import json
-import os
+import threading
 import time
-import ssl
+import os
 import re
-import config
-import logging
-import arvados
+import ssl
+from ws4py.client.threadedclient import WebSocketClient
 
 _logger = logging.getLogger('arvados.events')
 
 class EventClient(WebSocketClient):
     def __init__(self, url, filters, on_event):
-        ssl_options = None
-        if re.match(r'(?i)^(true|1|yes)$',
-                    config.get('ARVADOS_API_HOST_INSECURE', 'no')):
-            ssl_options={'cert_reqs': ssl.CERT_NONE}
+        # Prefer system's CA certificates (if available)
+        ssl_options = {}
+        certs_path = '/etc/ssl/certs/ca-certificates.crt'
+        if os.path.exists(certs_path):
+            ssl_options['ca_certs'] = certs_path
+        if config.flag_is_true('ARVADOS_API_HOST_INSECURE'):
+            ssl_options['cert_reqs'] = ssl.CERT_NONE
         else:
-            ssl_options={'cert_reqs': ssl.CERT_REQUIRED}
+            ssl_options['cert_reqs'] = ssl.CERT_REQUIRED
+
+        # Warning: If the host part of url resolves to both IPv6 and
+        # IPv4 addresses (common with "localhost"), only one of them
+        # will be attempted -- and it might not be the right one. See
+        # ws4py's WebSocketBaseClient.__init__.
         super(EventClient, self).__init__(url, ssl_options=ssl_options)
         self.filters = filters
         self.on_event = on_event
@@ -55,6 +65,7 @@ class PollClient(threading.Thread):
             self.filters = [[]]
         self.on_event = on_event
         self.poll_time = poll_time
+        self.daemon = True
         self.stop = threading.Event()
 
     def run(self):
@@ -102,29 +113,41 @@ class PollClient(threading.Thread):
         del self.filters[self.filters.index(filters)]
 
 
+def _subscribe_websocket(api, filters, on_event):
+    endpoint = api._rootDesc.get('websocketUrl', None)
+    if not endpoint:
+        raise errors.FeatureNotEnabledError(
+            "Server does not advertise a websocket endpoint")
+    uri_with_token = "{}?api_token={}".format(endpoint, api.api_token)
+    client = EventClient(uri_with_token, filters, on_event)
+    ok = False
+    try:
+        client.connect()
+        ok = True
+        return client
+    finally:
+        if not ok:
+            client.close_connection()
+
 def subscribe(api, filters, on_event, poll_fallback=15):
-    '''
-    api: Must be a newly created from arvados.api(cache=False), not shared with the caller, as it may be used by a background thread.
-    filters: Initial subscription filters.
-    on_event: The callback when a message is received
-    poll_fallback: If websockets are not available, fall back to polling every N seconds.  If poll_fallback=False, this will return None if websockets are not available.
-    '''
-    ws = None
-    if 'websocketUrl' in api._rootDesc:
-        try:
-            url = "{}?api_token={}".format(api._rootDesc['websocketUrl'], api.api_token)
-            ws = EventClient(url, filters, on_event)
-            ws.connect()
-            return ws
-        except Exception as e:
-            _logger.warn("Got exception %s trying to connect to websockets at %s" % (e, api._rootDesc['websocketUrl']))
-            if ws:
-                ws.close_connection()
-    if poll_fallback:
-        _logger.warn("Websockets not available, falling back to log table polling")
-        p = PollClient(api, filters, on_event, poll_fallback)
-        p.start()
-        return p
-    else:
-        _logger.error("Websockets not available")
-        return None
+    """
+    :api:
+      a client object retrieved from arvados.api(). The caller should not use this client object for anything else after calling subscribe().
+    :filters:
+      Initial subscription filters.
+    :on_event:
+      The callback when a message is received.
+    :poll_fallback:
+      If websockets are not available, fall back to polling every N seconds.  If poll_fallback=False, this will return None if websockets are not available.
+    """
+
+    if not poll_fallback:
+        return _subscribe_websocket(api, filters, on_event)
+
+    try:
+        return _subscribe_websocket(api, filters, on_event)
+    except Exception as e:
+        _logger.warn("Falling back to polling after websocket error: %s" % e)
+    p = PollClient(api, filters, on_event, poll_fallback)
+    p.start()
+    return p
index 7c53339650f622260263cbded25e16622cf77189..262e68864db7a7e12847a138de9922c489f473e2 100644 (file)
@@ -736,10 +736,18 @@ class KeepClient(object):
                 "failed to write {} (wanted {} copies but wrote {})".format(
                     data_hash, copies, thread_limiter.done()), service_errors)
 
-    # Local storage methods need no-op num_retries arguments to keep
-    # integration tests happy.  With better isolation they could
-    # probably be removed again.
-    def local_store_put(self, data, num_retries=0):
+    def local_store_put(self, data, copies=1, num_retries=None):
+        """A stub for put().
+
+        This method is used in place of the real put() method when
+        using local storage (see constructor's local_store argument).
+
+        copies and num_retries arguments are ignored: they are here
+        only for the sake of offering the same call signature as
+        put().
+
+        Data stored this way can be retrieved via local_store_get().
+        """
         md5 = hashlib.md5(data).hexdigest()
         locator = '%s+%d' % (md5, len(data))
         with open(os.path.join(self.local_store, md5 + '.tmp'), 'w') as f:
@@ -748,7 +756,8 @@ class KeepClient(object):
                   os.path.join(self.local_store, md5))
         return locator
 
-    def local_store_get(self, loc_s, num_retries=0):
+    def local_store_get(self, loc_s, num_retries=None):
+        """Companion to local_store_put()."""
         try:
             locator = KeepLocator(loc_s)
         except ValueError:
diff --git a/sdk/python/gittaggers.py b/sdk/python/gittaggers.py
new file mode 100644 (file)
index 0000000..9223443
--- /dev/null
@@ -0,0 +1,20 @@
+from setuptools.command.egg_info import egg_info
+import subprocess
+import time
+
+class EggInfoFromGit(egg_info):
+    """Tag the build with the git commit timestamp.
+
+    If a build tag has already been set (e.g., "egg_info -b", building
+    from source package), leave it alone.
+    """
+    def git_timestamp_tag(self):
+        gitinfo = subprocess.check_output(
+            ['git', 'log', '--first-parent', '--max-count=1',
+             '--format=format:%ct', '.']).strip()
+        return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
+
+    def tags(self):
+        if self.tag_build is None:
+            self.tag_build = self.git_timestamp_tag()
+        return egg_info.tags(self)
index 754d89bdad70e120367de344799fe337a0b4e1ab..ca28025fea64ae6b388af4388337814494a8c0c8 100644 (file)
@@ -1,32 +1,19 @@
 #!/usr/bin/env python
 
 import os
-import subprocess
-import time
+import sys
+import setuptools.command.egg_info as egg_info_cmd
 
 from setuptools import setup, find_packages
-from setuptools.command.egg_info import egg_info
 
-SETUP_DIR = os.path.dirname(__file__)
+SETUP_DIR = os.path.dirname(__file__) or '.'
 README = os.path.join(SETUP_DIR, 'README.rst')
 
-class TagBuildWithCommit(egg_info):
-    """Tag the build with the sha1 and date of the last git commit.
-
-    If a build tag has already been set (e.g., "egg_info -b", building
-    from source package), leave it alone.
-    """
-    def tags(self):
-        if self.tag_build is None:
-            git_tags = subprocess.check_output(
-                ['git', 'log', '--first-parent', '--max-count=1',
-                 '--format=format:%ct %h', SETUP_DIR]).split()
-            assert len(git_tags) == 2
-            git_tags[0] = time.strftime(
-                '%Y%m%d%H%M%S', time.gmtime(int(git_tags[0])))
-            self.tag_build = '.{}+{}'.format(*git_tags)
-        return egg_info.tags(self)
-
+try:
+    import gittaggers
+    tagger = gittaggers.EggInfoFromGit
+except ImportError:
+    tagger = egg_info_cmd.egg_info
 
 setup(name='arvados-python-client',
       version='0.1',
@@ -59,5 +46,5 @@ setup(name='arvados-python-client',
       test_suite='tests',
       tests_require=['mock>=1.0', 'PyYAML'],
       zip_safe=False,
-      cmdclass={'egg_info': TagBuildWithCommit},
+      cmdclass={'egg_info': tagger},
       )
index 04ca6b5e10606ac5d6825242d56b47bb64256bcb..378e93f3861633f82af2b253515877833c21052d 100644 (file)
@@ -1,5 +1,6 @@
 #!/usr/bin/env python
 
+import arvados
 import errno
 import hashlib
 import httplib
@@ -7,6 +8,7 @@ import httplib2
 import io
 import mock
 import os
+import Queue
 import requests
 import shutil
 import tempfile
@@ -18,6 +20,18 @@ TEST_HOST = '100::'
 
 skip_sleep = mock.patch('time.sleep', lambda n: None)  # clown'll eat me
 
+def queue_with(items):
+    """Return a thread-safe iterator that yields the given items.
+
+    +items+ can be given as an array or an iterator. If an iterator is
+    given, it will be consumed to fill the queue before queue_with()
+    returns.
+    """
+    queue = Queue.Queue()
+    for val in items:
+        queue.put(val)
+    return lambda *args, **kwargs: queue.get(block=False)
+
 # fake_httplib2_response and mock_responses
 # mock calls to httplib2.Http.request()
 def fake_httplib2_response(code, **headers):
@@ -26,8 +40,8 @@ def fake_httplib2_response(code, **headers):
     return httplib2.Response(headers)
 
 def mock_responses(body, *codes, **headers):
-    return mock.patch('httplib2.Http.request', side_effect=(
-            (fake_httplib2_response(code, **headers), body) for code in codes))
+    return mock.patch('httplib2.Http.request', side_effect=queue_with((
+        (fake_httplib2_response(code, **headers), body) for code in codes)))
 
 # fake_requests_response, mock_get_responses and mock_put_responses
 # mock calls to requests.get() and requests.put()
@@ -40,16 +54,16 @@ def fake_requests_response(code, body, **headers):
     return r
 
 def mock_get_responses(body, *codes, **headers):
-    return mock.patch('requests.get', side_effect=(
-        fake_requests_response(code, body, **headers) for code in codes))
+    return mock.patch('requests.get', side_effect=queue_with((
+        fake_requests_response(code, body, **headers) for code in codes)))
 
 def mock_put_responses(body, *codes, **headers):
-    return mock.patch('requests.put', side_effect=(
-        fake_requests_response(code, body, **headers) for code in codes))
+    return mock.patch('requests.put', side_effect=queue_with((
+        fake_requests_response(code, body, **headers) for code in codes)))
 
 def mock_requestslib_responses(method, body, *codes, **headers):
-    return mock.patch(method, side_effect=(
-        fake_requests_response(code, body, **headers) for code in codes))
+    return mock.patch(method, side_effect=queue_with((
+        fake_requests_response(code, body, **headers) for code in codes)))
 
 class MockStreamReader(object):
     def __init__(self, name='.', *data):
@@ -66,6 +80,40 @@ class MockStreamReader(object):
         return self._data[start:start + size]
 
 
+class ApiClientMock(object):
+    def api_client_mock(self):
+        return mock.MagicMock(name='api_client_mock')
+
+    def mock_keep_services(self, api_mock=None, status=200, count=12,
+                           service_type='disk',
+                           service_host=None,
+                           service_port=None,
+                           service_ssl_flag=False):
+        if api_mock is None:
+            api_mock = self.api_client_mock()
+        body = {
+            'items_available': count,
+            'items': [{
+                'uuid': 'zzzzz-bi6l4-{:015x}'.format(i),
+                'owner_uuid': 'zzzzz-tpzed-000000000000000',
+                'service_host': service_host or 'keep0x{:x}'.format(i),
+                'service_port': service_port or 65535-i,
+                'service_ssl_flag': service_ssl_flag,
+                'service_type': service_type,
+            } for i in range(0, count)]
+        }
+        self._mock_api_call(api_mock.keep_services().accessible, status, body)
+        return api_mock
+
+    def _mock_api_call(self, mock_method, code, body):
+        mock_method = mock_method().execute
+        if code == 200:
+            mock_method.return_value = body
+        else:
+            mock_method.side_effect = arvados.errors.ApiError(
+                fake_httplib2_response(code), "{}")
+
+
 class ArvadosBaseTestCase(unittest.TestCase):
     # This class provides common utility functions for our tests.
 
index 739c75499509fabb24312bdd6c87a9cb5d8f48e8..18011af7a64e61dd43b3d2f40ec7400ff6324d9c 100644 (file)
@@ -1,10 +1,17 @@
 #!/usr/bin/env python
 
 import argparse
+import atexit
+import httplib2
 import os
+import pipes
+import random
+import re
 import shutil
 import signal
+import socket
 import subprocess
+import string
 import sys
 import tempfile
 import time
@@ -21,18 +28,19 @@ if __name__ == '__main__' and os.path.exists(
 import arvados.api
 import arvados.config
 
-SERVICES_SRC_DIR = os.path.join(MY_DIRNAME, '../../../services')
-SERVER_PID_PATH = 'tmp/pids/webrick-test.pid'
-WEBSOCKETS_SERVER_PID_PATH = 'tmp/pids/passenger-test.pid'
+ARVADOS_DIR = os.path.realpath(os.path.join(MY_DIRNAME, '../../..'))
+SERVICES_SRC_DIR = os.path.join(ARVADOS_DIR, 'services')
+SERVER_PID_PATH = 'tmp/pids/test-server.pid'
 if 'GOPATH' in os.environ:
     gopaths = os.environ['GOPATH'].split(':')
     gobins = [os.path.join(path, 'bin') for path in gopaths]
     os.environ['PATH'] = ':'.join(gobins) + ':' + os.environ['PATH']
 
-if os.path.isdir('tests'):
-    TEST_TMPDIR = 'tests/tmp'
-else:
-    TEST_TMPDIR = 'tmp'
+TEST_TMPDIR = os.path.join(ARVADOS_DIR, 'tmp')
+if not os.path.exists(TEST_TMPDIR):
+    os.mkdir(TEST_TMPDIR)
+
+my_api_host = None
 
 def find_server_pid(PID_PATH, wait=10):
     now = time.time()
@@ -55,90 +63,201 @@ def find_server_pid(PID_PATH, wait=10):
 
     return server_pid
 
-def kill_server_pid(PID_PATH, wait=10):
+def kill_server_pid(pidfile, wait=10, passenger_root=False):
+    # Must re-import modules in order to work during atexit
+    import os
+    import signal
+    import subprocess
+    import time
     try:
+        if passenger_root:
+            # First try to shut down nicely
+            restore_cwd = os.getcwd()
+            os.chdir(passenger_root)
+            subprocess.call([
+                'bundle', 'exec', 'passenger', 'stop', '--pid-file', pidfile])
+            os.chdir(restore_cwd)
         now = time.time()
         timeout = now + wait
-        with open(PID_PATH, 'r') as f:
+        with open(pidfile, 'r') as f:
             server_pid = int(f.read())
         while now <= timeout:
-            os.kill(server_pid, signal.SIGTERM)
-            os.getpgid(server_pid) # throw OSError if no such pid
-            now = time.time()
+            if not passenger_root or timeout - now < wait / 2:
+                # Half timeout has elapsed. Start sending SIGTERM
+                os.kill(server_pid, signal.SIGTERM)
+            # Raise OSError if process has disappeared
+            os.getpgid(server_pid)
             time.sleep(0.1)
+            now = time.time()
     except IOError:
-        good_pid = False
+        pass
     except OSError:
-        good_pid = False
-
-def run(websockets=False, reuse_server=False):
-    cwd = os.getcwd()
-    os.chdir(os.path.join(SERVICES_SRC_DIR, 'api'))
-
-    if websockets:
-        pid_file = WEBSOCKETS_SERVER_PID_PATH
-    else:
-        pid_file = SERVER_PID_PATH
-
-    test_pid = find_server_pid(pid_file, 0)
-
-    if test_pid is None or not reuse_server:
-        # do not try to run both server variants at once
-        stop()
-
-        # delete cached discovery document
-        shutil.rmtree(arvados.http_cache('discovery'))
-
-        # Setup database
-        os.environ["RAILS_ENV"] = "test"
-        subprocess.call(['bundle', 'exec', 'rake', 'tmp:cache:clear'])
-        subprocess.call(['bundle', 'exec', 'rake', 'db:test:load'])
-        subprocess.call(['bundle', 'exec', 'rake', 'db:fixtures:load'])
+        pass
 
-        subprocess.call(['bundle', 'exec', 'rails', 'server', '-d',
-                         '--pid',
-                         os.path.join(os.getcwd(), SERVER_PID_PATH),
-                         '-p3000'])
-        os.environ["ARVADOS_API_HOST"] = "127.0.0.1:3000"
+def find_available_port():
+    """Return an IPv4 port number that is not in use right now.
 
-        if websockets:
-            os.environ["ARVADOS_WEBSOCKETS"] = "ws-only"
-            subprocess.call(['bundle', 'exec',
-                             'passenger', 'start', '-d', '-p3333',
-                             '--pid-file',
-                             os.path.join(os.getcwd(), WEBSOCKETS_SERVER_PID_PATH)
-                         ])
+    We assume whoever needs to use the returned port is able to reuse
+    a recently used port without waiting for TIME_WAIT (see
+    SO_REUSEADDR / SO_REUSEPORT).
 
-        pid = find_server_pid(SERVER_PID_PATH)
+    Some opportunity for races here, but it's better than choosing
+    something at random and not checking at all. If all of our servers
+    (hey Passenger) knew that listening on port 0 was a thing, the OS
+    would take care of the races, and this wouldn't be needed at all.
+    """
 
-    os.environ["ARVADOS_API_HOST_INSECURE"] = "true"
-    os.environ["ARVADOS_API_TOKEN"] = ""
-    os.chdir(cwd)
+    sock = socket.socket()
+    sock.bind(('0.0.0.0', 0))
+    port = sock.getsockname()[1]
+    sock.close()
+    return port
 
-def stop():
-    cwd = os.getcwd()
-    os.chdir(os.path.join(SERVICES_SRC_DIR, 'api'))
+def run(leave_running_atexit=False):
+    """Ensure an API server is running, and ARVADOS_API_* env vars have
+    admin credentials for it.
 
-    kill_server_pid(WEBSOCKETS_SERVER_PID_PATH, 0)
-    kill_server_pid(SERVER_PID_PATH, 0)
+    If ARVADOS_TEST_API_HOST is set, a parent process has started a
+    test server for us to use: we just need to reset() it using the
+    admin token fixture.
 
-    try:
-        os.unlink('self-signed.pem')
-    except:
-        pass
-
-    try:
-        os.unlink('self-signed.key')
-    except:
-        pass
+    If a previous call to run() started a new server process, and it
+    is still running, we just need to reset() it to fixture state and
+    return.
 
-    os.chdir(cwd)
+    If neither of those options work out, we'll really start a new
+    server.
+    """
+    global my_api_host
+
+    # Delete cached discovery document.
+    shutil.rmtree(arvados.http_cache('discovery'))
+
+    pid_file = os.path.join(SERVICES_SRC_DIR, 'api', SERVER_PID_PATH)
+    pid_file_ok = find_server_pid(pid_file, 0)
+
+    existing_api_host = os.environ.get('ARVADOS_TEST_API_HOST', my_api_host)
+    if existing_api_host and pid_file_ok:
+        if existing_api_host == my_api_host:
+            try:
+                return reset()
+            except:
+                # Fall through to shutdown-and-start case.
+                pass
+        else:
+            # Server was provided by parent. Can't recover if it's
+            # unresettable.
+            return reset()
+
+    # Before trying to start up our own server, call stop() to avoid
+    # "Phusion Passenger Standalone is already running on PID 12345".
+    # (If we've gotten this far, ARVADOS_TEST_API_HOST isn't set, so
+    # we know the server is ours to kill.)
+    stop(force=True)
+
+    restore_cwd = os.getcwd()
+    api_src_dir = os.path.join(SERVICES_SRC_DIR, 'api')
+    os.chdir(api_src_dir)
+
+    # Either we haven't started a server of our own yet, or it has
+    # died, or we have lost our credentials, or something else is
+    # preventing us from calling reset(). Start a new one.
+
+    if not os.path.exists('tmp/self-signed.pem'):
+        # We assume here that passenger reports its listening
+        # address as https://0.0.0.0:port/. If it reports "127.0.0.1"
+        # then the certificate won't match the host and reset() will
+        # fail certificate verification. If it reports "localhost",
+        # clients (notably Python SDK's websocket client) might
+        # resolve localhost as ::1 and then fail to connect.
+        subprocess.check_call([
+            'openssl', 'req', '-new', '-x509', '-nodes',
+            '-out', 'tmp/self-signed.pem',
+            '-keyout', 'tmp/self-signed.key',
+            '-days', '3650',
+            '-subj', '/CN=0.0.0.0'],
+        stdout=sys.stderr)
+
+    port = find_available_port()
+    env = os.environ.copy()
+    env['RAILS_ENV'] = 'test'
+    env['ARVADOS_WEBSOCKETS'] = 'yes'
+    env.pop('ARVADOS_TEST_API_HOST', None)
+    env.pop('ARVADOS_API_HOST', None)
+    env.pop('ARVADOS_API_HOST_INSECURE', None)
+    env.pop('ARVADOS_API_TOKEN', None)
+    start_msg = subprocess.check_output(
+        ['bundle', 'exec',
+         'passenger', 'start', '-d', '-p{}'.format(port),
+         '--pid-file', os.path.join(os.getcwd(), pid_file),
+         '--log-file', os.path.join(os.getcwd(), 'log/test.log'),
+         '--ssl',
+         '--ssl-certificate', 'tmp/self-signed.pem',
+         '--ssl-certificate-key', 'tmp/self-signed.key'],
+        env=env)
+
+    if not leave_running_atexit:
+        atexit.register(kill_server_pid, pid_file, passenger_root=api_src_dir)
+
+    match = re.search(r'Accessible via: https://(.*?)/', start_msg)
+    if not match:
+        raise Exception(
+            "Passenger did not report endpoint: {}".format(start_msg))
+    my_api_host = match.group(1)
+    os.environ['ARVADOS_API_HOST'] = my_api_host
+
+    # Make sure the server has written its pid file before continuing
+    find_server_pid(pid_file)
+
+    reset()
+    os.chdir(restore_cwd)
+
+def reset():
+    """Reset the test server to fixture state.
+
+    This resets the ARVADOS_TEST_API_HOST provided by a parent process
+    if any, otherwise the server started by run().
+
+    It also resets ARVADOS_* environment vars to point to the test
+    server with admin credentials.
+    """
+    existing_api_host = os.environ.get('ARVADOS_TEST_API_HOST', my_api_host)
+    token = auth_token('admin')
+    httpclient = httplib2.Http(ca_certs=os.path.join(
+        SERVICES_SRC_DIR, 'api', 'tmp', 'self-signed.pem'))
+    httpclient.request(
+        'https://{}/database/reset'.format(existing_api_host),
+        'POST',
+        headers={'Authorization': 'OAuth2 {}'.format(token)})
+    os.environ['ARVADOS_API_HOST_INSECURE'] = 'true'
+    os.environ['ARVADOS_API_HOST'] = existing_api_host
+    os.environ['ARVADOS_API_TOKEN'] = token
+
+def stop(force=False):
+    """Stop the API server, if one is running.
+
+    If force==False, kill it only if we started it ourselves. (This
+    supports the use case where a Python test suite calls run(), but
+    run() just uses the ARVADOS_TEST_API_HOST provided by the parent
+    process, and the test suite cleans up after itself by calling
+    stop(). In this case the test server provided by the parent
+    process should be left alone.)
+
+    If force==True, kill it even if we didn't start it
+    ourselves. (This supports the use case in __main__, where "run"
+    and "stop" happen in different processes.)
+    """
+    global my_api_host
+    if force or my_api_host is not None:
+        kill_server_pid(os.path.join(SERVICES_SRC_DIR, 'api', SERVER_PID_PATH))
+        my_api_host = None
 
 def _start_keep(n, keep_args):
     keep0 = tempfile.mkdtemp()
+    port = find_available_port()
     keep_cmd = ["keepstore",
                 "-volumes={}".format(keep0),
-                "-listen=:{}".format(25107+n),
+                "-listen=:{}".format(port),
                 "-pid={}".format("{}/keep{}.pid".format(TEST_TMPDIR, n))]
 
     for arg, val in keep_args.iteritems():
@@ -151,12 +270,11 @@ def _start_keep(n, keep_args):
     with open("{}/keep{}.volume".format(TEST_TMPDIR, n), 'w') as f:
         f.write(keep0)
 
+    return port
+
 def run_keep(blob_signing_key=None, enforce_permissions=False):
     stop_keep()
 
-    if not os.path.exists(TEST_TMPDIR):
-        os.mkdir(TEST_TMPDIR)
-
     keep_args = {}
     if blob_signing_key:
         with open(os.path.join(TEST_TMPDIR, "keep.blob_signing_key"), "w") as f:
@@ -165,33 +283,28 @@ def run_keep(blob_signing_key=None, enforce_permissions=False):
     if enforce_permissions:
         keep_args['--enforce-permissions'] = 'true'
 
-    _start_keep(0, keep_args)
-    _start_keep(1, keep_args)
-
-    os.environ["ARVADOS_API_HOST"] = "127.0.0.1:3000"
-    os.environ["ARVADOS_API_HOST_INSECURE"] = "true"
-
-    authorize_with("admin")
-    api = arvados.api('v1', cache=False)
+    api = arvados.api(
+        version='v1',
+        host=os.environ['ARVADOS_API_HOST'],
+        token=os.environ['ARVADOS_API_TOKEN'],
+        insecure=True)
     for d in api.keep_services().list().execute()['items']:
         api.keep_services().delete(uuid=d['uuid']).execute()
     for d in api.keep_disks().list().execute()['items']:
         api.keep_disks().delete(uuid=d['uuid']).execute()
 
-    s1 = api.keep_services().create(body={"keep_service": {
-                "uuid": "zzzzz-bi6l4-5bo5n1iekkjyz6b",
-                "service_host": "localhost",
-                "service_port": 25107,
-                "service_type": "disk"
-                }}).execute()
-    s2 = api.keep_services().create(body={"keep_service": {
-                "uuid": "zzzzz-bi6l4-2nz60e0ksj7vr3s",
-                "service_host": "localhost",
-                "service_port": 25108,
-                "service_type": "disk"
-                }}).execute()
-    api.keep_disks().create(body={"keep_disk": {"keep_service_uuid": s1["uuid"] } }).execute()
-    api.keep_disks().create(body={"keep_disk": {"keep_service_uuid": s2["uuid"] } }).execute()
+    for d in range(0, 2):
+        port = _start_keep(d, keep_args)
+        svc = api.keep_services().create(body={'keep_service': {
+            'uuid': 'zzzzz-bi6l4-keepdisk{:07d}'.format(d),
+            'service_host': 'localhost',
+            'service_port': port,
+            'service_type': 'disk',
+            'service_ssl_flag': False,
+        }}).execute()
+        api.keep_disks().create(body={
+            'keep_disk': {'keep_service_uuid': svc['uuid'] }
+        }).execute()
 
 def _stop_keep(n):
     kill_server_pid("{}/keep{}.pid".format(TEST_TMPDIR, n), 0)
@@ -206,25 +319,34 @@ def stop_keep():
     _stop_keep(0)
     _stop_keep(1)
 
-def run_keep_proxy(auth):
+def run_keep_proxy():
     stop_keep_proxy()
 
-    if not os.path.exists(TEST_TMPDIR):
-        os.mkdir(TEST_TMPDIR)
-
-    os.environ["ARVADOS_API_HOST"] = "127.0.0.1:3000"
-    os.environ["ARVADOS_API_HOST_INSECURE"] = "true"
-    os.environ["ARVADOS_API_TOKEN"] = fixture("api_client_authorizations")[auth]["api_token"]
-
-    kp0 = subprocess.Popen(["keepproxy",
-                            "-pid={}/keepproxy.pid".format(TEST_TMPDIR),
-                            "-listen=:{}".format(25101)])
-
-    authorize_with("admin")
-    api = arvados.api('v1', cache=False)
-    api.keep_services().create(body={"keep_service": {"service_host": "localhost",  "service_port": 25101, "service_type": "proxy"} }).execute()
-
-    os.environ["ARVADOS_KEEP_PROXY"] = "http://localhost:25101"
+    admin_token = auth_token('admin')
+    port = find_available_port()
+    env = os.environ.copy()
+    env['ARVADOS_API_TOKEN'] = admin_token
+    kp = subprocess.Popen(
+        ['keepproxy',
+         '-pid={}/keepproxy.pid'.format(TEST_TMPDIR),
+         '-listen=:{}'.format(port)],
+        env=env)
+
+    api = arvados.api(
+        version='v1',
+        host=os.environ['ARVADOS_API_HOST'],
+        token=admin_token,
+        insecure=True)
+    for d in api.keep_services().list(
+            filters=[['service_type','=','proxy']]).execute()['items']:
+        api.keep_services().delete(uuid=d['uuid']).execute()
+    api.keep_services().create(body={'keep_service': {
+        'service_host': 'localhost',
+        'service_port': port,
+        'service_type': 'proxy',
+        'service_ssl_flag': False,
+    }}).execute()
+    os.environ["ARVADOS_KEEP_PROXY"] = "http://localhost:{}".format(port)
 
 def stop_keep_proxy():
     kill_server_pid(os.path.join(TEST_TMPDIR, "keepproxy.pid"), 0)
@@ -241,9 +363,12 @@ def fixture(fix):
           pass
         return yaml.load(yaml_file)
 
-def authorize_with(token):
-    '''token is the symbolic name of the token from the api_client_authorizations fixture'''
-    arvados.config.settings()["ARVADOS_API_TOKEN"] = fixture("api_client_authorizations")[token]["api_token"]
+def auth_token(token_name):
+    return fixture("api_client_authorizations")[token_name]["api_token"]
+
+def authorize_with(token_name):
+    '''token_name is the symbolic name of the token from the api_client_authorizations fixture'''
+    arvados.config.settings()["ARVADOS_API_TOKEN"] = auth_token(token_name)
     arvados.config.settings()["ARVADOS_API_HOST"] = os.environ.get("ARVADOS_API_HOST")
     arvados.config.settings()["ARVADOS_API_HOST_INSECURE"] = "true"
 
@@ -279,16 +404,15 @@ class TestCaseWithServers(unittest.TestCase):
         cls._orig_environ = os.environ.copy()
         cls._orig_config = arvados.config.settings().copy()
         cls._cleanup_funcs = []
+        os.environ.pop('ARVADOS_KEEP_PROXY', None)
+        os.environ.pop('ARVADOS_EXTERNAL_CLIENT', None)
         for server_kwargs, start_func, stop_func in (
-              (cls.MAIN_SERVER, run, stop),
-              (cls.KEEP_SERVER, run_keep, stop_keep),
-              (cls.KEEP_PROXY_SERVER, run_keep_proxy, stop_keep_proxy)):
+                (cls.MAIN_SERVER, run, reset),
+                (cls.KEEP_SERVER, run_keep, stop_keep),
+                (cls.KEEP_PROXY_SERVER, run_keep_proxy, stop_keep_proxy)):
             if server_kwargs is not None:
                 start_func(**server_kwargs)
                 cls._cleanup_funcs.append(stop_func)
-        os.environ.pop('ARVADOS_EXTERNAL_CLIENT', None)
-        if cls.KEEP_PROXY_SERVER is None:
-            os.environ.pop('ARVADOS_KEEP_PROXY', None)
         if (cls.KEEP_SERVER is None) and (cls.KEEP_PROXY_SERVER is None):
             cls.local_store = tempfile.mkdtemp()
             os.environ['KEEP_LOCAL_STORE'] = cls.local_store
@@ -307,29 +431,34 @@ class TestCaseWithServers(unittest.TestCase):
 
 
 if __name__ == "__main__":
+    actions = ['start', 'stop',
+               'start_keep', 'stop_keep',
+               'start_keep_proxy', 'stop_keep_proxy']
     parser = argparse.ArgumentParser()
-    parser.add_argument('action', type=str, help='''one of "start", "stop", "start_keep", "stop_keep"''')
-    parser.add_argument('--websockets', action='store_true', default=False)
-    parser.add_argument('--reuse', action='store_true', default=False)
-    parser.add_argument('--auth', type=str, help='Print authorization info for given api_client_authorizations fixture')
+    parser.add_argument('action', type=str, help="one of {}".format(actions))
+    parser.add_argument('--auth', type=str, metavar='FIXTURE_NAME', help='Print authorization info for given api_client_authorizations fixture')
     args = parser.parse_args()
 
     if args.action == 'start':
-        run(websockets=args.websockets, reuse_server=args.reuse)
+        stop(force=('ARVADOS_TEST_API_HOST' not in os.environ))
+        run(leave_running_atexit=True)
+        host = os.environ['ARVADOS_API_HOST']
         if args.auth is not None:
-            authorize_with(args.auth)
-            print("export ARVADOS_API_HOST={}".format(arvados.config.settings()["ARVADOS_API_HOST"]))
-            print("export ARVADOS_API_TOKEN={}".format(arvados.config.settings()["ARVADOS_API_TOKEN"]))
-            print("export ARVADOS_API_HOST_INSECURE={}".format(arvados.config.settings()["ARVADOS_API_HOST_INSECURE"]))
+            token = auth_token(args.auth)
+            print("export ARVADOS_API_TOKEN={}".format(pipes.quote(token)))
+            print("export ARVADOS_API_HOST={}".format(pipes.quote(host)))
+            print("export ARVADOS_API_HOST_INSECURE=true")
+        else:
+            print(host)
     elif args.action == 'stop':
-        stop()
+        stop(force=('ARVADOS_TEST_API_HOST' not in os.environ))
     elif args.action == 'start_keep':
         run_keep()
     elif args.action == 'stop_keep':
         stop_keep()
     elif args.action == 'start_keep_proxy':
-        run_keep_proxy("admin")
+        run_keep_proxy()
     elif args.action == 'stop_keep_proxy':
         stop_keep_proxy()
     else:
-        print('Unrecognized action "{}", actions are "start", "stop", "start_keep", "stop_keep"'.format(args.action))
+        print("Unrecognized action '{}'. Actions are: {}.".format(args.action, actions))
index 0d81fdf738caf3d3120725479c8b110d27204c66..5cf2d2b58c8c4086a075b78eee0d34024ab81db1 100644 (file)
@@ -42,21 +42,52 @@ class ArvadosApiClientTest(unittest.TestCase):
                     {'items_available': 0, 'items': []})),
             }
         req_builder = apiclient_http.RequestMockBuilder(mock_responses)
-        cls.api = arvados.api('v1', cache=False,
+        cls.api = arvados.api('v1',
                               host=os.environ['ARVADOS_API_HOST'],
                               token='discovery-doc-only-no-token-needed',
                               insecure=True,
                               requestBuilder=req_builder)
 
-    @classmethod
-    def tearDownClass(cls):
-        run_test_server.stop()
+    def tearDown(cls):
+        run_test_server.reset()
+
+    def test_new_api_objects_with_cache(self):
+        clients = [arvados.api('v1', cache=True,
+                               host=os.environ['ARVADOS_API_HOST'],
+                               token='discovery-doc-only-no-token-needed',
+                               insecure=True)
+                   for index in [0, 1]]
+        self.assertIsNot(*clients)
 
-    def test_basic_list(self):
-        answer = self.api.humans().list(
+    def test_empty_list(self):
+        answer = arvados.api('v1').humans().list(
             filters=[['uuid', 'is', None]]).execute()
         self.assertEqual(answer['items_available'], len(answer['items']))
 
+    def test_nonempty_list(self):
+        answer = arvados.api('v1').collections().list().execute()
+        self.assertNotEqual(0, answer['items_available'])
+        self.assertNotEqual(0, len(answer['items']))
+
+    def test_timestamp_inequality_filter(self):
+        api = arvados.api('v1')
+        new_item = api.specimens().create(body={}).execute()
+        for operator, should_include in [
+                ['<', False], ['>', False],
+                ['<=', True], ['>=', True], ['=', True]]:
+            response = api.specimens().list(filters=[
+                ['created_at', operator, new_item['created_at']],
+                # Also filter by uuid to ensure (if it matches) it's on page 0
+                ['uuid', '=', new_item['uuid']]]).execute()
+            uuids = [item['uuid'] for item in response['items']]
+            did_include = new_item['uuid'] in uuids
+            self.assertEqual(
+                did_include, should_include,
+                "'%s %s' filter should%s have matched '%s'" % (
+                    operator, new_item['created_at'],
+                    ('' if should_include else ' not'),
+                    new_item['created_at']))
+
     def test_exceptions_include_errors(self):
         with self.assertRaises(apiclient_errors.HttpError) as err_ctx:
             self.api.humans().get(uuid='xyz-xyz-abcdef').execute()
index 001add3d7cd9fb99c7bd6d83d6838b9c5c2e7ceb..eaefd790b0741585804ac0f6e503ee2cad31fe0f 100644 (file)
@@ -399,9 +399,13 @@ class ArvadosPutTest(run_test_server.TestCaseWithServers, ArvadosBaseTestCase):
 class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
                             ArvadosBaseTestCase):
     def _getKeepServerConfig():
-        for config_file in ['application.yml', 'application.default.yml']:
-            with open(os.path.join(run_test_server.SERVICES_SRC_DIR,
-                                   "api", "config", config_file)) as f:
+        for config_file, mandatory in [
+                ['application.yml', True], ['application.default.yml', False]]:
+            path = os.path.join(run_test_server.SERVICES_SRC_DIR,
+                                "api", "config", config_file)
+            if not mandatory and not os.path.exists(path):
+                continue
+            with open(path) as f:
                 rails_config = yaml.load(f.read())
                 for config_section in ['test', 'common']:
                     try:
@@ -433,7 +437,7 @@ class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
                   "ARVADOS_API_HOST_INSECURE",
                   "ARVADOS_API_TOKEN"]:
             self.ENVIRON[v] = arvados.config.settings()[v]
-        arv_put.api_client = arvados.api('v1', cache=False)
+        arv_put.api_client = arvados.api('v1')
 
     def current_user(self):
         return arv_put.api_client.users().current().execute()
@@ -523,14 +527,28 @@ class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
             stdin=subprocess.PIPE, stdout=subprocess.PIPE,
             stderr=subprocess.PIPE, env=self.ENVIRON)
         stdout, stderr = pipe.communicate(text)
-        collection_list = arvados.api('v1', cache=False).collections().list(
-            filters=[['portable_data_hash', '=', stdout.strip()]]).execute().get('items', [])
+        search_key = ('portable_data_hash'
+                      if '--portable-data-hash' in extra_args else 'uuid')
+        collection_list = arvados.api('v1').collections().list(
+            filters=[[search_key, '=', stdout.strip()]]).execute().get('items', [])
         self.assertEqual(1, len(collection_list))
         return collection_list[0]
 
+    def test_put_collection_with_high_redundancy(self):
+        # Write empty data: we're not testing CollectionWriter, just
+        # making sure collections.create tells the API server what our
+        # desired replication level is.
+        collection = self.run_and_find_collection("", ['--replication', '4'])
+        self.assertEqual(4, collection['replication_desired'])
+
+    def test_put_collection_with_default_redundancy(self):
+        collection = self.run_and_find_collection("")
+        self.assertEqual(None, collection['replication_desired'])
+
     def test_put_collection_with_unnamed_project_link(self):
-        link = self.run_and_find_collection("Test unnamed collection",
-                                      ['--portable-data-hash', '--project-uuid', self.PROJECT_UUID])
+        link = self.run_and_find_collection(
+            "Test unnamed collection",
+            ['--portable-data-hash', '--project-uuid', self.PROJECT_UUID])
         username = pwd.getpwuid(os.getuid()).pw_name
         self.assertRegexpMatches(
             link['name'],
@@ -538,8 +556,9 @@ class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
 
     def test_put_collection_with_name_and_no_project(self):
         link_name = 'Test Collection Link in home project'
-        collection = self.run_and_find_collection("Test named collection in home project",
-                                      ['--portable-data-hash', '--name', link_name])
+        collection = self.run_and_find_collection(
+            "Test named collection in home project",
+            ['--portable-data-hash', '--name', link_name])
         self.assertEqual(link_name, collection['name'])
         my_user_uuid = self.current_user()['uuid']
         self.assertEqual(my_user_uuid, collection['owner_uuid'])
index c991154e7f669ff2e92dc80c3cafbf2a62309d86..dbbe3f5e73deca582b65f42900f8181e52a63a02 100644 (file)
@@ -500,17 +500,7 @@ class ArvadosCollectionsTest(run_test_server.TestCaseWithServers,
                 ).manifest_text())
 
 
-class CollectionTestMixin(object):
-    PROXY_RESPONSE = {
-        'items_available': 1,
-        'items': [{
-                'uuid': 'zzzzz-bi6l4-mockproxy012345',
-                'owner_uuid': 'zzzzz-tpzed-mockowner012345',
-                'service_host': tutil.TEST_HOST,
-                'service_port': 65535,
-                'service_ssl_flag': True,
-                'service_type': 'proxy',
-                }]}
+class CollectionTestMixin(tutil.ApiClientMock):
     API_COLLECTIONS = run_test_server.fixture('collections')
     DEFAULT_COLLECTION = API_COLLECTIONS['foo_file']
     DEFAULT_DATA_HASH = DEFAULT_COLLECTION['portable_data_hash']
@@ -520,20 +510,9 @@ class CollectionTestMixin(object):
     ALT_DATA_HASH = ALT_COLLECTION['portable_data_hash']
     ALT_MANIFEST = ALT_COLLECTION['manifest_text']
 
-    def _mock_api_call(self, mock_method, code, body):
-        mock_method = mock_method().execute
-        if code == 200:
-            mock_method.return_value = body
-        else:
-            mock_method.side_effect = arvados.errors.ApiError(
-                tutil.fake_httplib2_response(code), "{}")
-
-    def mock_keep_services(self, api_mock, code, body):
-        self._mock_api_call(api_mock.keep_services().accessible, code, body)
-
-    def api_client_mock(self, code=200):
-        client = mock.MagicMock(name='api_client')
-        self.mock_keep_services(client, code, self.PROXY_RESPONSE)
+    def api_client_mock(self, status=200):
+        client = super(CollectionTestMixin, self).api_client_mock()
+        self.mock_keep_services(client, status=status, service_type='proxy', count=1)
         return client
 
 
@@ -543,9 +522,9 @@ class CollectionReaderTestCase(unittest.TestCase, CollectionTestMixin):
         body = self.API_COLLECTIONS.get(body)
         self._mock_api_call(api_mock.collections().get, code, body)
 
-    def api_client_mock(self, code=200):
-        client = super(CollectionReaderTestCase, self).api_client_mock(code)
-        self.mock_get_collection(client, code, 'foo_file')
+    def api_client_mock(self, status=200):
+        client = super(CollectionReaderTestCase, self).api_client_mock()
+        self.mock_get_collection(client, status, 'foo_file')
         return client
 
     def test_init_no_default_retries(self):
@@ -703,8 +682,8 @@ class CollectionWriterTestCase(unittest.TestCase, CollectionTestMixin):
         return tutil.mock_put_responses(body, *codes, **headers)
 
     def foo_writer(self, **kwargs):
-        api_client = self.api_client_mock()
-        writer = arvados.CollectionWriter(api_client, **kwargs)
+        kwargs.setdefault('api_client', self.api_client_mock())
+        writer = arvados.CollectionWriter(**kwargs)
         writer.start_new_file('foo')
         writer.write('foo')
         return writer
@@ -720,6 +699,32 @@ class CollectionWriterTestCase(unittest.TestCase, CollectionTestMixin):
             with self.assertRaises(arvados.errors.KeepWriteError):
                 writer.finish()
 
+    def test_write_insufficient_replicas_via_proxy(self):
+        writer = self.foo_writer(replication=3)
+        with self.mock_keep(None, 200, headers={'x-keep-replicas-stored': 2}):
+            with self.assertRaises(arvados.errors.KeepWriteError):
+                writer.manifest_text()
+
+    def test_write_insufficient_replicas_via_disks(self):
+        client = mock.MagicMock(name='api_client')
+        self.mock_keep_services(client, status=200, service_type='disk', count=2)
+        writer = self.foo_writer(api_client=client, replication=3)
+        with self.mock_keep(
+                None, 200, 200,
+                **{'x-keep-replicas-stored': 1}) as keepmock:
+            with self.assertRaises(arvados.errors.KeepWriteError):
+                writer.manifest_text()
+
+    def test_write_three_replicas(self):
+        client = mock.MagicMock(name='api_client')
+        self.mock_keep_services(client, status=200, service_type='disk', count=6)
+        writer = self.foo_writer(api_client=client, replication=3)
+        with self.mock_keep(
+                None, 500, 500, 500, 200, 200, 200,
+                **{'x-keep-replicas-stored': 1}) as keepmock:
+            writer.manifest_text()
+            self.assertEqual(6, keepmock.call_count)
+
     def test_write_whole_collection_through_retries(self):
         writer = self.foo_writer(num_retries=2)
         with self.mock_keep(self.DEFAULT_DATA_HASH,
index 37f274a16118f6e46966df5770f7686cd6e45131..6d4d3cd8b816ea3854151285cd46cdebf60f5e63 100644 (file)
@@ -1,6 +1,7 @@
 import hashlib
 import mock
 import os
+import random
 import re
 import socket
 import unittest
@@ -188,11 +189,12 @@ class KeepOptionalPermission(run_test_server.TestCaseWithServers):
 class KeepProxyTestCase(run_test_server.TestCaseWithServers):
     MAIN_SERVER = {}
     KEEP_SERVER = {}
-    KEEP_PROXY_SERVER = {'auth': 'admin'}
+    KEEP_PROXY_SERVER = {}
 
     @classmethod
     def setUpClass(cls):
         super(KeepProxyTestCase, cls).setUpClass()
+        run_test_server.authorize_with('active')
         cls.api_client = arvados.api('v1')
 
     def tearDown(self):
@@ -231,44 +233,22 @@ class KeepProxyTestCase(run_test_server.TestCaseWithServers):
         self.assertTrue(keep_client.using_proxy)
 
 
-class KeepClientServiceTestCase(unittest.TestCase):
-    def mock_keep_services(self, *services):
-        api_client = mock.MagicMock(name='api_client')
-        api_client.keep_services().accessible().execute.return_value = {
-            'items_available': len(services),
-            'items': [{
-                    'uuid': 'zzzzz-bi6l4-{:015x}'.format(index),
-                    'owner_uuid': 'zzzzz-tpzed-000000000000000',
-                    'service_host': host,
-                    'service_port': port,
-                    'service_ssl_flag': ssl,
-                    'service_type': servtype,
-                    } for index, (host, port, ssl, servtype)
-                      in enumerate(services)],
-            }
-        return api_client
-
-    def mock_n_keep_disks(self, service_count):
-        return self.mock_keep_services(
-            *[("keep0x{:x}".format(index), 80, False, 'disk')
-              for index in range(service_count)])
-
-    def get_service_roots(self, *services):
-        api_client = self.mock_keep_services(*services)
+class KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock):
+    def get_service_roots(self, api_client):
         keep_client = arvados.KeepClient(api_client=api_client)
         services = keep_client.weighted_service_roots('000000')
         return [urlparse.urlparse(url) for url in sorted(services)]
 
     def test_ssl_flag_respected_in_roots(self):
-        services = self.get_service_roots(('keep', 10, False, 'disk'),
-                                          ('keep', 20, True, 'disk'))
-        self.assertEqual(10, services[0].port)
-        self.assertEqual('http', services[0].scheme)
-        self.assertEqual(20, services[1].port)
-        self.assertEqual('https', services[1].scheme)
+        for ssl_flag in [False, True]:
+            services = self.get_service_roots(self.mock_keep_services(
+                service_ssl_flag=ssl_flag))
+            self.assertEqual(
+                ('https' if ssl_flag else 'http'), services[0].scheme)
 
     def test_correct_ports_with_ipv6_addresses(self):
-        service = self.get_service_roots(('100::1', 10, True, 'proxy'))[0]
+        service = self.get_service_roots(self.mock_keep_services(
+            service_type='proxy', service_host='100::1', service_port=10, count=1))[0]
         self.assertEqual('100::1', service.hostname)
         self.assertEqual(10, service.port)
 
@@ -277,7 +257,7 @@ class KeepClientServiceTestCase(unittest.TestCase):
     # when connected directly to a Keep server (i.e. non-proxy timeout)
 
     def test_get_timeout(self):
-        api_client = self.mock_keep_services(('keep', 10, False, 'disk'))
+        api_client = self.mock_keep_services(count=1)
         keep_client = arvados.KeepClient(api_client=api_client)
         force_timeout = [socket.timeout("timed out")]
         with mock.patch('requests.get', side_effect=force_timeout) as mock_request:
@@ -289,7 +269,7 @@ class KeepClientServiceTestCase(unittest.TestCase):
                 mock_request.call_args[1]['timeout'])
 
     def test_put_timeout(self):
-        api_client = self.mock_keep_services(('keep', 10, False, 'disk'))
+        api_client = self.mock_keep_services(count=1)
         keep_client = arvados.KeepClient(api_client=api_client)
         force_timeout = [socket.timeout("timed out")]
         with mock.patch('requests.put', side_effect=force_timeout) as mock_request:
@@ -304,7 +284,7 @@ class KeepClientServiceTestCase(unittest.TestCase):
         # Force a timeout, verifying that the requests.get or
         # requests.put method was called with the proxy_timeout
         # setting rather than the default timeout.
-        api_client = self.mock_keep_services(('keep', 10, False, 'proxy'))
+        api_client = self.mock_keep_services(service_type='proxy', count=1)
         keep_client = arvados.KeepClient(api_client=api_client)
         force_timeout = [socket.timeout("timed out")]
         with mock.patch('requests.get', side_effect=force_timeout) as mock_request:
@@ -319,7 +299,7 @@ class KeepClientServiceTestCase(unittest.TestCase):
         # Force a timeout, verifying that the requests.get or
         # requests.put method was called with the proxy_timeout
         # setting rather than the default timeout.
-        api_client = self.mock_keep_services(('keep', 10, False, 'proxy'))
+        api_client = self.mock_keep_services(service_type='proxy', count=1)
         keep_client = arvados.KeepClient(api_client=api_client)
         force_timeout = [socket.timeout("timed out")]
         with mock.patch('requests.put', side_effect=force_timeout) as mock_request:
@@ -346,7 +326,7 @@ class KeepClientServiceTestCase(unittest.TestCase):
         hashes = [
             hashlib.md5("{:064x}".format(x)).hexdigest()
             for x in range(len(expected_order))]
-        api_client = self.mock_n_keep_disks(16)
+        api_client = self.mock_keep_services(count=16)
         keep_client = arvados.KeepClient(api_client=api_client)
         for i, hash in enumerate(hashes):
             roots = keep_client.weighted_service_roots(hash)
@@ -359,12 +339,12 @@ class KeepClientServiceTestCase(unittest.TestCase):
         hashes = [
             hashlib.md5("{:064x}".format(x)).hexdigest() for x in range(100)]
         initial_services = 12
-        api_client = self.mock_n_keep_disks(initial_services)
+        api_client = self.mock_keep_services(count=initial_services)
         keep_client = arvados.KeepClient(api_client=api_client)
         probes_before = [
             keep_client.weighted_service_roots(hash) for hash in hashes]
         for added_services in range(1, 12):
-            api_client = self.mock_n_keep_disks(initial_services+added_services)
+            api_client = self.mock_keep_services(count=initial_services+added_services)
             keep_client = arvados.KeepClient(api_client=api_client)
             total_penalty = 0
             for hash_index in range(len(hashes)):
@@ -398,7 +378,9 @@ class KeepClientServiceTestCase(unittest.TestCase):
         data = '0' * 64
         if verb == 'get':
             data = hashlib.md5(data).hexdigest() + '+1234'
-        api_client = self.mock_n_keep_disks(16)
+        # Arbitrary port number:
+        aport = random.randint(1024,65535)
+        api_client = self.mock_keep_services(service_port=aport, count=16)
         keep_client = arvados.KeepClient(api_client=api_client)
         with mock.patch('requests.' + verb,
                         side_effect=socket.timeout) as req_mock, \
@@ -406,7 +388,7 @@ class KeepClientServiceTestCase(unittest.TestCase):
             getattr(keep_client, verb)(data)
         urls = [urlparse.urlparse(url)
                 for url in err_check.exception.service_errors()]
-        self.assertEqual([('keep0x' + c, 80) for c in '3eab2d5fc9681074'],
+        self.assertEqual([('keep0x' + c, aport) for c in '3eab2d5fc9681074'],
                          [(url.hostname, url.port) for url in urls])
 
     def test_get_error_shows_probe_order(self):
@@ -431,7 +413,7 @@ class KeepClientServiceTestCase(unittest.TestCase):
         self.check_no_services_error('put', arvados.errors.KeepWriteError)
 
     def check_errors_from_last_retry(self, verb, exc_class):
-        api_client = self.mock_n_keep_disks(2)
+        api_client = self.mock_keep_services(count=2)
         keep_client = arvados.KeepClient(api_client=api_client)
         req_mock = getattr(tutil, 'mock_{}_responses'.format(verb))(
             "retry error reporting test", 500, 500, 403, 403)
@@ -452,7 +434,7 @@ class KeepClientServiceTestCase(unittest.TestCase):
     def test_put_error_does_not_include_successful_puts(self):
         data = 'partial failure test'
         data_loc = '{}+{}'.format(hashlib.md5(data).hexdigest(), len(data))
-        api_client = self.mock_n_keep_disks(3)
+        api_client = self.mock_keep_services(count=3)
         keep_client = arvados.KeepClient(api_client=api_client)
         with tutil.mock_put_responses(data_loc, 200, 500, 500) as req_mock, \
                 self.assertRaises(arvados.errors.KeepWriteError) as exc_check:
index 54539b02b39a9260fc40acab06839b4e6da10c58..fa9fef28266bd668d61b1513adcf47395a937a6d 100644 (file)
@@ -7,13 +7,13 @@ import arvados
 import apiclient
 import run_test_server
 
-class PipelineTemplateTest(unittest.TestCase):
-    def setUp(self):
-        run_test_server.run()
+class PipelineTemplateTest(run_test_server.TestCaseWithServers):
+    MAIN_SERVER = {}
+    KEEP_SERVER = {}
 
     def runTest(self):
         run_test_server.authorize_with("admin")
-        pt_uuid = arvados.api('v1', cache=False).pipeline_templates().create(
+        pt_uuid = arvados.api('v1').pipeline_templates().create(
             body={'name':__file__}
             ).execute()['uuid']
         self.assertEqual(len(pt_uuid), 27,
@@ -27,7 +27,7 @@ class PipelineTemplateTest(unittest.TestCase):
             'spass_box': False,
             'spass-box': [True, 'Maybe', False]
             }
-        update_response = arvados.api('v1', cache=False).pipeline_templates().update(
+        update_response = arvados.api('v1').pipeline_templates().update(
             uuid=pt_uuid,
             body={'components':components}
             ).execute()
@@ -39,22 +39,19 @@ class PipelineTemplateTest(unittest.TestCase):
         self.assertEqual(update_response['name'], __file__,
                          'update() response has a different name (%s, not %s)'
                          % (update_response['name'], __file__))
-        get_response = arvados.api('v1', cache=False).pipeline_templates().get(
+        get_response = arvados.api('v1').pipeline_templates().get(
             uuid=pt_uuid
             ).execute()
         self.assertEqual(get_response['components'], components,
                          'components got munged by server (%s -> %s)'
                          % (components, update_response['components']))
-        delete_response = arvados.api('v1', cache=False).pipeline_templates().delete(
+        delete_response = arvados.api('v1').pipeline_templates().delete(
             uuid=pt_uuid
             ).execute()
         self.assertEqual(delete_response['uuid'], pt_uuid,
                          'delete() response has wrong uuid (%s, not %s)'
                          % (delete_response['uuid'], pt_uuid))
         with self.assertRaises(apiclient.errors.HttpError):
-            geterror_response = arvados.api('v1', cache=False).pipeline_templates().get(
+            geterror_response = arvados.api('v1').pipeline_templates().get(
                 uuid=pt_uuid
                 ).execute()
-
-    def tearDown(self):
-        run_test_server.stop()
index 032ac51f0d445a5b03e751cf569f5c835307c367..d879ebe1f8062c02d965bd9c845e5e00c57d1e76 100644 (file)
@@ -1,52 +1,40 @@
+import Queue
 import run_test_server
 import unittest
 import arvados
 import arvados.events
+import mock
 import threading
 
-class EventTestBase(object):
-    def on_event(self, ev):
-        if self.state == 1:
-            self.assertEqual(200, ev['status'])
-            self.state = 2
-            self.subscribed.set()
-        elif self.state == 2:
-            self.assertEqual(self.h[u'uuid'], ev[u'object_uuid'])
-            self.state = 3
-            self.done.set()
-        elif self.state == 3:
-            self.fail()
+class WebsocketTest(run_test_server.TestCaseWithServers):
+    MAIN_SERVER = {}
 
-    def runTest(self):
+    def setUp(self):
         self.ws = None
-        self.state = 1
-        self.subscribed = threading.Event()
-        self.done = threading.Event()
-
-        run_test_server.authorize_with("admin")
-        api = arvados.api('v1', cache=False)
-        self.ws = arvados.events.subscribe(arvados.api('v1', cache=False), [['object_uuid', 'is_a', 'arvados#human']], self.on_event, poll_fallback=2)
-        self.assertIsInstance(self.ws, self.WS_TYPE)
-        self.subscribed.wait(10)
-        self.h = api.humans().create(body={}).execute()
-        self.done.wait(10)
-        self.assertEqual(3, self.state)
-
-class WebsocketTest(run_test_server.TestCaseWithServers, EventTestBase):
-    MAIN_SERVER = {'websockets': True}
-    WS_TYPE = arvados.events.EventClient
 
     def tearDown(self):
         if self.ws:
             self.ws.close()
         super(WebsocketTest, self).tearDown()
 
-
-class PollClientTest(run_test_server.TestCaseWithServers, EventTestBase):
-    MAIN_SERVER = {}
-    WS_TYPE = arvados.events.PollClient
-
-    def tearDown(self):
-        if self.ws:
-            self.ws.close()
-        super(PollClientTest, self).tearDown()
+    def _test_subscribe(self, poll_fallback, expect_type):
+        run_test_server.authorize_with('active')
+        events = Queue.Queue(3)
+        self.ws = arvados.events.subscribe(
+            arvados.api('v1'), [['object_uuid', 'is_a', 'arvados#human']],
+            events.put, poll_fallback=poll_fallback)
+        self.assertIsInstance(self.ws, expect_type)
+        self.assertEqual(200, events.get(True, 10)['status'])
+        human = arvados.api('v1').humans().create(body={}).execute()
+        self.assertEqual(human['uuid'], events.get(True, 10)['object_uuid'])
+        self.assertTrue(events.empty(), "got more events than expected")
+
+    def test_subscribe_websocket(self):
+        self._test_subscribe(
+            poll_fallback=False, expect_type=arvados.events.EventClient)
+
+    @mock.patch('arvados.events.EventClient.__init__')
+    def test_subscribe_poll(self, event_client_constr):
+        event_client_constr.side_effect = Exception('All is well')
+        self._test_subscribe(
+            poll_fallback=1, expect_type=arvados.events.PollClient)
index 5361e03a2bea967f00b441932d0efeef0f0a7054..e957b67038a0a351ddabc25c0f1afa64f406a1dd 100644 (file)
@@ -15,7 +15,8 @@ Gem::Specification.new do |s|
   s.authors     = ["Arvados Authors"]
   s.email       = 'gem-dev@curoverse.com'
   s.licenses    = ['Apache License, Version 2.0']
-  s.files       = ["lib/arvados.rb", "lib/arvados/keep.rb"]
+  s.files       = ["lib/arvados.rb", "lib/arvados/google_api_client.rb",
+                   "lib/arvados/keep.rb"]
   s.required_ruby_version = '>= 2.1.0'
   s.add_dependency('google-api-client', '~> 0.6.3', '>= 0.6.3')
   s.add_dependency('activesupport', '>= 3.2.13')
index a6ebc36ef9662f5a5b93973606f14d0dc8917625..753c518b3191ebbfefbd4407ca67c2f9b83daa45 100644 (file)
@@ -1,10 +1,11 @@
 require 'rubygems'
-require 'google/api_client'
 require 'active_support/inflector'
 require 'json'
 require 'fileutils'
 require 'andand'
 
+require 'arvados/google_api_client'
+
 ActiveSupport::Inflector.inflections do |inflect|
   inflect.irregular 'specimen', 'specimens'
   inflect.irregular 'human', 'humans'
@@ -105,34 +106,6 @@ class Arvados
     end
   end
 
-  class Google::APIClient
-    def discovery_document(api, version)
-      api = api.to_s
-      discovery_uri = self.discovery_uri(api, version)
-      discovery_uri_hash = Digest::MD5.hexdigest(discovery_uri)
-      return @discovery_documents[discovery_uri_hash] ||=
-        begin
-          # fetch new API discovery doc if stale
-          cached_doc = File.expand_path "~/.cache/arvados/discovery-#{discovery_uri_hash}.json" rescue nil
-          if cached_doc.nil? or not File.exist?(cached_doc) or (Time.now - File.mtime(cached_doc)) > 86400
-            response = self.execute!(:http_method => :get,
-                                     :uri => discovery_uri,
-                                     :authenticated => false)
-            begin
-              FileUtils.makedirs(File.dirname cached_doc)
-              File.open(cached_doc, 'w') do |f|
-                f.puts response.body
-              end
-            rescue
-              return JSON.load response.body
-            end
-          end
-
-          File.open(cached_doc) { |f| JSON.load f }
-        end
-    end
-  end
-
   def client
     @client ||= Google::APIClient.
       new(:host => @arvados_api_host,
diff --git a/sdk/ruby/lib/arvados/google_api_client.rb b/sdk/ruby/lib/arvados/google_api_client.rb
new file mode 100644 (file)
index 0000000..71a53f0
--- /dev/null
@@ -0,0 +1,55 @@
+require 'google/api_client'
+require 'json'
+require 'tempfile'
+
+class Google::APIClient
+  def discovery_document(api, version)
+    api = api.to_s
+    discovery_uri = self.discovery_uri(api, version)
+    discovery_uri_hash = Digest::MD5.hexdigest(discovery_uri)
+    discovery_cache_path =
+      File.expand_path("~/.cache/arvados/discovery-#{discovery_uri_hash}.json")
+    @discovery_documents[discovery_uri_hash] ||=
+      disk_cached_discovery_document(discovery_cache_path) or
+      fetched_discovery_document(discovery_uri, discovery_cache_path)
+  end
+
+  private
+
+  def disk_cached_discovery_document(cache_path)
+    begin
+      if (Time.now - File.mtime(cache_path)) < 86400
+        open(cache_path) do |cache_file|
+          return JSON.load(cache_file)
+        end
+      end
+    rescue IOError, SystemCallError, JSON::JSONError
+      # Error reading the cache.  Act like it doesn't exist.
+    end
+    nil
+  end
+
+  def write_cached_discovery_document(cache_path, body)
+    cache_dir = File.dirname(cache_path)
+    cache_file = nil
+    begin
+      FileUtils.makedirs(cache_dir)
+      cache_file = Tempfile.new("discovery", cache_dir)
+      cache_file.write(body)
+      cache_file.flush
+      File.rename(cache_file.path, cache_path)
+    rescue IOError, SystemCallError
+      # Failure to write the cache is non-fatal.  Do nothing.
+    ensure
+      cache_file.close! unless cache_file.nil?
+    end
+  end
+
+  def fetched_discovery_document(uri, cache_path)
+    response = self.execute!(:http_method => :get,
+                             :uri => uri,
+                             :authenticated => false)
+    write_cached_discovery_document(cache_path, response.body)
+    JSON.load(response.body)
+  end
+end
index f28cfdc3808bf741be22d3873bc8702def383984..ede40c3fc2b364f8f886166885fe9e8a59e61e29 100644 (file)
@@ -138,7 +138,10 @@ module Keep
 
     def split_file_token token
       start_pos, filesize, filename = token.split(':', 3)
-      [start_pos.to_i, filesize.to_i, filename]
+      if filename.nil?
+        raise ArgumentError.new "Invalid file token '#{token}'"
+      end
+      [start_pos.to_i, filesize.to_i, unescape(filename)]
     end
 
     def each_file_spec
index 64c8ea3129ca461d6e1de266c1a1957229657afd..8ad8134592984dabd203d822d42b0e8696bb5a83 100644 (file)
@@ -1,5 +1,6 @@
 require "minitest/autorun"
 require "arvados/keep"
+require "yaml"
 
 def random_block(size=nil)
   sprintf("%032x+%d", rand(16 ** 32), size || rand(64 * 1024 * 1024))
@@ -57,6 +58,22 @@ class ManifestTest < Minitest::Test
     assert_empty(Keep::Manifest.new("").each_line.to_a)
   end
 
+  def test_empty_each_file_spec
+    assert_empty(Keep::Manifest.new("").each_file_spec.to_a)
+  end
+
+  def test_empty_files
+    assert_empty(Keep::Manifest.new("").files)
+  end
+
+  def test_empty_files_count
+    assert_equal(0, Keep::Manifest.new("").files_count)
+  end
+
+  def test_empty_has_file?
+    refute(Keep::Manifest.new("").has_file?(""))
+  end
+
   def test_empty_line_within_manifest
     block_s = random_block
     manifest = Keep::Manifest.
@@ -105,6 +122,11 @@ class ManifestTest < Minitest::Test
     assert_equal([[".", "file:test.txt", 9]], manifest.files)
   end
 
+  def test_files_with_escape_sequence_in_filename
+    manifest = Keep::Manifest.new(". #{random_block(9)} 0:9:a\\040\\141.txt\n")
+    assert_equal([[".", "a a.txt", 9]], manifest.files)
+  end
+
   def test_files_spanning_multiple_blocks
     manifest = Keep::Manifest.new(MULTIBLOCK_FILE_MANIFEST)
     assert_equal([[".", "repfile", 5],
@@ -153,4 +175,48 @@ class ManifestTest < Minitest::Test
     refute(manifest.has_file?("./s2/repfile"), "one-arg missing stream found")
     refute(manifest.has_file?("./s2", "repfile"), "two-arg missing stream found")
   end
+
+  def test_has_file_with_spaces
+    manifest = Keep::Manifest.new(". #{random_block(3)} 0:3:a\\040b\\040c\n")
+    assert(manifest.has_file?("./a b c"), "one-arg 'a b c' not found")
+    assert(manifest.has_file?(".", "a b c"), "two-arg 'a b c' not found")
+    refute(manifest.has_file?("a\\040b\\040c"), "one-arg unescaped found")
+    refute(manifest.has_file?(".", "a\\040b\\040c"), "two-arg unescaped found")
+  end
+
+  def test_parse_all_fixtures
+    fixtures('collections').each do |name, collection|
+      parse_collection_manifest name, collection
+    end
+  end
+
+  def test_raise_on_bogus_fixture
+    assert_raises ArgumentError do
+      parse_collection_manifest('bogus collection',
+                                {'manifest_text' => ". zzz 0:\n"})
+    end
+  end
+
+  def parse_collection_manifest name, collection
+    manifest = Keep::Manifest.new(collection['manifest_text'])
+    manifest.each_file_spec do |stream_name, start_pos, file_size, file_name|
+      assert_kind_of String, stream_name
+      assert_kind_of Integer, start_pos
+      assert_kind_of Integer, file_size
+      assert_kind_of String, file_name
+      assert !stream_name.empty?, "empty stream_name in #{name} fixture"
+      assert !file_name.empty?, "empty file_name in #{name} fixture"
+    end
+  end
+
+  @@fixtures = nil
+  def fixtures name
+    return @@fixtures if @@fixtures
+    path = File.expand_path("../../../../services/api/test/fixtures/#{name}.yml",
+                            __FILE__)
+    file = IO.read(path)
+    trim_index = file.index('# Test Helper trims the rest of the file')
+    file = file[0, trim_index] if trim_index
+    @@fixtures = YAML.load(file)
+  end
 end
index c1d5219b07c9a6d243a3bb7e7a142f5064cdfd8e..4ad5e10faa46b96222d4291596d2f47f686bf18c 100644 (file)
@@ -3,6 +3,7 @@
 
 # Ignore all logfiles and tempfiles.
 /log/*.log
+/log/*.log.gz
 /tmp
 
 # Sensitive files and local configuration
index 7e70eb2f31f1050eecb8e85a8a64604fec7b2075..70f67d5d76b10ecd5798c021045398372c4497a9 100644 (file)
@@ -57,7 +57,7 @@ gem 'rvm-capistrano', :group => :test
 
 gem 'acts_as_api'
 
-gem 'passenger', :group => :production
+gem 'passenger'
 
 gem 'omniauth', '1.1.1'
 gem 'omniauth-oauth2', '1.1.1'
@@ -72,8 +72,8 @@ gem 'faye-websocket'
 
 gem 'themes_for_rails'
 
-gem 'arvados', '>= 0.1.20140919104705'
-gem 'arvados-cli', '>= 0.1.20150121183928'
+gem 'arvados', '>= 0.1.20150210011250'
+gem 'arvados-cli', '>= 0.1.20150128223752'
 
 # pg_power lets us use partial indexes in schema.rb in Rails 3
 gem 'pg_power'
index 6db1cb443b9793718597cd5fc330e764ea39cfbc..a6a8326eeb7be74ea0f54cfcf296b52c69d3e51d 100644 (file)
@@ -32,19 +32,19 @@ GEM
       activemodel (>= 3.0.0)
       activesupport (>= 3.0.0)
       rack (>= 1.1.0)
-    addressable (2.3.6)
+    addressable (2.3.7)
     andand (1.3.3)
     arel (3.0.3)
-    arvados (0.1.20141114230720)
+    arvados (0.1.20150210011250)
       activesupport (>= 3.2.13)
       andand (~> 1.3, >= 1.3.3)
       google-api-client (~> 0.6.3, >= 0.6.3)
       json (~> 1.7, >= 1.7.7)
       jwt (>= 0.1.5, < 1.0.0)
-    arvados-cli (0.1.20150121183928)
+    arvados-cli (0.1.20150205181653)
       activesupport (~> 3.2, >= 3.2.13)
       andand (~> 1.3, >= 1.3.3)
-      arvados (~> 0.1, >= 0.1.0)
+      arvados (~> 0.1, >= 0.1.20150128223554)
       curb (~> 0.8)
       google-api-client (~> 0.6.3, >= 0.6.3)
       json (~> 1.7, >= 1.7.7)
@@ -100,12 +100,12 @@ GEM
     highline (1.6.21)
     hike (1.2.3)
     httpauth (0.2.1)
-    i18n (0.6.11)
+    i18n (0.7.0)
     journey (1.0.4)
     jquery-rails (3.1.0)
       railties (>= 3.0, < 5.0)
       thor (>= 0.14, < 2.0)
-    json (1.8.1)
+    json (1.8.2)
     jwt (0.1.13)
       multi_json (>= 1.5)
     launchy (2.4.3)
@@ -130,7 +130,7 @@ GEM
       jwt (~> 0.1.4)
       multi_json (~> 1.0)
       rack (~> 1.2)
-    oj (2.11.1)
+    oj (2.11.4)
     omniauth (1.1.1)
       hashie (~> 1.2)
       rack
@@ -210,7 +210,7 @@ GEM
     treetop (1.4.15)
       polyglot
       polyglot (>= 0.3.1)
-    trollop (2.0)
+    trollop (2.1.1)
     tzinfo (0.3.39)
     uglifier (2.5.0)
       execjs (>= 0.3.0)
@@ -224,8 +224,8 @@ PLATFORMS
 DEPENDENCIES
   acts_as_api
   andand
-  arvados (>= 0.1.20140919104705)
-  arvados-cli (>= 0.1.20150121183928)
+  arvados (>= 0.1.20150210011250)
+  arvados-cli (>= 0.1.20150128223752)
   coffee-rails (~> 3.2.0)
   database_cleaner
   factory_girl_rails
index a65bf3e328697da220d111c8be36460de82220bd..69c03bde9fc0a1b22ca7a39a3bb7a78d94dd471c 100644 (file)
@@ -205,8 +205,9 @@ class ApplicationController < ActionController::Base
     end
   end
 
-  def apply_where_limit_order_params *args
-    apply_filters *args
+  def apply_where_limit_order_params model_class=nil
+    model_class ||= self.model_class
+    apply_filters model_class
 
     ar_table_name = @objects.table_name
     if @where.is_a? Hash and @where.any?
@@ -271,7 +272,7 @@ class ApplicationController < ActionController::Base
         columns_list = @select.
           flat_map { |attr| api_column_map[attr] }.
           uniq.
-          map { |s| "#{table_name}.#{ActiveRecord::Base.connection.quote_column_name s}" }
+          map { |s| "#{ar_table_name}.#{ActiveRecord::Base.connection.quote_column_name s}" }
         @objects = @objects.select(columns_list.join(", "))
       end
 
@@ -436,8 +437,8 @@ class ApplicationController < ActionController::Base
   end
   accept_param_as_json :reader_tokens, Array
 
-  def render_list
-    @object_list = {
+  def object_list
+    list = {
       :kind  => "arvados##{(@response_resource_name || resource_name).camelize(:lower)}List",
       :etag => "",
       :self_link => "",
@@ -446,11 +447,15 @@ class ApplicationController < ActionController::Base
       :items => @objects.as_api_response(nil, {select: @select})
     }
     if @objects.respond_to? :except
-      @object_list[:items_available] = @objects.
+      list[:items_available] = @objects.
         except(:limit).except(:offset).
         count(:id, distinct: true)
     end
-    send_json @object_list
+    list
+  end
+
+  def render_list
+    send_json object_list
   end
 
   def remote_ip
index 922fa6aa39b1f3b2421fef6536332ab10a7afa6f..956de8e8942826bdb1ad1473c9d5e1f59631a8e9 100644 (file)
@@ -33,10 +33,6 @@ class Arvados::V1::CollectionsController < ApplicationController
     end
   end
 
-  def index
-    super
-  end
-
   def find_collections(visited, sp, &b)
     case sp
     when ArvadosModel
@@ -184,8 +180,7 @@ class Arvados::V1::CollectionsController < ApplicationController
   def load_limit_offset_order_params *args
     if action_name == 'index'
       # Omit manifest_text from index results unless expressly selected.
-      @select ||= model_class.api_accessible_attributes(:user).
-        map { |attr_spec| attr_spec.first.to_s } - ["manifest_text"]
+      @select ||= model_class.selectable_attributes - ["manifest_text"]
     end
     super
   end
index c82ffb49cd7af8e4b7fe2afac128474718de71c8..eae6dca8c0332ae820fbedbb3965f3112453dfb9 100644 (file)
@@ -1,17 +1,14 @@
 class Arvados::V1::GroupsController < ApplicationController
 
   def self._contents_requires_parameters
-    _index_requires_parameters.
+    params = _index_requires_parameters.
       merge({
               uuid: {
                 type: 'string', required: false, default: nil
               },
-              # include_linked returns name links, which are obsolete, so
-              # remove it when clients have been migrated.
-              include_linked: {
-                type: 'boolean', required: false, default: false
-              },
             })
+    params.delete(:select)
+    params
   end
 
   def render_404_if_no_object
@@ -35,34 +32,21 @@ class Arvados::V1::GroupsController < ApplicationController
   end
 
   def contents
-    # Set @objects:
-    # include_linked returns name links, which are obsolete, so
-    # remove it when clients have been migrated.
-    load_searchable_objects(owner_uuid: @object.andand.uuid,
-                            include_linked: params[:include_linked])
-    sql = 'link_class=? and head_uuid in (?)'
-    sql_params = ['name', @objects.collect(&:uuid)]
-    if @object
-      sql += ' and tail_uuid=?'
-      sql_params << @object.uuid
-    end
-    @links = Link.where sql, *sql_params
-    @object_list = {
-      :kind  => "arvados#objectList",
+    load_searchable_objects
+    send_json({
+      :kind => "arvados#objectList",
       :etag => "",
       :self_link => "",
-      :links => @links.as_api_response(nil),
       :offset => @offset,
       :limit => @limit,
       :items_available => @items_available,
       :items => @objects.as_api_response(nil)
-    }
-    send_json @object_list
+    })
   end
 
   protected
 
-  def load_searchable_objects opts
+  def load_searchable_objects
     all_objects = []
     @items_available = 0
 
@@ -81,39 +65,31 @@ class Arvados::V1::GroupsController < ApplicationController
      Job, PipelineInstance, PipelineTemplate,
      Collection,
      Human, Specimen, Trait].each do |klass|
-      @objects = klass.readable_by(*@read_users)
-      if klass == Group
-        @objects = @objects.where(group_class: 'project')
-      end
-      if opts[:owner_uuid]
-        conds = []
-        cond_params = []
-        conds << "#{klass.table_name}.owner_uuid = ?"
-        cond_params << opts[:owner_uuid]
-        if conds.any?
-          cond_sql = '(' + conds.join(') OR (') + ')'
-          @objects = @objects.where(cond_sql, *cond_params)
-        end
-      end
+      # If the currently requested orders specifically match the
+      # table_name for the current klass, apply that order.
+      # Otherwise, order by recency.
+      request_order =
+        request_orders.andand.find { |r| r =~ /^#{klass.table_name}\./i } ||
+        klass.default_orders.join(", ")
 
-      # If the currently requested orders specifically match the table_name for the current klass, apply the order
-      request_order = request_orders && request_orders.find{ |r| r =~ /^#{klass.table_name}\./i }
-      if request_order
-        @objects = @objects.order(request_order)
-      else
-        # default to created_at desc, ignoring any currently requested ordering because it doesn't apply to this klass
-        @objects = @objects.order("#{klass.table_name}.created_at desc")
+      @select = nil
+      where_conds = {}
+      where_conds[:owner_uuid] = @object.uuid if @object
+      if klass == Collection
+        @select = klass.selectable_attributes - ["manifest_text"]
+      elsif klass == Group
+        where_conds[:group_class] = "project"
       end
 
+      @objects = klass.readable_by(*@read_users).
+        order(request_order).where(where_conds)
       @limit = limit_all - all_objects.count
       apply_where_limit_order_params klass
-      klass_items_available = @objects.
-        except(:limit).except(:offset).
-        count(:id, distinct: true)
+      klass_object_list = object_list
+      klass_items_available = klass_object_list[:items_available] || 0
       @items_available += klass_items_available
       @offset = [@offset - klass_items_available, 0].max
-
-      all_objects += @objects.to_a
+      all_objects += klass_object_list[:items]
     end
 
     @objects = all_objects
index 798217dc0cfef7aaea385a67ede5fae198ce6e41..acc7100f08c1595d3603a3b00bf6a91eaddc338e 100644 (file)
@@ -20,7 +20,7 @@ class Arvados::V1::LinksController < ApplicationController
   end
 
   def get_permissions
-    if current_user.can?(manage: @object)
+    if current_user.andand.can?(manage: @object)
       # find all links and return them
       @objects = Link.where(link_class: "permission",
                             head_uuid: params[:uuid])
index bc5a20fb0db8f0a392558af563529904eeb86fd1..c108fb898dfc43300f511fe84851dfe72ad47dd9 100644 (file)
@@ -1,6 +1,11 @@
 class Arvados::V1::SchemaController < ApplicationController
+  skip_before_filter :catch_redirect_hint
   skip_before_filter :find_objects_for_index
   skip_before_filter :find_object_by_uuid
+  skip_before_filter :load_filters_param
+  skip_before_filter :load_limit_offset_order_params
+  skip_before_filter :load_read_auths
+  skip_before_filter :load_where_param
   skip_before_filter :render_404_if_no_object
   skip_before_filter :require_auth_scope
 
@@ -20,6 +25,7 @@ class Arvados::V1::SchemaController < ApplicationController
         title: "Arvados API",
         description: "The API to interact with Arvados.",
         documentationLink: "http://doc.arvados.org/api/index.html",
+        defaultCollectionReplication: Rails.configuration.default_collection_replication,
         protocol: "rest",
         baseUrl: root_url + "arvados/v1/",
         basePath: "/arvados/v1/",
index 224dd291561ebf02496e13136a1603347d7fa7e9..131ee5236bc08e26afb096912505594db0755f67 100644 (file)
@@ -8,9 +8,14 @@ class Arvados::V1::UsersController < ApplicationController
   before_filter :admin_required, only: [:setup, :unsetup]
 
   def current
-    @object = current_user
-    show
+    if current_user
+      @object = current_user
+      show
+    else
+      send_error("Not logged in", status: 401)
+    end
   end
+
   def system
     @object = system_user
     show
@@ -136,7 +141,7 @@ class Arvados::V1::UsersController < ApplicationController
     }
   end
 
-  def apply_filters
+  def apply_filters(model_class=nil)
     return super if @read_users.any? &:is_admin
     if params[:uuid] != current_user.andand.uuid
       # Non-admin index/show returns very basic information about readable users.
index 5817ff6648f933d15d34f8c91064bf778712a964..b9442d64e78bf888741e9e47d532f53d73059e22 100644 (file)
@@ -80,6 +80,10 @@ class ApiClientAuthorization < ArvadosModel
     attrs
   end
 
+  def self.default_orders
+    ["#{table_name}.id desc"]
+  end
+
   protected
 
   def permission_to_create
index 308da7fa11576acc00066bb47de5592cf2543f96..cf0aba9721dffc82dca1ca3d7860244bbfc0a076 100644 (file)
@@ -56,6 +56,12 @@ class ArvadosModel < ActiveRecord::Base
     "#{current_api_base}/#{self.class.to_s.pluralize.underscore}/#{self.uuid}"
   end
 
+  def self.selectable_attributes(template=:user)
+    # Return an array of attribute name strings that can be selected
+    # in the given template.
+    api_accessible_attributes(template).map { |attr_spec| attr_spec.first.to_s }
+  end
+
   def self.searchable_columns operator
     textonly_operator = !operator.match(/[<=>]/)
     self.columns.select do |col|
@@ -96,6 +102,10 @@ class ArvadosModel < ActiveRecord::Base
     api_column_map
   end
 
+  def self.default_orders
+    ["#{table_name}.modified_at desc", "#{table_name}.uuid"]
+  end
+
   # If current user can manage the object, return an array of uuids of
   # users and groups that have permission to write the object. The
   # first two elements are always [self.owner_uuid, current user's
@@ -107,6 +117,7 @@ class ArvadosModel < ActiveRecord::Base
   # If current user cannot write this object, just return
   # [self.owner_uuid].
   def writable_by
+    return [owner_uuid] if not current_user
     unless (owner_uuid == current_user.uuid or
             current_user.is_admin or
             (current_user.groups_i_can(:manage) & [uuid, owner_uuid]).any?)
@@ -205,6 +216,25 @@ class ArvadosModel < ActiveRecord::Base
     attributes
   end
 
+  def self.full_text_searchable_columns
+    self.columns.select do |col|
+      if col.type == :string or col.type == :text
+        true
+      end
+    end.map(&:name)
+  end
+
+  def self.full_text_tsvector
+    tsvector_str = "to_tsvector('english', "
+    first = true
+    self.full_text_searchable_columns.each do |column|
+      tsvector_str += " || ' ' || " if not first
+      tsvector_str += "coalesce(#{column},'')"
+      first = false
+    end
+    tsvector_str += ")"
+  end
+
   protected
 
   def ensure_ownership_path_leads_to_user
index 457fb5f778cb3429d0d19f3104b6142a2a171dfe..89ad874cd7d211aae90f841927a620150af1ab9b 100644 (file)
@@ -5,10 +5,13 @@ class Collection < ArvadosModel
   include KindAndEtag
   include CommonApiTemplate
 
+  serialize :properties, Hash
+
   before_validation :check_encoding
   before_validation :check_signatures
   before_validation :strip_manifest_text
   before_validation :set_portable_data_hash
+  before_validation :maybe_clear_replication_confirmed
   validate :ensure_hash_matches_manifest_text
   before_save :set_file_names
 
@@ -21,14 +24,20 @@ class Collection < ArvadosModel
     t.add :properties
     t.add :portable_data_hash
     t.add :signed_manifest_text, as: :manifest_text
+    t.add :replication_desired
+    t.add :replication_confirmed
+    t.add :replication_confirmed_at
   end
 
   def self.attributes_required_columns
-    # If we don't list this explicitly, the params[:select] code gets
-    # confused by the way we expose signed_manifest_text as
-    # manifest_text in the API response, and never let clients select
-    # the manifest_text column.
-    super.merge('manifest_text' => ['manifest_text'])
+    super.merge(
+                # If we don't list manifest_text explicitly, the
+                # params[:select] code gets confused by the way we
+                # expose signed_manifest_text as manifest_text in the
+                # API response, and never let clients select the
+                # manifest_text column.
+                'manifest_text' => ['manifest_text'],
+                )
   end
 
   def check_signatures
@@ -175,22 +184,6 @@ class Collection < ArvadosModel
     end
   end
 
-  def redundancy_status
-    if redundancy_confirmed_as.nil?
-      'unconfirmed'
-    elsif redundancy_confirmed_as < redundancy
-      'degraded'
-    else
-      if redundancy_confirmed_at.nil?
-        'unconfirmed'
-      elsif Time.now - redundancy_confirmed_at < 7.days
-        'OK'
-      else
-        'stale'
-      end
-    end
-  end
-
   def signed_manifest_text
     if has_attribute? :manifest_text
       token = current_api_client_authorization.andand.api_token
@@ -214,7 +207,7 @@ class Collection < ArvadosModel
   def self.munge_manifest_locators! manifest
     # Given a manifest text and a block, yield each locator,
     # and replace it with whatever the block returns.
-    manifest.andand.gsub!(/ [[:xdigit:]]{32}(\+[[:digit:]]+)?(\+\S+)/) do |word|
+    manifest.andand.gsub!(/ [[:xdigit:]]{32}(\+\S+)?/) do |word|
       if loc = Keep::Locator.parse(word.strip)
         " " + yield(loc)
       else
@@ -223,6 +216,15 @@ class Collection < ArvadosModel
     end
   end
 
+  def self.each_manifest_locator manifest
+    # Given a manifest text and a block, yield each locator.
+    manifest.andand.scan(/ ([[:xdigit:]]{32}(\+\S+)?)/) do |word, _|
+      if loc = Keep::Locator.parse(word)
+        yield loc
+      end
+    end
+  end
+
   def self.normalize_uuid uuid
     hash_part = nil
     size_part = nil
@@ -300,11 +302,19 @@ class Collection < ArvadosModel
     super - ["manifest_text"]
   end
 
+  def self.full_text_searchable_columns
+    super - ["manifest_text"]
+  end
+
   protected
   def portable_manifest_text
     portable_manifest = self[:manifest_text].dup
     self.class.munge_manifest_locators!(portable_manifest) do |loc|
-      loc.hash + '+' + loc.size.to_s
+      if loc.size
+        loc.hash + '+' + loc.size.to_s
+      else
+        loc.hash
+      end
     end
     portable_manifest
   end
@@ -315,4 +325,32 @@ class Collection < ArvadosModel
      '+' +
      portable_manifest.bytesize.to_s)
   end
+
+  def maybe_clear_replication_confirmed
+    if manifest_text_changed?
+      # If the new manifest_text contains locators whose hashes
+      # weren't in the old manifest_text, storage replication is no
+      # longer confirmed.
+      in_old_manifest = {}
+      self.class.each_manifest_locator(manifest_text_was) do |loc|
+        in_old_manifest[loc.hash] = true
+      end
+      self.class.each_manifest_locator(manifest_text) do |loc|
+        if not in_old_manifest[loc.hash]
+          self.replication_confirmed_at = nil
+          self.replication_confirmed = nil
+          break
+        end
+      end
+    end
+  end
+
+  def ensure_permission_to_save
+    if (not current_user.andand.is_admin and
+        (replication_confirmed_at_changed? or replication_confirmed_changed?) and
+        not (replication_confirmed_at.nil? and replication_confirmed.nil?))
+      raise ArvadosModel::PermissionDeniedError.new("replication_confirmed and replication_confirmed_at attributes cannot be changed, except by setting both to nil")
+    end
+    super
+  end
 end
index bc68283db290dd5a84b7d05196b910eef55074c5..cd97349823f1048d3c64ff7535365b531e5c4647 100644 (file)
@@ -5,6 +5,7 @@ class DatabaseSeeds
     system_group
     all_users_group
     anonymous_group
+    anonymous_group_read_permission
     anonymous_user
     empty_collection
   end
index a32ce39299228b324ec63a64be6e6f4a152af6b0..a47a4583cd533e272d9b5f850b4281aef75f26c3 100644 (file)
@@ -411,8 +411,8 @@ class User < ArvadosModel
 
   # Give the special "System group" permission to manage this user and
   # all of this user's stuff.
-  #
   def add_system_group_permission_link
+    return true if uuid == system_user_uuid
     act_as_system_user do
       Link.create(link_class: 'permission',
                   name: 'can_manage',
index ed2c533f5662e008bc4d25f5ae4b29c7556b0a49..1e8d79fcd679d1f4cb6b82ac61e55a519484314d 100644 (file)
@@ -45,7 +45,6 @@ test:
   blob_signing_key: zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc
   user_profile_notification_address: arvados@example.com
   workbench_address: https://localhost:3001/
-  websocket_address: ws://127.0.0.1:3333/websocket
 
 common:
   uuid_prefix: <%= Digest::MD5.hexdigest(`hostname`).to_i(16).to_s(36)[0..4] %>
@@ -245,4 +244,8 @@ common:
   # Permit insecure (OpenSSL::SSL::VERIFY_NONE) connections to the Single Sign
   # On (sso) server.  Should only be enabled during development when the SSO
   # server is using a self-signed cert.
-  sso_insecure: false
\ No newline at end of file
+  sso_insecure: false
+
+  # Default replication level for collections. This is used when a
+  # collection's replication_desired attribute is nil.
+  default_collection_replication: 2
diff --git a/services/api/config/initializers/time_format.rb b/services/api/config/initializers/time_format.rb
new file mode 100644 (file)
index 0000000..d476781
--- /dev/null
@@ -0,0 +1,5 @@
+class ActiveSupport::TimeWithZone
+  def as_json *args
+    strftime "%Y-%m-%dT%H:%M:%S.%NZ"
+  end
+end
diff --git a/services/api/db/migrate/20150122175935_no_description_in_search_index.rb b/services/api/db/migrate/20150122175935_no_description_in_search_index.rb
new file mode 100644 (file)
index 0000000..75b946f
--- /dev/null
@@ -0,0 +1,30 @@
+# If the database reflects an obsolete version of the 20141208185217
+# migration (i.e., before commit:5c1db683), revert it and reapply the
+# current version. (The down-migration is the same in both versions.)
+
+require "./db/migrate/20141208185217_search_index.rb"
+
+class NoDescriptionInSearchIndex < ActiveRecord::Migration
+  def up
+    all_tables = %w{collections groups jobs pipeline_instances pipeline_templates}
+    all_tables.each do |table|
+      indexes = ActiveRecord::Base.connection.indexes(table)
+      search_index_by_name = indexes.select do |index|
+        index.name == "#{table}_search_index"
+      end
+
+      if !search_index_by_name.empty?
+        index_columns = search_index_by_name.first.columns
+        has_description = index_columns.include? 'description'
+        if has_description
+          SearchIndex.new.migrate(:down)
+          SearchIndex.new.migrate(:up)
+          break
+        end
+      end
+    end
+  end
+
+  def down
+  end
+end
diff --git a/services/api/db/migrate/20150123142953_full_text_search.rb b/services/api/db/migrate/20150123142953_full_text_search.rb
new file mode 100644 (file)
index 0000000..4d93210
--- /dev/null
@@ -0,0 +1,18 @@
+class FullTextSearch < ActiveRecord::Migration
+
+  def up
+    execute "CREATE INDEX collections_full_text_search_idx ON collections USING gin(#{Collection.full_text_tsvector});"
+    execute "CREATE INDEX groups_full_text_search_idx ON groups USING gin(#{Group.full_text_tsvector});"
+    execute "CREATE INDEX jobs_full_text_search_idx ON jobs USING gin(#{Job.full_text_tsvector});"
+    execute "CREATE INDEX pipeline_instances_full_text_search_idx ON pipeline_instances USING gin(#{PipelineInstance.full_text_tsvector});"
+    execute "CREATE INDEX pipeline_templates_full_text_search_idx ON pipeline_templates USING gin(#{PipelineTemplate.full_text_tsvector});"
+  end
+
+  def down
+    remove_index :pipeline_templates, :name => 'pipeline_templates_full_text_search_idx'
+    remove_index :pipeline_instances, :name => 'pipeline_instances_full_text_search_idx'
+    remove_index :jobs, :name => 'jobs_full_text_search_idx'
+    remove_index :groups, :name => 'groups_full_text_search_idx'
+    remove_index :collections, :name => 'collections_full_text_search_idx'
+  end
+end
diff --git a/services/api/db/migrate/20150203180223_set_group_class_on_anonymous_group.rb b/services/api/db/migrate/20150203180223_set_group_class_on_anonymous_group.rb
new file mode 100644 (file)
index 0000000..d182006
--- /dev/null
@@ -0,0 +1,14 @@
+class SetGroupClassOnAnonymousGroup < ActiveRecord::Migration
+  include CurrentApiClient
+  def up
+    act_as_system_user do
+      anonymous_group.update_attributes group_class: 'role', name: 'Anonymous users', description: 'Anonymous users'
+    end
+  end
+
+  def down
+    act_as_system_user do
+      anonymous_group.update_attributes group_class: nil, name: 'Anonymous group', description: 'Anonymous group'
+    end
+  end
+end
diff --git a/services/api/db/migrate/20150206210804_all_users_can_read_anonymous_group.rb b/services/api/db/migrate/20150206210804_all_users_can_read_anonymous_group.rb
new file mode 100644 (file)
index 0000000..848fe36
--- /dev/null
@@ -0,0 +1,12 @@
+class AllUsersCanReadAnonymousGroup < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    anonymous_group_read_permission
+  end
+
+  def down
+    # Do nothing - it's too dangerous to try to figure out whether or not
+    # the permission was created by the migration.
+  end
+end
diff --git a/services/api/db/migrate/20150206230342_rename_replication_attributes.rb b/services/api/db/migrate/20150206230342_rename_replication_attributes.rb
new file mode 100644 (file)
index 0000000..58572d7
--- /dev/null
@@ -0,0 +1,30 @@
+class RenameReplicationAttributes < ActiveRecord::Migration
+  RENAME = [[:redundancy, :replication_desired],
+            [:redundancy_confirmed_as, :replication_confirmed],
+            [:redundancy_confirmed_at, :replication_confirmed_at]]
+
+  def up
+    RENAME.each do |oldname, newname|
+      rename_column :collections, oldname, newname
+    end
+    remove_column :collections, :redundancy_confirmed_by_client_uuid
+    Collection.reset_column_information
+
+    # Removing that column dropped some search indexes. Let's put them back.
+    add_index :collections, ["owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "portable_data_hash", "uuid", "name", "file_names"], name: 'collections_search_index'
+    execute "CREATE INDEX collections_full_text_search_idx ON collections USING gin(#{Collection.full_text_tsvector});"
+  end
+
+  def down
+    remove_index :collections, name: 'collections_search_index'
+    add_column :collections, :redundancy_confirmed_by_client_uuid, :string
+    RENAME.reverse.each do |oldname, newname|
+      rename_column :collections, newname, oldname
+    end
+    remove_index :collections, :name => 'collections_full_text_search_idx'
+    Collection.reset_column_information
+
+    execute "CREATE INDEX collections_full_text_search_idx ON collections USING gin(#{Collection.full_text_tsvector});"
+    add_index :collections, ["owner_uuid", "modified_by_client_uuid", "modified_by_user_uuid", "portable_data_hash", "uuid", "name", "file_names", "redundancy_confirmed_by_client_uuid"], name: 'collections_search_index'
+  end
+end
diff --git a/services/api/db/migrate/20150216193428_collection_name_owner_unique_only_non_expired.rb b/services/api/db/migrate/20150216193428_collection_name_owner_unique_only_non_expired.rb
new file mode 100644 (file)
index 0000000..d2c629a
--- /dev/null
@@ -0,0 +1,23 @@
+class CollectionNameOwnerUniqueOnlyNonExpired < ActiveRecord::Migration
+  def find_index
+    indexes = ActiveRecord::Base.connection.indexes('collections')
+    name_owner_index = indexes.select do |index|
+      index.name == 'collection_owner_uuid_name_unique'
+    end
+    name_owner_index
+  end
+
+  def up
+    remove_index :collections, :name => 'collection_owner_uuid_name_unique' if !find_index.empty?
+    add_index(:collections, [:owner_uuid, :name], unique: true,
+              where: 'expires_at is null',
+              name: 'collection_owner_uuid_name_unique')
+  end
+
+  def down
+    # it failed during up. is it going to pass now? should we do nothing?
+    remove_index :collections, :name => 'collection_owner_uuid_name_unique' if !find_index.empty?
+    add_index(:collections, [:owner_uuid, :name], unique: true,
+              name: 'collection_owner_uuid_name_unique')
+  end
+end
index 96180c7d3c008e1abf2a6f8fa8f957d53853ff15..afc03510d58e030f66f5026ec1ecf056a16b9313 100644 (file)
@@ -159,10 +159,9 @@ CREATE TABLE collections (
     modified_by_user_uuid character varying(255),
     modified_at timestamp without time zone,
     portable_data_hash character varying(255),
-    redundancy integer,
-    redundancy_confirmed_by_client_uuid character varying(255),
-    redundancy_confirmed_at timestamp without time zone,
-    redundancy_confirmed_as integer,
+    replication_desired integer,
+    replication_confirmed_at timestamp without time zone,
+    replication_confirmed integer,
     updated_at timestamp without time zone NOT NULL,
     uuid character varying(255),
     manifest_text text,
@@ -1304,14 +1303,28 @@ CREATE INDEX authorized_keys_search_index ON authorized_keys USING btree (uuid,
 -- Name: collection_owner_uuid_name_unique; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
 
-CREATE UNIQUE INDEX collection_owner_uuid_name_unique ON collections USING btree (owner_uuid, name);
+CREATE UNIQUE INDEX collection_owner_uuid_name_unique ON collections USING btree (owner_uuid, name) WHERE (expires_at IS NULL);
+
+
+--
+-- Name: collections_full_text_search_idx; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX collections_full_text_search_idx ON collections USING gin (to_tsvector('english'::regconfig, (((((((((((((((((COALESCE(owner_uuid, ''::character varying))::text || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(portable_data_hash, ''::character varying))::text) || ' '::text) || (COALESCE(uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || COALESCE(properties, ''::text)) || ' '::text) || (COALESCE(file_names, ''::character varying))::text)));
 
 
 --
 -- Name: collections_search_index; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
 
-CREATE INDEX collections_search_index ON collections USING btree (owner_uuid, modified_by_client_uuid, modified_by_user_uuid, portable_data_hash, redundancy_confirmed_by_client_uuid, uuid, name, file_names);
+CREATE INDEX collections_search_index ON collections USING btree (owner_uuid, modified_by_client_uuid, modified_by_user_uuid, portable_data_hash, uuid, name, file_names);
+
+
+--
+-- Name: groups_full_text_search_idx; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX groups_full_text_search_idx ON groups USING gin (to_tsvector('english'::regconfig, (((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(group_class, ''::character varying))::text)));
 
 
 --
@@ -1986,6 +1999,13 @@ CREATE UNIQUE INDEX index_virtual_machines_on_uuid ON virtual_machines USING btr
 CREATE INDEX job_tasks_search_index ON job_tasks USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, job_uuid, created_by_job_task_uuid);
 
 
+--
+-- Name: jobs_full_text_search_idx; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX jobs_full_text_search_idx ON jobs USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(submit_id, ''::character varying))::text) || ' '::text) || (COALESCE(script, ''::character varying))::text) || ' '::text) || (COALESCE(script_version, ''::character varying))::text) || ' '::text) || COALESCE(script_parameters, ''::text)) || ' '::text) || (COALESCE(cancelled_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(cancelled_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(output, ''::character varying))::text) || ' '::text) || (COALESCE(is_locked_by_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(log, ''::character varying))::text) || ' '::text) || COALESCE(tasks_summary, ''::text)) || ' '::text) || COALESCE(runtime_constraints, ''::text)) || ' '::text) || (COALESCE(repository, ''::character varying))::text) || ' '::text) || (COALESCE(supplied_script_version, ''::character varying))::text) || ' '::text) || (COALESCE(docker_image_locator, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || (COALESCE(arvados_sdk_version, ''::character varying))::text)));
+
+
 --
 -- Name: jobs_search_index; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -2035,6 +2055,13 @@ CREATE INDEX logs_search_index ON logs USING btree (uuid, owner_uuid, modified_b
 CREATE INDEX nodes_search_index ON nodes USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, hostname, domain, ip_address, job_uuid);
 
 
+--
+-- Name: pipeline_instances_full_text_search_idx; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX pipeline_instances_full_text_search_idx ON pipeline_instances USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(pipeline_template_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text)) || ' '::text) || COALESCE(properties, ''::text)) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || COALESCE(components_summary, ''::text)) || ' '::text) || (COALESCE(description, ''::character varying))::text)));
+
+
 --
 -- Name: pipeline_instances_search_index; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -2049,6 +2076,13 @@ CREATE INDEX pipeline_instances_search_index ON pipeline_instances USING btree (
 CREATE UNIQUE INDEX pipeline_template_owner_uuid_name_unique ON pipeline_templates USING btree (owner_uuid, name);
 
 
+--
+-- Name: pipeline_templates_full_text_search_idx; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX pipeline_templates_full_text_search_idx ON pipeline_templates USING gin (to_tsvector('english'::regconfig, (((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text)) || ' '::text) || (COALESCE(description, ''::character varying))::text)));
+
+
 --
 -- Name: pipeline_templates_search_index; Type: INDEX; Schema: public; Owner: -; Tablespace: 
 --
@@ -2314,4 +2348,16 @@ INSERT INTO schema_migrations (version) VALUES ('20141208174553');
 
 INSERT INTO schema_migrations (version) VALUES ('20141208174653');
 
-INSERT INTO schema_migrations (version) VALUES ('20141208185217');
\ No newline at end of file
+INSERT INTO schema_migrations (version) VALUES ('20141208185217');
+
+INSERT INTO schema_migrations (version) VALUES ('20150122175935');
+
+INSERT INTO schema_migrations (version) VALUES ('20150123142953');
+
+INSERT INTO schema_migrations (version) VALUES ('20150203180223');
+
+INSERT INTO schema_migrations (version) VALUES ('20150206210804');
+
+INSERT INTO schema_migrations (version) VALUES ('20150206230342');
+
+INSERT INTO schema_migrations (version) VALUES ('20150216193428');
\ No newline at end of file
index 9f78587eabe9de75e37f49268d508d832203ad46..2e78612fc2d8b0ea3883cbb964d73c3e443e9c21 100644 (file)
@@ -54,48 +54,44 @@ module CurrentApiClient
   end
 
   def system_user
-    if not $system_user
+    $system_user = check_cache $system_user do
       real_current_user = Thread.current[:user]
-      Thread.current[:user] = User.new(is_admin: true,
-                                       is_active: true,
-                                       uuid: system_user_uuid)
-      $system_user = User.where('uuid=?', system_user_uuid).first
-      if !$system_user
-        $system_user = User.new(uuid: system_user_uuid,
-                                is_active: true,
-                                is_admin: true,
-                                email: 'root',
-                                first_name: 'root',
-                                last_name: '')
-        $system_user.save!
-        $system_user.reload
+      begin
+        Thread.current[:user] = User.new(is_admin: true,
+                                         is_active: true,
+                                         uuid: system_user_uuid)
+        User.where(uuid: system_user_uuid).
+          first_or_create!(is_active: true,
+                           is_admin: true,
+                           email: 'root',
+                           first_name: 'root',
+                           last_name: '')
+      ensure
+        Thread.current[:user] = real_current_user
       end
-      Thread.current[:user] = real_current_user
     end
-    $system_user
   end
 
   def system_group
-    if not $system_group
+    $system_group = check_cache $system_group do
       act_as_system_user do
         ActiveRecord::Base.transaction do
-          $system_group = Group.
-            where(uuid: system_group_uuid).first_or_create do |g|
-            g.update_attributes(name: "System group",
-                                description: "System group")
+          Group.where(uuid: system_group_uuid).
+            first_or_create!(name: "System group",
+                             description: "System group") do |g|
+            g.save!
             User.all.collect(&:uuid).each do |user_uuid|
-              Link.create(link_class: 'permission',
-                          name: 'can_manage',
-                          tail_kind: 'arvados#group',
-                          tail_uuid: system_group_uuid,
-                          head_kind: 'arvados#user',
-                          head_uuid: user_uuid)
+              Link.create!(link_class: 'permission',
+                           name: 'can_manage',
+                           tail_kind: 'arvados#group',
+                           tail_uuid: system_group_uuid,
+                           head_kind: 'arvados#user',
+                           head_uuid: user_uuid)
             end
           end
         end
       end
     end
-    $system_group
   end
 
   def all_users_group_uuid
@@ -105,19 +101,16 @@ module CurrentApiClient
   end
 
   def all_users_group
-    if not $all_users_group
+    $all_users_group = check_cache $all_users_group do
       act_as_system_user do
         ActiveRecord::Base.transaction do
-          $all_users_group = Group.
-            where(uuid: all_users_group_uuid).first_or_create do |g|
-            g.update_attributes(name: "All users",
-                                description: "All users",
-                                group_class: "role")
-          end
+          Group.where(uuid: all_users_group_uuid).
+            first_or_create!(name: "All users",
+                             description: "All users",
+                             group_class: "role")
         end
       end
     end
-    $all_users_group
   end
 
   def act_as_system_user
@@ -141,49 +134,48 @@ module CurrentApiClient
   end
 
   def anonymous_group
-    if not $anonymous_group
+    $anonymous_group = check_cache $anonymous_group do
       act_as_system_user do
         ActiveRecord::Base.transaction do
-          $anonymous_group = Group.
-          where(uuid: anonymous_group_uuid).first_or_create do |g|
-            g.update_attributes(name: "Anonymous group",
-                                description: "Anonymous group")
-          end
+          Group.where(uuid: anonymous_group_uuid).
+            first_or_create!(group_class: "role",
+                             name: "Anonymous users",
+                             description: "Anonymous users")
         end
       end
     end
-    $anonymous_group
   end
 
-  def anonymous_user
-    if not $anonymous_user
+  def anonymous_group_read_permission
+    $anonymous_group_read_permission =
+        check_cache $anonymous_group_read_permission do
       act_as_system_user do
-        $anonymous_user = User.where('uuid=?', anonymous_user_uuid).first
-        if !$anonymous_user
-          $anonymous_user = User.new(uuid: anonymous_user_uuid,
-                                     is_active: false,
-                                     is_admin: false,
-                                     email: 'anonymouspublic',
-                                     first_name: 'anonymouspublic',
-                                     last_name: 'anonymouspublic')
-          $anonymous_user.save!
-          $anonymous_user.reload
-        end
-
-        group_perms = Link.where(tail_uuid: anonymous_user_uuid,
-                                 head_uuid: anonymous_group_uuid,
-                                 link_class: 'permission',
-                                 name: 'can_read')
+        Link.where(tail_uuid: all_users_group.uuid,
+                   head_uuid: anonymous_group.uuid,
+                   link_class: "permission",
+                   name: "can_read").first_or_create!
+      end
+    end
+  end
 
-        if !group_perms.any?
-          group_perm = Link.create!(tail_uuid: anonymous_user_uuid,
-                                    head_uuid: anonymous_group_uuid,
-                                    link_class: 'permission',
-                                    name: 'can_read')
+  def anonymous_user
+    $anonymous_user = check_cache $anonymous_user do
+      act_as_system_user do
+        User.where(uuid: anonymous_user_uuid).
+          first_or_create!(is_active: false,
+                           is_admin: false,
+                           email: 'anonymous',
+                           first_name: 'Anonymous',
+                           last_name: '') do |u|
+          u.save!
+          Link.where(tail_uuid: anonymous_user_uuid,
+                     head_uuid: anonymous_group.uuid,
+                     link_class: 'permission',
+                     name: 'can_read').
+            first_or_create!
         end
       end
     end
-    $anonymous_user
   end
 
   def empty_collection_uuid
@@ -191,15 +183,42 @@ module CurrentApiClient
   end
 
   def empty_collection
-    if not $empty_collection
+    $empty_collection = check_cache $empty_collection do
       act_as_system_user do
         ActiveRecord::Base.transaction do
-          $empty_collection = Collection.
+          Collection.
             where(portable_data_hash: empty_collection_uuid).
             first_or_create!(manifest_text: '', owner_uuid: anonymous_group.uuid)
         end
       end
     end
-    $empty_collection
+  end
+
+  private
+
+  # If the given value is nil, or the cache has been cleared since it
+  # was set, yield. Otherwise, return the given value.
+  def check_cache value
+    if not Rails.env.test? and
+        ActionController::Base.cache_store.is_a? ActiveSupport::Cache::FileStore and
+        not File.owned? ActionController::Base.cache_store.cache_path
+      # If we don't own the cache dir, we're probably
+      # crunch-dispatch. Whoever we are, using this cache is likely to
+      # either fail or screw up the cache for someone else. So we'll
+      # just assume the $globals are OK to live forever.
+      #
+      # The reason for making the globals expire with the cache in the
+      # first place is to avoid leaking state between test cases: in
+      # production, we don't expect the database seeds to ever go away
+      # even when the cache is cleared, so there's no particular
+      # reason to expire our global variables.
+    else
+      Rails.cache.fetch "CurrentApiClient.$globals" do
+        value = nil
+        true
+      end
+    end
+    return value unless value.nil?
+    yield
   end
 end
index 3f1a3b223a851f46c171a58d5b182a6790df12f1..35f1d0b640cbf9f8b7aa907b1336b5a61e1b5fa4 100644 (file)
@@ -47,10 +47,6 @@ module LoadParam
     end
   end
 
-  def default_orders
-    ["#{table_name}.modified_at desc"]
-  end
-
   # Load params[:limit], params[:offset] and params[:order]
   # into @limit, @offset, @orders
   def load_limit_offset_order_params
@@ -113,9 +109,11 @@ module LoadParam
       end
     end
 
-    if @orders.empty?
-      @orders = default_orders
-    end
+    # If the client-specified orders don't amount to a full ordering
+    # (e.g., [] or ['owner_uuid desc']), fall back on the default
+    # orders to ensure repeating the same request (possibly with
+    # different limit/offset) will return records in the same order.
+    @orders += model_class.default_orders
 
     case params[:select]
     when Array
index 9408dcfade120e5b68235f952eb980ef7c443c89..c009bf537f1966fd16326ef3bc239c7d6d623f99 100644 (file)
@@ -22,7 +22,7 @@ module RecordFilters
     ar_table_name = model_class.table_name
     filters.each do |filter|
       attrs_in, operator, operand = filter
-      if attrs_in == 'any'
+      if attrs_in == 'any' && operator != '@@'
         attrs = model_class.searchable_columns(operator)
       elsif attrs_in.is_a? Array
         attrs = attrs_in
@@ -34,7 +34,25 @@ module RecordFilters
       elsif !operator.is_a? String
         raise ArgumentError.new("Invalid operator '#{operator}' (#{operator.class}) in filter")
       end
+
       cond_out = []
+
+      if operator == '@@'
+        # Full-text search
+        if attrs_in != 'any'
+          raise ArgumentError.new("Full text search on individual columns is not supported")
+        end
+        if operand.is_a? Array
+          raise ArgumentError.new("Full text search not supported for array operands")
+        end
+
+        # Skip the generic per-column operator loop below
+        attrs = []
+        # Use to_tsquery since plainto_tsquery does not support prefix
+        # search. And, split operand and join the words with ' & '
+        cond_out << model_class.full_text_tsvector+" @@ to_tsquery(?)"
+        param_out << operand.split.join(' & ')
+      end
       attrs.each do |attr|
         if !model_class.searchable_columns(operator).index attr.to_s
           raise ArgumentError.new("Invalid attribute '#{attr}' in filter")
index f28606a09b445e21d39d13238113e69410a47346..220122533bea050ef23cb70cef5ba9f5454ef89b 100644 (file)
@@ -58,6 +58,18 @@ baz_file:
   manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
   name: baz_file
 
+w_a_z_file:
+  uuid: zzzzz-4zz18-25k12570yk134b3
+  portable_data_hash: 8706aadd12a0ebc07d74cae88762ba9e+56
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-09T10:53:38Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2015-02-09T10:53:38Z
+  updated_at: 2015-02-09T10:53:38Z
+  manifest_text: ". 4c6c2c0ac8aa0696edd7316a3be5ca3c+5 0:5:w\\040\\141\\040z\n"
+  name: "\"w a z\" file"
+
 multilevel_collection_1:
   uuid: zzzzz-4zz18-pyw8yp9g3pr7irn
   portable_data_hash: 1fd08fc162a5c6413070a8bd0bffc818+150
@@ -143,6 +155,16 @@ user_agreement_in_anonymously_accessible_project:
   manifest_text: ". 6a4ff0499484c6c79c95cd8c566bd25f+249025 0:249025:GNU_General_Public_License,_version_3.pdf\n"
   name: GNU General Public License, version 3
 
+public_text_file:
+  uuid: zzzzz-4zz18-4en62shvi99lxd4
+  portable_data_hash: 55713e6a34081eb03609e7ad5fcad129+62
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  created_at: 2015-02-12 16:58:03 -0500
+  modified_at: 2015-02-12 16:58:03 -0500
+  updated_at: 2015-02-12 16:58:03 -0500
+  manifest_text: ". f0ef7081e1539ac00ef5b761b4fb01b3+12 0:12:Hello\\040world.txt\n"
+  name: Hello world
+
 baz_collection_name_in_asubproject:
   uuid: zzzzz-4zz18-lsitwcf548ui4oe
   portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
@@ -347,12 +369,12 @@ collection_with_no_name_in_aproject:
 
 collection_to_search_for_in_aproject:
   uuid: zzzzz-4zz18-abcd6fx123409f7
-  portable_data_hash: 5bd9c1ad0bc8c7f34be170a7b7b39089+45
+  portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
   owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
   created_at: 2014-04-21 15:37:48 -0400
   modified_at: 2014-04-21 15:37:48 -0400
   updated_at: 2014-04-21 15:37:48 -0400
-  manifest_text: ". juku76584cc2f85cedef654fjyhtgimh+3 0:3:foo\n"
+  manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
   name: "zzzzz-4zz18-abcd6fx123409f7 used to search with any"
 
 upload_sandbox:
@@ -366,6 +388,88 @@ upload_sandbox:
   manifest_text: ''
   name: upload sandbox
 
+collection_with_unique_words_to_test_full_text_search:
+  uuid: zzzzz-4zz18-mnt690klmb51aud
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: collection_with_some_unique_words
+  description: The quick_brown_fox jumps over the lazy_dog
+
+replication_undesired_unconfirmed:
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-07 00:19:28.596506247 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2015-02-07 00:19:28.596338465 Z
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  replication_desired: ~
+  replication_confirmed_at: ~
+  replication_confirmed: ~
+  updated_at: 2015-02-07 00:19:28.596236608 Z
+  uuid: zzzzz-4zz18-wjxq7uzx2m9jj4a
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: replication want=null have=null
+
+replication_desired_2_unconfirmed:
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-07 00:21:35.050333515 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2015-02-07 00:21:35.050189104 Z
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  replication_desired: 2
+  replication_confirmed_at: ~
+  replication_confirmed: ~
+  updated_at: 2015-02-07 00:21:35.050126576 Z
+  uuid: zzzzz-4zz18-3t236wrz4769h7x
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: replication want=2 have=null
+
+replication_desired_2_confirmed_2:
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-07 00:19:28.596506247 Z
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2015-02-07 00:19:28.596338465 Z
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  replication_desired: 2
+  replication_confirmed_at: 2015-02-07 00:24:52.983381227 Z
+  replication_confirmed: 2
+  updated_at: 2015-02-07 00:24:52.983381227 Z
+  uuid: zzzzz-4zz18-434zv1tnnf2rygp
+  manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo 3:6:bar\n"
+  name: replication want=2 have=2
+
+collection_with_empty_properties:
+  uuid: zzzzz-4zz18-emptyproperties
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-13T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2015-02-13T17:22:54Z
+  updated_at: 2015-02-13T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: collection with empty properties
+  properties: {}
+
+collection_with_one_property:
+  uuid: zzzzz-4zz18-withoneproperty
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-02-13T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2015-02-13T17:22:54Z
+  updated_at: 2015-02-13T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: collection with one property
+  properties:
+    property1: value1
+
 # Test Helper trims the rest of the file
 
 # Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
index 86815c04633f2ebac6d73cace66fb7f5e8306ee0..fb23c8cdf4ee80ee02aae6172825308bb18a1b12 100644 (file)
@@ -116,8 +116,9 @@ bad_group_has_ownership_cycle_b:
 anonymous_group:
   uuid: zzzzz-j7d0g-anonymouspublic
   owner_uuid: zzzzz-tpzed-000000000000000
-  name: Anonymous group
-  description: Anonymous group
+  name: Anonymous users
+  group_class: role
+  description: Anonymous users
 
 anonymously_accessible_project:
   uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
index 888cb2af7f578e1dccd59251f8f8864b29995d24..78120042aa625bec49a4c5bdf27f0abeb00f572b 100644 (file)
@@ -76,6 +76,8 @@ foobar:
   cancelled_at: ~
   cancelled_by_user_uuid: ~
   cancelled_by_client_uuid: ~
+  script: hash
+  repository: foo
   script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
   script_parameters:
     input: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
@@ -371,6 +373,58 @@ graph_stage3:
     input2: "stuff2"
   output: ea10d51bcf88862dbcc36eb292017dfd+45
 
+job_with_latest_version:
+  uuid: zzzzz-8i9sb-nj8ioxnrvjtyk2b
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  script: hash
+  repository: foo
+  script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
+  supplied_script_version: master
+  script_parameters:
+    input: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  created_at: <%= 4.minute.ago.to_s(:db) %>
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: <%= 2.minute.ago.to_s(:db) %>
+  running: false
+  success: true
+  output: fa7aeb5140e2848d39b416daeef4ffc5+45
+  priority: 0
+  log: ea10d51bcf88862dbcc36eb292017dfd+45
+  is_locked_by_uuid: ~
+  tasks_summary:
+    failed: 0
+    todo: 0
+    running: 0
+    done: 1
+  runtime_constraints: {}
+  state: Complete
+
+running_job_in_publicly_accessible_project:
+  uuid: zzzzz-8i9sb-n7omg50bvt0m1nf
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: foo
+  script: running_job_script
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  state: Running
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    input2: "stuff2"
+
+completed_job_in_publicly_accessible_project:
+  uuid: zzzzz-8i9sb-jyq01m7in1jlofj
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: foo
+  script: completed_job_script
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  state: Complete
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    input2: "stuff2"
 
 # Test Helper trims the rest of the file
 
index 4d576a25c14ab73f96989564ac0948811071e9b0..b8856efd38cea9d0e32677a87ceff1a8877a0077 100644 (file)
@@ -26,6 +26,20 @@ user_agreement_readable:
   head_uuid: zzzzz-4zz18-t68oksiu9m80s4y
   properties: {}
 
+all_users_can_read_anonymous_group:
+  uuid: zzzzz-o0j2j-0lhbqyjab4g0bwp
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2015-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2015-01-24 20:42:26 -0800
+  updated_at: 2015-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-j7d0g-fffffffffffffff
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-anonymouspublic
+  properties: {}
+
 active_user_member_of_all_users_group:
   uuid: zzzzz-o0j2j-ctbysaduejxfrs5
   owner_uuid: zzzzz-tpzed-000000000000000
@@ -306,6 +320,20 @@ repository3_readable_by_active:
   head_uuid: zzzzz-s0uqq-38orljkqpyo1j61
   properties: {}
 
+repository4_writable_by_active:
+  uuid: zzzzz-o0j2j-lio9debdt6yhkil
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-09-23 13:52:46 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-09-23 13:52:46 -0400
+  updated_at: 2014-09-23 13:52:46 -0400
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_write
+  head_uuid: zzzzz-s0uqq-38oru8hnk57ht34
+  properties: {}
+
 miniadmin_user_is_a_testusergroup_admin:
   uuid: zzzzz-o0j2j-38vvkciz7qc12j9
   owner_uuid: zzzzz-tpzed-000000000000000
@@ -498,6 +526,20 @@ anonymous_group_can_read_anonymously_accessible_project:
   head_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
   properties: {}
 
+anonymous_user_can_read_anonymously_accessible_project:
+  uuid: zzzzz-o0j2j-82nbli3jptwksj1
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-05-30 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-05-30 14:30:00.184019565 Z
+  updated_at: 2014-05-30 14:30:00.183829316 Z
+  link_class: permission
+  name: can_read
+  tail_uuid: zzzzz-tpzed-anonymouspublic
+  head_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  properties: {}
+
 user_agreement_readable_by_anonymously_accessible_project:
   uuid: zzzzz-o0j2j-o5ds5gvhkztdc8h
   owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
index d42120a570b520eb0ec7ba40afcdc589502d230c..69e1f7d5ff7d2da848ee5308ff78eca202da1a36 100644 (file)
@@ -184,6 +184,53 @@ pipeline_instance_in_fuse_project:
           dataclass: Collection
           title: foo instance input
 
+pipeline_owned_by_active_in_aproject:
+  name: Completed pipeline in A Project
+  state: Complete
+  uuid: zzzzz-d1hrv-ju5ghi0i9z2kqc6
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-09-15 12:00:00
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo instance input
+
+pipeline_owned_by_active_in_home:
+  name: Completed pipeline in active user home
+  state: Complete
+  uuid: zzzzz-d1hrv-lihrbd0i9z2kqc6
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-09-15 12:00:00
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo instance input
+
+pipeline_in_publicly_accessible_project:
+  uuid: zzzzz-d1hrv-n68vc490mloy4fi
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  name: Pipeline in publicly accessible project
+  state: Complete
+  created_at: 2014-09-15 12:00:00
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo instance input
 
 # Test Helper trims the rest of the file
 
index 260eab8852025a83ae938af45164908e41414f1a..40bf63dd7e2108b490841c940e12a7aa34253432 100644 (file)
@@ -164,3 +164,42 @@ template_with_dataclass_file:
           default: [1,1,2,3,5]
         array_with_value: # important to test repeating values in the array!
           value: [1,1,2,3,5]
+
+template_with_dataclass_number:
+  uuid: zzzzz-p5p6p-numbertemplatea
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2015-01-14 12:35:04 -0400
+  updated_at: 2015-01-14 12:35:04 -0400
+  modified_at: 2015-01-14 12:35:04 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Template with dataclass number
+  components:
+    work:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: number
+          title: "Input number"
+
+pipeline_template_in_publicly_accessible_project:
+  uuid: zzzzz-p5p6p-tmpltpublicproj
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  created_at: 2014-04-14 12:35:04 -0400
+  updated_at: 2014-04-14 12:35:04 -0400
+  modified_at: 2014-04-14 12:35:04 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Pipeline template in publicly accessible project
+  components:
+    foo_component:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: "default input"
+          description: "input collection"
index 5775f8ac80759be42b0ade7240d1d029a52bd4f5..a0e3b1f480f95aff5421d30fba7b3a446a03c568 100644 (file)
@@ -23,6 +23,11 @@ repository3:
   owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
   name: foo3
 
+repository4:
+  uuid: zzzzz-s0uqq-38oru8hnk57ht34
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
+  name: foo4
+
 auto_setup_repository:
   uuid: zzzzz-s0uqq-382brabc8rp3667
   owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
index c65ae2f4c0a2544e8f0c55a02e7d73ac20e47442..54ffe66f174baf341ae19a00a58c71b578f9b3ce 100644 (file)
@@ -345,14 +345,15 @@ EOS
   end
 
   test "search collections with 'any' operator" do
+    expect_pdh = collections(:docker_image).portable_data_hash
     authorize_with :active
     get :index, {
-      where: { any: ['contains', 'd0bc8c7f34be170a7b7b'] }
+      where: { any: ['contains', expect_pdh[5..25]] }
     }
     assert_response :success
-    found = assigns(:objects).collect(&:portable_data_hash)
+    found = assigns(:objects)
     assert_equal 1, found.count
-    assert_equal true, !!found.index('5bd9c1ad0bc8c7f34be170a7b7b39089+45')
+    assert_equal expect_pdh, found.first.portable_data_hash
   end
 
   [false, true].each do |permit_unsigned|
@@ -695,4 +696,42 @@ EOS
       assert_response expected_response
     end
   end
+
+  [1, 5, nil].each do |ask|
+    test "Set replication_desired=#{ask.inspect}" do
+      Rails.configuration.default_collection_replication = 2
+      authorize_with :active
+      put :update, {
+        id: collections(:replication_undesired_unconfirmed).uuid,
+        collection: {
+          replication_desired: ask,
+        },
+      }
+      assert_response :success
+      assert_equal ask, json_response['replication_desired']
+    end
+  end
+
+  test "get collection with properties" do
+    authorize_with :active
+    get :show, {id: collections(:collection_with_one_property).uuid}
+    assert_response :success
+    assert_not_nil json_response['uuid']
+    assert_equal 'value1', json_response['properties']['property1']
+  end
+
+  test "create collection with properties" do
+    authorize_with :active
+    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
+    post :create, {
+      collection: {
+        manifest_text: manifest_text,
+        portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47",
+        properties: {'property_1' => 'value_1'}
+      }
+    }
+    assert_response :success
+    assert_not_nil json_response['uuid']
+    assert_equal 'value_1', json_response['properties']['property_1']
+  end
 end
index 2e8e231a9723a78b10af6dbb747044d526e76307..9344b0bc75c3be0c9bc878207df52275a1190e88 100644 (file)
@@ -13,4 +13,86 @@ class Arvados::V1::FiltersTest < ActionController::TestCase
     assert_includes(found.collect(&:group_class), nil,
                     "'group_class not in ['project']' filter should pass null")
   end
+
+  test 'error message for non-array element in filters array' do
+    @controller = Arvados::V1::CollectionsController.new
+    authorize_with :active
+    get :index, {
+      filters: [{bogus: 'filter'}],
+    }
+    assert_response 422
+    assert_match(/Invalid element in filters array/,
+                 json_response['errors'].join(' '))
+  end
+
+  test 'error message for full text search on a specific column' do
+    @controller = Arvados::V1::CollectionsController.new
+    authorize_with :active
+    get :index, {
+      filters: [['uuid', '@@', 'abcdef']],
+    }
+    assert_response 422
+    assert_match /not supported/, json_response['errors'].join(' ')
+  end
+
+  test 'difficult characters in full text search' do
+    @controller = Arvados::V1::CollectionsController.new
+    authorize_with :active
+    get :index, {
+      filters: [['any', '@@', 'a|b"c']],
+    }
+    assert_response :success
+    # (Doesn't matter so much which results are returned.)
+  end
+
+  test 'array operand in full text search' do
+    @controller = Arvados::V1::CollectionsController.new
+    authorize_with :active
+    get :index, {
+      filters: [['any', '@@', ['abc', 'def']]],
+    }
+    assert_response 422
+    assert_match /not supported/, json_response['errors'].join(' ')
+  end
+
+  test 'api responses provide timestamps with nanoseconds' do
+    @controller = Arvados::V1::CollectionsController.new
+    authorize_with :active
+    get :index
+    assert_response :success
+    assert_not_empty json_response['items']
+    json_response['items'].each do |item|
+      %w(created_at modified_at).each do |attr|
+        # Pass fixtures with null timestamps.
+        next if item[attr].nil?
+        assert_match /^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d.\d{9}Z$/, item[attr]
+      end
+    end
+  end
+
+  %w(< > <= >= =).each do |operator|
+    test "timestamp #{operator} filters work with nanosecond precision" do
+      # Python clients like Node Manager rely on this exact format.
+      # If you must change this format for some reason, make sure you
+      # coordinate the change with them.
+      expect_match = !!operator.index('=')
+      mine = act_as_user users(:active) do
+        Collection.create!(manifest_text: '')
+      end
+      timestamp = mine.modified_at.strftime('%Y-%m-%dT%H:%M:%S.%NZ')
+      @controller = Arvados::V1::CollectionsController.new
+      authorize_with :active
+      get :index, {
+        filters: [['modified_at', operator, timestamp],
+                  ['uuid', '=', mine.uuid]],
+      }
+      assert_response :success
+      uuids = json_response['items'].map { |item| item['uuid'] }
+      if expect_match
+        assert_includes uuids, mine.uuid
+      else
+        assert_not_includes uuids, mine.uuid
+      end
+    end
+  end
 end
index c974076c6fc15610e2e062e3b89dc926375f7355..922612fb38cd53bb3387569b477121f537cc575a 100644 (file)
@@ -75,7 +75,6 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
     get :contents, {
       id: groups(:aproject).uuid,
       format: :json,
-      include_linked: true,
     }
     check_project_contents_response
   end
@@ -85,7 +84,6 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
     get :contents, {
       id: groups(:aproject).uuid,
       format: :json,
-      include_linked: true,
     }
     check_project_contents_response
   end
@@ -176,7 +174,6 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
     authorize_with :project_viewer
     get :contents, {
       format: :json,
-      include_linked: false,
       filters: [['uuid', 'is_a', 'arvados#specimen']]
     }
     assert_response :success
@@ -294,6 +291,20 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
     end
   end
 
+  test "Collection contents don't include manifest_text" do
+    authorize_with :active
+    get :contents, {
+      id: groups(:aproject).uuid,
+      filters: [["uuid", "is_a", "arvados#collection"]],
+      format: :json,
+    }
+    assert_response :success
+    refute(json_response["items"].any? { |c| not c["portable_data_hash"] },
+           "response included an item without a portable data hash")
+    refute(json_response["items"].any? { |c| c.include?("manifest_text") },
+           "response included an item with a manifest text")
+  end
+
   test 'get writable_by list for owned group' do
     authorize_with :active
     get :show, {
index bea76aabfd09339c7e6e7a639451b0fcaa21c858..4251047cea6b74ece4d8e4b1473d554e59daeb7e 100644 (file)
@@ -165,6 +165,7 @@ class CollectionsApiTest < ActionDispatch::IntegrationTest
     }, auth(:active)
     assert_response :success
     assert_equal true, json_response['manifest_text'].include?('my_test_file.txt')
+    assert_includes json_response['manifest_text'], 'my_test_file.txt'
 
     created = json_response
 
@@ -179,8 +180,8 @@ class CollectionsApiTest < ActionDispatch::IntegrationTest
     }, auth(:active)
     assert_response :success
     assert_equal created['uuid'], json_response['uuid']
-    assert_equal true, json_response['manifest_text'].include?('my_updated_test_file.txt')
-    assert_equal false, json_response['manifest_text'].include?('my_test_file.txt')
+    assert_includes json_response['manifest_text'], 'my_updated_test_file.txt'
+    assert_not_includes json_response['manifest_text'], 'my_test_file.txt'
 
     # search using the new filename
     search_using_filter 'my_updated_test_file.txt', 1
@@ -196,12 +197,101 @@ class CollectionsApiTest < ActionDispatch::IntegrationTest
     response_items = json_response['items']
     assert_not_nil response_items
     if expected_items == 0
-      assert_equal 0, json_response['items_available']
-      assert_equal 0, response_items.size
+      assert_empty response_items
     else
-      assert_equal expected_items, response_items.size
+      refute_empty response_items
       first_item = response_items.first
       assert_not_nil first_item
     end
   end
+
+  test "search collection using full text search" do
+    # create collection to be searched for
+    signed_manifest = Collection.sign_manifest(". 85877ca2d7e05498dd3d109baf2df106+95+A3a4e26a366ee7e4ed3e476ccf05354761be2e4ae@545a9920 0:95:file_in_subdir1\n./subdir2/subdir3 2bbc341c702df4d8f42ec31f16c10120+64+A315d7e7bad2ce937e711fc454fae2d1194d14d64@545a9920 0:32:file1_in_subdir3.txt 32:32:file2_in_subdir3.txt\n./subdir2/subdir3/subdir4 2bbc341c702df4d8f42ec31f16c10120+64+A315d7e7bad2ce937e711fc454fae2d1194d14d64@545a9920 0:32:file3_in_subdir4.txt 32:32:file4_in_subdir4.txt\n", api_token(:active))
+    post "/arvados/v1/collections", {
+      format: :json,
+      collection: {description: 'specific collection description', manifest_text: signed_manifest}.to_json,
+    }, auth(:active)
+    assert_response :success
+    assert_equal true, json_response['manifest_text'].include?('file4_in_subdir4.txt')
+
+    created = json_response
+
+    # search using the filename
+    search_using_full_text_search 'subdir2', 0
+    search_using_full_text_search 'subdir2:*', 1
+    search_using_full_text_search 'subdir2/subdir3/subdir4', 1
+    search_using_full_text_search 'file4:*', 1
+    search_using_full_text_search 'file4_in_subdir4.txt', 1
+    search_using_full_text_search 'subdir2 file4:*', 0      # first word is incomplete
+    search_using_full_text_search 'subdir2/subdir3/subdir4 file4:*', 1
+    search_using_full_text_search 'subdir2/subdir3/subdir4 file4_in_subdir4.txt', 1
+    search_using_full_text_search 'ile4', 0                 # not a prefix match
+  end
+
+  def search_using_full_text_search search_filter, expected_items
+    get '/arvados/v1/collections', {
+      :filters => [['any', '@@', search_filter]].to_json
+    }, auth(:active)
+    assert_response :success
+    response_items = json_response['items']
+    assert_not_nil response_items
+    if expected_items == 0
+      assert_empty response_items
+    else
+      refute_empty response_items
+      first_item = response_items.first
+      assert_not_nil first_item
+    end
+  end
+
+  # search for the filename in the file_names column and expect error
+  test "full text search not supported for individual columns" do
+    get '/arvados/v1/collections', {
+      :filters => [['name', '@@', 'General']].to_json
+    }, auth(:active)
+    assert_response 422
+  end
+
+  [
+    'quick fox',
+    'quick_brown fox',
+    'brown_ fox',
+    'fox dogs',
+  ].each do |search_filter|
+    test "full text search ignores special characters and finds with filter #{search_filter}" do
+      # description: The quick_brown_fox jumps over the lazy_dog
+      # full text search treats '_' as space apparently
+      get '/arvados/v1/collections', {
+        :filters => [['any', '@@', search_filter]].to_json
+      }, auth(:active)
+      assert_response 200
+      response_items = json_response['items']
+      assert_not_nil response_items
+      first_item = response_items.first
+      refute_empty first_item
+      assert_equal first_item['description'], 'The quick_brown_fox jumps over the lazy_dog'
+    end
+  end
+
+  test "create and get collection with properties" do
+    # create collection to be searched for
+    signed_manifest = Collection.sign_manifest(". bad42fa702ae3ea7d888fef11b46f450+44 0:44:my_test_file.txt\n", api_token(:active))
+    post "/arvados/v1/collections", {
+      format: :json,
+      collection: {manifest_text: signed_manifest}.to_json,
+    }, auth(:active)
+    assert_response 200
+    assert_not_nil json_response['uuid']
+    assert_not_nil json_response['properties']
+    assert_empty json_response['properties']
+
+    # update collection's description
+    put "/arvados/v1/collections/#{json_response['uuid']}", {
+      format: :json,
+      collection: { properties: {'property_1' => 'value_1'} }
+    }, auth(:active)
+    assert_response :success
+    assert_equal 'value_1', json_response['properties']['property_1']
+  end
 end
index 0f6f93aa1307bf5b90743f3915ef0d75e88b18c7..2afece9563c092a1ad221883ba601e9334de3aa0 100644 (file)
@@ -1,6 +1,25 @@
 require 'test_helper'
 
 class GroupsTest < ActionDispatch::IntegrationTest
+  [[], ['replication_confirmed']].each do |orders|
+    test "results are consistent when provided orders #{orders} is incomplete" do
+      last = nil
+      (0..20).each do
+        get '/arvados/v1/groups/contents', {
+          id: groups(:aproject).uuid,
+          filters: [["uuid", "is_a", "arvados#collection"]].to_json,
+          orders: orders.to_json,
+          format: :json,
+        }, auth(:active)
+        assert_response :success
+        if last.nil?
+          last = json_response['items']
+        else
+          assert_equal last, json_response['items']
+        end
+      end
+    end
+  end
 
   test "get all pages of group-owned objects" do
     limit = 5
@@ -9,8 +28,6 @@ class GroupsTest < ActionDispatch::IntegrationTest
     uuid_received = {}
     owner_received = {}
     while true
-      @json_response = nil
-
       get "/arvados/v1/groups/contents", {
         id: groups(:aproject).uuid,
         limit: limit,
@@ -39,4 +56,39 @@ class GroupsTest < ActionDispatch::IntegrationTest
     end
   end
 
+  [
+    ['Collection_', true],            # collections and pipelines templates
+    ['hash', true],                   # pipeline templates
+    ['fa7aeb5140e2848d39b', false],   # script_parameter of pipeline instances
+    ['fa7aeb5140e2848d39b:*', true],  # script_parameter of pipeline instances
+    ['project pipeline', true],       # finds "Completed pipeline in A Project"
+    ['project pipeli:*', true],       # finds "Completed pipeline in A Project"
+    ['proje pipeli:*', false],        # first word is incomplete, so no prefix match
+    ['no-such-thing', false],         # script_parameter of pipeline instances
+  ].each do |search_filter, expect_results|
+    test "full text search of group-owned objects for #{search_filter}" do
+      get "/arvados/v1/groups/contents", {
+        id: groups(:aproject).uuid,
+        limit: 5,
+        :filters => [['any', '@@', search_filter]].to_json
+      }, auth(:active)
+      assert_response :success
+      if expect_results
+        refute_empty json_response['items']
+        json_response['items'].each do |item|
+          assert item['uuid']
+          assert_equal groups(:aproject).uuid, item['owner_uuid']
+        end
+      else
+        assert_empty json_response['items']
+      end
+    end
+  end
+
+  test "full text search is not supported for individual columns" do
+    get "/arvados/v1/groups/contents", {
+      :filters => [['name', '@@', 'Private']].to_json
+    }, auth(:active)
+    assert_response 422
+  end
 end
index cb69127a1c40f0f92f63569b05680f58ce7cf3b9..09dece2660f38b1ecac09acaaa50543a4224698b 100644 (file)
@@ -136,7 +136,7 @@ class ArvadosModelTest < ActiveSupport::TestCase
 
         indexes = ActiveRecord::Base.connection.indexes(table)
         search_index_by_columns = indexes.select do |index|
-          index.columns == search_index_columns
+          index.columns.sort == search_index_columns.sort
         end
         search_index_by_name = indexes.select do |index|
           index.name == "#{table}_search_index"
@@ -145,4 +145,26 @@ class ArvadosModelTest < ActiveSupport::TestCase
       end
     end
   end
+
+  test "selectable_attributes includes database attributes" do
+    assert_includes(Job.selectable_attributes, "success")
+  end
+
+  test "selectable_attributes includes non-database attributes" do
+    assert_includes(Job.selectable_attributes, "node_uuids")
+  end
+
+  test "selectable_attributes includes common attributes in extensions" do
+    assert_includes(Job.selectable_attributes, "uuid")
+  end
+
+  test "selectable_attributes does not include unexposed attributes" do
+    refute_includes(Job.selectable_attributes, "nodes")
+  end
+
+  test "selectable_attributes on a non-default template" do
+    attr_a = Job.selectable_attributes(:common)
+    assert_includes(attr_a, "uuid")
+    refute_includes(attr_a, "success")
+  end
 end
index 16d041bea2e93a41eede051ade8dc87882936aa1..d8b8365efa212f3447aceddec6decd2154520584 100644 (file)
@@ -81,4 +81,176 @@ class CollectionTest < ActiveSupport::TestCase
       end
     end
   end
+
+  test "full text search for collections" do
+    # file_names column does not get populated when fixtures are loaded, hence setup test data
+    act_as_system_user do
+      Collection.create(manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n")
+      Collection.create(manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n")
+      Collection.create(manifest_text: ". 85877ca2d7e05498dd3d109baf2df106+95+A3a4e26a366ee7e4ed3e476ccf05354761be2e4ae@545a9920 0:95:file_in_subdir1\n./subdir2/subdir3 2bbc341c702df4d8f42ec31f16c10120+64+A315d7e7bad2ce937e711fc454fae2d1194d14d64@545a9920 0:32:file1.txt 32:32:file2.txt\n./subdir2/subdir3/subdir4 2bbc341c702df4d8f42ec31f16c10120+64+A315d7e7bad2ce937e711fc454fae2d1194d14d64@545a9920 0:32:file3.txt 32:32:file4.txt")
+    end
+
+    [
+      ['foo', true],
+      ['foo bar', false],                     # no collection matching both
+      ['foo&bar', false],                     # no collection matching both
+      ['foo|bar', true],                      # works only no spaces between the words
+      ['Gnu public', true],                   # both prefixes found, though not consecutively
+      ['Gnu&public', true],                   # both prefixes found, though not consecutively
+      ['file4', true],                        # prefix match
+      ['file4.txt', true],                    # whole string match
+      ['filex', false],                       # no such prefix
+      ['subdir', true],                       # prefix matches
+      ['subdir2', true],
+      ['subdir2/', true],
+      ['subdir2/subdir3', true],
+      ['subdir2/subdir3/subdir4', true],
+      ['subdir2 file4', true],                # look for both prefixes
+      ['subdir4', false],                     # not a prefix match
+    ].each do |search_filter, expect_results|
+      search_filters = search_filter.split.each {|s| s.concat(':*')}.join('&')
+      results = Collection.where("#{Collection.full_text_tsvector} @@ to_tsquery(?)",
+                                 "#{search_filters}")
+      if expect_results
+        refute_empty results
+      else
+        assert_empty results
+      end
+    end
+  end
+
+  test 'portable data hash with missing size hints' do
+    [[". d41d8cd98f00b204e9800998ecf8427e+0+Bar 0:0:x",
+      ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:x"],
+     [". d41d8cd98f00b204e9800998ecf8427e+Foo 0:0:x",
+      ". d41d8cd98f00b204e9800998ecf8427e 0:0:x"],
+     [". d41d8cd98f00b204e9800998ecf8427e 0:0:x",
+      ". d41d8cd98f00b204e9800998ecf8427e 0:0:x"],
+    ].each do |unportable, portable|
+      c = Collection.new(manifest_text: unportable)
+      assert c.valid?
+      assert_equal(Digest::MD5.hexdigest(portable)+"+#{portable.length}",
+                   c.portable_data_hash)
+    end
+  end
+
+  [0, 2, 4, nil].each do |ask|
+    test "set replication_desired to #{ask.inspect}" do
+      Rails.configuration.default_collection_replication = 2
+      act_as_user users(:active) do
+        c = collections(:replication_undesired_unconfirmed)
+        c.update_attributes replication_desired: ask
+        assert_equal ask, c.replication_desired
+      end
+    end
+  end
+
+  test "replication_confirmed* can be set by admin user" do
+    c = collections(:replication_desired_2_unconfirmed)
+    act_as_user users(:admin) do
+      assert c.update_attributes(replication_confirmed: 2,
+                                 replication_confirmed_at: Time.now)
+    end
+  end
+
+  test "replication_confirmed* cannot be set by non-admin user" do
+    act_as_user users(:active) do
+      c = collections(:replication_desired_2_unconfirmed)
+      # Cannot set just one at a time.
+      assert_raise ArvadosModel::PermissionDeniedError do
+        c.update_attributes replication_confirmed: 1
+      end
+      assert_raise ArvadosModel::PermissionDeniedError do
+        c.update_attributes replication_confirmed_at: Time.now
+      end
+      # Cannot set both at once, either.
+      assert_raise ArvadosModel::PermissionDeniedError do
+        c.update_attributes(replication_confirmed: 1,
+                            replication_confirmed_at: Time.now)
+      end
+    end
+  end
+
+  test "replication_confirmed* can be cleared (but only together) by non-admin user" do
+    act_as_user users(:active) do
+      c = collections(:replication_desired_2_confirmed_2)
+      # Cannot clear just one at a time.
+      assert_raise ArvadosModel::PermissionDeniedError do
+        c.update_attributes replication_confirmed: nil
+      end
+      c.reload
+      assert_raise ArvadosModel::PermissionDeniedError do
+        c.update_attributes replication_confirmed_at: nil
+      end
+      # Can clear both at once.
+      c.reload
+      assert c.update_attributes(replication_confirmed: nil,
+                                 replication_confirmed_at: nil)
+    end
+  end
+
+  test "clear replication_confirmed* when introducing a new block in manifest" do
+    c = collections(:replication_desired_2_confirmed_2)
+    act_as_user users(:active) do
+      assert c.update_attributes(manifest_text: collections(:user_agreement).signed_manifest_text)
+      assert_nil c.replication_confirmed
+      assert_nil c.replication_confirmed_at
+    end
+  end
+
+  test "don't clear replication_confirmed* when just renaming a file" do
+    c = collections(:replication_desired_2_confirmed_2)
+    act_as_user users(:active) do
+      new_manifest = c.signed_manifest_text.sub(':bar', ':foo')
+      assert c.update_attributes(manifest_text: new_manifest)
+      assert_equal 2, c.replication_confirmed
+      assert_not_nil c.replication_confirmed_at
+    end
+  end
+
+  test "don't clear replication_confirmed* when just deleting a data block" do
+    c = collections(:replication_desired_2_confirmed_2)
+    act_as_user users(:active) do
+      new_manifest = c.signed_manifest_text
+      new_manifest.sub!(/ \S+:bar/, '')
+      new_manifest.sub!(/ acbd\S+/, '')
+
+      # Confirm that we did just remove a block from the manifest (if
+      # not, this test would pass without testing the relevant case):
+      assert_operator new_manifest.length+40, :<, c.signed_manifest_text.length
+
+      assert c.update_attributes(manifest_text: new_manifest)
+      assert_equal 2, c.replication_confirmed
+      assert_not_nil c.replication_confirmed_at
+    end
+  end
+
+  test "create collection with properties" do
+    act_as_system_user do
+      c = Collection.create(manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n",
+                            properties: {'property_1' => 'value_1'})
+      assert c.valid?
+      assert_equal 'value_1', c.properties['property_1']
+    end
+  end
+
+  test 'create, delete, recreate collection with same name and owner' do
+    act_as_user users(:active) do
+      # create collection with name
+      c = Collection.create(manifest_text: '',
+                            name: "test collection name")
+      assert c.valid?
+      uuid = c.uuid
+
+      # mark collection as expired
+      c.update_attribute 'expires_at', Time.new.strftime("%Y-%m-%d")
+      c = Collection.where(uuid: uuid)
+      assert_empty c, 'Should not be able to find expired collection'
+
+      # recreate collection with the same name
+      c = Collection.create(manifest_text: '',
+                            name: "test collection name")
+      assert c.valid?
+    end
+  end
 end
index 028f403a286e2b27dbb456f69b703ebc6387110b..16ce54bbe0b045aa8f2d5c3f8f0d2c0a22c22106 100644 (file)
@@ -34,6 +34,11 @@ class LinkTest < ActiveSupport::TestCase
     end
   end
 
+  test "non-admin project owner can make it public" do
+    assert(new_active_link_valid?(tail_uuid: groups(:anonymous_group).uuid),
+           "non-admin project owner can't make their project public")
+  end
+
   test "link granting permission to nonexistent user is invalid" do
     refute new_active_link_valid?(tail_uuid:
                                   users(:active).uuid.sub(/-\w+$/, "-#{'z' * 15}"))
index df72e246a6cedadf53386c3420c6f8d168ded8d6..65af8ce2bd9a732e1e7d2ace6772f618670cc5dc 100644 (file)
@@ -7,7 +7,7 @@ class WebsocketTestRunner < MiniTest::Unit
   def _system(*cmd)
     Bundler.with_clean_env do
       if not system({'ARVADOS_WEBSOCKETS' => 'ws-only', 'RAILS_ENV' => 'test'}, *cmd)
-        raise RuntimeError, "#{cmd[0]} returned exit code #{$?.exitstatus}"
+        raise RuntimeError, "Command failed with exit status #{$?}: #{cmd.inspect}"
       end
     end
   end
@@ -34,7 +34,13 @@ class WebsocketTestRunner < MiniTest::Unit
     begin
       super(args)
     ensure
-      Process.kill('TERM', server_pid)
+      Dir.chdir($ARV_API_SERVER_DIR) do
+        _system('passenger', 'stop', '-p3002')
+      end
+      # DatabaseCleaner leaves the database empty. Prefer to leave it full.
+      dc = DatabaseController.new
+      dc.define_singleton_method :render do |*args| end
+      dc.reset
     end
   end
 end
diff --git a/services/arv-web/README b/services/arv-web/README
new file mode 100644 (file)
index 0000000..eaf7624
--- /dev/null
@@ -0,0 +1,6 @@
+arv-web enables you to run a custom web service using the contents of an
+Arvados collection.
+
+See "Using arv-web" in the Arvados user guide:
+
+http://doc.arvados.org/user/topics/arv-web.html
diff --git a/services/arv-web/arv-web.py b/services/arv-web/arv-web.py
new file mode 100755 (executable)
index 0000000..e731558
--- /dev/null
@@ -0,0 +1,252 @@
+#!/usr/bin/env python
+
+# arv-web enables you to run a custom web service from the contents of an Arvados collection.
+#
+# See http://doc.arvados.org/user/topics/arv-web.html
+
+import arvados
+import subprocess
+from arvados_fuse import Operations, SafeApi, CollectionDirectory
+import tempfile
+import os
+import llfuse
+import threading
+import Queue
+import argparse
+import logging
+import signal
+import sys
+import functools
+
+logger = logging.getLogger('arvados.arv-web')
+logger.setLevel(logging.INFO)
+
+class ArvWeb(object):
+    def __init__(self, project, docker_image, port):
+        self.project = project
+        self.loop = True
+        self.cid = None
+        self.prev_docker_image = None
+        self.mountdir = None
+        self.collection = None
+        self.override_docker_image = docker_image
+        self.port = port
+        self.evqueue = Queue.Queue()
+        self.api = SafeApi(arvados.config)
+
+        if arvados.util.group_uuid_pattern.match(project) is None:
+            raise arvados.errors.ArgumentError("Project uuid is not valid")
+
+        collections = self.api.collections().list(filters=[["owner_uuid", "=", project]],
+                        limit=1,
+                        order='modified_at desc').execute()['items']
+        self.newcollection = collections[0]['uuid'] if collections else None
+
+        self.ws = arvados.events.subscribe(self.api, [["object_uuid", "is_a", "arvados#collection"]], self.on_message)
+
+    def check_docker_running(self):
+        # It would be less hacky to use "docker events" than poll "docker ps"
+        # but that would require writing a bigger pile of code.
+        if self.cid:
+            ps = subprocess.check_output(["docker", "ps", "--no-trunc=true", "--filter=status=running"])
+            for l in ps.splitlines():
+                if l.startswith(self.cid):
+                    return True
+        return False
+
+    # Handle messages from Arvados event bus.
+    def on_message(self, ev):
+        if 'event_type' in ev:
+            old_attr = None
+            if 'old_attributes' in ev['properties'] and ev['properties']['old_attributes']:
+                old_attr = ev['properties']['old_attributes']
+            if self.project not in (ev['properties']['new_attributes']['owner_uuid'],
+                                    old_attr['owner_uuid'] if old_attr else None):
+                return
+
+            et = ev['event_type']
+            if ev['event_type'] == 'update':
+                if ev['properties']['new_attributes']['owner_uuid'] != ev['properties']['old_attributes']['owner_uuid']:
+                    if self.project == ev['properties']['new_attributes']['owner_uuid']:
+                        et = 'add'
+                    else:
+                        et = 'remove'
+                if ev['properties']['new_attributes']['expires_at'] is not None:
+                    et = 'remove'
+
+            self.evqueue.put((self.project, et, ev['object_uuid']))
+
+    # Run an arvados_fuse mount under the control of the local process.  This lets
+    # us switch out the contents of the directory without having to unmount and
+    # remount.
+    def run_fuse_mount(self):
+        self.mountdir = tempfile.mkdtemp()
+
+        self.operations = Operations(os.getuid(), os.getgid(), "utf-8")
+        self.cdir = CollectionDirectory(llfuse.ROOT_INODE, self.operations.inodes, self.api, 2, self.collection)
+        self.operations.inodes.add_entry(self.cdir)
+
+        # Initialize the fuse connection
+        llfuse.init(self.operations, self.mountdir, ['allow_other'])
+
+        t = threading.Thread(None, llfuse.main)
+        t.start()
+
+        # wait until the driver is finished initializing
+        self.operations.initlock.wait()
+
+    def mount_collection(self):
+        if self.newcollection != self.collection:
+            self.collection = self.newcollection
+            if not self.mountdir and self.collection:
+                self.run_fuse_mount()
+
+            if self.mountdir:
+                with llfuse.lock:
+                    self.cdir.clear()
+                    # Switch the FUSE directory object so that it stores
+                    # the newly selected collection
+                    if self.collection:
+                        logger.info("Mounting %s", self.collection)
+                    else:
+                        logger.info("Mount is empty")
+                    self.cdir.change_collection(self.collection)
+
+
+    def stop_docker(self):
+        if self.cid:
+            logger.info("Stopping Docker container")
+            subprocess.call(["docker", "stop", self.cid])
+            self.cid = None
+
+    def run_docker(self):
+        try:
+            if self.collection is None:
+                self.stop_docker()
+                return
+
+            docker_image = None
+            if self.override_docker_image:
+                docker_image = self.override_docker_image
+            else:
+                try:
+                    with llfuse.lock:
+                        if "docker_image" in self.cdir:
+                            docker_image = self.cdir["docker_image"].readfrom(0, 1024).strip()
+                except IOError as e:
+                    pass
+
+            has_reload = False
+            try:
+                with llfuse.lock:
+                    has_reload = "reload" in self.cdir
+            except IOError as e:
+                pass
+
+            if docker_image is None:
+                logger.error("Collection must contain a file 'docker_image' or must specify --image on the command line.")
+                self.stop_docker()
+                return
+
+            if docker_image == self.prev_docker_image and self.cid is not None and has_reload:
+                logger.info("Running container reload command")
+                subprocess.check_call(["docker", "exec", self.cid, "/mnt/reload"])
+                return
+
+            self.stop_docker()
+
+            logger.info("Starting Docker container %s", docker_image)
+            self.cid = subprocess.check_output(["docker", "run",
+                                                "--detach=true",
+                                                "--publish=%i:80" % (self.port),
+                                                "--volume=%s:/mnt:ro" % self.mountdir,
+                                                docker_image]).strip()
+
+            self.prev_docker_image = docker_image
+            logger.info("Container id %s", self.cid)
+
+        except subprocess.CalledProcessError:
+            self.cid = None
+
+    def wait_for_events(self):
+        if not self.cid:
+            logger.warning("No service running!  Will wait for a new collection to appear in the project.")
+        else:
+            logger.info("Waiting for events")
+
+        running = True
+        self.loop = True
+        while running:
+            # Main run loop.  Wait on project events, signals, or the
+            # Docker container stopping.
+
+            try:
+                # Poll the queue with a 1 second timeout, if we have no
+                # timeout the Python runtime doesn't have a chance to
+                # process SIGINT or SIGTERM.
+                eq = self.evqueue.get(True, 1)
+                logger.info("%s %s", eq[1], eq[2])
+                self.newcollection = self.collection
+                if eq[1] in ('add', 'update', 'create'):
+                    self.newcollection = eq[2]
+                elif eq[1] == 'remove':
+                    collections = self.api.collections().list(filters=[["owner_uuid", "=", self.project]],
+                                                        limit=1,
+                                                        order='modified_at desc').execute()['items']
+                    self.newcollection = collections[0]['uuid'] if collections else None
+                running = False
+            except Queue.Empty:
+                pass
+
+            if self.cid and not self.check_docker_running():
+                logger.warning("Service has terminated.  Will try to restart.")
+                self.cid = None
+                running = False
+
+
+    def run(self):
+        try:
+            while self.loop:
+                self.loop = False
+                self.mount_collection()
+                try:
+                    self.run_docker()
+                    self.wait_for_events()
+                except (KeyboardInterrupt):
+                    logger.info("Got keyboard interrupt")
+                    self.ws.close()
+                    self.loop = False
+                except Exception as e:
+                    logger.exception("Caught fatal exception, shutting down")
+                    self.ws.close()
+                    self.loop = False
+        finally:
+            self.stop_docker()
+
+            if self.mountdir:
+                logger.info("Unmounting")
+                subprocess.call(["fusermount", "-u", self.mountdir])
+                os.rmdir(self.mountdir)
+
+
+def main(argv):
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--project-uuid', type=str, required=True, help="Project uuid to watch")
+    parser.add_argument('--port', type=int, default=8080, help="Host port to listen on (default 8080)")
+    parser.add_argument('--image', type=str, help="Docker image to run")
+
+    args = parser.parse_args(argv)
+
+    signal.signal(signal.SIGTERM, lambda signal, frame: sys.exit(0))
+
+    try:
+        arvweb = ArvWeb(args.project_uuid, args.image, args.port)
+        arvweb.run()
+    except arvados.errors.ArgumentError as e:
+        logger.error(e)
+        return 1
+
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv[1:]))
diff --git a/services/arv-web/sample-cgi-app/docker_image b/services/arv-web/sample-cgi-app/docker_image
new file mode 100644 (file)
index 0000000..57f344f
--- /dev/null
@@ -0,0 +1 @@
+arvados/arv-web
\ No newline at end of file
diff --git a/services/arv-web/sample-cgi-app/public/.htaccess b/services/arv-web/sample-cgi-app/public/.htaccess
new file mode 100644 (file)
index 0000000..e5145bd
--- /dev/null
@@ -0,0 +1,3 @@
+Options +ExecCGI
+AddHandler cgi-script .cgi
+DirectoryIndex index.cgi
diff --git a/services/arv-web/sample-cgi-app/public/index.cgi b/services/arv-web/sample-cgi-app/public/index.cgi
new file mode 100755 (executable)
index 0000000..57bc2a9
--- /dev/null
@@ -0,0 +1,4 @@
+#!/usr/bin/perl
+
+print "Content-type: text/html\n\n";
+print "Hello world from perl!";
diff --git a/services/arv-web/sample-cgi-app/tmp/.keepkeep b/services/arv-web/sample-cgi-app/tmp/.keepkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/arv-web/sample-rack-app/config.ru b/services/arv-web/sample-rack-app/config.ru
new file mode 100644 (file)
index 0000000..84bb0da
--- /dev/null
@@ -0,0 +1,4 @@
+app = proc do |env|
+    [200, { "Content-Type" => "text/html" }, ["hello <b>world</b> from ruby"]]
+end
+run app
diff --git a/services/arv-web/sample-rack-app/docker_image b/services/arv-web/sample-rack-app/docker_image
new file mode 100644 (file)
index 0000000..57f344f
--- /dev/null
@@ -0,0 +1 @@
+arvados/arv-web
\ No newline at end of file
diff --git a/services/arv-web/sample-rack-app/public/.keepkeep b/services/arv-web/sample-rack-app/public/.keepkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/arv-web/sample-rack-app/tmp/.keepkeep b/services/arv-web/sample-rack-app/tmp/.keepkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/arv-web/sample-static-page/docker_image b/services/arv-web/sample-static-page/docker_image
new file mode 100644 (file)
index 0000000..57f344f
--- /dev/null
@@ -0,0 +1 @@
+arvados/arv-web
\ No newline at end of file
diff --git a/services/arv-web/sample-static-page/public/index.html b/services/arv-web/sample-static-page/public/index.html
new file mode 100644 (file)
index 0000000..a2e485c
--- /dev/null
@@ -0,0 +1,6 @@
+<html>
+  <head><title>arv-web sample</title></head>
+  <body>
+    <p>Hello world static page</p>
+  </body>
+</html>
diff --git a/services/arv-web/sample-static-page/tmp/.keepkeep b/services/arv-web/sample-static-page/tmp/.keepkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/arv-web/sample-wsgi-app/docker_image b/services/arv-web/sample-wsgi-app/docker_image
new file mode 100644 (file)
index 0000000..57f344f
--- /dev/null
@@ -0,0 +1 @@
+arvados/arv-web
\ No newline at end of file
diff --git a/services/arv-web/sample-wsgi-app/passenger_wsgi.py b/services/arv-web/sample-wsgi-app/passenger_wsgi.py
new file mode 100644 (file)
index 0000000..ea918f0
--- /dev/null
@@ -0,0 +1,3 @@
+def application(environ, start_response):
+    start_response('200 OK', [('Content-Type', 'text/plain')])
+    return [b"hello world from python!\n"]
diff --git a/services/arv-web/sample-wsgi-app/public/.keepkeep b/services/arv-web/sample-wsgi-app/public/.keepkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/arv-web/sample-wsgi-app/tmp/.keepkeep b/services/arv-web/sample-wsgi-app/tmp/.keepkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/datamanager/collection/collection.go b/services/datamanager/collection/collection.go
new file mode 100644 (file)
index 0000000..9a7a838
--- /dev/null
@@ -0,0 +1,293 @@
+/* Deals with parsing Collection responses from API Server. */
+
+package collection
+
+import (
+       "flag"
+       "fmt"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/blockdigest"
+       "git.curoverse.com/arvados.git/sdk/go/logger"
+       "git.curoverse.com/arvados.git/sdk/go/manifest"
+       "git.curoverse.com/arvados.git/sdk/go/util"
+       "git.curoverse.com/arvados.git/services/datamanager/loggerutil"
+       "log"
+       "os"
+       "runtime"
+       "runtime/pprof"
+       "time"
+)
+
+var (
+       heap_profile_filename string
+       // globals for debugging
+       totalManifestSize uint64
+       maxManifestSize   uint64
+)
+
+type Collection struct {
+       Uuid              string
+       OwnerUuid         string
+       ReplicationLevel  int
+       BlockDigestToSize map[blockdigest.BlockDigest]int
+       TotalSize         int
+}
+
+type ReadCollections struct {
+       ReadAllCollections    bool
+       UuidToCollection      map[string]Collection
+       OwnerToCollectionSize map[string]int
+}
+
+type GetCollectionsParams struct {
+       Client    arvadosclient.ArvadosClient
+       Logger    *logger.Logger
+       BatchSize int
+}
+
+type SdkCollectionInfo struct {
+       Uuid         string    `json:"uuid"`
+       OwnerUuid    string    `json:"owner_uuid"`
+       Redundancy   int       `json:"redundancy"`
+       ModifiedAt   time.Time `json:"modified_at"`
+       ManifestText string    `json:"manifest_text"`
+}
+
+type SdkCollectionList struct {
+       ItemsAvailable int                 `json:"items_available"`
+       Items          []SdkCollectionInfo `json:"items"`
+}
+
+func init() {
+       flag.StringVar(&heap_profile_filename,
+               "heap-profile",
+               "",
+               "File to write the heap profiles to. Leave blank to skip profiling.")
+}
+
+// Write the heap profile to a file for later review.
+// Since a file is expected to only contain a single heap profile this
+// function overwrites the previously written profile, so it is safe
+// to call multiple times in a single run.
+// Otherwise we would see cumulative numbers as explained here:
+// https://groups.google.com/d/msg/golang-nuts/ZyHciRglQYc/2nh4Ndu2fZcJ
+func WriteHeapProfile() {
+       if heap_profile_filename != "" {
+
+               heap_profile, err := os.Create(heap_profile_filename)
+               if err != nil {
+                       log.Fatal(err)
+               }
+
+               defer heap_profile.Close()
+
+               err = pprof.WriteHeapProfile(heap_profile)
+               if err != nil {
+                       log.Fatal(err)
+               }
+       }
+}
+
+func GetCollectionsAndSummarize(params GetCollectionsParams) (results ReadCollections) {
+       results = GetCollections(params)
+       ComputeSizeOfOwnedCollections(&results)
+
+       if params.Logger != nil {
+               params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
+                       collectionInfo := p["collection_info"].(map[string]interface{})
+                       // Since maps are shallow copied, we run a risk of concurrent
+                       // updates here. By copying results.OwnerToCollectionSize into
+                       // the log, we're assuming that it won't be updated.
+                       collectionInfo["owner_to_collection_size"] = results.OwnerToCollectionSize
+               })
+       }
+
+       log.Printf("Uuid to Size used: %v", results.OwnerToCollectionSize)
+       log.Printf("Read and processed %d collections",
+               len(results.UuidToCollection))
+
+       // TODO(misha): Add a "readonly" flag. If we're in readonly mode,
+       // lots of behaviors can become warnings (and obviously we can't
+       // write anything).
+       // if !readCollections.ReadAllCollections {
+       //      log.Fatalf("Did not read all collections")
+       // }
+
+       return
+}
+
+func GetCollections(params GetCollectionsParams) (results ReadCollections) {
+       if &params.Client == nil {
+               log.Fatalf("params.Client passed to GetCollections() should " +
+                       "contain a valid ArvadosClient, but instead it is nil.")
+       }
+
+       fieldsWanted := []string{"manifest_text",
+               "owner_uuid",
+               "uuid",
+               // TODO(misha): Start using the redundancy field.
+               "redundancy",
+               "modified_at"}
+
+       sdkParams := arvadosclient.Dict{
+               "select":  fieldsWanted,
+               "order":   []string{"modified_at ASC"},
+               "filters": [][]string{[]string{"modified_at", ">=", "1900-01-01T00:00:00Z"}}}
+
+       if params.BatchSize > 0 {
+               sdkParams["limit"] = params.BatchSize
+       }
+
+       initialNumberOfCollectionsAvailable, err :=
+               util.NumberItemsAvailable(params.Client, "collections")
+       if err != nil {
+               loggerutil.FatalWithMessage(params.Logger,
+                       fmt.Sprintf("Error querying collection count: %v", err))
+       }
+       // Include a 1% margin for collections added while we're reading so
+       // that we don't have to grow the map in most cases.
+       maxExpectedCollections := int(
+               float64(initialNumberOfCollectionsAvailable) * 1.01)
+       results.UuidToCollection = make(map[string]Collection, maxExpectedCollections)
+
+       if params.Logger != nil {
+               params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
+                       collectionInfo := make(map[string]interface{})
+                       collectionInfo["num_collections_at_start"] = initialNumberOfCollectionsAvailable
+                       collectionInfo["batch_size"] = params.BatchSize
+                       p["collection_info"] = collectionInfo
+               })
+       }
+
+       // These values are just for getting the loop to run the first time,
+       // afterwards they'll be set to real values.
+       previousTotalCollections := -1
+       totalCollections := 0
+       for totalCollections > previousTotalCollections {
+               // We're still finding new collections
+
+               // Write the heap profile for examining memory usage
+               WriteHeapProfile()
+
+               // Get next batch of collections.
+               var collections SdkCollectionList
+               err := params.Client.List("collections", sdkParams, &collections)
+               if err != nil {
+                       loggerutil.FatalWithMessage(params.Logger,
+                               fmt.Sprintf("Error querying collections: %v", err))
+               }
+
+               // Process collection and update our date filter.
+               sdkParams["filters"].([][]string)[0][2] =
+                       ProcessCollections(params.Logger,
+                               collections.Items,
+                               results.UuidToCollection).Format(time.RFC3339)
+
+               // update counts
+               previousTotalCollections = totalCollections
+               totalCollections = len(results.UuidToCollection)
+
+               log.Printf("%d collections read, %d new in last batch, "+
+                       "%s latest modified date, %.0f %d %d avg,max,total manifest size",
+                       totalCollections,
+                       totalCollections-previousTotalCollections,
+                       sdkParams["filters"].([][]string)[0][2],
+                       float32(totalManifestSize)/float32(totalCollections),
+                       maxManifestSize, totalManifestSize)
+
+               if params.Logger != nil {
+                       params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
+                               collectionInfo := p["collection_info"].(map[string]interface{})
+                               collectionInfo["collections_read"] = totalCollections
+                               collectionInfo["latest_modified_date_seen"] = sdkParams["filters"].([][]string)[0][2]
+                               collectionInfo["total_manifest_size"] = totalManifestSize
+                               collectionInfo["max_manifest_size"] = maxManifestSize
+                       })
+               }
+       }
+
+       // Just in case this lowers the numbers reported in the heap profile.
+       runtime.GC()
+
+       // Write the heap profile for examining memory usage
+       WriteHeapProfile()
+
+       return
+}
+
+// StrCopy returns a newly allocated string.
+// It is useful to copy slices so that the garbage collector can reuse
+// the memory of the longer strings they came from.
+func StrCopy(s string) string {
+       return string([]byte(s))
+}
+
+func ProcessCollections(arvLogger *logger.Logger,
+       receivedCollections []SdkCollectionInfo,
+       uuidToCollection map[string]Collection) (latestModificationDate time.Time) {
+       for _, sdkCollection := range receivedCollections {
+               collection := Collection{Uuid: StrCopy(sdkCollection.Uuid),
+                       OwnerUuid:         StrCopy(sdkCollection.OwnerUuid),
+                       ReplicationLevel:  sdkCollection.Redundancy,
+                       BlockDigestToSize: make(map[blockdigest.BlockDigest]int)}
+
+               if sdkCollection.ModifiedAt.IsZero() {
+                       loggerutil.FatalWithMessage(arvLogger,
+                               fmt.Sprintf(
+                                       "Arvados SDK collection returned with unexpected zero "+
+                                               "modifcation date. This probably means that either we failed to "+
+                                               "parse the modification date or the API server has changed how "+
+                                               "it returns modification dates: %v",
+                                       collection))
+               }
+
+               if sdkCollection.ModifiedAt.After(latestModificationDate) {
+                       latestModificationDate = sdkCollection.ModifiedAt
+               }
+               manifest := manifest.Manifest{sdkCollection.ManifestText}
+               manifestSize := uint64(len(sdkCollection.ManifestText))
+
+               if _, alreadySeen := uuidToCollection[collection.Uuid]; !alreadySeen {
+                       totalManifestSize += manifestSize
+               }
+               if manifestSize > maxManifestSize {
+                       maxManifestSize = manifestSize
+               }
+
+               blockChannel := manifest.BlockIterWithDuplicates()
+               for block := range blockChannel {
+                       if stored_size, stored := collection.BlockDigestToSize[block.Digest]; stored && stored_size != block.Size {
+                               message := fmt.Sprintf(
+                                       "Collection %s contains multiple sizes (%d and %d) for block %s",
+                                       collection.Uuid,
+                                       stored_size,
+                                       block.Size,
+                                       block.Digest)
+                               loggerutil.FatalWithMessage(arvLogger, message)
+                       }
+                       collection.BlockDigestToSize[block.Digest] = block.Size
+               }
+               collection.TotalSize = 0
+               for _, size := range collection.BlockDigestToSize {
+                       collection.TotalSize += size
+               }
+               uuidToCollection[collection.Uuid] = collection
+
+               // Clear out all the manifest strings that we don't need anymore.
+               // These hopefully form the bulk of our memory usage.
+               manifest.Text = ""
+               sdkCollection.ManifestText = ""
+       }
+
+       return
+}
+
+func ComputeSizeOfOwnedCollections(readCollections *ReadCollections) {
+       readCollections.OwnerToCollectionSize = make(map[string]int)
+       for _, coll := range readCollections.UuidToCollection {
+               readCollections.OwnerToCollectionSize[coll.OwnerUuid] =
+                       readCollections.OwnerToCollectionSize[coll.OwnerUuid] + coll.TotalSize
+       }
+
+       return
+}
diff --git a/services/datamanager/datamanager.go b/services/datamanager/datamanager.go
new file mode 100644 (file)
index 0000000..a8e506e
--- /dev/null
@@ -0,0 +1,101 @@
+/* Keep Datamanager. Responsible for checking on and reporting on Keep Storage */
+
+package main
+
+import (
+       "flag"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/logger"
+       "git.curoverse.com/arvados.git/sdk/go/util"
+       "git.curoverse.com/arvados.git/services/datamanager/collection"
+       "git.curoverse.com/arvados.git/services/datamanager/keep"
+       "git.curoverse.com/arvados.git/services/datamanager/loggerutil"
+       "log"
+       "time"
+)
+
+var (
+       logEventTypePrefix  string
+       logFrequencySeconds int
+       minutesBetweenRuns  int
+)
+
+func init() {
+       flag.StringVar(&logEventTypePrefix,
+               "log-event-type-prefix",
+               "experimental-data-manager",
+               "Prefix to use in the event_type of our arvados log entries. Set to empty to turn off logging")
+       flag.IntVar(&logFrequencySeconds,
+               "log-frequency-seconds",
+               20,
+               "How frequently we'll write log entries in seconds.")
+       flag.IntVar(&minutesBetweenRuns,
+               "minutes-between-runs",
+               0,
+               "How many minutes we wait betwen data manager runs. 0 means run once and exit.")
+}
+
+func main() {
+       flag.Parse()
+       if minutesBetweenRuns == 0 {
+               singlerun()
+       } else {
+               waitTime := time.Minute * time.Duration(minutesBetweenRuns)
+               for {
+                       log.Println("Beginning Run")
+                       singlerun()
+                       log.Printf("Sleeping for %d minutes", minutesBetweenRuns)
+                       time.Sleep(waitTime)
+               }
+       }
+}
+
+func singlerun() {
+       arv, err := arvadosclient.MakeArvadosClient()
+       if err != nil {
+               log.Fatalf("Error setting up arvados client %s", err.Error())
+       }
+
+       if is_admin, err := util.UserIsAdmin(arv); err != nil {
+               log.Fatalf("Error querying current arvados user %s", err.Error())
+       } else if !is_admin {
+               log.Fatalf("Current user is not an admin. Datamanager can only be run by admins.")
+       }
+
+       var arvLogger *logger.Logger
+       if logEventTypePrefix != "" {
+               arvLogger = logger.NewLogger(logger.LoggerParams{Client: arv,
+                       EventTypePrefix: logEventTypePrefix,
+                       WriteInterval:   time.Second * time.Duration(logFrequencySeconds)})
+       }
+
+       loggerutil.LogRunInfo(arvLogger)
+       if arvLogger != nil {
+               arvLogger.AddWriteHook(loggerutil.LogMemoryAlloc)
+       }
+
+       collectionChannel := make(chan collection.ReadCollections)
+
+       go func() {
+               collectionChannel <- collection.GetCollectionsAndSummarize(
+                       collection.GetCollectionsParams{
+                               Client: arv, Logger: arvLogger, BatchSize: 50})
+       }()
+
+       keepServerInfo := keep.GetKeepServersAndSummarize(
+               keep.GetKeepServersParams{Client: arv, Logger: arvLogger, Limit: 1000})
+
+       readCollections := <-collectionChannel
+
+       // TODO(misha): Use these together to verify replication.
+       _ = readCollections
+       _ = keepServerInfo
+
+       // Log that we're finished. We force the recording, since go will
+       // not wait for the timer before exiting.
+       if arvLogger != nil {
+               arvLogger.FinalUpdate(func(p map[string]interface{}, e map[string]interface{}) {
+                       p["run_info"].(map[string]interface{})["finished_at"] = time.Now()
+               })
+       }
+}
diff --git a/services/datamanager/keep/keep.go b/services/datamanager/keep/keep.go
new file mode 100644 (file)
index 0000000..93246bc
--- /dev/null
@@ -0,0 +1,444 @@
+/* Deals with getting Keep Server blocks from API Server and Keep Servers. */
+
+package keep
+
+import (
+       "bufio"
+       "encoding/json"
+       "flag"
+       "fmt"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/blockdigest"
+       "git.curoverse.com/arvados.git/sdk/go/logger"
+       "git.curoverse.com/arvados.git/sdk/go/manifest"
+       "git.curoverse.com/arvados.git/services/datamanager/loggerutil"
+       "io/ioutil"
+       "log"
+       "net/http"
+       "strconv"
+       "strings"
+       "sync"
+       "time"
+)
+
+// ServerAddress identifies a single keep server: host/port pair plus
+// the service's Arvados UUID. JSON tags match the keep_services API
+// response fields.
+type ServerAddress struct {
+       Host string `json:"service_host"`
+       Port int    `json:"service_port"`
+       Uuid string `json:"uuid"`
+}
+
+// Info about a particular block returned by the server
+type BlockInfo struct {
+       Digest blockdigest.BlockDigest
+       Size   int
+       Mtime  int64 // TODO(misha): Replace this with a timestamp.
+}
+
+// Info about a specified block given by a server
+type BlockServerInfo struct {
+       ServerIndex int
+       Size        int
+       Mtime       int64 // TODO(misha): Replace this with a timestamp.
+}
+
+// ServerContents holds one keep server's full index: every block
+// digest it reported, with size and mtime.
+type ServerContents struct {
+       BlockDigestToInfo map[blockdigest.BlockDigest]BlockInfo
+}
+
+// ServerResponse pairs a server's address with the contents read from
+// its /index endpoint.
+type ServerResponse struct {
+       Address  ServerAddress
+       Contents ServerContents
+}
+
+// ReadServers aggregates the results of reading all keep servers.
+type ReadServers struct {
+       ReadAllServers           bool
+       KeepServerIndexToAddress []ServerAddress
+       KeepServerAddressToIndex map[ServerAddress]int
+       ServerToContents         map[ServerAddress]ServerContents
+       BlockToServers           map[blockdigest.BlockDigest][]BlockServerInfo
+       BlockReplicationCounts   map[int]int
+}
+
+// GetKeepServersParams are the arguments to GetKeepServers().
+type GetKeepServersParams struct {
+       Client arvadosclient.ArvadosClient
+       Logger *logger.Logger
+       Limit  int
+}
+
+// KeepServiceList mirrors the API server's keep_services list response.
+type KeepServiceList struct {
+       ItemsAvailable int             `json:"items_available"`
+       KeepServers    []ServerAddress `json:"items"`
+}
+
+var (
+       // Don't access the token directly, use getDataManagerToken() to
+       // make sure it's been read.
+       dataManagerToken             string
+       dataManagerTokenFile         string
+       dataManagerTokenFileReadOnce sync.Once
+)
+
+// init registers the -data-manager-token-file command line flag.
+func init() {
+       flag.StringVar(&dataManagerTokenFile,
+               "data-manager-token-file",
+               "",
+               "File with the API token we should use to contact keep servers.")
+}
+
+// TODO(misha): Change this to include the UUID as well.
+func (s ServerAddress) String() string {
+       return fmt.Sprintf("%s:%d", s.Host, s.Port)
+}
+
+// getDataManagerToken returns the API token read from
+// dataManagerTokenFile, reading the file at most once on first use.
+// Exits the program via loggerutil.FatalWithMessage if the flag was
+// not given or the file cannot be read.
+func getDataManagerToken(arvLogger *logger.Logger) string {
+       readDataManagerToken := func() {
+               if dataManagerTokenFile == "" {
+                       // The token file flag is required; show usage before bailing.
+                       flag.Usage()
+                       loggerutil.FatalWithMessage(arvLogger,
+                               "Data Manager Token needed, but data manager token file not specified.")
+               } else {
+                       rawRead, err := ioutil.ReadFile(dataManagerTokenFile)
+                       if err != nil {
+                               loggerutil.FatalWithMessage(arvLogger,
+                                       fmt.Sprintf("Unexpected error reading token file %s: %v",
+                                               dataManagerTokenFile,
+                                               err))
+                       }
+                       dataManagerToken = strings.TrimSpace(string(rawRead))
+               }
+       }
+
+       // sync.Once makes the lazy read safe for concurrent callers.
+       dataManagerTokenFileReadOnce.Do(readDataManagerToken)
+       return dataManagerToken
+}
+
+// GetKeepServersAndSummarize reads all keep servers' contents via
+// GetKeepServers() and then computes the per-block replication count
+// distribution, logging summary statistics along the way.
+func GetKeepServersAndSummarize(params GetKeepServersParams) (results ReadServers) {
+       results = GetKeepServers(params)
+       log.Printf("Returned %d keep disks", len(results.ServerToContents))
+
+       ComputeBlockReplicationCounts(&results)
+       log.Printf("Replication level distribution: %v",
+               results.BlockReplicationCounts)
+
+       return
+}
+
+// GetKeepServers requests the list of disk-type keep services from
+// the API server, then queries each server's /index concurrently and
+// merges the responses into the returned ReadServers. Any error is
+// fatal (the process exits), including receiving fewer servers than
+// the API reports available.
+func GetKeepServers(params GetKeepServersParams) (results ReadServers) {
+       // NOTE(review): the original code checked `&params.Client == nil`,
+       // which is always false in Go (the address of a field is never
+       // nil), so that check was dead. A misconfigured client surfaces
+       // as an error from List() below instead.
+       sdkParams := arvadosclient.Dict{
+               "filters": [][]string{[]string{"service_type", "=", "disk"}},
+       }
+       if params.Limit > 0 {
+               sdkParams["limit"] = params.Limit
+       }
+
+       var sdkResponse KeepServiceList
+       err := params.Client.List("keep_services", sdkParams, &sdkResponse)
+
+       if err != nil {
+               loggerutil.FatalWithMessage(params.Logger,
+                       fmt.Sprintf("Error requesting keep disks from API server: %v", err))
+       }
+
+       if params.Logger != nil {
+               params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
+                       keepInfo := make(map[string]interface{})
+
+                       keepInfo["num_keep_servers_available"] = sdkResponse.ItemsAvailable
+                       keepInfo["num_keep_servers_received"] = len(sdkResponse.KeepServers)
+                       keepInfo["keep_servers"] = sdkResponse.KeepServers
+
+                       p["keep_info"] = keepInfo
+               })
+       }
+
+       log.Printf("Received keep services list: %+v", sdkResponse)
+
+       if len(sdkResponse.KeepServers) < sdkResponse.ItemsAvailable {
+               loggerutil.FatalWithMessage(params.Logger,
+                       fmt.Sprintf("Did not receive all available keep servers: %+v", sdkResponse))
+       }
+
+       results.KeepServerIndexToAddress = sdkResponse.KeepServers
+       results.KeepServerAddressToIndex = make(map[ServerAddress]int)
+       for i, address := range results.KeepServerIndexToAddress {
+               results.KeepServerAddressToIndex[address] = i
+       }
+
+       log.Printf("Got Server Addresses: %v", results)
+
+       // This is safe for concurrent use
+       client := http.Client{}
+
+       // Send off all the index requests concurrently
+       responseChan := make(chan ServerResponse)
+       for _, keepServer := range sdkResponse.KeepServers {
+               // The above keepServer variable is reused for each iteration,
+               // so it would be shared across all goroutines. This would
+               // result in us querying one server n times instead of n
+               // different servers as we intended. To avoid this we add it
+               // as an explicit parameter which gets copied. This bug and
+               // solution is described in
+               // https://golang.org/doc/effective_go.html#channels
+               go func(keepServer ServerAddress) {
+                       responseChan <- GetServerContents(params.Logger,
+                               keepServer,
+                               client)
+               }(keepServer)
+       }
+
+       results.ServerToContents = make(map[ServerAddress]ServerContents)
+       results.BlockToServers = make(map[blockdigest.BlockDigest][]BlockServerInfo)
+
+       // Read all the responses: exactly one arrives per server queried.
+       for range sdkResponse.KeepServers {
+               response := <-responseChan
+               log.Printf("Received channel response from %v containing %d files",
+                       response.Address,
+                       len(response.Contents.BlockDigestToInfo))
+               results.ServerToContents[response.Address] = response.Contents
+               serverIndex := results.KeepServerAddressToIndex[response.Address]
+               for _, blockInfo := range response.Contents.BlockDigestToInfo {
+                       results.BlockToServers[blockInfo.Digest] = append(
+                               results.BlockToServers[blockInfo.Digest],
+                               BlockServerInfo{ServerIndex: serverIndex,
+                                       Size:  blockInfo.Size,
+                                       Mtime: blockInfo.Mtime})
+               }
+       }
+       return
+}
+
+// GetServerContents queries one keep server: first its /status.json
+// endpoint (for logging), then its /index listing, which is parsed
+// into a ServerResponse. Exits the program on request failure.
+func GetServerContents(arvLogger *logger.Logger,
+       keepServer ServerAddress,
+       client http.Client) (response ServerResponse) {
+
+       GetServerStatus(arvLogger, keepServer, client)
+
+       req := CreateIndexRequest(arvLogger, keepServer)
+       resp, err := client.Do(req)
+       if err != nil {
+               loggerutil.FatalWithMessage(arvLogger,
+                       fmt.Sprintf("Error fetching %s: %v", req.URL.String(), err))
+       }
+
+       // ReadServerResponse takes ownership of resp and closes its body.
+       return ReadServerResponse(arvLogger, keepServer, resp)
+}
+
+// GetServerStatus fetches http://host:port/status.json from one keep
+// server, recording request/response timestamps and the decoded
+// status under the logger's keep_info entry for that server's UUID
+// (when a logger is present). Exits the program on any error.
+func GetServerStatus(arvLogger *logger.Logger,
+       keepServer ServerAddress,
+       client http.Client) {
+       url := fmt.Sprintf("http://%s:%d/status.json",
+               keepServer.Host,
+               keepServer.Port)
+
+       if arvLogger != nil {
+               now := time.Now()
+               arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
+                       keepInfo := p["keep_info"].(map[string]interface{})
+                       serverInfo := make(map[string]interface{})
+                       serverInfo["status_request_sent_at"] = now
+                       serverInfo["host"] = keepServer.Host
+                       serverInfo["port"] = keepServer.Port
+
+                       keepInfo[keepServer.Uuid] = serverInfo
+               })
+       }
+
+       resp, err := client.Get(url)
+       if err != nil {
+               loggerutil.FatalWithMessage(arvLogger,
+                       fmt.Sprintf("Error getting keep status from %s: %v", url, err))
+       } else if resp.StatusCode != 200 {
+               loggerutil.FatalWithMessage(arvLogger,
+                       fmt.Sprintf("Received error code %d in response to request "+
+                               "for %s status: %s",
+                               resp.StatusCode, url, resp.Status))
+       }
+       // Close the response body when done; the original code leaked
+       // this connection. (resp is non-nil here: the error path above
+       // terminates the process.)
+       defer resp.Body.Close()
+
+       var keepStatus map[string]interface{}
+       decoder := json.NewDecoder(resp.Body)
+       decoder.UseNumber()
+       err = decoder.Decode(&keepStatus)
+       if err != nil {
+               loggerutil.FatalWithMessage(arvLogger,
+                       fmt.Sprintf("Error decoding keep status from %s: %v", url, err))
+       }
+
+       if arvLogger != nil {
+               now := time.Now()
+               arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
+                       keepInfo := p["keep_info"].(map[string]interface{})
+                       serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})
+                       serverInfo["status_response_processed_at"] = now
+                       serverInfo["status"] = keepStatus
+               })
+       }
+}
+
+// CreateIndexRequest builds an authenticated GET request for a keep
+// server's /index endpoint, recording the request timestamp in the
+// logger (when present). Exits the program if the request cannot be
+// built.
+func CreateIndexRequest(arvLogger *logger.Logger,
+       keepServer ServerAddress) (req *http.Request) {
+       url := fmt.Sprintf("http://%s:%d/index", keepServer.Host, keepServer.Port)
+       log.Println("About to fetch keep server contents from " + url)
+
+       if arvLogger != nil {
+               now := time.Now()
+               arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
+                       keepInfo := p["keep_info"].(map[string]interface{})
+                       serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})
+                       serverInfo["index_request_sent_at"] = now
+               })
+       }
+
+       req, err := http.NewRequest("GET", url, nil)
+       if err != nil {
+               loggerutil.FatalWithMessage(arvLogger,
+                       fmt.Sprintf("Error building http request for %s: %v", url, err))
+       }
+
+       // The data manager token authorizes reading the full block index.
+       req.Header.Add("Authorization",
+               fmt.Sprintf("OAuth2 %s", getDataManagerToken(arvLogger)))
+       return
+}
+
+// ReadServerResponse parses a keep server's /index response into a
+// ServerResponse, line by line. Duplicate digests are resolved by
+// keeping the bigger block, or the newer one on a size tie; size
+// disagreements are logged (and appended to the server's error_list
+// in the remote log). Closes resp.Body before returning. Exits the
+// program on HTTP, parse, or scan errors.
+func ReadServerResponse(arvLogger *logger.Logger,
+       keepServer ServerAddress,
+       resp *http.Response) (response ServerResponse) {
+
+       if resp.StatusCode != 200 {
+               loggerutil.FatalWithMessage(arvLogger,
+                       fmt.Sprintf("Received error code %d in response to request "+
+                               "for %s index: %s",
+                               resp.StatusCode, keepServer.String(), resp.Status))
+       }
+
+       if arvLogger != nil {
+               now := time.Now()
+               arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
+                       keepInfo := p["keep_info"].(map[string]interface{})
+                       serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})
+                       serverInfo["index_response_received_at"] = now
+               })
+       }
+
+       response.Address = keepServer
+       response.Contents.BlockDigestToInfo =
+               make(map[blockdigest.BlockDigest]BlockInfo)
+       scanner := bufio.NewScanner(resp.Body)
+       numLines, numDuplicates, numSizeDisagreements := 0, 0, 0
+       for scanner.Scan() {
+               numLines++
+               blockInfo, err := parseBlockInfoFromIndexLine(scanner.Text())
+               if err != nil {
+                       loggerutil.FatalWithMessage(arvLogger,
+                               fmt.Sprintf("Error parsing BlockInfo from index line "+
+                                       "received from %s: %v",
+                                       keepServer.String(),
+                                       err))
+               }
+
+               if storedBlock, ok := response.Contents.BlockDigestToInfo[blockInfo.Digest]; ok {
+                       // This server returned multiple lines containing the same block digest.
+                       numDuplicates += 1
+                       if storedBlock.Size != blockInfo.Size {
+                               numSizeDisagreements += 1
+                               // TODO(misha): Consider failing here.
+                               message := fmt.Sprintf("Saw different sizes for the same block "+
+                                       "on %s: %+v %+v",
+                                       keepServer.String(),
+                                       storedBlock,
+                                       blockInfo)
+                               log.Println(message)
+                               if arvLogger != nil {
+                                       arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
+                                               keepInfo := p["keep_info"].(map[string]interface{})
+                                               serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})
+                                               var error_list []string
+                                               read_error_list, has_list := serverInfo["error_list"]
+                                               if has_list {
+                                                       error_list = read_error_list.([]string)
+                                               } // If we didn't have the list, error_list is already an empty list
+                                               serverInfo["error_list"] = append(error_list, message)
+                                       })
+                               }
+                       }
+                       // Keep the block that is bigger, or the block that's newer in
+                       // the case of a size tie.
+                       if storedBlock.Size < blockInfo.Size ||
+                               (storedBlock.Size == blockInfo.Size &&
+                                       storedBlock.Mtime < blockInfo.Mtime) {
+                               response.Contents.BlockDigestToInfo[blockInfo.Digest] = blockInfo
+                       }
+               } else {
+                       // First time we've seen this digest from this server.
+                       response.Contents.BlockDigestToInfo[blockInfo.Digest] = blockInfo
+               }
+       }
+       if err := scanner.Err(); err != nil {
+               loggerutil.FatalWithMessage(arvLogger,
+                       fmt.Sprintf("Received error scanning index response from %s: %v",
+                               keepServer.String(),
+                               err))
+       } else {
+               log.Printf("%s index contained %d lines with %d duplicates with "+
+                       "%d size disagreements",
+                       keepServer.String(),
+                       numLines,
+                       numDuplicates,
+                       numSizeDisagreements)
+
+               if arvLogger != nil {
+                       now := time.Now()
+                       arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
+                               keepInfo := p["keep_info"].(map[string]interface{})
+                               serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})
+
+                               serverInfo["processing_finished_at"] = now
+                               serverInfo["lines_received"] = numLines
+                               serverInfo["duplicates_seen"] = numDuplicates
+                               serverInfo["size_disagreements_seen"] = numSizeDisagreements
+                       })
+               }
+       }
+       resp.Body.Close()
+       return
+}
+
+// parseBlockInfoFromIndexLine parses one line of a keep server's
+// /index response. Each line must contain exactly two fields: a block
+// locator (with no hints) and the block's mtime.
+func parseBlockInfoFromIndexLine(indexLine string) (blockInfo BlockInfo, err error) {
+       tokens := strings.Fields(indexLine)
+       if len(tokens) != 2 {
+               err = fmt.Errorf("Expected 2 tokens per line but received a "+
+                       "line containing %v instead.",
+                       tokens)
+               // Bail out now: the original code fell through here, so a
+               // malformed line either panicked indexing tokens[0]/tokens[1]
+               // or silently overwrote this error below.
+               return
+       }
+
+       var locator manifest.BlockLocator
+       if locator, err = manifest.ParseBlockLocator(tokens[0]); err != nil {
+               return
+       }
+       if len(locator.Hints) > 0 {
+               err = fmt.Errorf("Block locator in index line should not contain hints "+
+                       "but it does: %v",
+                       locator)
+               return
+       }
+
+       blockInfo.Mtime, err = strconv.ParseInt(tokens[1], 10, 64)
+       if err != nil {
+               return
+       }
+       blockInfo.Digest = locator.Digest
+       blockInfo.Size = locator.Size
+       return
+}
+
+func ComputeBlockReplicationCounts(readServers *ReadServers) {
+       readServers.BlockReplicationCounts = make(map[int]int)
+       for _, infos := range readServers.BlockToServers {
+               replication := len(infos)
+               readServers.BlockReplicationCounts[replication] += 1
+       }
+}
diff --git a/services/datamanager/loggerutil/loggerutil.go b/services/datamanager/loggerutil/loggerutil.go
new file mode 100644 (file)
index 0000000..58abb11
--- /dev/null
@@ -0,0 +1,53 @@
+/* Datamanager-specific logging methods. */
+
+package loggerutil
+
+import (
+       "git.curoverse.com/arvados.git/sdk/go/logger"
+       "log"
+       "os"
+       "runtime"
+       "time"
+)
+
+// LogRunInfo records information about the current run (start time,
+// command line arguments, hostname, pid) under the "run_info" log
+// property. Useful to call at the beginning of execution. No-op when
+// arvLogger is nil.
+func LogRunInfo(arvLogger *logger.Logger) {
+       if arvLogger != nil {
+               now := time.Now()
+               arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
+                       runInfo := make(map[string]interface{})
+                       runInfo["started_at"] = now
+                       runInfo["args"] = os.Args
+                       hostname, err := os.Hostname()
+                       if err != nil {
+                               // Record the failure rather than aborting the run.
+                               runInfo["hostname_error"] = err.Error()
+                       } else {
+                               runInfo["hostname"] = hostname
+                       }
+                       runInfo["pid"] = os.Getpid()
+                       p["run_info"] = runInfo
+               })
+       }
+}
+
+// LogMemoryAlloc is a LogMutator that records the current number of
+// heap bytes in use. This is most useful as a logger write hook.
+//
+// Assumes we already have a map named "run_info" in properties (the
+// type assertion panics otherwise). LogRunInfo() can create such a
+// map for you if you call it.
+func LogMemoryAlloc(p map[string]interface{}, e map[string]interface{}) {
+       runInfo := p["run_info"].(map[string]interface{})
+       var memStats runtime.MemStats
+       runtime.ReadMemStats(&memStats)
+       runInfo["alloc_bytes_in_use"] = memStats.Alloc
+}
+
+func FatalWithMessage(arvLogger *logger.Logger, message string) {
+       if arvLogger != nil {
+               arvLogger.FinalUpdate(func(p map[string]interface{}, e map[string]interface{}) {
+                       p["FATAL"] = message
+                       p["run_info"].(map[string]interface{})["finished_at"] = time.Now()
+               })
+       }
+
+       log.Fatalf(message)
+}
index b68574c53d55436b27396576699c29fbdcddcb63..71c4ee5a2c4b713aba669a51605f01736b02bfe0 100644 (file)
@@ -31,10 +31,11 @@ _logger = logging.getLogger('arvados.arvados_fuse')
 _disallowed_filename_characters = re.compile('[\x00/]')
 
 class SafeApi(object):
-    '''Threadsafe wrapper for API object.  This stores and returns a different api
-    object per thread, because httplib2 which underlies apiclient is not
-    threadsafe.
-    '''
+    """Threadsafe wrapper for API object.
+
+    This stores and returns a different api object per thread, because
+    httplib2 which underlies apiclient is not threadsafe.
+    """
 
     def __init__(self, config):
         self.host = config.get('ARVADOS_API_HOST')
@@ -45,8 +46,9 @@ class SafeApi(object):
 
     def localapi(self):
         if 'api' not in self.local.__dict__:
-            self.local.api = arvados.api('v1', False, self.host,
-                                         self.api_token, self.insecure)
+            self.local.api = arvados.api(
+                version='v1',
+                host=self.host, token=self.api_token, insecure=self.insecure)
         return self.local.api
 
     def localkeep(self):
@@ -63,7 +65,9 @@ class SafeApi(object):
 
 
 def convertTime(t):
-    '''Parse Arvados timestamp to unix time.'''
+    """Parse Arvados timestamp to unix time."""
+    if not t:
+        return 0
     try:
         return calendar.timegm(time.strptime(t, "%Y-%m-%dT%H:%M:%SZ"))
     except (TypeError, ValueError):
@@ -280,6 +284,7 @@ class Directory(FreshBase):
                 n.clear()
             llfuse.invalidate_entry(self.inode, str(n))
             self.inodes.del_entry(oldentries[n])
+        llfuse.invalidate_inode(self.inode)
         self.invalidate()
 
     def mtime(self):
@@ -298,15 +303,26 @@ class CollectionDirectory(Directory):
         self.collection_object = None
         if isinstance(collection, dict):
             self.collection_locator = collection['uuid']
+            self._mtime = convertTime(collection.get('modified_at'))
         else:
             self.collection_locator = collection
+            self._mtime = 0
 
     def same(self, i):
         return i['uuid'] == self.collection_locator or i['portable_data_hash'] == self.collection_locator
 
+    # Used by arv-web.py to switch the contents of the CollectionDirectory
+    def change_collection(self, new_locator):
+        """Switch the contents of the CollectionDirectory.  Must be called with llfuse.lock held."""
+        self.collection_locator = new_locator
+        self.collection_object = None
+        self.update()
+
     def new_collection(self, new_collection_object, coll_reader):
         self.collection_object = new_collection_object
 
+        self._mtime = convertTime(self.collection_object.get('modified_at'))
+
         if self.collection_object_file is not None:
             self.collection_object_file.update(self.collection_object)
 
@@ -327,6 +343,10 @@ class CollectionDirectory(Directory):
             if self.collection_object is not None and portable_data_hash_pattern.match(self.collection_locator):
                 return True
 
+            if self.collection_locator is None:
+                self.fresh()
+                return True
+
             with llfuse.lock_released:
                 coll_reader = arvados.CollectionReader(
                     self.collection_locator, self.api, self.api.localkeep(),
@@ -348,7 +368,7 @@ class CollectionDirectory(Directory):
 
             self.fresh()
             return True
-        except apiclient.errors.NotFoundError:
+        except arvados.errors.NotFoundError:
             _logger.exception("arv-mount %s: error", self.collection_locator)
         except arvados.errors.ArgumentError as detail:
             _logger.warning("arv-mount %s: error %s", self.collection_locator, detail)
@@ -376,10 +396,6 @@ class CollectionDirectory(Directory):
         else:
             return super(CollectionDirectory, self).__contains__(k)
 
-    def mtime(self):
-        self.checkupdate()
-        return convertTime(self.collection_object["modified_at"]) if self.collection_object is not None and 'modified_at' in self.collection_object else 0
-
 
 class MagicDirectory(Directory):
     '''A special directory that logically contains the set of all extant keep
@@ -480,8 +496,8 @@ class TagsDirectory(RecursiveInvalidateDirectory):
                 ).execute(num_retries=self.num_retries)
         if "items" in tags:
             self.merge(tags['items'],
-                       lambda i: i['name'] if 'name' in i else i['uuid'],
-                       lambda a, i: a.tag == i,
+                       lambda i: i['name'],
+                       lambda a, i: a.tag == i['name'],
                        lambda i: TagDirectory(self.inode, self.inodes, self.api, self.num_retries, i['name'], poll=self._poll, poll_time=self._poll_time))
 
 
diff --git a/services/fuse/gittaggers.py b/services/fuse/gittaggers.py
new file mode 120000 (symlink)
index 0000000..a9ad861
--- /dev/null
@@ -0,0 +1 @@
+../../sdk/python/gittaggers.py
\ No newline at end of file
index ffd8240a89a17e97bab323e59b3e37741f446437..9c825c67580c68da49614dd8453263a30f47892c 100644 (file)
@@ -1,32 +1,19 @@
 #!/usr/bin/env python
 
 import os
-import subprocess
-import time
+import sys
+import setuptools.command.egg_info as egg_info_cmd
 
 from setuptools import setup, find_packages
-from setuptools.command.egg_info import egg_info
 
-SETUP_DIR = os.path.dirname(__file__)
+SETUP_DIR = os.path.dirname(__file__) or '.'
 README = os.path.join(SETUP_DIR, 'README.rst')
 
-class TagBuildWithCommit(egg_info):
-    """Tag the build with the sha1 and date of the last git commit.
-
-    If a build tag has already been set (e.g., "egg_info -b", building
-    from source package), leave it alone.
-    """
-    def tags(self):
-        if self.tag_build is None:
-            git_tags = subprocess.check_output(
-                ['git', 'log', '--first-parent', '--max-count=1',
-                 '--format=format:%ct %h', SETUP_DIR]).split()
-            assert len(git_tags) == 2
-            git_tags[0] = time.strftime(
-                '%Y%m%d%H%M%S', time.gmtime(int(git_tags[0])))
-            self.tag_build = '.{}+{}'.format(*git_tags)
-        return egg_info.tags(self)
-
+try:
+    import gittaggers
+    tagger = gittaggers.EggInfoFromGit
+except ImportError:
+    tagger = egg_info_cmd.egg_info
 
 setup(name='arvados_fuse',
       version='0.1',
@@ -42,12 +29,12 @@ setup(name='arvados_fuse',
         'bin/arv-mount'
         ],
       install_requires=[
-        'arvados-python-client>=0.1.20141203150737.277b3c7',
+        'arvados-python-client>=0.1.20150206225333',
         'llfuse',
-        'python-daemon<2',
+        'python-daemon',
         ],
       test_suite='tests',
       tests_require=['PyYAML'],
       zip_safe=False,
-      cmdclass={'egg_info': TagBuildWithCommit},
+      cmdclass={'egg_info': tagger},
       )
index 84dceee13764ea6d1ab2327b27f44f1973af3ce7..f9fcd73c5d29b6d5c557594663a94ee78225aaf3 100644 (file)
@@ -1,38 +1,38 @@
-import unittest
 import arvados
 import arvados_fuse as fuse
-import threading
-import time
-import os
+import glob
+import json
 import llfuse
-import tempfile
+import os
 import shutil
 import subprocess
-import glob
+import sys
+import tempfile
+import threading
+import time
+import unittest
+
 import run_test_server
-import json
 
 class MountTestBase(unittest.TestCase):
     def setUp(self):
         self.keeptmp = tempfile.mkdtemp()
         os.environ['KEEP_LOCAL_STORE'] = self.keeptmp
         self.mounttmp = tempfile.mkdtemp()
-        run_test_server.run(False)
+        run_test_server.run()
         run_test_server.authorize_with("admin")
-        self.api = api = fuse.SafeApi(arvados.config)
+        self.api = fuse.SafeApi(arvados.config)
 
-    def make_mount(self, root_class, *root_args):
+    def make_mount(self, root_class, **root_kwargs):
         operations = fuse.Operations(os.getuid(), os.getgid())
         operations.inodes.add_entry(root_class(
-                llfuse.ROOT_INODE, operations.inodes, self.api, 0, *root_args))
+            llfuse.ROOT_INODE, operations.inodes, self.api, 0, **root_kwargs))
         llfuse.init(operations, self.mounttmp, [])
         threading.Thread(None, llfuse.main).start()
         # wait until the driver is finished initializing
         operations.initlock.wait()
 
     def tearDown(self):
-        run_test_server.stop()
-
         # llfuse.close is buggy, so use fusermount instead.
         #llfuse.close(unmount=True)
         count = 0
@@ -44,6 +44,7 @@ class MountTestBase(unittest.TestCase):
 
         os.rmdir(self.mounttmp)
         shutil.rmtree(self.keeptmp)
+        run_test_server.reset()
 
     def assertDirContents(self, subdir, expect_content):
         path = self.mounttmp
@@ -96,7 +97,7 @@ class FuseMountTest(MountTestBase):
         self.api.collections().create(body={"manifest_text":cw.manifest_text()}).execute()
 
     def runTest(self):
-        self.make_mount(fuse.CollectionDirectory, self.testcollection)
+        self.make_mount(fuse.CollectionDirectory, collection=self.testcollection)
 
         self.assertDirContents(None, ['thing1.txt', 'thing2.txt',
                                       'edgecases', 'dir1', 'dir2'])
@@ -204,15 +205,8 @@ class FuseTagsUpdateTest(MountTestBase):
         }}).execute()
 
     def runTest(self):
-        operations = fuse.Operations(os.getuid(), os.getgid())
-        e = operations.inodes.add_entry(fuse.TagsDirectory(llfuse.ROOT_INODE, operations.inodes, self.api, 0, poll_time=1))
-
-        llfuse.init(operations, self.mounttmp, [])
-        t = threading.Thread(None, lambda: llfuse.main())
-        t.start()
+        self.make_mount(fuse.TagsDirectory, poll_time=1)
 
-        # wait until the driver is finished initializing
-        operations.initlock.wait()
         self.assertIn('foo_tag', os.listdir(self.mounttmp))
 
         bar_uuid = run_test_server.fixture('collections')['bar_file']['uuid']
@@ -234,7 +228,7 @@ class FuseTagsUpdateTest(MountTestBase):
 class FuseSharedTest(MountTestBase):
     def runTest(self):
         self.make_mount(fuse.SharedDirectory,
-                        self.api.users().current().execute()['uuid'])
+                        exclude=self.api.users().current().execute()['uuid'])
 
         # shared_dirs is a list of the directories exposed
         # by fuse.SharedDirectory (i.e. any object visible
@@ -277,13 +271,30 @@ class FuseSharedTest(MountTestBase):
 class FuseHomeTest(MountTestBase):
     def runTest(self):
         self.make_mount(fuse.ProjectDirectory,
-                        self.api.users().current().execute())
+                        project_object=self.api.users().current().execute())
 
         d1 = os.listdir(self.mounttmp)
         self.assertIn('Unrestricted public data', d1)
 
         d2 = os.listdir(os.path.join(self.mounttmp, 'Unrestricted public data'))
-        self.assertEqual(['GNU General Public License, version 3'], d2)
+        public_project = run_test_server.fixture('groups')[
+            'anonymously_accessible_project']
+        found_in = 0
+        found_not_in = 0
+        for name, item in run_test_server.fixture('collections').iteritems():
+            if 'name' not in item:
+                pass
+            elif item['owner_uuid'] == public_project['uuid']:
+                self.assertIn(item['name'], d2)
+                found_in += 1
+            else:
+                # Artificial assumption here: there is no public
+                # collection fixture with the same name as a
+                # non-public collection.
+                self.assertNotIn(item['name'], d2)
+                found_not_in += 1
+        self.assertNotEqual(0, found_in)
+        self.assertNotEqual(0, found_not_in)
 
         d3 = os.listdir(os.path.join(self.mounttmp, 'Unrestricted public data', 'GNU General Public License, version 3'))
         self.assertEqual(["GNU_General_Public_License,_version_3.pdf"], d3)
index 8acf43abd4eba5094d89e4443a0d6a067139aa80..ccbd7d8790c5042566aff7bb4941f56a0c91e26a 100644 (file)
@@ -5,6 +5,7 @@ import (
        "crypto/tls"
        "fmt"
        "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/arvadostest"
        "git.curoverse.com/arvados.git/sdk/go/keepclient"
        . "gopkg.in/check.v1"
        "io"
@@ -13,7 +14,6 @@ import (
        "net/http"
        "net/url"
        "os"
-       "os/exec"
        "strings"
        "testing"
        "time"
@@ -30,11 +30,6 @@ var _ = Suite(&ServerRequiredSuite{})
 // Tests that require the Keep server running
 type ServerRequiredSuite struct{}
 
-func pythonDir() string {
-       cwd, _ := os.Getwd()
-       return fmt.Sprintf("%s/../../sdk/python/tests", cwd)
-}
-
 // Wait (up to 1 second) for keepproxy to listen on a port. This
 // avoids a race condition where we hit a "connection refused" error
 // because we start testing the proxy too soon.
@@ -57,45 +52,17 @@ func closeListener() {
 }
 
 func (s *ServerRequiredSuite) SetUpSuite(c *C) {
-       cwd, _ := os.Getwd()
-       defer os.Chdir(cwd)
-
-       os.Chdir(pythonDir())
-       {
-               cmd := exec.Command("python", "run_test_server.py", "start")
-               stderr, err := cmd.StderrPipe()
-               if err != nil {
-                       log.Fatalf("Setting up stderr pipe: %s", err)
-               }
-               go io.Copy(os.Stderr, stderr)
-               if err := cmd.Run(); err != nil {
-                       panic(fmt.Sprintf("'python run_test_server.py start' returned error %s", err))
-               }
-       }
-       {
-               cmd := exec.Command("python", "run_test_server.py", "start_keep")
-               stderr, err := cmd.StderrPipe()
-               if err != nil {
-                       log.Fatalf("Setting up stderr pipe: %s", err)
-               }
-               go io.Copy(os.Stderr, stderr)
-               if err := cmd.Run(); err != nil {
-                       panic(fmt.Sprintf("'python run_test_server.py start_keep' returned error %s", err))
-               }
-       }
+       arvadostest.StartAPI()
+       arvadostest.StartKeep()
+}
 
-       os.Setenv("ARVADOS_API_HOST", "localhost:3000")
-       os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
-       os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
+func (s *ServerRequiredSuite) SetUpTest(c *C) {
+       arvadostest.ResetEnv()
 }
 
 func (s *ServerRequiredSuite) TearDownSuite(c *C) {
-       cwd, _ := os.Getwd()
-       defer os.Chdir(cwd)
-
-       os.Chdir(pythonDir())
-       exec.Command("python", "run_test_server.py", "stop_keep").Run()
-       exec.Command("python", "run_test_server.py", "stop").Run()
+       arvadostest.StopKeep()
+       arvadostest.StopAPI()
 }
 
 func setupProxyService() {
@@ -136,27 +103,37 @@ func setupProxyService() {
        }
 }
 
-func runProxy(c *C, args []string, token string, port int) keepclient.KeepClient {
-       os.Args = append(args, fmt.Sprintf("-listen=:%v", port))
-       os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
-
-       listener = nil
-       go main()
-       time.Sleep(100 * time.Millisecond)
-
-       os.Setenv("ARVADOS_KEEP_PROXY", fmt.Sprintf("http://localhost:%v", port))
-       os.Setenv("ARVADOS_API_TOKEN", token)
+func runProxy(c *C, args []string, port int, bogusClientToken bool) keepclient.KeepClient {
+       if bogusClientToken {
+               os.Setenv("ARVADOS_API_TOKEN", "bogus-token")
+       }
        arv, err := arvadosclient.MakeArvadosClient()
        c.Assert(err, Equals, nil)
-       kc, err := keepclient.MakeKeepClient(&arv)
-       c.Assert(err, Equals, nil)
+       kc := keepclient.KeepClient{
+               Arvados: &arv,
+               Want_replicas: 2,
+               Using_proxy: true,
+               Client: &http.Client{},
+       }
+       kc.SetServiceRoots(map[string]string{
+               "proxy": fmt.Sprintf("http://localhost:%v", port),
+       })
        c.Check(kc.Using_proxy, Equals, true)
        c.Check(len(kc.ServiceRoots()), Equals, 1)
        for _, root := range kc.ServiceRoots() {
                c.Check(root, Equals, fmt.Sprintf("http://localhost:%v", port))
        }
-       os.Setenv("ARVADOS_KEEP_PROXY", "")
        log.Print("keepclient created")
+       if bogusClientToken {
+               arvadostest.ResetEnv()
+       }
+
+       {
+               os.Args = append(args, fmt.Sprintf("-listen=:%v", port))
+               listener = nil
+               go main()
+       }
+
        return kc
 }
 
@@ -164,7 +141,6 @@ func (s *ServerRequiredSuite) TestPutAskGet(c *C) {
        log.Print("TestPutAndGet start")
 
        os.Args = []string{"keepproxy", "-listen=:29950"}
-       os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
        listener = nil
        go main()
        time.Sleep(100 * time.Millisecond)
@@ -183,7 +159,6 @@ func (s *ServerRequiredSuite) TestPutAskGet(c *C) {
                c.Check(root, Equals, "http://localhost:29950")
        }
        os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
-       log.Print("keepclient created")
 
        waitForListener()
        defer closeListener()
@@ -248,12 +223,10 @@ func (s *ServerRequiredSuite) TestPutAskGet(c *C) {
 func (s *ServerRequiredSuite) TestPutAskGetForbidden(c *C) {
        log.Print("TestPutAskGetForbidden start")
 
-       kc := runProxy(c, []string{"keepproxy"}, "123abc", 29951)
+       kc := runProxy(c, []string{"keepproxy"}, 29951, true)
        waitForListener()
        defer closeListener()
 
-       log.Print("keepclient created")
-
        hash := fmt.Sprintf("%x", md5.Sum([]byte("bar")))
 
        {
@@ -290,7 +263,7 @@ func (s *ServerRequiredSuite) TestPutAskGetForbidden(c *C) {
 func (s *ServerRequiredSuite) TestGetDisabled(c *C) {
        log.Print("TestGetDisabled start")
 
-       kc := runProxy(c, []string{"keepproxy", "-no-get"}, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h", 29952)
+       kc := runProxy(c, []string{"keepproxy", "-no-get"}, 29952, false)
        waitForListener()
        defer closeListener()
 
@@ -330,7 +303,7 @@ func (s *ServerRequiredSuite) TestGetDisabled(c *C) {
 func (s *ServerRequiredSuite) TestPutDisabled(c *C) {
        log.Print("TestPutDisabled start")
 
-       kc := runProxy(c, []string{"keepproxy", "-no-put"}, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h", 29953)
+       kc := runProxy(c, []string{"keepproxy", "-no-put"}, 29953, false)
        waitForListener()
        defer closeListener()
 
@@ -346,7 +319,7 @@ func (s *ServerRequiredSuite) TestPutDisabled(c *C) {
 }
 
 func (s *ServerRequiredSuite) TestCorsHeaders(c *C) {
-       runProxy(c, []string{"keepproxy"}, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h", 29954)
+       runProxy(c, []string{"keepproxy"}, 29954, false)
        waitForListener()
        defer closeListener()
 
@@ -378,7 +351,7 @@ func (s *ServerRequiredSuite) TestCorsHeaders(c *C) {
 }
 
 func (s *ServerRequiredSuite) TestPostWithoutHash(c *C) {
-       runProxy(c, []string{"keepproxy"}, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h", 29955)
+       runProxy(c, []string{"keepproxy"}, 29955, false)
        waitForListener()
        defer closeListener()
 
diff --git a/services/nodemanager/MANIFEST.in b/services/nodemanager/MANIFEST.in
new file mode 100644 (file)
index 0000000..9561fb1
--- /dev/null
@@ -0,0 +1 @@
+include README.rst
index 4955992faa4d7cc2da43e2d39b940615a1c63710..f5186074c6bc30c9d2230e91ea12764daddb797b 100644 (file)
@@ -2,16 +2,30 @@
 
 from __future__ import absolute_import, print_function
 
+import calendar
 import itertools
+import re
 import time
 
+ARVADOS_TIMEFMT = '%Y-%m-%dT%H:%M:%SZ'
+ARVADOS_TIMESUBSEC_RE = re.compile(r'(\.\d+)Z$')
+
 def arvados_node_fqdn(arvados_node, default_hostname='dynamic.compute'):
     hostname = arvados_node.get('hostname') or default_hostname
     return '{}.{}'.format(hostname, arvados_node['domain'])
 
 def arvados_node_mtime(node):
-    return time.mktime(time.strptime(node['modified_at'] + 'UTC',
-                                     '%Y-%m-%dT%H:%M:%SZ%Z')) - time.timezone
+    return arvados_timestamp(node['modified_at'])
+
+def arvados_timestamp(timestr):
+    subsec_match = ARVADOS_TIMESUBSEC_RE.search(timestr)
+    if subsec_match is None:
+        subsecs = .0
+    else:
+        subsecs = float(subsec_match.group(1))
+        timestr = timestr[:subsec_match.start()] + 'Z'
+    return calendar.timegm(time.strptime(timestr + 'UTC',
+                                         ARVADOS_TIMEFMT + '%Z'))
 
 def timestamp_fresh(timestamp, fresh_time):
     return (time.time() - timestamp) < fresh_time
index 48e8dcf45888de232ba9004c290023e6bf5999b7..1608b529fb848e00ac8968ef8a04a427e6f4e0a7 100644 (file)
@@ -288,7 +288,7 @@ class ComputeNodeMonitorActor(config.actor_class):
         if (self.arvados_node is None) or not timestamp_fresh(
               arvados_node_mtime(self.arvados_node), self.node_stale_after):
             return None
-        state = self.arvados_node['info'].get('slurm_state')
+        state = self.arvados_node['crunch_worker_state']
         if not state:
             return None
         result = state in states
index 3a0c2063b426ccd759ad4dd8323847d51f72bce4..369bb8f953b992fcdedded62c942ed8e57492b60 100644 (file)
@@ -3,6 +3,7 @@
 from __future__ import absolute_import, print_function
 
 import libcloud.common.types as cloud_types
+from libcloud.compute.base import NodeDriver
 
 from ...config import NETWORK_ERRORS
 
@@ -25,14 +26,15 @@ class BaseComputeNodeDriver(object):
         self.real = driver_class(**auth_kwargs)
         self.list_kwargs = list_kwargs
         self.create_kwargs = create_kwargs
+        for key in self.create_kwargs.keys():
+            init_method = getattr(self, '_init_' + key, None)
+            if init_method is not None:
+                new_pair = init_method(self.create_kwargs.pop(key))
+                if new_pair is not None:
+                    self.create_kwargs[new_pair[0]] = new_pair[1]
 
-    def __getattr__(self, name):
-        # Proxy non-extension methods to the real driver.
-        if (not name.startswith('_') and not name.startswith('ex_')
-              and hasattr(self.real, name)):
-            return getattr(self.real, name)
-        else:
-            return super(BaseComputeNodeDriver, self).__getattr__(name)
+    def _init_ping_host(self, ping_host):
+        self.ping_host = ping_host
 
     def search_for(self, term, list_method, key=lambda item: item.id):
         cache_key = (list_method, term)
@@ -52,6 +54,11 @@ class BaseComputeNodeDriver(object):
     def arvados_create_kwargs(self, arvados_node):
         raise NotImplementedError("BaseComputeNodeDriver.arvados_create_kwargs")
 
+    def _make_ping_url(self, arvados_node):
+        return 'https://{}/arvados/v1/nodes/{}/ping?ping_secret={}'.format(
+            self.ping_host, arvados_node['uuid'],
+            arvados_node['info']['ping_secret'])
+
     def create_node(self, size, arvados_node):
         kwargs = self.create_kwargs.copy()
         kwargs.update(self.arvados_create_kwargs(arvados_node))
@@ -82,3 +89,16 @@ class BaseComputeNodeDriver(object):
         # exactly an Exception, or a better-known higher-level exception.
         return (isinstance(exception, cls.CLOUD_ERRORS) or
                 getattr(exception, '__class__', None) is Exception)
+
+    # Now that we've defined all our own methods, delegate generic, public
+    # attributes of libcloud drivers that we haven't defined ourselves.
+    def _delegate_to_real(attr_name):
+        return property(
+            lambda self: getattr(self.real, attr_name),
+            lambda self, value: setattr(self.real, attr_name, value),
+            doc=getattr(getattr(NodeDriver, attr_name), '__doc__', None))
+
+    _locals = locals()
+    for _attr_name in dir(NodeDriver):
+        if (not _attr_name.startswith('_')) and (_attr_name not in _locals):
+            _locals[_attr_name] = _delegate_to_real(_attr_name)
index 255a948a6c3aa0ee2c17ae1581685d282c341813..9db3d89cb67825c677a208a3d5ebfa9961c7b384 100644 (file)
@@ -52,19 +52,10 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
         super(ComputeNodeDriver, self).__init__(
             auth_kwargs, {'ex_filters': list_kwargs}, create_kwargs,
             driver_class)
-        for key in self.create_kwargs.keys():
-            init_method = getattr(self, '_init_' + key, None)
-            if init_method is not None:
-                new_pair = init_method(self.create_kwargs.pop(key))
-                if new_pair is not None:
-                    self.create_kwargs[new_pair[0]] = new_pair[1]
 
     def _init_image_id(self, image_id):
         return 'image', self.search_for(image_id, 'list_images')
 
-    def _init_ping_host(self, ping_host):
-        self.ping_host = ping_host
-
     def _init_security_groups(self, group_names):
         return 'ex_security_groups', [
             self.search_for(gname.strip(), 'ex_get_security_groups')
@@ -79,14 +70,8 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
         return 'auth', key
 
     def arvados_create_kwargs(self, arvados_node):
-        result = {'name': arvados_node_fqdn(arvados_node)}
-        ping_secret = arvados_node['info'].get('ping_secret')
-        if ping_secret is not None:
-            ping_url = ('https://{}/arvados/v1/nodes/{}/ping?ping_secret={}'.
-                        format(self.ping_host, arvados_node['uuid'],
-                               ping_secret))
-            result['ex_userdata'] = ping_url
-        return result
+        return {'name': arvados_node_fqdn(arvados_node),
+                'ex_userdata': self._make_ping_url(arvados_node)}
 
     def post_create_node(self, cloud_node):
         self.real.ex_create_tags(cloud_node, self.tags)
diff --git a/services/nodemanager/arvnodeman/computenode/driver/gce.py b/services/nodemanager/arvnodeman/computenode/driver/gce.py
new file mode 100644 (file)
index 0000000..d6ea2b2
--- /dev/null
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import functools
+import json
+import time
+
+import libcloud.compute.providers as cloud_provider
+import libcloud.compute.types as cloud_types
+
+from . import BaseComputeNodeDriver
+from .. import arvados_node_fqdn, arvados_timestamp, ARVADOS_TIMEFMT
+
+class ComputeNodeDriver(BaseComputeNodeDriver):
+    """Compute node driver wrapper for GCE
+
+    This translates cloud driver requests to GCE's specific parameters.
+    """
+    DEFAULT_DRIVER = cloud_provider.get_driver(cloud_types.Provider.GCE)
+    SEARCH_CACHE = {}
+
+    def __init__(self, auth_kwargs, list_kwargs, create_kwargs,
+                 driver_class=DEFAULT_DRIVER):
+        list_kwargs = list_kwargs.copy()
+        tags_str = list_kwargs.pop('tags', '')
+        if not tags_str.strip():
+            self.node_tags = frozenset()
+        else:
+            self.node_tags = frozenset(t.strip() for t in tags_str.split(','))
+        create_kwargs = create_kwargs.copy()
+        create_kwargs.setdefault('external_ip', None)
+        create_kwargs.setdefault('ex_metadata', {})
+        super(ComputeNodeDriver, self).__init__(
+            auth_kwargs, list_kwargs, create_kwargs,
+            driver_class)
+
+    @staticmethod
+    def _name_key(cloud_object):
+        return cloud_object.name
+
+    def _init_image(self, image_name):
+        return 'image', self.search_for(
+            image_name, 'list_images', self._name_key)
+
+    def _init_location(self, location_name):
+        return 'location', self.search_for(
+            location_name, 'list_locations', self._name_key)
+
+    def _init_network(self, network_name):
+        return 'ex_network', self.search_for(
+            network_name, 'ex_list_networks', self._name_key)
+
+    def _init_service_accounts(self, service_accounts_str):
+        return 'ex_service_accounts', json.loads(service_accounts_str)
+
+    def _init_ssh_key(self, filename):
+        # SSH keys are delivered to GCE nodes via ex_metadata: see
+        # http://stackoverflow.com/questions/26752617/creating-sshkeys-for-gce-instance-using-libcloud
+        with open(filename) as ssh_file:
+            self.create_kwargs['ex_metadata']['sshKeys'] = (
+                'root:' + ssh_file.read().strip())
+
+    def list_sizes(self):
+        return super(ComputeNodeDriver, self).list_sizes(
+            self.create_kwargs['location'])
+
+    def arvados_create_kwargs(self, arvados_node):
+        cluster_id, _, node_id = arvados_node['uuid'].split('-')
+        result = {'name': 'compute-{}-{}'.format(node_id, cluster_id),
+                  'ex_metadata': self.create_kwargs['ex_metadata'].copy(),
+                  'ex_tags': list(self.node_tags)}
+        result['ex_metadata']['booted_at'] = time.strftime(ARVADOS_TIMEFMT,
+                                                           time.gmtime())
+        result['ex_metadata']['hostname'] = arvados_node_fqdn(arvados_node)
+        result['ex_metadata']['user-data'] = self._make_ping_url(arvados_node)
+        return result
+
+    def list_nodes(self):
+        # The GCE libcloud driver only supports filtering node lists by zone.
+        # Do our own filtering based on tag list.
+        return [node for node in
+                super(ComputeNodeDriver, self).list_nodes()
+                if self.node_tags.issubset(node.extra.get('tags', []))]
+
+    @classmethod
+    def _find_metadata(cls, metadata_items, key):
+    # Given a list of two-item metadata dictionaries, return the one with
+        # the named key.  Raise KeyError if not found.
+        try:
+            return next(data_dict for data_dict in metadata_items
+                        if data_dict.get('key') == key)
+        except StopIteration:
+            raise KeyError(key)
+
+    @classmethod
+    def _get_metadata(cls, metadata_items, key, *default):
+        try:
+            return cls._find_metadata(metadata_items, key)['value']
+        except KeyError:
+            if default:
+                return default[0]
+            raise
+
+    def sync_node(self, cloud_node, arvados_node):
+        hostname = arvados_node_fqdn(arvados_node)
+        metadata_req = cloud_node.extra['metadata'].copy()
+        metadata_items = metadata_req.setdefault('items', [])
+        try:
+            self._find_metadata(metadata_items, 'hostname')['value'] = hostname
+        except KeyError:
+            metadata_items.append({'key': 'hostname', 'value': hostname})
+        response = self.real.connection.async_request(
+            '/zones/{}/instances/{}/setMetadata'.format(
+                cloud_node.extra['zone'].name, cloud_node.name),
+            method='POST', data=metadata_req)
+        if not response.success():
+            raise Exception("setMetadata error: {}".format(response.error))
+
+    @classmethod
+    def node_start_time(cls, node):
+        try:
+            return arvados_timestamp(cls._get_metadata(
+                    node.extra['metadata']['items'], 'booted_at'))
+        except KeyError:
+            return 0
index b7ec1fc80d9a0211867b7d06e5dd8ffb272f1ff4..315df1c3f984e29a0edfc09c71f76051def7480d 100644 (file)
@@ -88,8 +88,7 @@ class NodeManagerConfig(ConfigParser.SafeConfigParser):
         http = httplib2.Http(timeout=self.getint('Arvados', 'timeout'),
                              ca_certs=certs_file,
                              disable_ssl_certificate_validation=insecure)
-        return arvados.api('v1',
-                           cache=False,  # Don't reuse an existing client.
+        return arvados.api(version='v1',
                            host=self.get('Arvados', 'host'),
                            token=self.get('Arvados', 'token'),
                            insecure=insecure,
@@ -106,14 +105,26 @@ class NodeManagerConfig(ConfigParser.SafeConfigParser):
                                         self.get_section('Cloud Create'))
 
     def node_sizes(self, all_sizes):
+        """Finds all acceptable NodeSizes for our installation.
+
+        Returns a list of (NodeSize, kwargs) pairs for each NodeSize object
+        returned by libcloud that matches a size listed in our config file.
+        """
+
         size_kwargs = {}
         for sec_name in self.sections():
             sec_words = sec_name.split(None, 2)
             if sec_words[0] != 'Size':
                 continue
             size_kwargs[sec_words[1]] = self.get_section(sec_name, int)
-        return [(size, size_kwargs[size.id]) for size in all_sizes
-                if size.id in size_kwargs]
+        # EC2 node sizes are identified by id. GCE sizes are identified by name.
+        matching_sizes = []
+        for size in all_sizes:
+            if size.id in size_kwargs:
+                matching_sizes.append((size, size_kwargs[size.id]))
+            elif size.name in size_kwargs:
+                matching_sizes.append((size, size_kwargs[size.name]))
+        return matching_sizes
 
     def shutdown_windows(self):
         return [int(n)
index 024ed2b59b3089676b520aec5212caa1caa470ba..9b41ca14d54fc52956650649170574e90d99ae78 100644 (file)
@@ -128,9 +128,11 @@ security_groups = idstring1, idstring2
 # willing to use.  The Node Manager should boot the cheapest size(s) that
 # can run jobs in the queue (N.B.: defining more than one size has not been
 # tested yet).
-# Each size section MUST define the number of cores it has.  You may also
-# want to define the number of mebibytes of scratch space for Crunch jobs.
-# You can also override Amazon's provided data fields by setting the same
-# names here.
+# Each size section MUST define the number of cores available in this
+# size class (since libcloud does not provide any consistent API for exposing
+# this setting).
+# You may also want to define the amount of scratch space (expressed
+# in GB) for Crunch jobs.  You can also override Amazon's provided
+# data fields by setting the same names here.
 cores = 2
-scratch = 100
\ No newline at end of file
+scratch = 100
diff --git a/services/nodemanager/doc/gce.example.cfg b/services/nodemanager/doc/gce.example.cfg
new file mode 100644 (file)
index 0000000..7e7813c
--- /dev/null
@@ -0,0 +1,141 @@
+# Google Compute Engine configuration for Arvados Node Manager.
+# All times are in seconds unless specified otherwise.
+
+[Daemon]
+# Node Manager will ensure that there are at least this many nodes
+# running at all times.
+min_nodes = 0
+
+# Node Manager will not start any compute nodes when at least this
+# many are running.
+max_nodes = 8
+
+# Poll compute nodes and Arvados for new information every N seconds.
+poll_time = 60
+
+# Polls have exponential backoff when services fail to respond.
+# This is the longest time to wait between polls.
+max_poll_time = 300
+
+# If Node Manager can't successfully poll a service for this long,
+# it will never start or stop compute nodes, on the assumption that its
+# information is too outdated.
+poll_stale_after = 600
+
+# "Node stale time" affects two related behaviors.
+# 1. If a compute node has been running for at least this long, but it
+# isn't paired with an Arvados node, do not shut it down, but leave it alone.
+# This prevents the node manager from shutting down a node that might
+# actually be doing work, but is having temporary trouble contacting the
+# API server.
+# 2. When the Node Manager starts a new compute node, it will try to reuse
+# an Arvados node that hasn't been updated for this long.
+node_stale_after = 14400
+
+# File path for Certificate Authorities
+certs_file = /etc/ssl/certs/ca-certificates.crt
+
+[Logging]
+# Log file path
+file = /var/log/arvados/node-manager.log
+
+# Log level for most Node Manager messages.
+# Choose one of DEBUG, INFO, WARNING, ERROR, or CRITICAL.
+# WARNING lets you know when polling a service fails.
+# INFO additionally lets you know when a compute node is started or stopped.
+level = INFO
+
+# You can also set different log levels for specific libraries.
+# Pykka is the Node Manager's actor library.
+# Setting this to DEBUG will display tracebacks for uncaught
+# exceptions in the actors, but it's also very chatty.
+pykka = WARNING
+
+# Setting apiclient to INFO will log the URL of every Arvados API request.
+apiclient = WARNING
+
+[Arvados]
+host = zyxwv.arvadosapi.com
+token = ARVADOS_TOKEN
+timeout = 15
+
+# Accept an untrusted SSL certificate from the API server?
+insecure = no
+
+[Cloud]
+provider = gce
+
+# Shutdown windows define periods of time when a node may and may not
+# be shut down.  These are windows in full minutes, separated by
+# commas.  Counting from the time the node is booted, the node WILL
+# NOT shut down for N1 minutes; then it MAY shut down for N2 minutes;
+# then it WILL NOT shut down for N3 minutes; and so on.  For example,
+# "54, 5, 1" means the node may shut down from the 54th to the 59th
+# minute of each hour of uptime.
+# GCE bills by the minute, and does not provide information about when
+# a node booted.  Node Manager will store this information in metadata
+# when it boots a node; if that information is not available, it will
+# assume the node booted at the epoch.  These shutdown settings are
+# very aggressive.  You may want to adjust this if you want more
+# continuity of service from a single node.
+shutdown_windows = 20, 999999
+
+[Cloud Credentials]
+user_id = client_email_address@developer.gserviceaccount.com
+key = path_to_certificate.pem
+project = project-id-from-google-cloud-dashboard
+timeout = 60
+
+# Optional settings. For full documentation see
+# http://libcloud.readthedocs.org/en/latest/compute/drivers/gce.html#libcloud.compute.drivers.gce.GCENodeDriver
+#
+# datacenter = us-central1-a
+# auth_type = SA               # SA, IA or GCE
+# scopes = https://www.googleapis.com/auth/compute
+# credential_file =
+
+[Cloud List]
+# A comma-separated list of tags that must be applied to a node for it to
+# be considered a compute node.
+# The driver will automatically apply these tags to nodes it creates.
+tags = zyxwv, compute
+
+[Cloud Create]
+# New compute nodes will send pings to Arvados at this host.
+# You may specify a port, and use brackets to disambiguate IPv6 addresses.
+ping_host = hostname:port
+
+# A file path for an SSH key that can log in to the compute node.
+# ssh_key = path
+
+# The GCE image name and network zone name to use when creating new nodes.
+# * Valid image aliases: https://cloud.google.com/sdk/gcloud/reference/compute/instances/create
+# * Valid location (zone) names: https://cloud.google.com/compute/docs/zones
+image = debian-7
+location = us-central1-a
+# network = your_network_name
+
+# JSON string of service account authorizations for this cluster.
+# See http://libcloud.readthedocs.org/en/latest/compute/drivers/gce.html#specifying-service-account-scopes
+# service_accounts = [{'email':'account@example.com', 'scopes':['storage-ro']}]
+
+[Size n1-standard-2]
+# You can define any number of Size sections to list node sizes you're
+# willing to use.  The Node Manager should boot the cheapest size(s) that
+# can run jobs in the queue (N.B.: defining more than one size has not been
+# tested yet).
+#
+# The Size fields are interpreted the same way as with a libcloud NodeSize:
+# http://libcloud.readthedocs.org/en/latest/compute/api.html#libcloud.compute.base.NodeSize
+#
+# See https://cloud.google.com/compute/docs/machine-types for a list
+# of known machine types that may be used as a Size parameter.
+#
+# Each size section MUST define the number of cores available in this
+# size class (since libcloud does not provide any consistent API for exposing
+# this setting).
+# You may also want to define the amount of scratch space (expressed
+# in GB) for Crunch jobs.
+cores = 2
+scratch = 100
+ram = 512
diff --git a/services/nodemanager/gittaggers.py b/services/nodemanager/gittaggers.py
new file mode 120000 (symlink)
index 0000000..a9ad861
--- /dev/null
@@ -0,0 +1 @@
+../../sdk/python/gittaggers.py
\ No newline at end of file
index 6923b074ac6c6d99ccca8c46ee274542180da75e..6830e809f9b2283a65ceb8ad5ddb9bdda4115e44 100644 (file)
@@ -1,36 +1,24 @@
 #!/usr/bin/env python
 
 import os
-import subprocess
-import time
+import sys
+import setuptools.command.egg_info as egg_info_cmd
 
 from setuptools import setup, find_packages
-from setuptools.command.egg_info import egg_info
 
 SETUP_DIR = os.path.dirname(__file__) or "."
+README = os.path.join(SETUP_DIR, 'README.rst')
 
-class TagBuildWithCommit(egg_info):
-    """Tag the build with the sha1 and date of the last git commit.
-
-    If a build tag has already been set (e.g., "egg_info -b", building
-    from source package), leave it alone.
-    """
-    def tags(self):
-        if self.tag_build is None:
-            git_tags = subprocess.check_output(
-                ['git', 'log', '--first-parent', '--max-count=1',
-                 '--format=format:%ct %h', SETUP_DIR]).split()
-            assert len(git_tags) == 2
-            git_tags[0] = time.strftime(
-                '%Y%m%d%H%M%S', time.gmtime(int(git_tags[0])))
-            self.tag_build = '.{}+{}'.format(*git_tags)
-        return egg_info.tags(self)
-
+try:
+    import gittaggers
+    tagger = gittaggers.EggInfoFromGit
+except ImportError:
+    tagger = egg_info_cmd.egg_info
 
 setup(name='arvados-node-manager',
       version='0.1',
       description='Arvados compute node manager',
-      long_description=open(os.path.join(SETUP_DIR, 'README.rst')).read(),
+      long_description=open(README).read(),
       author='Arvados',
       author_email='info@arvados.org',
       url="https://arvados.org",
@@ -38,13 +26,13 @@ setup(name='arvados-node-manager',
       packages=find_packages(),
       install_requires=[
         'apache-libcloud',
-        'arvados-python-client',
+        'arvados-python-client>=0.1.20150206225333',
         'pykka',
-        'python-daemon<2',
+        'python-daemon',
         ],
       scripts=['bin/arvados-node-manager'],
       test_suite='tests',
       tests_require=['mock>=1.0'],
       zip_safe=False,
-      cmdclass={'egg_info': TagBuildWithCommit},
+      cmdclass={'egg_info': tagger},
       )
index a1dfde30e1c90eeaeeb04e2542bd53866d4eff5f..4a72f47884d8deac211f19ff1de990cd168953b9 100644 (file)
@@ -227,28 +227,29 @@ class ComputeNodeMonitorActorTestCase(testutil.ActorTestMixin,
 
     def test_in_state_when_unpaired(self):
         self.make_actor()
-        self.assertIsNone(self.node_state('idle', 'alloc'))
+        self.assertIsNone(self.node_state('idle', 'busy'))
 
     def test_in_state_when_pairing_stale(self):
         self.make_actor(arv_node=testutil.arvados_node_mock(
                 job_uuid=None, age=90000))
-        self.assertIsNone(self.node_state('idle', 'alloc'))
+        self.assertIsNone(self.node_state('idle', 'busy'))
 
     def test_in_state_when_no_state_available(self):
-        self.make_actor(arv_node=testutil.arvados_node_mock(info={}))
-        self.assertIsNone(self.node_state('idle', 'alloc'))
+        self.make_actor(arv_node=testutil.arvados_node_mock(
+                crunch_worker_state=None))
+        self.assertIsNone(self.node_state('idle', 'busy'))
 
     def test_in_idle_state(self):
         self.make_actor(2, arv_node=testutil.arvados_node_mock(job_uuid=None))
         self.assertTrue(self.node_state('idle'))
-        self.assertFalse(self.node_state('alloc'))
-        self.assertTrue(self.node_state('idle', 'alloc'))
+        self.assertFalse(self.node_state('busy'))
+        self.assertTrue(self.node_state('idle', 'busy'))
 
-    def test_in_alloc_state(self):
+    def test_in_busy_state(self):
         self.make_actor(3, arv_node=testutil.arvados_node_mock(job_uuid=True))
         self.assertFalse(self.node_state('idle'))
-        self.assertTrue(self.node_state('alloc'))
-        self.assertTrue(self.node_state('idle', 'alloc'))
+        self.assertTrue(self.node_state('busy'))
+        self.assertTrue(self.node_state('idle', 'busy'))
 
     def test_init_shutdown_scheduling(self):
         self.make_actor()
@@ -293,7 +294,8 @@ class ComputeNodeMonitorActorTestCase(testutil.ActorTestMixin,
         self.assertFalse(self.node_actor.shutdown_eligible().get(self.TIMEOUT))
 
     def test_no_shutdown_when_node_state_unknown(self):
-        self.make_actor(5, testutil.arvados_node_mock(5, info={}))
+        self.make_actor(5, testutil.arvados_node_mock(
+            5, crunch_worker_state=None))
         self.shutdowns._set_state(True, 600)
         self.assertFalse(self.node_actor.shutdown_eligible().get(self.TIMEOUT))
 
index fae63a5663d82035b43d82288de82ade2788f99b..8e528247bda3bb3147ec66f154fb4cbe07093f61 100644 (file)
@@ -12,15 +12,8 @@ import mock
 import arvnodeman.computenode.driver.ec2 as ec2
 from . import testutil
 
-class EC2ComputeNodeDriverTestCase(unittest.TestCase):
-    def setUp(self):
-        self.driver_mock = mock.MagicMock(name='driver_mock')
-
-    def new_driver(self, auth_kwargs={}, list_kwargs={}, create_kwargs={}):
-        create_kwargs.setdefault('ping_host', '100::')
-        return ec2.ComputeNodeDriver(
-            auth_kwargs, list_kwargs, create_kwargs,
-            driver_class=self.driver_mock)
+class EC2ComputeNodeDriverTestCase(testutil.DriverTestMixin, unittest.TestCase):
+    TEST_CLASS = ec2.ComputeNodeDriver
 
     def test_driver_instantiation(self):
         kwargs = {'key': 'testkey'}
@@ -37,15 +30,12 @@ class EC2ComputeNodeDriverTestCase(unittest.TestCase):
         self.assertEqual({'tag:test': 'true'},
                           list_method.call_args[1].get('ex_filters'))
 
-    def test_create_location_loaded_at_initialization(self):
-        kwargs = {'location': 'testregion'}
-        driver = self.new_driver(create_kwargs=kwargs)
-        self.assertTrue(self.driver_mock().list_locations)
-
     def test_create_image_loaded_at_initialization(self):
-        kwargs = {'image': 'testimage'}
-        driver = self.new_driver(create_kwargs=kwargs)
-        self.assertTrue(self.driver_mock().list_images)
+        list_method = self.driver_mock().list_images
+        list_method.return_value = [testutil.cloud_object_mock(c)
+                                    for c in 'abc']
+        driver = self.new_driver(create_kwargs={'image_id': 'b'})
+        self.assertEqual(1, list_method.call_count)
 
     def test_create_includes_ping_secret(self):
         arv_node = testutil.arvados_node_mock(info={'ping_secret': 'ssshh'})
diff --git a/services/nodemanager/tests/test_computenode_driver_gce.py b/services/nodemanager/tests/test_computenode_driver_gce.py
new file mode 100644 (file)
index 0000000..f995a8d
--- /dev/null
@@ -0,0 +1,158 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import json
+import time
+import unittest
+
+import mock
+
+import arvnodeman.computenode.driver.gce as gce
+from . import testutil
+
+class GCEComputeNodeDriverTestCase(testutil.DriverTestMixin, unittest.TestCase):
+    TEST_CLASS = gce.ComputeNodeDriver
+
+    def test_driver_instantiation(self):
+        kwargs = {'user_id': 'foo'}
+        driver = self.new_driver(auth_kwargs=kwargs)
+        self.assertTrue(self.driver_mock.called)
+        self.assertEqual(kwargs, self.driver_mock.call_args[1])
+
+    def test_create_image_loaded_at_initialization_by_name(self):
+        image_mocks = [testutil.cloud_object_mock(c) for c in 'abc']
+        list_method = self.driver_mock().list_images
+        list_method.return_value = image_mocks
+        driver = self.new_driver(create_kwargs={'image': 'B'})
+        self.assertEqual(1, list_method.call_count)
+
+    def test_list_sizes_requires_location_match(self):
+        locations = [testutil.cloud_object_mock(name)
+                     for name in ['there', 'here', 'other']]
+        self.driver_mock().list_locations.return_value = locations
+        driver = self.new_driver(create_kwargs={'location': 'HERE'})
+        driver.list_sizes()
+        self.assertIs(locations[1],
+                      self.driver_mock().list_sizes.call_args[0][0])
+
+    def test_create_includes_ping_secret(self):
+        arv_node = testutil.arvados_node_mock(info={'ping_secret': 'ssshh'})
+        driver = self.new_driver()
+        driver.create_node(testutil.MockSize(1), arv_node)
+        metadata = self.driver_mock().create_node.call_args[1]['ex_metadata']
+        self.assertIn('ping_secret=ssshh', metadata.get('user-data'))
+
+    def test_create_sets_default_hostname(self):
+        driver = self.new_driver()
+        driver.create_node(testutil.MockSize(1),
+                           testutil.arvados_node_mock(254, hostname=None))
+        create_kwargs = self.driver_mock().create_node.call_args[1]
+        self.assertEqual('compute-0000000000000fe-zzzzz',
+                         create_kwargs.get('name'))
+        self.assertEqual('dynamic.compute.zzzzz.arvadosapi.com',
+                         create_kwargs.get('ex_metadata', {}).get('hostname'))
+
+    def test_create_tags_from_list_tags(self):
+        driver = self.new_driver(list_kwargs={'tags': 'testA, testB'})
+        driver.create_node(testutil.MockSize(1), testutil.arvados_node_mock())
+        self.assertEqual(['testA', 'testB'],
+                         self.driver_mock().create_node.call_args[1]['ex_tags'])
+
+    def test_list_nodes_requires_tags_match(self):
+        # A node matches if our list tags are a subset of the node's tags.
+        # Test behavior with no tags, no match, partial matches, different
+        # order, and strict supersets.
+        cloud_mocks = [
+            testutil.cloud_node_mock(node_num, tags=tag_set)
+            for node_num, tag_set in enumerate(
+                [[], ['bad'], ['good'], ['great'], ['great', 'ok'],
+                 ['great', 'good'], ['good', 'fantastic', 'great']])]
+        cloud_mocks.append(testutil.cloud_node_mock())
+        self.driver_mock().list_nodes.return_value = cloud_mocks
+        driver = self.new_driver(list_kwargs={'tags': 'good, great'})
+        self.assertItemsEqual(['5', '6'], [n.id for n in driver.list_nodes()])
+
+    def build_gce_metadata(self, metadata_dict):
+        # Convert a plain metadata dictionary to the GCE data structure.
+        return {
+            'kind': 'compute#metadata',
+            'fingerprint': 'testprint',
+            'items': [{'key': key, 'value': metadata_dict[key]}
+                      for key in metadata_dict],
+            }
+
+    def check_sync_node_updates_hostname_tag(self, plain_metadata):
+        start_metadata = self.build_gce_metadata(plain_metadata)
+        arv_node = testutil.arvados_node_mock(1)
+        cloud_node = testutil.cloud_node_mock(
+            2, metadata=start_metadata.copy(),
+            zone=testutil.cloud_object_mock('testzone'))
+        driver = self.new_driver()
+        driver.sync_node(cloud_node, arv_node)
+        args, kwargs = self.driver_mock().connection.async_request.call_args
+        self.assertEqual('/zones/TESTZONE/instances/2/setMetadata', args[0])
+        for key in ['kind', 'fingerprint']:
+            self.assertEqual(start_metadata[key], kwargs['data'][key])
+        plain_metadata['hostname'] = 'compute1.zzzzz.arvadosapi.com'
+        self.assertEqual(
+            plain_metadata,
+            {item['key']: item['value'] for item in kwargs['data']['items']})
+
+    def test_sync_node_updates_hostname_tag(self):
+        self.check_sync_node_updates_hostname_tag(
+            {'testkey': 'testvalue', 'hostname': 'startvalue'})
+
+    def test_sync_node_adds_hostname_tag(self):
+        self.check_sync_node_updates_hostname_tag({'testkey': 'testval'})
+
+    def test_sync_node_raises_exception_on_failure(self):
+        arv_node = testutil.arvados_node_mock(8)
+        cloud_node = testutil.cloud_node_mock(
+            9, metadata={}, zone=testutil.cloud_object_mock('failzone'))
+        mock_response = self.driver_mock().connection.async_request()
+        mock_response.success.return_value = False
+        mock_response.error = 'sync error test'
+        driver = self.new_driver()
+        with self.assertRaises(Exception) as err_check:
+            driver.sync_node(cloud_node, arv_node)
+        self.assertIs(err_check.exception.__class__, Exception)
+        self.assertIn('sync error test', str(err_check.exception))
+
+    def test_node_create_time_zero_for_unknown_nodes(self):
+        node = testutil.cloud_node_mock()
+        self.assertEqual(0, gce.ComputeNodeDriver.node_start_time(node))
+
+    def test_node_create_time_for_known_node(self):
+        node = testutil.cloud_node_mock(metadata=self.build_gce_metadata(
+                {'booted_at': '1970-01-01T00:01:05Z'}))
+        self.assertEqual(65, gce.ComputeNodeDriver.node_start_time(node))
+
+    def test_node_create_time_recorded_when_node_boots(self):
+        start_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
+        arv_node = testutil.arvados_node_mock()
+        driver = self.new_driver()
+        driver.create_node(testutil.MockSize(1), arv_node)
+        metadata = self.driver_mock().create_node.call_args[1]['ex_metadata']
+        self.assertLessEqual(start_time, metadata.get('booted_at'))
+
+    def test_deliver_ssh_key_in_metadata(self):
+        test_ssh_key = 'ssh-rsa-foo'
+        arv_node = testutil.arvados_node_mock(1)
+        with mock.patch('__builtin__.open',
+                        mock.mock_open(read_data=test_ssh_key)) as mock_file:
+            driver = self.new_driver(create_kwargs={'ssh_key': 'ssh-key-file'})
+        mock_file.assert_called_once_with('ssh-key-file')
+        driver.create_node(testutil.MockSize(1), arv_node)
+        metadata = self.driver_mock().create_node.call_args[1]['ex_metadata']
+        self.assertEqual('root:ssh-rsa-foo', metadata.get('sshKeys'))
+
+    def test_create_driver_with_service_accounts(self):
+        service_accounts = {'email': 'foo@bar', 'scopes': ['storage-full']}
+        srv_acct_config = {'service_accounts': json.dumps(service_accounts)}
+        arv_node = testutil.arvados_node_mock(1)
+        driver = self.new_driver(create_kwargs=srv_acct_config)
+        driver.create_node(testutil.MockSize(1), arv_node)
+        self.assertEqual(
+            service_accounts,
+            self.driver_mock().create_node.call_args[1]['ex_service_accounts'])
index 96fcde9524b910b56b45a2aea01bd3bdf06066cf..bdba83ade10f5b30b293cf36c1c37fd6570e0137 100644 (file)
@@ -49,8 +49,18 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
     def monitor_list(self):
         return pykka.ActorRegistry.get_by_class(ComputeNodeMonitorActor)
 
+    def monitored_arvados_nodes(self):
+        pairings = []
+        for future in [actor.proxy().arvados_node
+                       for actor in self.monitor_list()]:
+            try:
+                pairings.append(future.get(self.TIMEOUT))
+            except pykka.ActorDeadError:
+                pass
+        return pairings
+
     def alive_monitor_count(self):
-        return sum(1 for actor in self.monitor_list() if actor.is_alive())
+        return len(self.monitored_arvados_nodes())
 
     def assertShutdownCancellable(self, expected=True):
         self.assertTrue(self.node_shutdown.start.called)
@@ -65,9 +75,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
         self.assertTrue(self.node_setup.start.called)
 
     def check_monitors_arvados_nodes(self, *arv_nodes):
-        pairings = [monitor.proxy().arvados_node
-                    for monitor in self.monitor_list() if monitor.is_alive()]
-        self.assertItemsEqual(arv_nodes, pykka.get_all(pairings, self.TIMEOUT))
+        self.assertItemsEqual(arv_nodes, self.monitored_arvados_nodes())
 
     def test_node_pairing(self):
         cloud_node = testutil.cloud_node_mock(1)
index 30808ac73816e9056d6ee8c91025305e7570520e..1c53c68489c6f920eaf53f3c97253f6464c31793 100644 (file)
@@ -2,6 +2,7 @@
 
 from __future__ import absolute_import, print_function
 
+import datetime
 import threading
 import time
 
@@ -13,22 +14,33 @@ from . import pykka_timeout
 no_sleep = mock.patch('time.sleep', lambda n: None)
 
 def arvados_node_mock(node_num=99, job_uuid=None, age=0, **kwargs):
+    mod_time = datetime.datetime.utcnow() - datetime.timedelta(seconds=age)
     if job_uuid is True:
         job_uuid = 'zzzzz-jjjjj-jobjobjobjobjob'
-    slurm_state = 'idle' if (job_uuid is None) else 'alloc'
+    crunch_worker_state = 'idle' if (job_uuid is None) else 'busy'
     node = {'uuid': 'zzzzz-yyyyy-{:015x}'.format(node_num),
-            'created_at': '2014-01-01T01:02:03Z',
-            'modified_at': time.strftime('%Y-%m-%dT%H:%M:%SZ',
-                                         time.gmtime(time.time() - age)),
+            'created_at': '2014-01-01T01:02:03.04050607Z',
+            'modified_at': mod_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
+            'slot_number': node_num,
             'hostname': 'compute{}'.format(node_num),
             'domain': 'zzzzz.arvadosapi.com',
             'ip_address': ip_address_mock(node_num),
             'job_uuid': job_uuid,
-            'info': {'slurm_state': slurm_state}}
+            'crunch_worker_state': crunch_worker_state,
+            'info': {'ping_secret': 'defaulttestsecret'}}
     node.update(kwargs)
     return node
 
-def cloud_node_mock(node_num=99):
+def cloud_object_mock(name_id):
+    # A very generic mock, useful for stubbing libcloud objects we
+    # only search for and pass around, like locations, subnets, etc.
+    cloud_object = mock.NonCallableMagicMock(['id', 'name'],
+                                             name='cloud_object')
+    cloud_object.id = str(name_id)
+    cloud_object.name = cloud_object.id.upper()
+    return cloud_object
+
+def cloud_node_mock(node_num=99, **extra):
     node = mock.NonCallableMagicMock(
         ['id', 'name', 'state', 'public_ips', 'private_ips', 'driver', 'size',
          'image', 'extra'],
@@ -37,6 +49,7 @@ def cloud_node_mock(node_num=99):
     node.name = node.id
     node.public_ips = []
     node.private_ips = [ip_address_mock(node_num)]
+    node.extra = extra
     return node
 
 def ip_address_mock(last_octet):
@@ -104,6 +117,21 @@ class ActorTestMixin(object):
                 return result
 
 
+class DriverTestMixin(object):
+    def setUp(self):
+        self.driver_mock = mock.MagicMock(name='driver_mock')
+        super(DriverTestMixin, self).setUp()
+
+    def new_driver(self, auth_kwargs={}, list_kwargs={}, create_kwargs={}):
+        create_kwargs.setdefault('ping_host', '100::')
+        return self.TEST_CLASS(
+            auth_kwargs, list_kwargs, create_kwargs,
+            driver_class=self.driver_mock)
+
+    def driver_method_args(self, method_name):
+        return getattr(self.driver_mock(), method_name).call_args
+
+
 class RemotePollLoopActorTestMixin(ActorTestMixin):
     def build_monitor(self, *args, **kwargs):
         self.timer = mock.MagicMock(name='timer_mock')