end
def index
+ @objects = nil if !defined?(@objects)
find_objects_for_index if !@objects
render_index
end
end
def choose
+ @objects = nil if !defined?(@objects)
params[:limit] ||= 40
respond_to do |f|
if params[:partial]
def accept_uuid_as_id_param
- if params[:id] and params[:id].match /\D/
+ if params[:id] and params[:id].match(/\D/)
params[:uuid] = params.delete :id
end
end
helper_method :user_notifications
def user_notifications
+ @errors = nil if !defined?(@errors)
return [] if @errors or not current_user.andand.is_active or not Rails.configuration.show_user_notifications
@notifications ||= @@notification_tests.map do |t|
t.call(self, current_user)
helper_method :my_starred_projects
def my_starred_projects user
- return if @starred_projects
+ return if defined?(@starred_projects) && @starred_projects
links = Link.filter([['tail_uuid', '=', user.uuid],
['link_class', '=', 'star'],
['head_uuid', 'is_a', 'arvados#group']]).select(%w(head_uuid))
# That is: get toplevel projects under home, get subprojects of
# these projects, and so on until we hit the limit.
def my_wanted_projects(user, page_size=100)
- return @my_wanted_projects if @my_wanted_projects
+ return @my_wanted_projects if defined?(@my_wanted_projects) && @my_wanted_projects
from_top = []
uuids = [user.uuid]
end
def build_my_wanted_projects_tree(user, page_size=100)
- return @my_wanted_projects_tree if @my_wanted_projects_tree
+ return @my_wanted_projects_tree if defined?(@my_wanted_projects_tree) && @my_wanted_projects_tree
parent_of = {user.uuid => 'me'}
my_wanted_projects(user, page_size).each do |ob|
children_of[parent_of[ob.uuid]] ||= []
children_of[parent_of[ob.uuid]] << ob
end
- buildtree = lambda do |children_of, root_uuid=false|
+ buildtree = lambda do |chldrn_of, root_uuid=false|
tree = {}
- children_of[root_uuid].andand.each do |ob|
- tree[ob] = buildtree.call(children_of, ob.uuid)
+ chldrn_of[root_uuid].andand.each do |ob|
+ tree[ob] = buildtree.call(chldrn_of, ob.uuid)
end
tree
end
def remove_items
@removed_uuids = []
- links = []
params[:item_uuids].collect { |uuid| ArvadosBase.find uuid }.each do |item|
if item.class == Collection or item.class == Group
# Use delete API on collections and projects/groups
end
def current_api_host
- Rails.configuration.arvados_v1_base.gsub /https?:\/\/|\/arvados\/v1/,''
+ Rails.configuration.arvados_v1_base.gsub(/https?:\/\/|\/arvados\/v1/, '')
end
def current_uuid_prefix
def cwl_inputs_required(object, inputs_schema, set_attr_path)
r = 0
inputs_schema.each do |input|
- required, primary_type, param_id = cwl_input_info(input)
- dn, attrvalue = cwl_input_value(object, input, set_attr_path + [param_id])
+ required, _, param_id = cwl_input_info(input)
+ _, attrvalue = cwl_input_value(object, input, set_attr_path + [param_id])
r += 1 if required and attrvalue.nil?
end
r
private
def is_textile?( object, attr )
- is_textile = object.textile_attributes.andand.include?(attr)
+ object.textile_attributes.andand.include?(attr)
end
end
# For the benefit of themes that still expect $arvados_api_client to work:
class ArvadosClientProxyHack
def method_missing *args
- ArvadosApiClient.new_or_current.send *args
+ ArvadosApiClient.new_or_current.send(*args)
end
end
$arvados_api_client = ArvadosClientProxyHack.new
f0 = '' if f0 == '.'
f0 = f0[2..-1] if f0[0..1] == './'
f0 += '/' if not f0.empty?
- file_path = "#{f0}#{file[1]}"
+ "#{f0}#{file[1]}"
end
##
end
else
if step[:progress] and
- (re = step[:progress].match /^(\d+)\+(\d+)\/(\d+)$/)
+ (re = step[:progress].match(/^(\d+)\+(\d+)\/(\d+)$/))
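+          # Illustrative: progress "3+1/10" (3 steps done, 1 running, 10 total) yields (3 + 0.5)/10 = 0.35.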
pj[:progress] = (((re[1].to_f + re[2].to_f/2) / re[3].to_f) rescue 0.5)
else
pj[:progress] = 0.0
{label: 'output'})
end
# Input collection nodes
- output_pdhs = @opts[:output_collections].values.collect{|c|
- c[:portable_data_hash]}
+ output_pdhs = @opts[:output_collections].values.collect{|oc|
+ oc[:portable_data_hash]}
ProvenanceHelper::cr_input_pdhs(cr).each do |pdh|
if not output_pdhs.include?(pdh)
# Search for collections in the same project first
- cols = @opts[:input_collections][pdh].andand.select{|c|
- c[:owner_uuid] == cr[:owner_uuid]}
+ cols = @opts[:input_collections][pdh].andand.select{|ic|
+ ic[:owner_uuid] == cr[:owner_uuid]}
if not cols or cols.empty?
# Search for any collection with this PDH
cols = @opts[:input_collections][pdh]
end
def self.columns
+ @discovered_columns = [] if !defined?(@discovered_columns)
return @discovered_columns if @discovered_columns.andand.any?
- @discovered_columns = []
@attribute_info ||= {}
schema = arvados_api_client.discovery[:schemas][self.to_s.to_sym]
return @discovered_columns if schema.nil?
if opts[:class].is_a? Class
return opts[:class]
end
- if uuid.match /^[0-9a-f]{32}(\+[^,]+)*(,[0-9a-f]{32}(\+[^,]+)*)*$/
+ if uuid.match(/^[0-9a-f]{32}(\+[^,]+)*(,[0-9a-f]{32}(\+[^,]+)*)*$/)
return Collection
end
resource_class = nil
- uuid.match /^[0-9a-z]{5}-([0-9a-z]{5})-[0-9a-z]{15}$/ do |re|
+ uuid.match(/^[0-9a-z]{5}-([0-9a-z]{5})-[0-9a-z]{15}$/) do |re|
resource_class ||= arvados_api_client.
kind_class(self.uuid_infix_object_kind[re[1]])
end
if opts[:referring_object] and
opts[:referring_attr] and
- opts[:referring_attr].match /_uuid$/
+ opts[:referring_attr].match(/_uuid$/)
resource_class ||= arvados_api_client.
kind_class(opts[:referring_object].
attributes[opts[:referring_attr].
@fetch_multiple_pages = true
@arvados_api_token = Thread.current[:arvados_api_token]
@reader_tokens = Thread.current[:reader_tokens]
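+    # Explicitly initialize the remaining query state so that reading any of
+    # these before the first fetch is well-defined and does not trigger
+    # "instance variable not initialized" warnings.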
+ @results = nil
+ @count = nil
+ @offset = 0
+ @cond = nil
+ @eager = nil
+ @select = nil
+ @orderby_spec = nil
+ @filters = nil
+ @distinct = nil
+ @include_trash = nil
+ @limit = nil
end
def eager(bool=true)
end
end
end
- @cond.keys.select { |x| x.match /_kind$/ }.each do |kind_key|
+ @cond.keys.select { |x| x.match(/_kind$/) }.each do |kind_key|
if @cond[kind_key].is_a? Class
@cond = @cond.merge({ kind_key => 'arvados#' + arvados_api_client.class_kind(@cond[kind_key]) })
end
def each(&block)
if not @results.nil?
- @results.each &block
+ @results.each(&block)
else
self.each_page do |items|
items.each do |i|
arvados_api_client.api "collections/#{self.uuid}/", "used_by"
end
- def uuid
- if self[:uuid].nil?
- return self[:portable_data_hash]
- else
- super
- end
- end
-
def friendly_link_name lookup=nil
name || portable_data_hash
end
@container = Container.find(container_uuid)
end
end
+ @container = nil if !defined?(@container)
@child_proxies = child_objects
end
} %>
</ul>
</li>
- <% if @name_link or @object %>
+ <% if (defined?(@name_link) && @name_link) or (defined?(@object) && @object) %>
<li class="nav-separator">
<i class="fa fa-lg fa-angle-double-right"></i>
</li>
<meta property="og:type" content="article" />
<meta property="og:url" content="<%= request.url %>" />
<meta property="og:site_name" content="<%= Rails.configuration.site_name %>" />
- <% if @object %>
+ <% if defined?(@object) && @object %>
<% if @object.respond_to?(:name) and @object.name.present? %>
<meta property="og:title" content="<%= @object.name%>" />
<% end %>
recent_cr_containers = recent_crs.map {|cr| cr.container_uuid}.compact.uniq
preload_objects_for_dataclass(Container, recent_cr_containers) if recent_cr_containers.andand.any?
- # fetch children of all the active crs in one call, if there are any
- active_crs = recent_crs.each {|cr| cr if (cr.priority.andand > 0 and cr.state != 'Final' and cr.container_uuid)}
-
wus = {}
outputs = []
recent_procs.each do |p|
# end
ActiveSupport::Inflector.inflections do |inflect|
- inflect.plural /^([Ss]pecimen)$/i, '\1s'
- inflect.singular /^([Ss]pecimen)s?/i, '\1'
- inflect.plural /^([Hh]uman)$/i, '\1s'
- inflect.singular /^([Hh]uman)s?/i, '\1'
+ inflect.plural(/^([Ss]pecimen)$/i, '\1s')
+ inflect.singular(/^([Ss]pecimen)s?/i, '\1')
+ inflect.plural(/^([Hh]uman)$/i, '\1s')
+ inflect.singular(/^([Hh]uman)s?/i, '\1')
end
%w(application.default application).each do |cfgfile|
path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml"
- if File.exists? path
+ if File.exist? path
yaml = ERB.new(IO.read path).result(binding)
confs = YAML.load(yaml, deserialize_symbols: true)
$application_config.merge!(confs['common'] || {})
# exception if not found. Use this with assertions to explain that
# the error signifies a failed test rather than an unexpected error
# during a testing procedure.
- def find? *args
+ def find?(*args)
begin
- find *args
+ find(*args)
rescue Capybara::ElementNotFound
false
end
def after_teardown
if self.class.want_reset_api_fixtures[:after_each_test] and
- @want_reset_api_fixtures != false
+ (!defined?(@want_reset_api_fixtures) or @want_reset_api_fixtures != false)
self.class.reset_api_fixtures_now
end
super
RUN gpg --import --no-tty /tmp/mpapis.asc && \
gpg --import --no-tty /tmp/pkuczynski.asc && \
curl -L https://get.rvm.io | bash -s stable && \
- /usr/local/rvm/bin/rvm install 2.3 && \
- /usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
- /usr/local/rvm/bin/rvm-exec default gem install bundler && \
+ /usr/local/rvm/bin/rvm install 2.5 && \
+ /usr/local/rvm/bin/rvm alias create default ruby-2.5 && \
/usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
# Install golang binary
RUN gpg --import --no-tty /tmp/mpapis.asc && \
gpg --import --no-tty /tmp/pkuczynski.asc && \
curl -L https://get.rvm.io | bash -s stable && \
- /usr/local/rvm/bin/rvm install 2.3 && \
- /usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
- /usr/local/rvm/bin/rvm-exec default gem install bundler && \
+ /usr/local/rvm/bin/rvm install 2.5 && \
+ /usr/local/rvm/bin/rvm alias create default ruby-2.5 && \
/usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
# Install golang binary
RUN gpg --import --no-tty /tmp/mpapis.asc && \
gpg --import --no-tty /tmp/pkuczynski.asc && \
curl -L https://get.rvm.io | bash -s stable && \
- /usr/local/rvm/bin/rvm install 2.3 && \
- /usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
- /usr/local/rvm/bin/rvm-exec default gem install bundler && \
+ /usr/local/rvm/bin/rvm install 2.5 && \
+ /usr/local/rvm/bin/rvm alias create default ruby-2.5 && \
/usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
# Install golang binary
RUN gpg --import --no-tty /tmp/mpapis.asc && \
gpg --import --no-tty /tmp/pkuczynski.asc && \
curl -L https://get.rvm.io | bash -s stable && \
- /usr/local/rvm/bin/rvm install 2.3 && \
- /usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
- /usr/local/rvm/bin/rvm-exec default gem install bundler && \
+ /usr/local/rvm/bin/rvm install 2.5 && \
+ /usr/local/rvm/bin/rvm alias create default ruby-2.5 && \
/usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
# Install golang binary
RUN gpg --import --no-tty /tmp/mpapis.asc && \
gpg --import --no-tty /tmp/pkuczynski.asc && \
curl -L https://get.rvm.io | bash -s stable && \
- /usr/local/rvm/bin/rvm install 2.3 && \
- /usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
- /usr/local/rvm/bin/rvm-exec default gem install bundler && \
+ /usr/local/rvm/bin/rvm install 2.5 && \
+ /usr/local/rvm/bin/rvm alias create default ruby-2.5 && \
/usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
# Install golang binary
RUN gpg --import --no-tty /tmp/mpapis.asc && \
gpg --import --no-tty /tmp/pkuczynski.asc && \
curl -L https://get.rvm.io | bash -s stable && \
- /usr/local/rvm/bin/rvm install 2.3 && \
- /usr/local/rvm/bin/rvm alias create default ruby-2.3
+ /usr/local/rvm/bin/rvm install 2.5 && \
+ /usr/local/rvm/bin/rvm alias create default ruby-2.5
# udev daemon can't start in a container, so don't try.
RUN mkdir -p /etc/udev/disabled
RUN gpg --import --no-tty /tmp/mpapis.asc && \
gpg --import --no-tty /tmp/pkuczynski.asc && \
curl -L https://get.rvm.io | bash -s stable && \
- /usr/local/rvm/bin/rvm install 2.3 && \
- /usr/local/rvm/bin/rvm alias create default ruby-2.3
+ /usr/local/rvm/bin/rvm install 2.5 && \
+ /usr/local/rvm/bin/rvm alias create default ruby-2.5
# udev daemon can't start in a container, so don't try.
RUN mkdir -p /etc/udev/disabled
RUN gpg --import --no-tty /tmp/mpapis.asc && \
gpg --import --no-tty /tmp/pkuczynski.asc && \
curl -L https://get.rvm.io | bash -s stable && \
- /usr/local/rvm/bin/rvm install 2.3 && \
- /usr/local/rvm/bin/rvm alias create default ruby-2.3
+ /usr/local/rvm/bin/rvm install 2.5 && \
+ /usr/local/rvm/bin/rvm alias create default ruby-2.5
# udev daemon can't start in a container, so don't try.
RUN mkdir -p /etc/udev/disabled
RUN gpg --import --no-tty /tmp/mpapis.asc && \
gpg --import --no-tty /tmp/pkuczynski.asc && \
curl -L https://get.rvm.io | bash -s stable && \
- /usr/local/rvm/bin/rvm install 2.3 && \
- /usr/local/rvm/bin/rvm alias create default ruby-2.3
+ /usr/local/rvm/bin/rvm install 2.5 && \
+ /usr/local/rvm/bin/rvm alias create default ruby-2.5
# udev daemon can't start in a container, so don't try.
RUN mkdir -p /etc/udev/disabled
&& cd "$WORKSPACE/$srcpath" \
&& bundle_install_trylocal \
&& gem build "$gemname.gemspec" \
- && with_test_gemset gem install --no-ri --no-rdoc $(ls -t "$gemname"-*.gem|head -n1)
+ && with_test_gemset gem install --no-document $(ls -t "$gemname"-*.gem|head -n1)
}
install_sdk/ruby() {
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-Ruby 2.3 is recommended; Ruby 2.1 is also known to work.
+Ruby 2.5 is recommended; Ruby 2.3 is also known to work.
h4(#rvm). *Option 1: Install with RVM*
<notextile>
-<pre><code><span class="userinput">sudo gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
-\curl -sSL https://get.rvm.io | sudo bash -s stable --ruby=2.3
+<pre><code><span class="userinput">sudo gpg --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB
+\curl -sSL https://get.rvm.io | sudo bash -s stable --ruby=2.5
</span></code></pre></notextile>
Either log out and log back in to activate RVM, or explicitly load it in all open shells like this:
<notextile>
<pre><code><span class="userinput">mkdir -p ~/src
cd ~/src
-curl -f http://cache.ruby-lang.org/pub/ruby/2.3/ruby-2.3.3.tar.gz | tar xz
-cd ruby-2.3.3
+curl -f http://cache.ruby-lang.org/pub/ruby/2.5/ruby-2.5.5.tar.gz | tar xz
+cd ruby-2.5.5
./configure --disable-install-rdoc
make
sudo make install
--- /dev/null
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Ruby 2.3 is recommended; Ruby 2.1 is also known to work.
+
+h4(#rvm). *Option 1: Install with RVM*
+
+<notextile>
+<pre><code><span class="userinput">sudo gpg --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB
+\curl -sSL https://get.rvm.io | sudo bash -s stable --ruby=2.3
+</span></code></pre></notextile>
+
+Either log out and log back in to activate RVM, or explicitly load it in all open shells like this:
+
+<notextile>
+<pre><code><span class="userinput">source /usr/local/rvm/scripts/rvm
+</span></code></pre></notextile>
+
+Once RVM is activated in your shell, install Bundler:
+
+<notextile>
+<pre><code>~$ <span class="userinput">gem install bundler</span>
+</code></pre></notextile>
+
+h4(#fromsource). *Option 2: Install from source*
+
+Install prerequisites for Debian 8:
+
+<notextile>
+<pre><code><span class="userinput">sudo apt-get install \
+ bison build-essential gettext libcurl3 libcurl3-gnutls \
+ libcurl4-openssl-dev libpcre3-dev libreadline-dev \
+ libssl-dev libxslt1.1 zlib1g-dev
+</span></code></pre></notextile>
+
+Install prerequisites for CentOS 7:
+
+<notextile>
+<pre><code><span class="userinput">sudo yum install \
+ libyaml-devel glibc-headers autoconf gcc-c++ glibc-devel \
+ patch readline-devel zlib-devel libffi-devel openssl-devel \
+ make automake libtool bison sqlite-devel tar
+</span></code></pre></notextile>
+
+Install prerequisites for Ubuntu 12.04 or 14.04:
+
+<notextile>
+<pre><code><span class="userinput">sudo apt-get install \
+ gawk g++ gcc make libc6-dev libreadline6-dev zlib1g-dev libssl-dev \
+ libyaml-dev libsqlite3-dev sqlite3 autoconf libgdbm-dev \
+ libncurses5-dev automake libtool bison pkg-config libffi-dev curl
+</span></code></pre></notextile>
+
+Build and install Ruby:
+
+<notextile>
+<pre><code><span class="userinput">mkdir -p ~/src
+cd ~/src
+curl -f http://cache.ruby-lang.org/pub/ruby/2.3/ruby-2.3.3.tar.gz | tar xz
+cd ruby-2.3.3
+./configure --disable-install-rdoc
+make
+sudo make install
+
+sudo -i gem install bundler</span>
+</code></pre></notextile>
h3(#install_ruby_and_bundler). Install Ruby and Bundler
-{% include 'install_ruby_and_bundler' %}
+{% include 'install_ruby_and_bundler_sso' %}
h3(#install_web_server). Set up a Web server
import arvados
api = arvados.api()
container_request_uuid="qr1hi-xvhdp-zzzzzzzzzzzzzzz"
-container_request = arvados.api().container_requests().get(uuid=container_request_uuid).execute()
+container_request = api.container_requests().get(uuid=container_request_uuid).execute()
print(container_request["mounts"]["/var/lib/cwl/cwl.input.json"])
{% endcodeblock %}
import arvados.collection
api = arvados.api()
container_request_uuid="qr1hi-xvhdp-zzzzzzzzzzzzzzz"
-container_request = arvados.api().container_requests().get(uuid=container_request_uuid).execute()
+container_request = api.container_requests().get(uuid=container_request_uuid).execute()
collection = arvados.collection.CollectionReader(container_request["output_uuid"])
print(collection.open("cwl.output.json").read())
{% endcodeblock %}
+h2. Get state of a CWL workflow
+
+{% codeblock as python %}
+import arvados
+def get_cr_state(cr_uuid):
+    api = arvados.api()
+    cr = api.container_requests().get(uuid=cr_uuid).execute()
+    if cr['container_uuid'] is None:
+        return cr['state']
+    c = api.containers().get(uuid=cr['container_uuid']).execute()
+    if cr['state'] == 'Final' and c['state'] != 'Complete':
+        return 'Cancelled'
+    elif c['state'] in ['Locked', 'Queued']:
+        if c['priority'] == 0:
+            return 'On hold'
+        else:
+            return 'Queued'
+    elif c['state'] == 'Complete' and c['exit_code'] != 0:
+        return 'Failed'
+    elif c['state'] == 'Running':
+        if c['runtime_status'].get('error', None):
+            return 'Failing'
+        elif c['runtime_status'].get('warning', None):
+            return 'Warning'
+    return c['state']
+container_request_uuid = 'qr1hi-xvhdp-zzzzzzzzzzzzzzz'
+print(get_cr_state(container_request_uuid))
+{% endcodeblock %}
+
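+Because a container request can take a while to reach a terminal state, it is often convenient to poll the helper above. Here is a minimal sketch (assuming the get_cr_state function defined in the previous example):
+
+{% codeblock as python %}
+import time
+container_request_uuid = 'qr1hi-xvhdp-zzzzzzzzzzzzzzz'
+while True:
+    state = get_cr_state(container_request_uuid)
+    print(state)
+    if state in ('Complete', 'Failed', 'Cancelled'):
+        break
+    time.sleep(30)
+{% endcodeblock %}
+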
h2. List input of child requests
{% codeblock as python %}
api = arvados.api()
parent_request_uuid = "qr1hi-xvhdp-zzzzzzzzzzzzzzz"
namefilter = "bwa%" # the "like" filter uses SQL pattern match syntax
-container_request = arvados.api().container_requests().get(uuid=parent_request_uuid).execute()
+container_request = api.container_requests().get(uuid=parent_request_uuid).execute()
parent_container_uuid = container_request["container_uuid"]
-child_requests = arvados.api().container_requests().list(filters=[
+child_requests = api.container_requests().list(filters=[
["requesting_container_uuid", "=", parent_container_uuid],
["name", "like", namefilter]]).execute()
for c in child_requests["items"]:
api = arvados.api()
parent_request_uuid = "qr1hi-xvhdp-zzzzzzzzzzzzzzz"
namefilter = "bwa%" # the "like" filter uses SQL pattern match syntax
-container_request = arvados.api().container_requests().get(uuid=parent_request_uuid).execute()
+container_request = api.container_requests().get(uuid=parent_request_uuid).execute()
parent_container_uuid = container_request["container_uuid"]
-child_requests = arvados.api().container_requests().list(filters=[
+child_requests = api.container_requests().list(filters=[
["requesting_container_uuid", "=", parent_container_uuid],
["name", "like", namefilter]]).execute()
output_uuids = [c["output_uuid"] for c in child_requests["items"]]
-collections = arvados.api().collections().list(filters=[["uuid", "in", output_uuids]]).execute()
+collections = api.collections().list(filters=[["uuid", "in", output_uuids]]).execute()
uuid_to_pdh = {c["uuid"]: c["portable_data_hash"] for c in collections["items"]}
for c in child_requests["items"]:
print("%s -> %s" % (c["name"], uuid_to_pdh[c["output_uuid"]]))
import arvados
api = arvados.api()
parent_request_uuid = "qr1hi-xvhdp-zzzzzzzzzzzzzzz"
-container_request = arvados.api().container_requests().get(uuid=parent_request_uuid).execute()
+container_request = api.container_requests().get(uuid=parent_request_uuid).execute()
parent_container_uuid = container_request["container_uuid"]
-child_requests = arvados.api().container_requests().list(filters=[
+child_requests = api.container_requests().list(filters=[
["requesting_container_uuid", "=", parent_container_uuid]], limit=1000).execute()
child_containers = {c["container_uuid"]: c for c in child_requests["items"]}
-cancelled_child_containers = arvados.api().containers().list(filters=[
+cancelled_child_containers = api.containers().list(filters=[
["exit_code", "!=", "0"],
["uuid", "in", child_containers.keys()]], limit=1000).execute()
for c in cancelled_child_containers["items"]:
import arvados.collection
api = arvados.api()
container_request_uuid = "qr1hi-xvhdp-zzzzzzzzzzzzzzz"
-container_request = arvados.api().container_requests().get(uuid=container_request_uuid).execute()
+container_request = api.container_requests().get(uuid=container_request_uuid).execute()
collection = arvados.collection.CollectionReader(container_request["log_uuid"])
for c in collection:
print(collection.open(c).read())
if pattern is None:
continue
sfpath = substitute(primary["location"], pattern)
- required = builder.do_eval(sf["required"], context=primary)
+ required = builder.do_eval(sf.get("required"), context=primary)
if fsaccess.exists(sfpath):
primary["secondaryFiles"].append({"location": sfpath, "class": "File"})
# Note that arvados/build/run-build-packages.sh looks at this
# file to determine what version of cwltool and schema-salad to build.
install_requires=[
- 'cwltool==1.0.20190603140227',
+ 'cwltool==1.0.20190607183319',
'schema-salad==4.2.20190417121603',
'typing >= 3.6.4',
'ruamel.yaml >=0.15.54, <=0.15.77',
cwltool: "http://commonwl.org/cwltool#"
requirements:
SubworkflowFeatureRequirement: {}
+ DockerRequirement:
+ dockerPull: arvados/jobs:1.4.0.20190604172024
inputs:
i:
type: File
class TestContainer(unittest.TestCase):
+ def setUp(self):
+ cwltool.process._names = set()
+
def helper(self, runner, enable_reuse=True):
document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.1")
class TestWorkflow(unittest.TestCase):
+ def setUp(self):
+ cwltool.process._names = set()
+
def helper(self, runner, enable_reuse=True):
document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.1")
class TestJob(unittest.TestCase):
+ def setUp(self):
+ cwltool.process._names = set()
+
def helper(self, runner, enable_reuse=True):
document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.1")
class TestWorkflow(unittest.TestCase):
+
+ def setUp(self):
+ cwltool.process._names = set()
+
def helper(self, runner, enable_reuse=True):
document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.1")
import mock
import sys
import unittest
+import cwltool.process
from io import BytesIO
class TestSubmit(unittest.TestCase):
+
+ def setUp(self):
+ cwltool.process._names = set()
+
@mock.patch("arvados_cwl.arvdocker.arv_docker_get_image")
@mock.patch("time.sleep")
@stubs
arv: "http://arvados.org/cwl#"
requirements:
SubworkflowFeatureRequirement: {}
+hints:
+ DockerRequirement:
+ dockerPull: arvados/jobs:1.4.0.20190604172024
steps:
step1:
requirements:
arv:RunInSingleContainer: {}
in: []
out: []
- run: default-dir6.cwl
\ No newline at end of file
+ run: default-dir6.cwl
arv: "http://arvados.org/cwl#"
requirements:
SubworkflowFeatureRequirement: {}
+hints:
+ DockerRequirement:
+ dockerPull: arvados/jobs:1.4.0.20190604172024
steps:
step1:
requirements:
arv:RunInSingleContainer: {}
in: []
out: []
- run: default-dir7.cwl
\ No newline at end of file
+ run: default-dir7.cwl
ScatterFeatureRequirement: {}
InlineJavascriptRequirement: {}
StepInputExpressionRequirement: {}
+hints:
+ DockerRequirement:
+ dockerPull: arvados/jobs:1.4.0.20190604172024
steps:
substep:
in:
ScatterFeatureRequirement: {}
InlineJavascriptRequirement: {}
StepInputExpressionRequirement: {}
+hints:
+ DockerRequirement:
+ dockerPull: arvados/jobs:1.4.0.20190604172024
steps:
substep:
in:
ScatterFeatureRequirement: {}
InlineJavascriptRequirement: {}
StepInputExpressionRequirement: {}
+hints:
+ DockerRequirement:
+ dockerPull: arvados/jobs:1.4.0.20190604172024
steps:
substep:
in:
out: [out]
hints:
- class: arv:RunInSingleContainer
+ - class: DockerRequirement
+ dockerPull: arvados/jobs:1.4.0.20190604172024
run:
class: Workflow
id: mysub
out:
type: string
outputBinding:
- outputEval: "out"
+ outputEval: $("out")
baseCommand: cat
hints:
arv:IntermediateOutput:
outputTTL: 60
+ DockerRequirement:
+ dockerPull: arvados/jobs:1.4.0.20190604172024
steps:
substep:
in:
out:
type: string
outputBinding:
- outputEval: "out"
+ outputEval: $("out")
baseCommand: cat
"zzzzz-bi6l4-yyyyyyyyyyyyyyy": ks0.url,
"zzzzz-bi6l4-xxxxxxxxxxxxxxx": ks0.url,
"zzzzz-bi6l4-wwwwwwwwwwwwwww": ks0.url,
- uuid: ks.url},
+ uuid: ks.url},
nil,
map[string]string{
"zzzzz-bi6l4-yyyyyyyyyyyyyyy": ks0.url,
"zzzzz-bi6l4-xxxxxxxxxxxxxxx": ks0.url,
"zzzzz-bi6l4-wwwwwwwwwwwwwww": ks0.url,
- uuid: ks.url},
+ uuid: ks.url},
)
r, n, uri, err := kc.Get(hash + "+K@" + uuid)
import arvados
import arvados.collection
import base64
+import ciso8601
import copy
import datetime
import errno
Do not save upload state in a cache file for resuming.
""")
+_group = upload_opts.add_mutually_exclusive_group()
+_group.add_argument('--trash-at', metavar='YYYY-MM-DDTHH:MM', default=None,
+ help="""
+Set the trash date of the resulting collection to an absolute date in the future.
+The accepted format is defined by the ISO 8601 standard. Examples: 20090103, 2009-01-03, 20090103T181505, 2009-01-03T18:15:05.\n
+Timezone information can be added. If not provided, the date/time is assumed to be in the local system's timezone.
+""")
+_group.add_argument('--trash-after', type=int, metavar='DAYS', default=None,
+ help="""
+Set the trash date of the resulting collection to a number of days from the
+date/time that the upload process finishes.
+""")
+
arg_parser = argparse.ArgumentParser(
description='Copy data from the local filesystem to Keep.',
parents=[upload_opts, run_opts, arv_cmd.retry_opt])
put_threads=None, replication_desired=None, filename=None,
update_time=60.0, update_collection=None, storage_classes=None,
logger=logging.getLogger('arvados.arv_put'), dry_run=False,
- follow_links=True, exclude_paths=[], exclude_names=None):
+ follow_links=True, exclude_paths=[], exclude_names=None,
+ trash_at=None):
self.paths = paths
self.resume = resume
self.use_cache = use_cache
self.follow_links = follow_links
self.exclude_paths = exclude_paths
self.exclude_names = exclude_names
+ self._trash_at = trash_at
+
+ if self._trash_at is not None:
+ if type(self._trash_at) not in [datetime.datetime, datetime.timedelta]:
+ raise TypeError('trash_at should be None, timezone-naive datetime or timedelta')
+ if type(self._trash_at) == datetime.datetime and self._trash_at.tzinfo is not None:
+ raise TypeError('provided trash_at datetime should be timezone-naive')
if not self.use_cache and self.resume:
raise ArvPutArgumentConflict('resume cannot be True when use_cache is False')
if self.use_cache:
self._cache_file.close()
+ def _collection_trash_at(self):
+ """
+ Returns the trash date that the collection should use at save time.
+ Takes into account absolute/relative trash_at values requested
+ by the user.
+ """
+ if type(self._trash_at) == datetime.timedelta:
+ # Get an absolute datetime for trash_at
+ return datetime.datetime.utcnow() + self._trash_at
+ return self._trash_at
+
def save_collection(self):
if self.update:
# Check if files should be updated on the remote collection.
# The file already exist on remote collection, skip it.
pass
self._remote_collection.save(storage_classes=self.storage_classes,
- num_retries=self.num_retries)
+ num_retries=self.num_retries,
+ trash_at=self._collection_trash_at())
else:
if self.storage_classes is None:
self.storage_classes = ['default']
name=self.name, owner_uuid=self.owner_uuid,
storage_classes=self.storage_classes,
ensure_unique_name=self.ensure_unique_name,
- num_retries=self.num_retries)
+ num_retries=self.num_retries,
+ trash_at=self._collection_trash_at())
def destroy_cache(self):
if self.use_cache:
self._save_state()
except Exception as e:
self.logger.error("Unexpected error trying to save cache file: {}".format(e))
+ # Keep remote collection's trash_at attribute synced when using relative expire dates
+ if self._remote_collection is not None and type(self._trash_at) == datetime.timedelta:
+ try:
+ self._api_client.collections().update(
+ uuid=self._remote_collection.manifest_locator(),
+ body={'trash_at': self._collection_trash_at().strftime("%Y-%m-%dT%H:%M:%S.%fZ")}
+ ).execute(num_retries=self.num_retries)
+ except Exception as e:
+ self.logger.error("Unexpected error trying to update remote collection's expire date: {}".format(e))
else:
self.bytes_written = self.bytes_skipped
# Call the reporter, if any
def collection_name(self):
return self._my_collection().api_response()['name'] if self._my_collection().api_response() else None
+ def collection_trash_at(self):
+ return self._my_collection().get_trash_at()
+
def manifest_locator(self):
return self._my_collection().manifest_locator()
if install_sig_handlers:
arv_cmd.install_signal_handlers()
+ # Validate trash arguments
+ trash_at = None
+ if args.trash_at is not None:
+ # ciso8601 considers YYYYMM as invalid but YYYY-MM as valid, so here we
+ # make sure the user provides a complete YYYY-MM-DD date.
+ if not re.match(r'^\d{4}(?P<dash>-?)\d{2}?(?P=dash)\d{2}', args.trash_at):
+ logger.error("--trash-at argument format invalid, use --help to see examples.")
+ sys.exit(1)
+ # Check if no time information was provided. In that case, assume end-of-day.
+ if re.match(r'^\d{4}(?P<dash>-?)\d{2}?(?P=dash)\d{2}$', args.trash_at):
+ args.trash_at += 'T23:59:59'
+ try:
+ trash_at = ciso8601.parse_datetime(args.trash_at)
+ except:
+ logger.error("--trash-at argument format invalid, use --help to see examples.")
+ sys.exit(1)
+ else:
+ if trash_at.tzinfo is not None:
+ # Timezone-aware datetime provided.
+ utcoffset = -trash_at.utcoffset()
+ else:
+ # Timezone-naive datetime provided. Assume it is local.
+ if time.daylight:
+ utcoffset = datetime.timedelta(seconds=time.altzone)
+ else:
+ utcoffset = datetime.timedelta(seconds=time.timezone)
+ # Convert to UTC timezone naive datetime.
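+        # Illustrative: 18:15 at UTC-03:00 has utcoffset() == -3h; adding the
+        # negated offset (+3h) gives 21:15 naive UTC.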
+ trash_at = trash_at.replace(tzinfo=None) + utcoffset
+
+ if trash_at <= datetime.datetime.utcnow():
+ logger.error("--trash-at argument must be set in the future")
+ sys.exit(1)
+ if args.trash_after is not None:
+ if args.trash_after < 1:
+ logger.error("--trash-after argument must be >= 1")
+ sys.exit(1)
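+        # e.g. --trash-after 7 becomes timedelta(seconds=604800), which is
+        # converted to an absolute datetime at save time by _collection_trash_at().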
+ trash_at = datetime.timedelta(seconds=(args.trash_after * 24 * 60 * 60))
+
# Determine the name to use
if args.name:
if args.stream or args.raw:
dry_run=args.dry_run,
follow_links=args.follow_links,
exclude_paths=exclude_paths,
- exclude_names=exclude_names)
+ exclude_names=exclude_names,
+ trash_at=trash_at)
except ResumeCacheConflict:
logger.error("\n".join([
"arv-put: Another process is already uploading this data.",
" --no-resume to start a new resume cache.",
" --no-cache to disable resume cache."]))
sys.exit(1)
- except CollectionUpdateError as error:
+ except (CollectionUpdateError, PathDoesNotExistError) as error:
logger.error("\n".join([
"arv-put: %s" % str(error)]))
sys.exit(1)
except ArvPutUploadNotPending:
# No files pending for upload
sys.exit(0)
- except PathDoesNotExistError as error:
- logger.error("\n".join([
- "arv-put: %s" % str(error)]))
- sys.exit(1)
if not args.dry_run and not args.update_collection and args.resume and writer.bytes_written > 0:
logger.warning("\n".join([
output = ','.join(writer.data_locators())
else:
try:
+ expiration_notice = ""
+ if writer.collection_trash_at() is not None:
+ # Get the local timezone-naive version, and log it with timezone information.
+ if time.daylight:
+ local_trash_at = writer.collection_trash_at().replace(tzinfo=None) - datetime.timedelta(seconds=time.altzone)
+ else:
+ local_trash_at = writer.collection_trash_at().replace(tzinfo=None) - datetime.timedelta(seconds=time.timezone)
+ expiration_notice = ". It will expire on {} {}.".format(
+ local_trash_at.strftime("%Y-%m-%d %H:%M:%S"), time.strftime("%z"))
if args.update_collection:
- logger.info(u"Collection updated: '{}'".format(writer.collection_name()))
+ logger.info(u"Collection updated: '{}'{}".format(
+ writer.collection_name(), expiration_notice))
else:
- logger.info(u"Collection saved as '{}'".format(writer.collection_name()))
+ logger.info(u"Collection saved as '{}'{}".format(
+ writer.collection_name(), expiration_notice))
if args.portable_data_hash:
output = writer.portable_data_hash()
else:
from builtins import range
from functools import partial
import apiclient
+import ciso8601
import datetime
import hashlib
import json
def test_cache_is_locked(self):
with tempfile.NamedTemporaryFile() as cachefile:
- cache = arv_put.ResumeCache(cachefile.name)
+ _ = arv_put.ResumeCache(cachefile.name)
self.assertRaises(arv_put.ResumeCacheConflict,
arv_put.ResumeCache, cachefile.name)
def test_passing_nonexistant_path_raise_exception(self):
uuid_str = str(uuid.uuid4())
with self.assertRaises(arv_put.PathDoesNotExistError):
- cwriter = arv_put.ArvPutUploadJob(["/this/path/does/not/exist/{}".format(uuid_str)])
+ arv_put.ArvPutUploadJob(["/this/path/does/not/exist/{}".format(uuid_str)])
def test_writer_works_without_cache(self):
cwriter = arv_put.ArvPutUploadJob(['/dev/null'], resume=False)
fake_httplib2_response(403), b'{}')
with mock.patch('arvados.collection.Collection.save_new',
new=coll_save_mock):
- with self.assertRaises(SystemExit) as exc_test:
+ with self.assertRaises(SystemExit):
self.call_main_with_args(['/dev/null'])
self.assertRegex(
self.main_stderr.getvalue(), matcher)
BAD_UUID = 'zzzzz-tpzed-zzzzzzzzzzzzzzz'
self.authorize_with('active')
with self.assertRaises(apiclient.errors.HttpError):
- result = arv_put.desired_project_uuid(arv_put.api_client, BAD_UUID,
+ arv_put.desired_project_uuid(arv_put.api_client, BAD_UUID,
0)
def test_short_put_from_stdin(self):
# we're about to create is not present in our test fixture.
manifest_uuid = "00b4e9f40ac4dd432ef89749f1c01e74+47"
with self.assertRaises(apiclient.errors.HttpError):
- notfound = arv_put.api_client.collections().get(
+ arv_put.api_client.collections().get(
uuid=manifest_uuid).execute()
datadir = self.make_tmpdir()
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.ENVIRON)
- (out, err) = p.communicate()
+ (_, err) = p.communicate()
self.assertRegex(err.decode(), r'INFO: Collection saved as ')
self.assertEqual(p.returncode, 0)
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.ENVIRON)
- (out, err) = p.communicate()
+ (_, err) = p.communicate()
self.assertRegex(err.decode(), r'INFO: Creating new cache file at ')
self.assertEqual(p.returncode, 0)
cache_filepath = re.search(r'INFO: Creating new cache file at (.*)',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.ENVIRON)
- (out, err) = p.communicate()
+ (_, err) = p.communicate()
self.assertRegex(
err.decode(),
r'INFO: Cache expired, starting from scratch.*')
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.ENVIRON)
- (out, err) = p.communicate()
+ (_, err) = p.communicate()
self.assertRegex(err.decode(), r'INFO: Creating new cache file at ')
self.assertEqual(p.returncode, 0)
cache_filepath = re.search(r'INFO: Creating new cache file at (.*)',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.ENVIRON)
- (out, err) = p.communicate()
+ (_, err) = p.communicate()
self.assertRegex(
err.decode(),
r'ERROR: arv-put: Resume cache contains invalid signature.*')
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.ENVIRON)
- (out, err) = p.communicate()
+ (_, err) = p.communicate()
self.assertRegex(err.decode(), r'INFO: Creating new cache file at ')
self.assertEqual(p.returncode, 0)
cache_filepath = re.search(r'INFO: Creating new cache file at (.*)',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.ENVIRON)
- (out, err) = p.communicate()
+ (_, err) = p.communicate()
self.assertRegex(
err.decode(),
r'WARNING: Uploaded file \'.*barfile.txt\' access token expired, will re-upload it from scratch')
c = arv_put.api_client.collections().get(uuid=updated_col['uuid']).execute()
self.assertRegex(c['manifest_text'], r'^\..* .*:44:file2\n')
+ def test_put_collection_with_utc_expiring_datetime(self):
+ tmpdir = self.make_tmpdir()
+ trash_at = (datetime.datetime.utcnow() + datetime.timedelta(days=90)).strftime('%Y%m%dT%H%MZ')
+ with open(os.path.join(tmpdir, 'file1'), 'w') as f:
+ f.write('Relaxing in basins at the end of inlets terminates the endless tests from the box')
+ col = self.run_and_find_collection(
+ "",
+ ['--no-progress', '--trash-at', trash_at, tmpdir])
+ self.assertNotEqual(None, col['uuid'])
+ c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()
+ self.assertEqual(ciso8601.parse_datetime(trash_at),
+ ciso8601.parse_datetime(c['trash_at']))
+
+ def test_put_collection_with_timezone_aware_expiring_datetime(self):
+ tmpdir = self.make_tmpdir()
+ trash_at = (datetime.datetime.utcnow() + datetime.timedelta(days=90)).strftime('%Y%m%dT%H%M-0300')
+ with open(os.path.join(tmpdir, 'file1'), 'w') as f:
+ f.write('Relaxing in basins at the end of inlets terminates the endless tests from the box')
+ col = self.run_and_find_collection(
+ "",
+ ['--no-progress', '--trash-at', trash_at, tmpdir])
+ self.assertNotEqual(None, col['uuid'])
+ c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()
+ self.assertEqual(
+ ciso8601.parse_datetime(trash_at).replace(tzinfo=None) + datetime.timedelta(hours=3),
+ ciso8601.parse_datetime(c['trash_at']).replace(tzinfo=None))
+
+ def test_put_collection_with_timezone_naive_expiring_datetime(self):
+ tmpdir = self.make_tmpdir()
+ trash_at = (datetime.datetime.utcnow() + datetime.timedelta(days=90)).strftime('%Y%m%dT%H%M')
+ with open(os.path.join(tmpdir, 'file1'), 'w') as f:
+ f.write('Relaxing in basins at the end of inlets terminates the endless tests from the box')
+ col = self.run_and_find_collection(
+ "",
+ ['--no-progress', '--trash-at', trash_at, tmpdir])
+ self.assertNotEqual(None, col['uuid'])
+ c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()
+ if time.daylight:
+ offset = datetime.timedelta(seconds=time.altzone)
+ else:
+ offset = datetime.timedelta(seconds=time.timezone)
+ self.assertEqual(
+ ciso8601.parse_datetime(trash_at) + offset,
+ ciso8601.parse_datetime(c['trash_at']).replace(tzinfo=None))
+
+ def test_put_collection_with_expiring_date_only(self):
+ tmpdir = self.make_tmpdir()
+ trash_at = '2140-01-01'
+ end_of_day = datetime.timedelta(hours=23, minutes=59, seconds=59)
+ with open(os.path.join(tmpdir, 'file1'), 'w') as f:
+ f.write('Relaxing in basins at the end of inlets terminates the endless tests from the box')
+ col = self.run_and_find_collection(
+ "",
+ ['--no-progress', '--trash-at', trash_at, tmpdir])
+ self.assertNotEqual(None, col['uuid'])
+ c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()
+ if time.daylight:
+ offset = datetime.timedelta(seconds=time.altzone)
+ else:
+ offset = datetime.timedelta(seconds=time.timezone)
+ self.assertEqual(
+ ciso8601.parse_datetime(trash_at) + end_of_day + offset,
+ ciso8601.parse_datetime(c['trash_at']).replace(tzinfo=None))
+
+ def test_put_collection_with_invalid_absolute_expiring_datetimes(self):
+ cases = ['2100', '210010', '2100-10', '2100-Oct']
+ tmpdir = self.make_tmpdir()
+ with open(os.path.join(tmpdir, 'file1'), 'w') as f:
+ f.write('Relaxing in basins at the end of inlets terminates the endless tests from the box')
+ for test_datetime in cases:
+ with self.assertRaises(AssertionError):
+ self.run_and_find_collection(
+ "",
+ ['--no-progress', '--trash-at', test_datetime, tmpdir])
+
+ def test_put_collection_with_relative_expiring_datetime(self):
+ expire_after = 7
+ dt_before = datetime.datetime.utcnow() + datetime.timedelta(days=expire_after)
+ tmpdir = self.make_tmpdir()
+ with open(os.path.join(tmpdir, 'file1'), 'w') as f:
+ f.write('Relaxing in basins at the end of inlets terminates the endless tests from the box')
+ col = self.run_and_find_collection(
+ "",
+ ['--no-progress', '--trash-after', str(expire_after), tmpdir])
+ self.assertNotEqual(None, col['uuid'])
+ dt_after = datetime.datetime.utcnow() + datetime.timedelta(days=expire_after)
+ c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()
+ trash_at = ciso8601.parse_datetime(c['trash_at']).replace(tzinfo=None)
+ self.assertTrue(dt_before < trash_at)
+ self.assertTrue(dt_after > trash_at)
+
+ def test_put_collection_with_invalid_relative_expiring_datetime(self):
+ expire_after = 0 # Must be >= 1
+ tmpdir = self.make_tmpdir()
+ with open(os.path.join(tmpdir, 'file1'), 'w') as f:
+ f.write('Relaxing in basins at the end of inlets terminates the endless tests from the box')
+ with self.assertRaises(AssertionError):
+ self.run_and_find_collection(
+ "",
+ ['--no-progress', '--trash-after', str(expire_after), tmpdir])
+
def test_upload_directory_reference_without_trailing_slash(self):
tmpdir1 = self.make_tmpdir()
tmpdir2 = self.make_tmpdir()
uglifier (~> 2.0)
BUNDLED WITH
- 1.16.6
+ 1.17.3
conditions[0] << " and #{ar_table_name}.#{attr} in (?)"
conditions << value
end
- elsif value.is_a? String or value.is_a? Fixnum or value == true or value == false
+ elsif value.is_a? String or value.is_a? Integer or value == true or value == false
conditions[0] << " and #{ar_table_name}.#{attr}=?"
conditions << value
elsif value.is_a? Hash
begin
must_git gitdir, "branch"
rescue GitError => e
- raise unless /Not a git repository/ =~ e.to_s
+ raise unless /Not a git repository/i =~ e.to_s
# OK, this just means we need to create a blank cache repository
# before fetching.
FileUtils.mkdir_p gitdir
--- /dev/null
+PATH
+ remote: .
+ specs:
+ arvados-login-sync (1.3.3.20190528194843)
+ arvados (~> 1.3.0, >= 1.3.0)
+
+GEM
+ remote: https://rubygems.org/
+ specs:
+ activesupport (5.0.7.2)
+ concurrent-ruby (~> 1.0, >= 1.0.2)
+ i18n (>= 0.7, < 2)
+ minitest (~> 5.1)
+ tzinfo (~> 1.1)
+ addressable (2.6.0)
+ public_suffix (>= 2.0.2, < 4.0)
+ andand (1.3.3)
+ arvados (1.3.3.20190320201707)
+ activesupport (>= 3)
+ andand (~> 1.3, >= 1.3.3)
+ arvados-google-api-client (>= 0.7, < 0.8.9)
+ i18n (~> 0)
+ json (>= 1.7.7, < 3)
+ jwt (>= 0.1.5, < 2)
+ arvados-google-api-client (0.8.7.2)
+ activesupport (>= 3.2, < 5.1)
+ addressable (~> 2.3)
+ autoparse (~> 0.3)
+ extlib (~> 0.9)
+ faraday (~> 0.9)
+ googleauth (~> 0.3)
+ launchy (~> 2.4)
+ multi_json (~> 1.10)
+ retriable (~> 1.4)
+ signet (~> 0.6)
+ autoparse (0.3.3)
+ addressable (>= 2.3.1)
+ extlib (>= 0.9.15)
+ multi_json (>= 1.0.0)
+ concurrent-ruby (1.1.5)
+ extlib (0.9.16)
+ faraday (0.15.4)
+ multipart-post (>= 1.2, < 3)
+ googleauth (0.8.1)
+ faraday (~> 0.12)
+ jwt (>= 1.4, < 3.0)
+ memoist (~> 0.16)
+ multi_json (~> 1.11)
+ os (>= 0.9, < 2.0)
+ signet (~> 0.7)
+ i18n (0.9.5)
+ concurrent-ruby (~> 1.0)
+ json (2.2.0)
+ jwt (1.5.6)
+ launchy (2.4.3)
+ addressable (~> 2.3)
+ memoist (0.16.0)
+ metaclass (0.0.4)
+ minitest (5.11.3)
+ mocha (1.8.0)
+ metaclass (~> 0.0.1)
+ multi_json (1.13.1)
+ multipart-post (2.1.1)
+ os (1.0.1)
+ public_suffix (3.0.3)
+ rake (12.3.2)
+ retriable (1.4.1)
+ signet (0.11.0)
+ addressable (~> 2.3)
+ faraday (~> 0.9)
+ jwt (>= 1.5, < 3.0)
+ multi_json (~> 1.10)
+ thread_safe (0.3.6)
+ tzinfo (1.2.5)
+ thread_safe (~> 0.1)
+
+PLATFORMS
+ ruby
+
+DEPENDENCIES
+ arvados-login-sync!
+ minitest (>= 5.0.0)
+ mocha (>= 1.5.0)
+ rake
+
+BUNDLED WITH
+ 1.17.3