end
def render_markup(markup)
- raw RedCloth.new(markup.to_s).to_html(:refs_arvados, :textile) if markup
+ sanitize(raw(RedCloth.new(markup.to_s).to_html(:refs_arvados, :textile))) if markup
end
def human_readable_bytes_html(n)
end
return h(n)
- #raw = n.to_s
- #cooked = ''
- #while raw.length > 3
- # cooked = ',' + raw[-3..-1] + cooked
- # raw = raw[0..-4]
- #end
- #cooked = raw + cooked
end
def resource_class_for_uuid(attrvalue, opts={})
end
# Keep locators are expected to be of the form \"...<pdh/file_path>\"
- JSON_KEEP_LOCATOR_REGEXP = /(.*)(([0-9a-f]{32}\+\d+)(.*)\"(.*))/
+ JSON_KEEP_LOCATOR_REGEXP = /([0-9a-f]{32}\+\d+[^'"]*?)(?=['"]|\z|$)/
def keep_locator_in_json str
- JSON_KEEP_LOCATOR_REGEXP.match str
+ # Return a list of all matches
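+ # (e.g. a line containing "keep:fa7aeb5140e2848d39b416daeef4ffc5+45/foo"
+ # yields ["fa7aeb5140e2848d39b416daeef4ffc5+45/foo"])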
+ str.scan(JSON_KEEP_LOCATOR_REGEXP).flatten
end
private
<% data_height = data_height || 100 %>
<div style="max-height:<%=data_height%>px; overflow:auto;">
- <% text_data.each_line do |l| %>
- <% text_part = l %>
- <% match = keep_locator_in_json l %>
+ <% text_data.each_line do |line| %>
+ <% matches = keep_locator_in_json line %>
- <%
- if match
- text_part = match[1]
- rindex = match[2].rindex('"'); match2 = match[2][0..rindex-1]
- quote_char = '"'
+ <% if matches.nil? or matches.empty? %>
+ <span style="white-space: pre-wrap; margin: none;"><%= line %></span>
+ <% else
+ subs = []
+ matches.uniq.each do |loc|
+ pdh, filename = loc.split('/', 2)
- pdh_readable = object_readable(match2)
- file_link = ''
- if pdh_readable and match[4].size > 0
- link_params = {controller: 'collections', action: 'show_file', uuid: match[3], file: match[4][1..-1]}
- preview_allowed = preview_allowed_for(match[4])
- if preview_allowed
- file_link = link_to(raw(match[4]), link_params.merge(disposition: 'inline'))
- else
- file_link = link_to(raw(match[4]), link_params.merge(disposition: 'attachment'))
+ if object_readable(pdh)
+ # Add PDH link
+ replacement = link_to_arvados_object_if_readable(pdh, pdh, friendly_name: true)
+ if filename
+ link_params = {controller: 'collections', action: 'show_file', uuid: pdh, file: filename}
+ if preview_allowed_for(filename)
+ params = {disposition: 'inline'}
+ else
+ params = {disposition: 'attachment'}
+ end
+ file_link = link_to(raw("/"+filename), link_params.merge(params))
+ # Add file link
+ replacement << file_link
end
+ # Add link(s) substitution
+ subs << [loc, replacement]
end
end
- %>
-
- <span style="white-space: pre-wrap; margin: none;"><%= text_part %><% if match %><% if pdh_readable then %><%= link_to_arvados_object_if_readable(match[3], match[3], friendly_name: true) %><%= file_link%><% else %><%= match2%><% end %><%=quote_char+match[5]%><br/><% end %></span>
+ # Replace all readable locators with links
+ subs.each do |loc, link|
+ line.gsub!(loc, link)
+ end %>
+ <span style="white-space: pre-wrap; margin: none;"><%= raw line %></span>
+ <% end %>
<% end %>
</div>
assert_response :success
assert_match /hello/, @response.body
+ assert_includes @response.body, "href=\"\/collections/fa7aeb5140e2848d39b416daeef4ffc5+45/baz\?" # locator on command
+ assert_includes @response.body, "href=\"\/collections/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar\?" # locator on command
assert_includes @response.body, "href=\"\/collections/fa7aeb5140e2848d39b416daeef4ffc5+45/foo" # mount input1
assert_includes @response.body, "href=\"\/collections/fa7aeb5140e2848d39b416daeef4ffc5+45/bar" # mount input2
assert_includes @response.body, "href=\"\/collections/1fd08fc162a5c6413070a8bd0bffc818+150" # mount workflow
project = api_fixture('groups')['aproject']
use_token :active
found = Group.find(project['uuid'])
- found.description = 'Textile description with link to home page <a href="/">take me home</a>.'
+ found.description = '<b>Textile</b> description with link to home page <a href="/">take me home</a>.'
found.save!
get(:show, {id: project['uuid']}, session_for(:active))
- assert_includes @response.body, 'Textile description with link to home page <a href="/">take me home</a>.'
+ assert_includes @response.body, '<b>Textile</b> description with link to home page <a href="/">take me home</a>.'
+ end
+
+ test "find a project and edit description to unsafe html description" do
+ project = api_fixture('groups')['aproject']
+ use_token :active
+ found = Group.find(project['uuid'])
+ found.description = 'Textile description with unsafe script tag <script language="javascript">alert("Hello there")</script>.'
+ found.save!
+ get(:show, {id: project['uuid']}, session_for(:active))
+ assert_includes @response.body, 'Textile description with unsafe script tag alert("Hello there").'
end
test "find a project and edit description to textile description with link to object" do
assert_text 'created_at'
if cancelable
- assert_text 'priority: 1' if type.include?('container')
+ assert_text 'priority: 501' if type.include?('container')
if type.include?('pipeline')
assert_selector 'a', text: 'Pause'
first('a,link', text: 'Pause').click
#distribution(s)|name|version|iteration|type|architecture|extra fpm arguments
debian8,debian9,centos7|python-gflags|2.0|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|google-api-python-client|1.6.2|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|google-api-python-client|1.6.2|2|python|all
debian8,debian9,ubuntu1404,centos7|oauth2client|1.5.2|2|python|all
debian8,debian9,ubuntu1404,centos7|pyasn1|0.1.7|2|python|all
debian8,debian9,ubuntu1404,centos7|pyasn1-modules|0.0.5|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|rsa|3.4.2|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|uritemplate|3.0.0|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|httplib2|0.9.2|3|python|all
-debian8,debian9,centos7|ws4py|0.3.5|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|rsa|3.4.2|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|uritemplate|3.0.0|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|httplib2|0.9.2|3|python|all
+debian8,debian9,centos7,ubuntu1404,ubuntu1604|ws4py|0.4.2|2|python|all
debian8,debian9,centos7|pykka|1.2.1|2|python|all
debian8,debian9,ubuntu1404,centos7|six|1.10.0|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|ciso8601|1.0.6|3|python|amd64
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|ciso8601|1.0.6|3|python|amd64
debian8,debian9,centos7|pycrypto|2.6.1|3|python|amd64
-debian8,debian9,ubuntu1404,ubuntu1604|backports.ssl_match_hostname|3.5.0.1|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|llfuse|1.2|3|python|amd64
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804|backports.ssl_match_hostname|3.5.0.1|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|llfuse|1.2|3|python|amd64
debian8,debian9,ubuntu1404,centos7|pycurl|7.19.5.3|3|python|amd64
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|pyyaml|3.12|2|python|amd64
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|rdflib|4.2.2|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|pyyaml|3.12|2|python|amd64
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|rdflib|4.2.2|2|python|all
debian8,debian9,ubuntu1404,centos7|shellescape|3.4.1|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|mistune|0.7.3|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|typing|3.5.3.0|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|avro|1.8.1|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|mistune|0.7.3|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|typing|3.6.4|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|avro|1.8.1|2|python|all
debian8,debian9,ubuntu1404,centos7|ruamel.ordereddict|0.4.9|2|python|amd64
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|cachecontrol|0.11.7|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|pathlib2|2.3.2|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|scandir|1.7|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|docker-py|1.7.2|2|python3|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|cachecontrol|0.11.7|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|pathlib2|2.3.2|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|scandir|1.7|2|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|docker-py|1.7.2|2|python3|all
debian8,debian9,centos7|six|1.10.0|2|python3|all
debian8,debian9,ubuntu1404,centos7|requests|2.12.4|2|python3|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|websocket-client|0.37.0|2|python3|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|websocket-client|0.37.0|2|python3|all
ubuntu1404|requests|2.4.3|2|python|all
centos7|contextlib2|0.5.4|2|python|all
centos7|isodate|0.5.4|2|python|all
centos7|pbr|0.11.1|2|python|all
centos7|pyparsing|2.1.10|2|python|all
centos7|keepalive|0.5|2|python|all
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|lockfile|0.12.2|2|python|all|--epoch 1
-debian8,debian9,ubuntu1404,ubuntu1604,centos7|subprocess32|3.5.1|2|python|all
+centos7|networkx|1.11|0|python|all
+centos7|psutil|5.0.1|0|python|all
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|lockfile|0.12.2|2|python|all|--epoch 1
+debian8,debian9,ubuntu1404,ubuntu1604,ubuntu1804,centos7|subprocess32|3.5.1|2|python|all
all|ruamel.yaml|0.14.12|2|python|amd64|--python-setup-py-arguments --single-version-externally-managed
all|cwltest|1.0.20180518074130|4|python|all|--depends 'python-futures >= 3.0.5' --depends 'python-subprocess32 >= 3.5.0'
all|junit-xml|1.8|3|python|all
all|future|0.16.0|2|python|all
all|future|0.16.0|2|python3|all
all|mypy-extensions|0.3.0|1|python|all
+all|prov|1.5.1|0|python|all
+all|bagit|1.6.4|0|python|all
+all|typing-extensions|3.6.5|0|python|all
#
# SPDX-License-Identifier: AGPL-3.0
-all: centos7/generated debian8/generated debian9/generated ubuntu1204/generated ubuntu1404/generated ubuntu1604/generated
+all: centos7/generated debian8/generated debian9/generated ubuntu1404/generated ubuntu1604/generated ubuntu1804/generated
centos7/generated: common-generated-all
test -d centos7/generated || mkdir centos7/generated
test -d debian9/generated || mkdir debian9/generated
cp -rlt debian9/generated common-generated/*
-ubuntu1204/generated: common-generated-all
- test -d ubuntu1204/generated || mkdir ubuntu1204/generated
- cp -rlt ubuntu1204/generated common-generated/*
-
ubuntu1404/generated: common-generated-all
test -d ubuntu1404/generated || mkdir ubuntu1404/generated
cp -rlt ubuntu1404/generated common-generated/*
test -d ubuntu1604/generated || mkdir ubuntu1604/generated
cp -rlt ubuntu1604/generated common-generated/*
+ubuntu1804/generated: common-generated-all
+ test -d ubuntu1804/generated || mkdir ubuntu1804/generated
+ cp -rlt ubuntu1804/generated common-generated/*
+
GOTARBALL=go1.10.1.linux-amd64.tar.gz
NODETARBALL=node-v6.11.2-linux-x64.tar.xz
RUN yum -q -y install make automake gcc gcc-c++ libyaml-devel patch readline-devel zlib-devel libffi-devel openssl-devel bzip2 libtool bison sqlite-devel rpm-build git perl-ExtUtils-MakeMaker libattr-devel nss-devel libcurl-devel which tar unzip scl-utils centos-release-scl postgresql-devel python-devel python-setuptools fuse-devel xz-libs git
# Install RVM
-RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+RUN gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
curl -L https://get.rvm.io | bash -s stable && \
/usr/local/rvm/bin/rvm install 2.3 && \
/usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
/usr/local/rvm/bin/rvm-exec default gem install bundler && \
- /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
+ /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
# Install golang binary
ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev python-pip unzip
# Install RVM
-RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+RUN gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
curl -L https://get.rvm.io | bash -s stable && \
/usr/local/rvm/bin/rvm install 2.3 && \
/usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
/usr/local/rvm/bin/rvm-exec default gem install bundler && \
- /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
+ /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
# Install golang binary
ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
/usr/local/rvm/bin/rvm install 2.3 && \
/usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
/usr/local/rvm/bin/rvm-exec default gem install bundler && \
- /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
+ /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
# Install golang binary
ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools libcurl4-gnutls-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip
# Install RVM
-RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+RUN gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
curl -L https://get.rvm.io | bash -s stable && \
/usr/local/rvm/bin/rvm install 2.3 && \
/usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
/usr/local/rvm/bin/rvm-exec default gem install bundler && \
- /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
+ /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
# Install golang binary
ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-setuptools libcurl4-gnutls-dev libgnutls-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip tzdata
# Install RVM
-RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+RUN gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
curl -L https://get.rvm.io | bash -s stable && \
/usr/local/rvm/bin/rvm install 2.3 && \
/usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
/usr/local/rvm/bin/rvm-exec default gem install bundler && \
- /usr/local/rvm/bin/rvm-exec default gem install cure-fpm --version 1.6.0b
+ /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
# Install golang binary
ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FROM ubuntu:bionic
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Install dependencies.
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python2.7-dev python3 python-setuptools python3-pip libcurl4-gnutls-dev libgnutls28-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip tzdata
+
+# Install RVM
+RUN gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+ curl -L https://get.rvm.io | bash -s stable && \
+ /usr/local/rvm/bin/rvm install 2.3 && \
+ /usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
+ /usr/local/rvm/bin/rvm-exec default gem install bundler && \
+ /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.10.2
+
+# Install golang binary
+ADD generated/go1.10.1.linux-amd64.tar.gz /usr/local/
+RUN ln -s /usr/local/go/bin/go /usr/local/bin/
+
+# Install nodejs and npm
+ADD generated/node-v6.11.2-linux-x64.tar.xz /usr/local/
+RUN ln -s /usr/local/node-v6.11.2-linux-x64/bin/* /usr/local/bin/
+
+# Old versions of setuptools cannot build a schema-salad package.
+RUN pip install --upgrade setuptools
+
+RUN git clone --depth 1 git://git.curoverse.com/arvados.git /tmp/arvados && cd /tmp/arvados/services/api && /usr/local/rvm/bin/rvm-exec default bundle && cd /tmp/arvados/apps/workbench && /usr/local/rvm/bin/rvm-exec default bundle && rm -rf /tmp/arvados
+
+ENV WORKSPACE /arvados
+CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "ubuntu1804"]
# Install RVM
RUN touch /var/lib/rpm/* && \
- gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+ gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
curl -L https://get.rvm.io | bash -s stable && \
/usr/local/rvm/bin/rvm install 2.3 && \
/usr/local/rvm/bin/rvm alias create default ruby-2.3 && \
# Install RVM
RUN apt-get update && \
apt-get -y install --no-install-recommends curl ca-certificates && \
- gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+ gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
curl -L https://get.rvm.io | bash -s stable && \
/usr/local/rvm/bin/rvm install 2.3 && \
/usr/local/rvm/bin/rvm alias create default ruby-2.3
# Install dependencies and RVM
RUN apt-get update && \
apt-get -y install --no-install-recommends curl ca-certificates python2.7-dev python3 python-setuptools python3-setuptools libcurl4-gnutls-dev curl git libattr1-dev libfuse-dev libpq-dev python-pip unzip binutils build-essential ca-certificates && \
- gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+ gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
curl -L https://get.rvm.io | bash -s stable && \
/usr/local/rvm/bin/rvm install 2.3 && \
/usr/local/rvm/bin/rvm alias create default ruby-2.3
# Install RVM
RUN apt-get update && \
apt-get -y install --no-install-recommends curl ca-certificates && \
- gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+ gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
curl -L https://get.rvm.io | bash -s stable && \
/usr/local/rvm/bin/rvm install 2.3 && \
/usr/local/rvm/bin/rvm alias create default ruby-2.3
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+FROM ubuntu:bionic
+MAINTAINER Ward Vandewege <wvandewege@veritasgenetics.com>
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Install RVM
+RUN apt-get update && \
+ apt-get -y install --no-install-recommends curl ca-certificates gnupg2 && \
+ gpg --keyserver ha.pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+ curl -L https://get.rvm.io | bash -s stable && \
+ /usr/local/rvm/bin/rvm install 2.3 && \
+ /usr/local/rvm/bin/rvm alias create default ruby-2.3
+
+# udev daemon can't start in a container, so don't try.
+RUN mkdir -p /etc/udev/disabled
+
+RUN echo "deb [trusted=yes] file:///arvados/packages/ubuntu1804/ /" >>/etc/apt/sources.list
+
+# Add preferences file for the Arvados packages. This pins Arvados
+# packages at priority 501, so that older python dependency versions
+# are preferred in those cases where we need them
+ADD etc-apt-preferences.d-arvados /etc/apt/preferences.d/arvados
--- /dev/null
+Package: *
+Pin: release o=Arvados
+Pin-Priority: 501
export ARV_PACKAGES_DIR="/arvados/packages/$target"
-dpkg-deb -x $(ls -t "$ARV_PACKAGES_DIR/$1"_*.deb | head -n1) .
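+# Use the package in the top-level packages directory if one is present;
+# otherwise fall back to the copy that has already been moved to processed/.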
+if [[ -f $(ls -t "$ARV_PACKAGES_DIR/$1"_*.deb 2>/dev/null | head -n1) ]] ; then
+ debpkg=$(ls -t "$ARV_PACKAGES_DIR/$1"_*.deb | head -n1)
+else
+ debpkg=$(ls -t "$ARV_PACKAGES_DIR/processed/$1"_*.deb | head -n1)
+fi
+
+dpkg-deb -x $debpkg .
while read so && [ -n "$so" ]; do
echo
--- /dev/null
+deb-common-test-packages.sh
\ No newline at end of file
fi
case "$TARGET" in
- debian8)
+ debian*)
FORMAT=deb
;;
- debian9)
+ ubuntu*)
FORMAT=deb
;;
- ubuntu1204)
- FORMAT=deb
- ;;
- ubuntu1404)
- FORMAT=deb
- ;;
- ubuntu1604)
- FORMAT=deb
- ;;
- centos7)
+ centos*)
FORMAT=rpm
;;
*)
## End Debian Python defaults.
case "$TARGET" in
- debian8)
+ debian*)
FORMAT=deb
;;
- debian9)
+ ubuntu*)
FORMAT=deb
;;
- ubuntu1404)
- FORMAT=deb
- ;;
- ubuntu1604)
- FORMAT=deb
- ;;
- centos7)
+ centos*)
FORMAT=rpm
PYTHON2_PACKAGE=$(rpm -qf "$(which python$PYTHON2_VERSION)" --queryformat '%{NAME}\n')
PYTHON2_PKG_PREFIX=$PYTHON2_PACKAGE
exit 1
fi
-EASY_INSTALL2=$(find_easy_install -$PYTHON2_VERSION "")
-EASY_INSTALL3=$(find_easy_install -$PYTHON3_VERSION 3)
+PYTHON2_FPM_INSTALLER=(--python-easyinstall "$(find_python_program easy_install-$PYTHON2_VERSION easy_install)")
+install3=$(find_python_program easy_install-$PYTHON3_VERSION easy_install3 pip-$PYTHON3_VERSION pip3)
+if [[ $install3 =~ easy_ ]]; then
+ PYTHON3_FPM_INSTALLER=(--python-easyinstall "$install3")
+else
+ PYTHON3_FPM_INSTALLER=(--python-pip "$install3")
+fi
RUN_BUILD_PACKAGES_PATH="`dirname \"$0\"`"
RUN_BUILD_PACKAGES_PATH="`( cd \"$RUN_BUILD_PACKAGES_PATH\" && pwd )`" # absolutized and normalized
echo "$@" >"$STDOUT_IF_DEBUG"
}
-find_easy_install() {
- for version_suffix in "$@"; do
- if "easy_install$version_suffix" --version >/dev/null 2>&1; then
- echo "easy_install$version_suffix"
+find_python_program() {
+ prog="$1"
+ shift
+    for prog in "$prog" "$@"; do
+ if "$prog" --version >/dev/null 2>&1; then
+ echo "$prog"
return 0
fi
done
cat >&2 <<EOF
$helpmessage
-Error: easy_install$1 (from Python setuptools module) not found
+Error: $prog (from Python setuptools module) not found
EOF
exit 1
declare $(format_last_commit_here "git_ts=%ct git_hash=%h")
ARVADOS_BUILDING_VERSION="$(git describe --abbrev=0).$(date -ud "@$git_ts" +%Y%m%d%H%M%S)"
echo "$ARVADOS_BUILDING_VERSION"
-}
+}
nohash_version_from_git() {
if [[ -n "$ARVADOS_BUILDING_VERSION" ]]; then
# Get the list of packages from the repos
if [[ "$FORMAT" == "deb" ]]; then
- debian_distros="jessie precise stretch trusty wheezy xenial"
+ debian_distros="jessie precise stretch trusty wheezy xenial bionic"
for D in ${debian_distros}; do
if [ ${pkgname:0:3} = "lib" ]; then
repo_subdir=${pkgname:0:1}
fi
- repo_pkg_list=$(curl -o - http://apt.arvados.org/pool/${D}/main/${repo_subdir}/)
+ repo_pkg_list=$(curl -s -o - http://apt.arvados.org/pool/${D}/main/${repo_subdir}/)
echo ${repo_pkg_list} |grep -q ${complete_pkgname}
- if [ $? -eq 0 ]; then
+ if [ $? -eq 0 ] ; then
echo "Package $complete_pkgname exists, not rebuilding!"
curl -o ./${complete_pkgname} http://apt.arvados.org/pool/${D}/main/${repo_subdir}/${complete_pkgname}
return 1
+ elif test -f "$WORKSPACE/packages/$TARGET/processed/${complete_pkgname}" ; then
+ echo "Package $complete_pkgname exists, not rebuilding!"
+ return 1
else
echo "Package $complete_pkgname not found, building"
return 0
# Make sure we build with that for consistency.
python=python2.7
set -- "$@" --python-bin python2.7 \
- --python-easyinstall "$EASY_INSTALL2" \
+    "${PYTHON2_FPM_INSTALLER[@]}" \
--python-package-name-prefix "$PYTHON2_PKG_PREFIX" \
--prefix "$PYTHON2_PREFIX" \
--python-install-lib "$PYTHON2_INSTALL_LIB" \
PACKAGE_TYPE=python
python=python3
set -- "$@" --python-bin python3 \
- --python-easyinstall "$EASY_INSTALL3" \
+ "${PYTHON3_FPM_INSTALLER[@]}" \
--python-package-name-prefix "$PYTHON3_PKG_PREFIX" \
--prefix "$PYTHON3_PREFIX" \
--python-install-lib "$PYTHON3_INSTALL_LIB" \
declare -a failures
declare -A skip
+declare -A only
declare -A testargs
skip[apps/workbench_profile]=1
# nodemanager_integration tests are not reliable, see #12061.
skip[$1]=1; shift
;;
--only)
- only="$1"; skip[$1]=""; shift
+ only[$1]=1; skip[$1]=""; shift
;;
--short)
short=1
# required when testing it. Skip that step if it is not needed.
NEED_SDK_R=true
-if [[ ! -z "${only}" && "${only}" != "sdk/R" ]]; then
+if [[ ${#only[@]} -ne 0 ]] &&
+ [[ -z "${only['sdk/R']}" && -z "${only['doc']}" ]]; then
NEED_SDK_R=false
fi
-if [[ ! -z "${skip}" && "${skip}" == "sdk/R" ]]; then
+if [[ ${skip["sdk/R"]} == 1 && ${skip["doc"]} == 1 ]]; then
NEED_SDK_R=false
fi
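+# Note: "only" and "skip" are associative arrays keyed by suite name, so
+# --only and --skip may each be given more than once, e.g. (hypothetical
+# invocation): ./run-tests.sh --only sdk/R --only doc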
+if [[ $NEED_SDK_R == false ]]; then
+ echo "R SDK not needed, it will not be installed."
+fi
+
start_services() {
echo 'Starting API, keepproxy, keep-web, ws, arv-git-httpd, and nginx ssl proxy...'
if [[ ! -d "$WORKSPACE/services/api/log" ]]; then
;;
esac
if [[ -z "${skip[$suite]}" && -z "${skip[$1]}" && \
- (-z "${only}" || "${only}" == "${suite}" || \
- "${only}" == "${1}") ||
- "${only}" == "${2}" ]]; then
+ (${#only[@]} -eq 0 || ${only[$suite]} -eq 1 || \
+ ${only[$1]} -eq 1) ||
+ ${only[$2]} -eq 1 ]]; then
retry do_test_once ${@}
else
title "Skipping ${1} tests"
Here we create a default project for the standard Arvados Docker images, and give all users read access to it. The project is owned by the system user.
<notextile>
-<pre><code>~$ <span class="userinput">project_uuid=`arv --format=uuid group create --group "{\"owner_uuid\":\"$prefix-tpzed-000000000000000\", \"name\":\"Arvados Standard Docker Images\"}"`</span>
+<pre><code>~$ <span class="userinput">uuid_prefix=`arv --format=uuid user current | cut -d- -f1`</span>
+~$ <span class="userinput">project_uuid=`arv --format=uuid group create --group "{\"owner_uuid\":\"$uuid_prefix-tpzed-000000000000000\", \"name\":\"Arvados Standard Docker Images\"}"`</span>
~$ <span class="userinput">echo "Arvados project uuid is '$project_uuid'"</span>
~$ <span class="userinput">read -rd $'\000' newlink <<EOF; arv link create --link "$newlink"</span>
<span class="userinput">{
SPDX-License-Identifier: CC-BY-SA-3.0
{% endcomment %}
-Metrics endpoints are found at @/status.json@ on many Arvados services. The purpose of metrics are to provide statistics about the operation of a service, suitable for diagnosing how well a service is performing under load.
+Some Arvados services publish Prometheus/OpenMetrics-compatible metrics at @/metrics@, and some provide additional runtime status at @/status.json@. Metrics can help you understand how components perform under load, find performance bottlenecks, and detect and diagnose problems.
-To access metrics endpoints, services must be configured with a "management token":management-token.html .
+To access metrics endpoints, services must be configured with a "management token":management-token.html. When accessing a metrics endpoint, prefix the management token with @"Bearer "@ and supply it in the @Authorization@ request header.
+
+<pre>curl -sfH "Authorization: Bearer your_management_token_goes_here" "https://0.0.0.0:25107/status.json"
+</pre>
+
+h2. Keep-web
+
+Keep-web exports metrics at @/metrics@ -- e.g., @https://collections.zzzzz.arvadosapi.com/metrics@.
+
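+For example, to fetch keep-web's metrics with the management token as above (hostname as in the example URL):
+
+<pre>curl -sfH "Authorization: Bearer your_management_token_goes_here" "https://collections.zzzzz.arvadosapi.com/metrics"
+</pre>
+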
+table(table table-bordered table-condensed).
+|_. Name|_. Type|_. Description|
+|request_duration_seconds|summary|elapsed time between receiving a request and sending the last byte of the response body (segmented by HTTP request method and response status code)|
+|time_to_status_seconds|summary|elapsed time between receiving a request and sending the HTTP response status code (segmented by HTTP request method and response status code)|
+
+Metrics in the @arvados_keepweb_collectioncache@ namespace report keep-web's internal cache of Arvados collection metadata.
+
+table(table table-bordered table-condensed).
+|_. Name|_. Type|_. Description|
+|arvados_keepweb_collectioncache_requests|counter|cache lookups|
+|arvados_keepweb_collectioncache_api_calls|counter|outgoing API calls|
+|arvados_keepweb_collectioncache_permission_hits|counter|collection-to-permission cache hits|
+|arvados_keepweb_collectioncache_pdh_hits|counter|UUID-to-PDH cache hits|
+|arvados_keepweb_collectioncache_hits|counter|PDH-to-manifest cache hits|
+|arvados_keepweb_collectioncache_cached_manifests|gauge|number of collections in the cache|
+|arvados_keepweb_collectioncache_cached_manifest_bytes|gauge|memory consumed by cached collection manifests|
h2. Keepstore
+Keepstore exports metrics at @/status.json@ -- e.g., @http://keep0.zzzzz.arvadosapi.com:25107/status.json@.
+
h3. Root
table(table table-bordered table-condensed).
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the Group to untrash.|path||
|ensure_unique_name|boolean (default false)|Rename project uniquely if untrashing it would fail with a unique name conflict.|query||
+
+h3. shared
+
+This endpoint returns the top-level set of groups to which access is granted through a chain of one or more permission links, rather than through direct ownership by the current user account. This is useful for clients that wish to browse the projects the user has permission to read but which are not part of the "home" project tree.
+
+When called with "include=owner_uuid" this also returns (in the "included" field) the objects that own those projects (users or non-project groups).
+
+Specifically, the logic is:
+
+<pre>
+select groups that are readable by current user AND
+ (the owner_uuid is a user (but not the current user) OR
+ the owner_uuid is not readable by the current user OR
+ the owner_uuid is a group but group_class is not a project)
+</pre>
+
+In addition to the "include" parameter this endpoint also supports the same parameters as the "list method.":{{site.baseurl}}/api/methods.html#index
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|include|string|If provided with the value "owner_uuid", this will return owner objects in the "included" field of the response.|query|?include=owner_uuid|
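+
+For example, a request for the shared groups and their owning objects might look like this (the cluster hostname is a placeholder, and @ARVADOS_API_TOKEN@ must hold a valid token):
+
+<pre>curl -s -H "Authorization: Bearer $ARVADOS_API_TOKEN" \
+  "https://zzzzz.arvadosapi.com/arvados/v1/groups/shared?include=owner_uuid"
+</pre>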
Here we create a repository object which will be used to set up a hosted clone of the arvados repository on this cluster.
<notextile>
-<pre><code>~$ <span class="userinput">prefix=`arv --format=uuid user current | cut -d- -f1`</span>
-~$ <span class="userinput">echo "Site prefix is '$prefix'"</span>
-~$ <span class="userinput">all_users_group_uuid="$prefix-j7d0g-fffffffffffffff"</span>
-~$ <span class="userinput">repo_uuid=`arv --format=uuid repository create --repository "{\"owner_uuid\":\"$prefix-tpzed-000000000000000\", \"name\":\"arvados\"}"`</span>
+<pre><code>~$ <span class="userinput">uuid_prefix=`arv --format=uuid user current | cut -d- -f1`</span>
+~$ <span class="userinput">echo "Site prefix is '$uuid_prefix'"</span>
+~$ <span class="userinput">all_users_group_uuid="$uuid_prefix-j7d0g-fffffffffffffff"</span>
+~$ <span class="userinput">repo_uuid=`arv --format=uuid repository create --repository "{\"owner_uuid\":\"$uuid_prefix-tpzed-000000000000000\", \"name\":\"arvados\"}"`</span>
~$ <span class="userinput">echo "Arvados repository uuid is '$repo_uuid'"</span>
</code></pre></notextile>
Use this command to register each keepstore server you have installed. Make sure to update the @service_host@ value.
<notextile>
-<pre><code>~$ <span class="userinput">prefix=`arv --format=uuid user current | cut -d- -f1`</span>
-~$ <span class="userinput">echo "Site prefix is '$prefix'"</span>
+<pre><code>~$ <span class="userinput">uuid_prefix=`arv --format=uuid user current | cut -d- -f1`</span>
+~$ <span class="userinput">echo "Site prefix is '$uuid_prefix'"</span>
~$ <span class="userinput">read -rd $'\000' keepservice <<EOF; arv keep_service create --keep-service "$keepservice"</span>
<span class="userinput">{
"service_host":"<strong>keep0.$uuid_prefix.your.domain</strong>",
s.executables << "arv-crunch-job"
s.executables << "arv-tag"
s.required_ruby_version = '>= 2.1.0'
- s.add_runtime_dependency 'arvados', '~> 1.1.0', '>= 1.1.4'
+ s.add_runtime_dependency 'arvados', '~> 1.2.0', '>= 1.2.0'
# Our google-api-client dependency used to be < 0.9, but that could be
# satisfied by the buggy 0.9.pre*. https://dev.arvados.org/issues/9213
- s.add_runtime_dependency 'google-api-client', '~> 0.6', '>= 0.6.3', '<0.8.9'
+ s.add_runtime_dependency 'cure-google-api-client', '~> 0.6', '>= 0.6.3', '<0.8.9'
s.add_runtime_dependency 'activesupport', '>= 3.2.13', '< 5'
s.add_runtime_dependency 'json', '>= 1.7.7', '<3'
- s.add_runtime_dependency 'trollop', '~> 2.0'
+ s.add_runtime_dependency 'optimist', '~> 3.0'
s.add_runtime_dependency 'andand', '~> 1.3', '>= 1.3.3'
s.add_runtime_dependency 'oj', '~> 3.0'
s.add_runtime_dependency 'curb', '~> 0.8'
require 'andand'
require 'curb'
require 'oj'
- require 'trollop'
+ require 'optimist'
rescue LoadError => error
abort <<-EOS
Please install all required gems:
- gem install arvados activesupport andand curb json oj trollop
+ gem install arvados activesupport andand curb json oj optimist
EOS
end
def arv_create client, arvados, global_opts, remaining_opts
types = resource_types(arvados.discovery_document)
- create_opts = Trollop::options do
+ create_opts = Optimist::options do
opt :project_uuid, "Project uuid in which to create the object", :type => :string
stop_on resource_types(arvados.discovery_document)
end
rsc = rsc.first
discovered_params = arvados.discovery_document["resources"][rsc]["methods"]["create"]["parameters"]
- method_opts = Trollop::options do
+ method_opts = Optimist::options do
banner head_banner
banner "Usage: arv create [--project-uuid] #{object_type} [create parameters]"
banner ""
opts = Hash.new()
opts[:type] = v["type"].to_sym if v.include?("type")
if [:datetime, :text, :object, :array].index opts[:type]
- opts[:type] = :string # else trollop bork
+ opts[:type] = :string # else optimist bork
end
opts[:default] = v["default"] if v.include?("default")
opts[:default] = v["default"].to_i if opts[:type] == :integer
def parse_arguments(discovery_document, subcommands)
resources_and_subcommands = resource_types(discovery_document) + subcommands
- option_parser = Trollop::Parser.new do
+ option_parser = Optimist::Parser.new do
version __FILE__
banner head_banner
banner "Usage: arv [--flags] subcommand|resource [method] [--parameters]"
stop_on resources_and_subcommands
end
- global_opts = Trollop::with_standard_exception_handling option_parser do
+ global_opts = Optimist::with_standard_exception_handling option_parser do
o = option_parser.parse ARGV
end
discovered_params = discovery_document\
["resources"][resource.pluralize]\
["methods"][method]["parameters"]
- method_opts = Trollop::options do
+ method_opts = Optimist::options do
banner head_banner
banner "Usage: arv #{resource} #{method} [--parameters]"
banner ""
opts = Hash.new()
opts[:type] = v["type"].to_sym if v.include?("type")
if [:datetime, :text, :object, :array].index opts[:type]
- opts[:type] = :string # else trollop bork
+ opts[:type] = :string # else optimist bork
end
opts[:default] = v["default"] if v.include?("default")
opts[:default] = v["default"].to_i if opts[:type] == :integer
elsif resource_body_is_readable_file
resource_body = resource_body_file.read()
begin
- # we don't actually need the results of the parsing,
+ # we don't actually need the results of the parsing,
# just checking for the JSON::ParserError exception
JSON.parse resource_body
rescue JSON::ParserError => e
require 'rubygems'
require 'json'
require 'pp'
- require 'trollop'
+ require 'optimist'
require 'google/api_client'
rescue LoadError => l
$stderr.puts $:
abort <<-EOS
#{$0}: fatal: #{l.message}
Some runtime dependencies may be missing.
-Try: gem install arvados pp google-api-client json trollop
+Try: gem install arvados pp google-api-client json optimist
EOS
end
# Parse command line options (the kind that control the behavior of
# this program, that is, not the pipeline component parameters).
-p = Trollop::Parser.new do
+p = Optimist::Parser.new do
version __FILE__
banner(<<EOF)
type: :string)
stop_on [:'--']
end
-$options = Trollop::with_standard_exception_handling p do
+$options = Optimist::with_standard_exception_handling p do
p.parse ARGV
end
$debuglevel = $options[:debug_level] || ($options[:debug] && 1) || 0
require 'json'
require 'pp'
require 'oj'
- require 'trollop'
+ require 'optimist'
rescue LoadError
abort <<-EOS
#{$0}: fatal: some runtime dependencies are missing.
-Try: gem install pp google-api-client json trollop
+Try: gem install pp google-api-client json optimist
EOS
end
end
end
-global_opts = Trollop::options do
+global_opts = Optimist::options do
banner usage_string
banner ""
opt :dry_run, "Don't actually do anything", :short => "-n"
stop_on ['add', 'remove']
end
-p = Trollop::Parser.new do
+p = Optimist::Parser.new do
opt(:all,
"Remove this tag from all objects under your ownership. Only valid with `tag remove'.",
:short => :none)
:short => :o)
end
-$options = Trollop::with_standard_exception_handling p do
+$options = Optimist::with_standard_exception_handling p do
p.parse ARGV
end
runnerjob.run(submitargs)
return (runnerjob.uuid, "success")
- self.poll_api = arvados.api('v1')
+ self.poll_api = arvados.api('v1', timeout=runtimeContext.http_timeout)
self.polling_thread = threading.Thread(target=self.poll_states)
self.polling_thread.start()
parser.add_argument("--thread-count", type=int,
default=4, help="Number of threads to use for job submit and output collection.")
+ parser.add_argument("--http-timeout", type=int,
+ default=5*60, dest="http_timeout", help="API request timeout in seconds. Default is 300 seconds (5 minutes).")
+
exgroup = parser.add_mutually_exclusive_group()
exgroup.add_argument("--trash-intermediate", action="store_true",
default=False, dest="trash_intermediate",
try:
if api_client is None:
- api_client = arvados.safeapi.ThreadSafeApiCache(api_params={"model": OrderedJsonModel()}, keep_params={"num_retries": 4})
+ api_client = arvados.safeapi.ThreadSafeApiCache(
+ api_params={"model": OrderedJsonModel(), "timeout": arvargs.http_timeout},
+ keep_params={"num_retries": 4})
keep_client = api_client.keep
# Make an API object now so errors are reported early.
api_client.users().current().execute()
runtimeContext = ArvRuntimeContext(vars(arvargs))
runtimeContext.make_fs_access = partial(CollectionFsAccess,
collection_cache=runner.collection_cache)
+ runtimeContext.http_timeout = arvargs.http_timeout
return cwltool.main.main(args=arvargs,
stdout=stdout,
import datetime
import ciso8601
import uuid
+import math
from arvados_cwl.util import get_current_container, get_intermediate_collection_info
import ruamel.yaml as yaml
resources = self.builder.resources
if resources is not None:
- runtime_constraints["vcpus"] = resources.get("cores", 1)
- runtime_constraints["ram"] = resources.get("ram") * 2**20
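+        # CWL resource requests may be fractional (e.g. computed by an
+        # expression); container runtime constraints must be integers, so
+        # round up.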
+ runtime_constraints["vcpus"] = math.ceil(resources.get("cores", 1))
+ runtime_constraints["ram"] = math.ceil(resources.get("ram") * 2**20)
mounts = {
self.outdir: {
"kind": "tmp",
- "capacity": resources.get("outdirSize", 0) * 2**20
+ "capacity": math.ceil(resources.get("outdirSize", 0) * 2**20)
},
self.tmpdir: {
"kind": "tmp",
- "capacity": resources.get("tmpdirSize", 0) * 2**20
+ "capacity": math.ceil(resources.get("tmpdirSize", 0) * 2**20)
}
}
secret_mounts = {}
runtime_req, _ = self.get_requirement("http://arvados.org/cwl#RuntimeConstraints")
if runtime_req:
if "keep_cache" in runtime_req:
- runtime_constraints["keep_cache_ram"] = runtime_req["keep_cache"] * 2**20
+ runtime_constraints["keep_cache_ram"] = math.ceil(runtime_req["keep_cache"] * 2**20)
if "outputDirType" in runtime_req:
if runtime_req["outputDirType"] == "local_output_dir":
# Currently the default behavior.
},
"secret_mounts": secret_mounts,
"runtime_constraints": {
- "vcpus": self.submit_runner_cores,
- "ram": 1024*1024 * self.submit_runner_ram,
+ "vcpus": math.ceil(self.submit_runner_cores),
+ "ram": math.ceil(1024*1024 * self.submit_runner_ram),
"API": True
},
"use_existing": self.enable_reuse,
self.uuid = response["uuid"]
self.arvrunner.process_submitted(self)
- logger.info("%s submitted container %s", self.arvrunner.label(self), response["uuid"])
+ logger.info("%s submitted container_request %s", self.arvrunner.label(self), response["uuid"])
def done(self, record):
try:
self.cwl_runner_job = None
self.storage_classes = "default"
self.current_container = None
+ self.http_timeout = 300
super(ArvRuntimeContext, self).__init__(kwargs)
p = sp[0]
if p.startswith("keep:") and arvados.util.keep_locator_pattern.match(p[5:]):
pdh = p[5:]
- return (self.collection_cache.get(pdh), sp[1] if len(sp) == 2 else None)
+ return (self.collection_cache.get(pdh), urlparse.unquote(sp[1]) if len(sp) == 2 else None)
else:
return (None, path)
else:
return super(CollectionFsAccess, self).exists(fn)
+    def size(self, fn):  # type: (unicode) -> int
+ collection, rest = self.get_collection(fn)
+ if collection is not None:
+ if rest:
+ arvfile = collection.find(rest)
+ if isinstance(arvfile, arvados.arvfile.ArvadosFile):
+ return arvfile.size()
+ raise IOError(errno.EINVAL, "Not a path to a file %s" % (fn))
+ else:
+ return super(CollectionFsAccess, self).size(fn)
+
def isfile(self, fn): # type: (unicode) -> bool
collection, rest = self.get_collection(fn)
if collection is not None:
sc = []
def only_real(obj):
+        # Only interested in local files that need to be uploaded;
+        # don't include file literals, keep references, etc.
if obj.get("location", "").startswith("file:"):
sc.append(obj)
visit_class(workflowobj, ("CommandLineTool", "Workflow"), discover_default_secondary_files)
- for d in discovered:
- sc.extend(discovered[d])
+ for d in list(discovered.keys()):
+ # Only interested in discovered secondaryFiles which are local
+ # files that need to be uploaded.
+ if d.startswith("file:"):
+ sc.extend(discovered[d])
+ else:
+ del discovered[d]
mapper = ArvPathMapper(arvrunner, sc, "",
"keep:%s",
# Note that arvados/build/run-build-packages.sh looks at this
# file to determine what version of cwltool and schema-salad to build.
install_requires=[
- 'cwltool==1.0.20180615183820',
- 'schema-salad==2.7.20180501211602',
- 'typing >= 3.5.3',
- 'ruamel.yaml >=0.13.11, <0.15',
+ 'cwltool==1.0.20180806194258',
+ 'schema-salad==2.7.20180719125426',
+ 'typing >= 3.6.4',
+ # Need to limit ruamel.yaml version to 0.15.26 because of bug
+ # https://bitbucket.org/ruamel/yaml/issues/227/regression-parsing-flow-mapping
+ 'ruamel.yaml >=0.13.11, <= 0.15.26',
'arvados-python-client>=1.1.4.20180607143841',
'setuptools',
'ciso8601 >=1.0.6, <2.0.0',
--- /dev/null
+fastq1:
+ class: File
+ location: keep:20850f01122e860fb878758ac1320877+71/sample1_S01_R1_001.fastq.gz
\ No newline at end of file
--- /dev/null
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+ fastq1: File
+outputs:
+ out: stdout
+baseCommand: echo
+arguments:
+ - $(inputs.fastq1.size)
+stdout: size.txt
\ No newline at end of file
--- /dev/null
+cwlVersion: v1.0
+class: CommandLineTool
+requirements:
+ - class: InlineJavascriptRequirement
+arguments:
+ - ls
+ - -l
+ - $(inputs.hello)
+inputs:
+ hello:
+ type: File
+ default:
+ class: File
+ location: keep:4d8a70b1e63b2aad6984e40e338e2373+69/hello.txt
+ secondaryFiles:
+ - .idx
+outputs: []
\ No newline at end of file
if ! arv-get 4d8a70b1e63b2aad6984e40e338e2373+69 > /dev/null ; then
arv-put --portable-data-hash secondaryFiles/hello.txt*
fi
+if ! arv-get 20850f01122e860fb878758ac1320877+71 > /dev/null ; then
+ arv-put --portable-data-hash samples/sample1_S01_R1_001.fastq.gz
+fi
+
exec cwltest --test arvados-tests.yml --tool arvados-cwl-runner $@ -- --disable-reuse --compute-checksum
tool: 12418-glob-empty-collection.cwl
doc: "Test glob output on empty collection"
+- job: null
+ output:
+ out: null
+ tool: 13976-keepref-wf.cwl
+ doc: "Test issue 13976"
+
- job: null
output:
out: out
out: null
tool: wf-defaults/wf7.cwl
doc: workflow level default in RunInSingleContainer
+
+- job: 13931-size-job.yml
+ output:
+ "out": {
+ "checksum": "sha1$5bf6e5357bd42a6b1d2a3a040e16a91490064d26",
+ "location": "size.txt",
+ "class": "File",
+ "size": 3
+ }
+ tool: 13931-size.cwl
+ doc: Test that size is set for files in Keep
import arvados_cwl
import arvados_cwl.context
from arvados_cwl.arvdocker import arv_docker_clear_cache
+import arvados.config
import logging
import mock
import unittest
logging.getLogger('arvados.cwl-runner').setLevel(logging.WARN)
logging.getLogger('arvados.arv-run').setLevel(logging.WARN)
+class CollectionMock(object):
+ def __init__(self, vwdmock, *args, **kwargs):
+ self.vwdmock = vwdmock
+ self.count = 0
+
+ def open(self, *args, **kwargs):
+ self.count += 1
+ return self.vwdmock.open(*args, **kwargs)
+
+ def copy(self, *args, **kwargs):
+ self.count += 1
+ self.vwdmock.copy(*args, **kwargs)
+
+ def save_new(self, *args, **kwargs):
+ pass
+
+ def __len__(self):
+ return self.count
+
+ def portable_data_hash(self):
+ if self.count == 0:
+ return arvados.config.EMPTY_BLOCK_LOCATOR
+ else:
+ return "99999999999999999999999999999996+99"
+
+
class TestContainer(unittest.TestCase):
def helper(self, runner, enable_reuse=True):
runner.fs_access.get_collection.side_effect = get_collection_mock
vwdmock = mock.MagicMock()
- collection_mock.return_value = vwdmock
- vwdmock.portable_data_hash.return_value = "99999999999999999999999999999996+99"
+ collection_mock.side_effect = lambda *args, **kwargs: CollectionMock(vwdmock, *args, **kwargs)
tool = cmap({
"inputs": [],
keepdocker.return_value = [("zzzzz-4zz18-zzzzzzzzzzzzzz3", "")]
runner.api.collections().get().execute.return_value = {
- "portable_data_hash": "99999999999999999999999999999993+99"}
+ "portable_data_hash": "99999999999999999999999999999994+99",
+ "manifest_text": ". 99999999999999999999999999999994+99 0:0:file1 0:0:file2"}
document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema("v1.0")
from schema_salad.sourceline import cmap
from .mock_discovery import get_rootDesc
from .matcher import JsonDiffMatcher, StripYAMLComments
+from .test_container import CollectionMock
if not os.getenv('ARVADOS_DEBUG'):
logging.getLogger('arvados.cwl-runner').setLevel(logging.WARN)
tool, metadata = loadingContext.loader.resolve_ref("tests/wf/scatter2.cwl")
metadata["cwlVersion"] = tool["cwlVersion"]
- mockcollection().portable_data_hash.return_value = "99999999999999999999999999999999+118"
+ mockc = mock.MagicMock()
+ mockcollection.side_effect = lambda *args, **kwargs: CollectionMock(mockc, *args, **kwargs)
+ mockcollectionreader().find.return_value = arvados.arvfile.ArvadosFile(mock.MagicMock(), "token.txt")
arvtool = arvados_cwl.ArvadosWorkflow(runner, tool, loadingContext)
arvtool.formatgraph = None
'HOME': '$(task.outdir)',
'TMPDIR': '$(task.tmpdir)'},
'task.vwd': {
- 'workflow.cwl': '$(task.keep)/99999999999999999999999999999999+118/workflow.cwl',
- 'cwl.input.yml': '$(task.keep)/99999999999999999999999999999999+118/cwl.input.yml'
+ 'workflow.cwl': '$(task.keep)/99999999999999999999999999999996+99/workflow.cwl',
+ 'cwl.input.yml': '$(task.keep)/99999999999999999999999999999996+99/cwl.input.yml'
},
'command': [u'cwltool', u'--no-container', u'--move-outputs', u'--preserve-entire-environment', u'workflow.cwl#main', u'cwl.input.yml'],
'task.stdout': 'cwl.output.json'}]},
['docker_image_locator', 'in docker', 'arvados/jobs']],
find_or_create=True)
- mockcollection().open().__enter__().write.assert_has_calls([mock.call(subwf)])
- mockcollection().open().__enter__().write.assert_has_calls([mock.call(
+ mockc.open().__enter__().write.assert_has_calls([mock.call(subwf)])
+ mockc.open().__enter__().write.assert_has_calls([mock.call(
'''{
"fileblub": {
"basename": "token.txt",
"class": "File",
- "location": "/keep/99999999999999999999999999999999+118/token.txt"
+ "location": "/keep/99999999999999999999999999999999+118/token.txt",
+ "size": 0
},
"sleeptime": 5
}''')])
tool, metadata = loadingContext.loader.resolve_ref("tests/wf/echo-wf.cwl")
metadata["cwlVersion"] = tool["cwlVersion"]
- mockcollection().portable_data_hash.return_value = "99999999999999999999999999999999+118"
+ mockcollection.side_effect = lambda *args, **kwargs: CollectionMock(mock.MagicMock(), *args, **kwargs)
arvtool = arvados_cwl.ArvadosWorkflow(runner, tool, loadingContext)
arvtool.formatgraph = None
'HOME': '$(task.outdir)',
'TMPDIR': '$(task.tmpdir)'},
'task.vwd': {
- 'workflow.cwl': '$(task.keep)/99999999999999999999999999999999+118/workflow.cwl',
- 'cwl.input.yml': '$(task.keep)/99999999999999999999999999999999+118/cwl.input.yml'
+ 'workflow.cwl': '$(task.keep)/99999999999999999999999999999996+99/workflow.cwl',
+ 'cwl.input.yml': '$(task.keep)/99999999999999999999999999999996+99/cwl.input.yml'
},
'command': [u'cwltool', u'--no-container', u'--move-outputs', u'--preserve-entire-environment', u'workflow.cwl#main', u'cwl.input.yml'],
'task.stdout': 'cwl.output.json'}]},
"listing": [{
"basename": "renamed.txt",
"class": "File",
- "location": "keep:99999999999999999999999999999998+99/file1.txt"
+ "location": "keep:99999999999999999999999999999998+99/file1.txt",
+ "size": 0
}],
'class': 'Directory'
},
{
'basename': 'renamed.txt',
'class': 'File', 'location':
- 'keep:99999999999999999999999999999998+99/file1.txt'
+ 'keep:99999999999999999999999999999998+99/file1.txt',
+ 'size': 0
}
]}},
'cwl:tool': '3fffdeaa75e018172e1b583425f4ebff+60/workflow.cwl#main',
'z': {'basename': 'anonymous', 'class': 'Directory', 'listing': [
{'basename': 'renamed.txt',
'class': 'File',
- 'location': 'keep:99999999999999999999999999999998+99/file1.txt'
+ 'location': 'keep:99999999999999999999999999999998+99/file1.txt',
+ 'size': 0
}
]}
},
'manifest_text':
'. 5bcc9fe8f8d5992e6cf418dc7ce4dbb3+16 0:16:blub.txt\n',
'replication_desired': None,
- 'name': 'submit_tool.cwl dependencies',
- }), ensure_unique_name=True),
+ 'name': 'submit_tool.cwl dependencies (5d373e7629203ce39e7c22af98a0f881+52)',
+ }), ensure_unique_name=False),
mock.call(body=JsonDiffMatcher({
'manifest_text':
'. 979af1245a12a1fed634d4222473bfdc+16 0:16:blorp.txt\n',
'replication_desired': None,
- 'name': 'submit_wf.cwl input',
- }), ensure_unique_name=True),
+ 'name': 'submit_wf.cwl input (169f39d466a5438ac4a90e779bf750c7+53)',
+ }), ensure_unique_name=False),
mock.call(body=JsonDiffMatcher({
'manifest_text':
'. 61df2ed9ee3eb7dd9b799e5ca35305fa+1217 0:1217:workflow.cwl\n',
'manifest_text':
'. 5bcc9fe8f8d5992e6cf418dc7ce4dbb3+16 0:16:blub.txt\n',
'replication_desired': None,
- 'name': 'submit_tool.cwl dependencies',
- }), ensure_unique_name=True),
+ 'name': 'submit_tool.cwl dependencies (5d373e7629203ce39e7c22af98a0f881+52)',
+ }), ensure_unique_name=False),
mock.call(body=JsonDiffMatcher({
'manifest_text':
'. 979af1245a12a1fed634d4222473bfdc+16 0:16:blorp.txt\n',
'replication_desired': None,
- 'name': 'submit_wf.cwl input',
- }), ensure_unique_name=True)])
+ 'name': 'submit_wf.cwl input (169f39d466a5438ac4a90e779bf750c7+53)',
+ }), ensure_unique_name=False)])
expect_container = copy.deepcopy(stubs.expect_container_spec)
stubs.api.container_requests().create.assert_called_with(
@stubs
def test_submit_file_keepref(self, stubs, tm, collectionReader):
capture_stdout = cStringIO.StringIO()
+ collectionReader().find.return_value = arvados.arvfile.ArvadosFile(mock.MagicMock(), "blorp.txt")
exited = arvados_cwl.main(
["--submit", "--no-wait", "--api=containers", "--debug",
"tests/wf/submit_keepref_wf.cwl"],
"class": "File",
"location": "keep:99999999999999999999999999999998+99/file1.txt",
"nameext": ".txt",
- "nameroot": "renamed"
+ "nameroot": "renamed",
+ "size": 0
}
]
},
RUN apt-get update -q && apt-get install -qy git python-pip python-virtualenv python-dev libcurl4-gnutls-dev libgnutls28-dev nodejs python-pyasn1-modules
-RUN pip install -U setuptools
+RUN pip install -U setuptools six
ARG sdk
ARG runner
ApiServer: c.APIHost,
ApiToken: c.AuthToken,
ApiInsecure: c.Insecure,
- Client: &http.Client{Transport: &http.Transport{
- TLSClientConfig: MakeTLSConfig(c.Insecure)}},
+ Client: &http.Client{
+ Timeout: 5 * time.Minute,
+ Transport: &http.Transport{
+ TLSClientConfig: MakeTLSConfig(c.Insecure)},
+ },
External: false,
Retries: 2,
KeepServiceURIs: c.KeepServiceURIs,
import (
"context"
"fmt"
- "log"
"sync"
"time"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+ "github.com/Sirupsen/logrus"
)
const (
Cancelled = arvados.ContainerStateCancelled
)
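+// Logger is the minimal logging interface needed by the dispatcher. The
+// logrus standard logger satisfies it; if Dispatcher.Logger is nil, Run
+// falls back to logrus.StandardLogger().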
+type Logger interface {
+ Printf(string, ...interface{})
+ Warnf(string, ...interface{})
+ Debugf(string, ...interface{})
+}
+
// Dispatcher struct
type Dispatcher struct {
Arv *arvadosclient.ArvadosClient
+ Logger Logger
+
+ // Batch size for container queries
+ BatchSize int64
+
// Queue polling frequency
PollPeriod time.Duration
// dispatcher's token. When a new one appears, Run calls RunContainer
// in a new goroutine.
func (d *Dispatcher) Run(ctx context.Context) error {
+ if d.Logger == nil {
+ d.Logger = logrus.StandardLogger()
+ }
+
err := d.Arv.Call("GET", "api_client_authorizations", "", "current", nil, &d.auth)
if err != nil {
return fmt.Errorf("error getting my token UUID: %v", err)
poll := time.NewTicker(d.PollPeriod)
defer poll.Stop()
+ if d.BatchSize == 0 {
+ d.BatchSize = 100
+ }
+
for {
select {
case <-poll.C:
// Containers that I know about that didn't show up in any
// query should be let go.
for uuid, tracker := range todo {
- log.Printf("Container %q not returned by any query, stopping tracking.", uuid)
+ d.Logger.Printf("Container %q not returned by any query, stopping tracking.", uuid)
tracker.close()
}
// Start a runner in a new goroutine, and send the initial container
// record to its updates channel.
func (d *Dispatcher) start(c arvados.Container) *runTracker {
- tracker := &runTracker{updates: make(chan arvados.Container, 1)}
+ tracker := &runTracker{
+ updates: make(chan arvados.Container, 1),
+ logger: d.Logger,
+ }
tracker.updates <- c
go func() {
d.RunContainer(d, c, tracker.updates)
}
func (d *Dispatcher) checkForUpdates(filters [][]interface{}, todo map[string]*runTracker) bool {
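+	// First get an exact count of the matching containers (limit 0 returns
+	// no items), then page through them in BatchSize chunks without
+	// recounting on every request.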
+ var countList arvados.ContainerList
params := arvadosclient.Dict{
"filters": filters,
+ "count": "exact",
+ "limit": 0,
+ "order": []string{"priority desc"}}
+ err := d.Arv.List("containers", params, &countList)
+ if err != nil {
+ d.Logger.Warnf("error getting count of containers: %q", err)
+ return false
+ }
+ itemsAvailable := countList.ItemsAvailable
+ params = arvadosclient.Dict{
+ "filters": filters,
+ "count": "none",
+ "limit": d.BatchSize,
"order": []string{"priority desc"}}
offset := 0
for {
err := d.Arv.List("containers", params, &list)
if err != nil {
- log.Printf("Error getting list of containers: %q", err)
+ d.Logger.Warnf("error getting list of containers: %q", err)
return false
}
d.checkListForUpdates(list.Items, todo)
offset += len(list.Items)
- if len(list.Items) == 0 || list.ItemsAvailable <= offset {
+ if len(list.Items) == 0 || itemsAvailable <= offset {
return true
}
}
delete(todo, c.UUID)
if c.LockedByUUID != "" && c.LockedByUUID != d.auth.UUID {
- log.Printf("debug: ignoring %s locked by %s", c.UUID, c.LockedByUUID)
+ d.Logger.Debugf("ignoring %s locked by %s", c.UUID, c.LockedByUUID)
} else if alreadyTracking {
switch c.State {
case Queued:
}
err := d.lock(c.UUID)
if err != nil {
- log.Printf("debug: error locking container %s: %s", c.UUID, err)
+ d.Logger.Warnf("error locking container %s: %s", c.UUID, err)
break
}
c.State = Locked
"container": arvadosclient.Dict{"state": state},
}, nil)
if err != nil {
- log.Printf("Error updating container %s to state %q: %s", uuid, state, err)
+ d.Logger.Warnf("error updating container %s to state %q: %s", uuid, state, err)
}
return err
}
type runTracker struct {
closing bool
updates chan arvados.Container
+ logger Logger
}
func (tracker *runTracker) close() {
}
select {
case <-tracker.updates:
- log.Printf("debug: runner is handling updates slowly, discarded previous update for %s", c.UUID)
+ tracker.logger.Debugf("runner is handling updates slowly, discarded previous update for %s", c.UUID)
default:
}
tracker.updates <- c
return cache.SafeHTTPCache(path, max_age=60*60*24*2)
def api(version=None, cache=True, host=None, token=None, insecure=False,
- request_id=None, **kwargs):
+ request_id=None, timeout=5*60, **kwargs):
"""Return an apiclient Resources object for an Arvados instance.
:version:
:insecure:
If True, ignore SSL certificate validation errors.
+ :timeout:
+  A timeout value (in seconds) for outgoing HTTP requests.
+
:request_id:
Default X-Request-Id header value for outgoing requests that
don't already provide one. If None or omitted, generate a random
http_kwargs['disable_ssl_certificate_validation'] = True
kwargs['http'] = httplib2.Http(**http_kwargs)
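+    # httplib2.Http defaults to timeout=None; apply our default unless the
+    # caller supplied an http object that already has a timeout set.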
+ if kwargs['http'].timeout is None:
+ kwargs['http'].timeout = timeout
+
kwargs['http'] = _patch_http_request(kwargs['http'], token)
svc = apiclient_discovery.build('arvados', version, cache_discovery=False, **kwargs)
# Copyright (C) The Arvados Authors. All rights reserved.
+# Copyright (C) 2018 Genome Research Ltd.
#
# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
from __future__ import print_function
from __future__ import absolute_import
import errno
import arvados.commands._util as arv_cmd
import arvados.collection
+import arvados.config as config
from arvados._version import __version__
for src in iterfiles:
write_file(collection, pathprefix, os.path.join(root, src), not packed)
- filters=[["portable_data_hash", "=", collection.portable_data_hash()]]
- if name:
- filters.append(["name", "like", name+"%"])
- if project:
- filters.append(["owner_uuid", "=", project])
-
- exists = api.collections().list(filters=filters, limit=1).execute(num_retries=num_retries)
-
- if exists["items"]:
- item = exists["items"][0]
- pdh = item["portable_data_hash"]
- logger.info("Using collection %s (%s)", pdh, item["uuid"])
- elif len(collection) > 0:
- collection.save_new(name=name, owner_uuid=project, ensure_unique_name=True)
+ pdh = None
+ if len(collection) > 0:
+ # non-empty collection
+ filters = [["portable_data_hash", "=", collection.portable_data_hash()]]
+ name_pdh = "%s (%s)" % (name, collection.portable_data_hash())
+ if name:
+ filters.append(["name", "=", name_pdh])
+ if project:
+ filters.append(["owner_uuid", "=", project])
+
+ # Do the list/create in a loop with up to 2 tries: because we use
+ # `ensure_unique_name=False`, another workflow may create the same
+ # collection in the window between our list (which finds nothing)
+ # and our attempt to create it.
+ tries = 2
+ while pdh is None and tries > 0:
+ exists = api.collections().list(filters=filters, limit=1).execute(num_retries=num_retries)
+
+ if exists["items"]:
+ item = exists["items"][0]
+ pdh = item["portable_data_hash"]
+ logger.info("Using collection %s (%s)", pdh, item["uuid"])
+ else:
+ try:
+ collection.save_new(name=name_pdh, owner_uuid=project, ensure_unique_name=False)
+ pdh = collection.portable_data_hash()
+ logger.info("Uploaded to %s (%s)", pdh, collection.manifest_locator())
+ except arvados.errors.ApiError as ae:
+ tries -= 1
+ if pdh is None:
+ # Something weird is going on here: probably a collection
+ # with a conflicting name but the wrong PDH. We won't be
+ # able to reuse it, but we still need to save our
+ # collection, so save it with a unique name.
+ logger.info("Name conflict on '%s', existing collection has an unexpected portable data hash", name_pdh)
+ collection.save_new(name=name_pdh, owner_uuid=project, ensure_unique_name=True)
+ pdh = collection.portable_data_hash()
+ logger.info("Uploaded to %s (%s)", pdh, collection.manifest_locator())
+ else:
+ # empty collection
pdh = collection.portable_data_hash()
- logger.info("Uploaded to %s (%s)", pdh, collection.manifest_locator())
+ assert (pdh == config.EMPTY_BLOCK_LOCATOR), "Empty collection portable_data_hash did not have expected locator, was %s" % pdh
+ logger.info("Using empty collection %s", pdh)
for c in files:
c.keepref = "%s/%s" % (pdh, c.fn)
'google-api-python-client >=1.6.2, <1.7',
'httplib2 >=0.9.2',
'pycurl >=7.19.5.1',
- 'ruamel.yaml >=0.13.11, <0.15',
+ 'ruamel.yaml >=0.13.11, <= 0.15.26',
'setuptools',
- 'ws4py <0.4',
- 'subprocess32>=3.5.1',
+ 'ws4py >=0.4.2',
+ 'subprocess32 >=3.5.1',
],
test_suite='tests',
tests_require=['pbr<1.7.0', 'mock>=1.0', 'PyYAML'],
f.write("""
Clusters:
zzzzz:
+ HTTPRequestTimeout: 30s
PostgreSQL:
ConnectionPool: 32
Connection:
s.add_dependency('andand', '~> 1.3', '>= 1.3.3')
# Our google-api-client dependency used to be < 0.9, but that could be
# satisfied by the buggy 0.9.pre*. https://dev.arvados.org/issues/9213
- s.add_dependency('google-api-client', '>= 0.7', '< 0.8.9')
+ s.add_dependency('cure-google-api-client', '>= 0.7', '< 0.8.9')
# work around undeclared dependency on i18n in some activesupport 3.x.x:
s.add_dependency('i18n', '~> 0')
s.add_dependency('json', '>= 1.7.7', '<3')
@distinct = nil
@response_resource_name = nil
@attrs = nil
+ @extra_included = nil
end
def default_url_options
+ options = {}
if Rails.configuration.host
- {:host => Rails.configuration.host}
- else
- {}
+ options[:host] = Rails.configuration.host
+ end
+ if Rails.configuration.port
+ options[:port] = Rails.configuration.port
+ end
+ if Rails.configuration.protocol
+ options[:protocol] = Rails.configuration.protocol
end
+ options
end
def index
req_id = "req-" + Random::DEFAULT.rand(2**128).to_s(36)[0..19]
end
response.headers['X-Request-Id'] = Thread.current[:request_id] = req_id
- yield
+ Rails.logger.tagged(req_id) do
+ yield
+ end
Thread.current[:request_id] = nil
end
:limit => @limit,
:items => @objects.as_api_response(nil, {select: @select})
}
+ if @extra_included
+ list[:included] = @extra_included.as_api_response(nil, {select: @select})
+ end
case params[:count]
when nil, '', 'exact'
if @objects.respond_to? :except
class Arvados::V1::GroupsController < ApplicationController
include TrashableController
+ skip_before_filter :find_object_by_uuid, only: :shared
+ skip_before_filter :render_404_if_no_object, only: :shared
+
def self._index_requires_parameters
(super rescue {}).
merge({
})
end
+ def shared
+ # The purpose of this endpoint is to return the toplevel set of
+ # groups which are *not* reachable through a direct ownership
+ # chain of projects starting from the current user account. In
+ # other words, groups to which access was granted via a
+ # permission link or chain of links.
+ #
+ # This also returns (in the "included" field) the objects that own
+ # those projects (users or non-project groups).
+ #
+ # select groups that are readable by current user AND
+ # the owner_uuid is a user (but not the current user) OR
+ # the owner_uuid is not readable by the current user OR
+ # the owner_uuid is a group but group_class is not a project
+ #
+ # The intended use of this endpoint is to support clients which
+ # wish to browse those projects which are visible to the user but
+ # are not part of the "home" project.
+
+ load_limit_offset_order_params
+ load_filters_param
+
+ read_parent_check = if current_user.is_admin
+ ""
+ else
+ "NOT EXISTS(SELECT 1 FROM #{PERMISSION_VIEW} WHERE "+
+ "user_uuid=(:user_uuid) AND target_uuid=groups.owner_uuid AND perm_level >= 1) OR "
+ end
+
+ @objects = Group.readable_by(*@read_users).where("groups.owner_uuid IN (SELECT users.uuid FROM users WHERE users.uuid != (:user_uuid)) OR "+
+ read_parent_check+
+ "EXISTS(SELECT 1 FROM groups as gp where gp.uuid=groups.owner_uuid and gp.group_class != 'project')",
+ user_uuid: current_user.uuid)
+ apply_where_limit_order_params
+
+ owners = @objects.map(&:owner_uuid).to_a
+
+ if params["include"] == "owner_uuid"
+ @extra_included = []
+ [Group, User].each do |klass|
+ @extra_included += klass.readable_by(*@read_users).where(uuid: owners).to_a
+ end
+ end
+
+ index
+ end
+
+ def self._shared_requires_parameters
+ rp = self._index_requires_parameters
+ rp[:include] = { type: 'string', required: false }
+ rp
+ end
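A hypothetical client call against the new endpoint, once the discovery document (and a rebuilt Python SDK client) exposes it; the include parameter name matches the one accepted above:

import arvados

api = arvados.api('v1')
resp = api.groups().shared(include="owner_uuid", order=["name asc"]).execute()
for g in resp["items"]:
    print(g["uuid"], g["name"])
for owner in resp.get("included", []):
    print("owner:", owner["uuid"])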
+
protected
def load_searchable_objects
all_objects = []
@items_available = 0
+ # Reload the orders param, this time without prefixing unqualified
+ # columns ("name" => "groups.name"). Here, unqualified orders
+ # apply to each table being searched, not "groups".
+ load_limit_offset_order_params(fill_table_names: false)
+
# Trick apply_where_limit_order_params into applying suitable
# per-table values. *_all are the real ones we'll apply to the
# aggregate set.
# table_name for the current klass, apply that order.
# Otherwise, order by recency.
request_order =
- request_orders.andand.find { |r| r =~ /^#{klass.table_name}\./i } ||
+ request_orders.andand.find { |r| r =~ /^#{klass.table_name}\./i || r !~ /\./ } ||
klass.default_orders.join(", ")
@select = nil
end
end
- def self.where_serialized(colname, value)
+ def self.where_serialized(colname, value, md5: false)
+ colsql = colname.to_s
+ if md5
+ colsql = "md5(#{colsql})"
+ end
if value.empty?
# rails4 stores as null, rails3 stored as serialized [] or {}
- sql = "#{colname.to_s} is null or #{colname.to_s} IN (?)"
+ sql = "#{colsql} is null or #{colsql} IN (?)"
sorted = value
else
- sql = "#{colname.to_s} IN (?)"
+ sql = "#{colsql} IN (?)"
sorted = deep_sort_hash(value)
end
- where(sql, [sorted.to_yaml, SafeJSON.dump(sorted)])
+ params = [sorted.to_yaml, SafeJSON.dump(sorted)]
+ if md5
+ params = params.map { |x| Digest::MD5.hexdigest(x) }
+ end
+ where(sql, params)
end
Serializer = {
end
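The md5: option exists so the container reuse query below can match against the md5() expression index added by the AddMd5IndexToContainers migration further down, instead of comparing the full serialized columns. That only works because both sides hash the same canonical serialization of a deep-sorted value. A small Python sketch of the idea (hypothetical helper names, not the Rails code):

import hashlib
import json

def deep_sort(value):
    # Recursively sort dict keys so serialization is order-independent
    # (a rough analogue of deep_sort_hash above).
    if isinstance(value, dict):
        return {k: deep_sort(value[k]) for k in sorted(value)}
    if isinstance(value, list):
        return [deep_sort(v) for v in value]
    return value

def canonical_md5(value):
    canon = json.dumps(deep_sort(value), separators=(',', ':'))
    return hashlib.md5(canon.encode()).hexdigest()

a = {"vcpus": 1, "ram": 256, "keep_cache_ram": 0}
b = {"ram": 256, "keep_cache_ram": 0, "vcpus": 1}
assert canonical_md5(a) == canonical_md5(b)   # same digest despite key order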
def manifest_files
+ return '' if !self.manifest_text
+
names = ''
- if self.manifest_text
- self.manifest_text.scan(/ \d+:\d+:(\S+)/) do |name|
- names << name.first.gsub('\040',' ') + "\n"
- break if names.length > 2**12
- end
+ self.manifest_text.scan(/ \d+:\d+:(\S+)/) do |name|
+ names << name.first.gsub('\040',' ') + "\n"
end
-
- if self.manifest_text and names.length < 2**12
- self.manifest_text.scan(/^\.\/(\S+)/m) do |stream_name|
- names << stream_name.first.gsub('\040',' ') + "\n"
- break if names.length > 2**12
- end
+ self.manifest_text.scan(/^\.\/(\S+)/m) do |stream_name|
+ names << stream_name.first.gsub('\040',' ') + "\n"
end
-
- names[0,2**12]
+ names
end
def default_empty_manifest
def self.find_reusable(attrs)
log_reuse_info { "starting with #{Container.all.count} container records in database" }
- candidates = Container.where_serialized(:command, attrs[:command])
+ candidates = Container.where_serialized(:command, attrs[:command], md5: true)
log_reuse_info(candidates) { "after filtering on command #{attrs[:command].inspect}" }
candidates = candidates.where('cwd = ?', attrs[:cwd])
log_reuse_info(candidates) { "after filtering on cwd #{attrs[:cwd].inspect}" }
- candidates = candidates.where_serialized(:environment, attrs[:environment])
+ candidates = candidates.where_serialized(:environment, attrs[:environment], md5: true)
log_reuse_info(candidates) { "after filtering on environment #{attrs[:environment].inspect}" }
candidates = candidates.where('output_path = ?', attrs[:output_path])
candidates = candidates.where('container_image = ?', image)
log_reuse_info(candidates) { "after filtering on container_image #{image.inspect} (resolved from #{attrs[:container_image].inspect})" }
- candidates = candidates.where_serialized(:mounts, resolve_mounts(attrs[:mounts]))
+ candidates = candidates.where_serialized(:mounts, resolve_mounts(attrs[:mounts]), md5: true)
log_reuse_info(candidates) { "after filtering on mounts #{attrs[:mounts].inspect}" }
- candidates = candidates.where('secret_mounts_md5 = ?', Digest::MD5.hexdigest(SafeJSON.dump(self.deep_sort_hash(attrs[:secret_mounts]))))
- log_reuse_info(candidates) { "after filtering on mounts #{attrs[:mounts].inspect}" }
+ secret_mounts_md5 = Digest::MD5.hexdigest(SafeJSON.dump(self.deep_sort_hash(attrs[:secret_mounts])))
+ candidates = candidates.where('secret_mounts_md5 = ?', secret_mounts_md5)
+ log_reuse_info(candidates) { "after filtering on secret_mounts_md5 #{secret_mounts_md5.inspect}" }
- candidates = candidates.where_serialized(:runtime_constraints, resolve_runtime_constraints(attrs[:runtime_constraints]))
+ candidates = candidates.where_serialized(:runtime_constraints, resolve_runtime_constraints(attrs[:runtime_constraints]), md5: true)
log_reuse_info(candidates) { "after filtering on runtime_constraints #{attrs[:runtime_constraints].inspect}" }
log_reuse_info { "checking for state=Complete with readable output and log..." }
validates :command, :container_image, :output_path, :cwd, :presence => true
validates :output_ttl, numericality: { only_integer: true, greater_than_or_equal_to: 0 }
validates :priority, numericality: { only_integer: true, greater_than_or_equal_to: 0, less_than_or_equal_to: 1000 }
+ validate :validate_datatypes
validate :validate_scheduling_parameters
validate :validate_state_change
validate :check_update_whitelist
end
end
+ def validate_datatypes
+ command.each do |c|
+ if !c.is_a? String
+ errors.add(:command, "must be an array of strings but has entry #{c.class}")
+ end
+ end
+ environment.each do |k,v|
+ if !k.is_a?(String) || !v.is_a?(String)
+ errors.add(:environment, "must be an map of String to String but has entry #{k.class} to #{v.class}")
+ end
+ end
+ [:mounts, :secret_mounts].each do |m|
+ self[m].each do |k, v|
+ if !k.is_a?(String) || !v.is_a?(Hash)
+ errors.add(m, "must be an map of String to Hash but is has entry #{k.class} to #{v.class}")
+ end
+ if v["kind"].nil?
+ errors.add(m, "each item must have a 'kind' field")
+ end
+ [[String, ["kind", "portable_data_hash", "uuid", "device_type",
+ "path", "commit", "repository_name", "git_url"]],
+ [Integer, ["capacity"]]].each do |t, fields|
+ fields.each do |f|
+ if !v[f].nil? && !v[f].is_a?(t)
+ errors.add(m, "#{k}: #{f} must be a #{t} but is #{v[f].class}")
+ end
+ end
+ end
+ ["writable", "exclude_from_output"].each do |f|
+ if !v[f].nil? && !v[f].is_a?(TrueClass) && !v[f].is_a?(FalseClass)
+ errors.add(m, "#{k}: #{f} must be a #{t} but is #{v[f].class}")
+ end
+ end
+ end
+ end
+ end
+
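For illustration, a mounts value that passes the checks above (a sketch with made-up paths and a placeholder portable data hash; every key is a String, every value is a Hash with a "kind", and the typed fields use the expected classes):

mounts = {
    "/keep/input": {
        "kind": "collection",
        "portable_data_hash": "0123456789abcdef0123456789abcdef+123",  # placeholder
        "writable": False,
    },
    "/tmp": {
        "kind": "tmp",
        "capacity": 1073741824,   # Integer, per the type check above
    },
    "stdout": {
        "kind": "file",
        "path": "/tmp/out.txt",
    },
}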
def validate_scheduling_parameters
if self.state == Committed
if scheduling_parameters.include? 'partitions' and
c = get_requesting_container()
if !c.nil?
self.requesting_container_uuid = c.uuid
- self.priority = c.priority>0 ? 1 : 0
+ # Inherit the priority of the highest-priority container request
+ # for the requesting container.
+ self.priority = ContainerRequest.where(container_uuid: self.requesting_container_uuid).maximum("priority") || 0
end
end
### Overriding default advertised hostnames/URLs
###
- # If not false, this is the hostname that will be used for root_url and
- # advertised in the discovery document. By default, use the default Rails
- # logic for deciding on a hostname.
+ # If not false, these override the hostname, port, and protocol used
+ # for root_url and advertised in the discovery document. By default, use
+ # the default Rails logic for deciding on a hostname.
host: false
+ port: false
+ protocol: false
# Base part of SSH git clone url given with repository resources. If
# true, the default "git@git.(uuid_prefix).arvadosapi.com:" is
resources :groups do
get 'contents', on: :collection
get 'contents', on: :member
+ get 'shared', on: :collection
post 'trash', on: :member
post 'untrash', on: :member
end
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class IndexAllFilenames < ActiveRecord::Migration
+ def up
+ ActiveRecord::Base.connection.execute 'ALTER TABLE collections ALTER COLUMN file_names TYPE text'
+ end
+ def down
+ ActiveRecord::Base.connection.execute 'ALTER TABLE collections ALTER COLUMN file_names TYPE varchar(8192)'
+ end
+end
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddPdhAndTrashIndexToCollections < ActiveRecord::Migration
+ def change
+ add_index :collections, [:portable_data_hash, :trash_at]
+ end
+end
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddLockIndexToContainers < ActiveRecord::Migration
+ def change
+ # For the current code in sdk/go/dispatch:
+ add_index :containers, [:locked_by_uuid, :priority]
+ # For future dispatchers that use filters instead of offset for
+ # more predictable paging:
+ add_index :containers, [:locked_by_uuid, :uuid]
+ end
+end
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class DropPdhIndexFromCollections < ActiveRecord::Migration
+ def change
+ remove_index :collections, column: :portable_data_hash
+ end
+end
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddMd5IndexToContainers < ActiveRecord::Migration
+ def up
+ ActiveRecord::Base.connection.execute 'CREATE INDEX index_containers_on_reuse_columns on containers (md5(command), cwd, md5(environment), output_path, container_image, md5(mounts), secret_mounts_md5, md5(runtime_constraints))'
+ end
+ def down
+ ActiveRecord::Base.connection.execute 'DROP INDEX index_containers_on_reuse_columns'
+ end
+end
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class AddQueueIndexToContainers < ActiveRecord::Migration
+ def up
+ ActiveRecord::Base.connection.execute 'CREATE INDEX index_containers_on_queued_state on containers (state, (priority > 0))'
+ end
+ def down
+ ActiveRecord::Base.connection.execute 'DROP INDEX index_containers_on_queued_state'
+ end
+end
SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
+SELECT pg_catalog.set_config('search_path', '', false);
SET check_function_bodies = false;
SET client_min_messages = warning;
-- COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language';
-SET search_path = public, pg_catalog;
-
SET default_tablespace = '';
SET default_with_oids = false;
-- Name: api_client_authorizations; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE api_client_authorizations (
+CREATE TABLE public.api_client_authorizations (
id integer NOT NULL,
api_token character varying(255) NOT NULL,
api_client_id integer NOT NULL,
-- Name: api_client_authorizations_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE api_client_authorizations_id_seq
+CREATE SEQUENCE public.api_client_authorizations_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: api_client_authorizations_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE api_client_authorizations_id_seq OWNED BY api_client_authorizations.id;
+ALTER SEQUENCE public.api_client_authorizations_id_seq OWNED BY public.api_client_authorizations.id;
--
-- Name: api_clients; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE api_clients (
+CREATE TABLE public.api_clients (
id integer NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
-- Name: api_clients_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE api_clients_id_seq
+CREATE SEQUENCE public.api_clients_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: api_clients_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE api_clients_id_seq OWNED BY api_clients.id;
+ALTER SEQUENCE public.api_clients_id_seq OWNED BY public.api_clients.id;
--
-- Name: authorized_keys; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE authorized_keys (
+CREATE TABLE public.authorized_keys (
id integer NOT NULL,
uuid character varying(255) NOT NULL,
owner_uuid character varying(255) NOT NULL,
-- Name: authorized_keys_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE authorized_keys_id_seq
+CREATE SEQUENCE public.authorized_keys_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: authorized_keys_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE authorized_keys_id_seq OWNED BY authorized_keys.id;
+ALTER SEQUENCE public.authorized_keys_id_seq OWNED BY public.authorized_keys.id;
--
-- Name: collections; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE collections (
+CREATE TABLE public.collections (
id integer NOT NULL,
owner_uuid character varying(255),
created_at timestamp without time zone NOT NULL,
description character varying(524288),
properties jsonb,
delete_at timestamp without time zone,
- file_names character varying(8192),
+ file_names text,
trash_at timestamp without time zone,
is_trashed boolean DEFAULT false NOT NULL,
storage_classes_desired jsonb DEFAULT '["default"]'::jsonb,
-- Name: collections_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE collections_id_seq
+CREATE SEQUENCE public.collections_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: collections_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE collections_id_seq OWNED BY collections.id;
+ALTER SEQUENCE public.collections_id_seq OWNED BY public.collections.id;
--
-- Name: commit_ancestors; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE commit_ancestors (
+CREATE TABLE public.commit_ancestors (
id integer NOT NULL,
repository_name character varying(255),
descendant character varying(255) NOT NULL,
-- Name: commit_ancestors_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE commit_ancestors_id_seq
+CREATE SEQUENCE public.commit_ancestors_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: commit_ancestors_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE commit_ancestors_id_seq OWNED BY commit_ancestors.id;
+ALTER SEQUENCE public.commit_ancestors_id_seq OWNED BY public.commit_ancestors.id;
--
-- Name: commits; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE commits (
+CREATE TABLE public.commits (
id integer NOT NULL,
repository_name character varying(255),
sha1 character varying(255),
-- Name: commits_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE commits_id_seq
+CREATE SEQUENCE public.commits_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: commits_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE commits_id_seq OWNED BY commits.id;
+ALTER SEQUENCE public.commits_id_seq OWNED BY public.commits.id;
--
-- Name: container_requests; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE container_requests (
+CREATE TABLE public.container_requests (
id integer NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
-- Name: container_requests_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE container_requests_id_seq
+CREATE SEQUENCE public.container_requests_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: container_requests_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE container_requests_id_seq OWNED BY container_requests.id;
+ALTER SEQUENCE public.container_requests_id_seq OWNED BY public.container_requests.id;
--
-- Name: containers; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE containers (
+CREATE TABLE public.containers (
id integer NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
-- Name: containers_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE containers_id_seq
+CREATE SEQUENCE public.containers_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: containers_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE containers_id_seq OWNED BY containers.id;
+ALTER SEQUENCE public.containers_id_seq OWNED BY public.containers.id;
--
-- Name: groups; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE groups (
+CREATE TABLE public.groups (
id integer NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
-- Name: groups_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE groups_id_seq
+CREATE SEQUENCE public.groups_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: groups_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE groups_id_seq OWNED BY groups.id;
+ALTER SEQUENCE public.groups_id_seq OWNED BY public.groups.id;
--
-- Name: humans; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE humans (
+CREATE TABLE public.humans (
id integer NOT NULL,
uuid character varying(255) NOT NULL,
owner_uuid character varying(255) NOT NULL,
-- Name: humans_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE humans_id_seq
+CREATE SEQUENCE public.humans_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: humans_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE humans_id_seq OWNED BY humans.id;
+ALTER SEQUENCE public.humans_id_seq OWNED BY public.humans.id;
--
-- Name: job_tasks; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE job_tasks (
+CREATE TABLE public.job_tasks (
id integer NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
-- Name: job_tasks_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE job_tasks_id_seq
+CREATE SEQUENCE public.job_tasks_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: job_tasks_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE job_tasks_id_seq OWNED BY job_tasks.id;
+ALTER SEQUENCE public.job_tasks_id_seq OWNED BY public.job_tasks.id;
--
-- Name: job_tasks_qsequence_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE job_tasks_qsequence_seq
+CREATE SEQUENCE public.job_tasks_qsequence_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: job_tasks_qsequence_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE job_tasks_qsequence_seq OWNED BY job_tasks.qsequence;
+ALTER SEQUENCE public.job_tasks_qsequence_seq OWNED BY public.job_tasks.qsequence;
--
-- Name: jobs; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE jobs (
+CREATE TABLE public.jobs (
id integer NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
-- Name: jobs_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE jobs_id_seq
+CREATE SEQUENCE public.jobs_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: jobs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE jobs_id_seq OWNED BY jobs.id;
+ALTER SEQUENCE public.jobs_id_seq OWNED BY public.jobs.id;
--
-- Name: keep_disks; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE keep_disks (
+CREATE TABLE public.keep_disks (
id integer NOT NULL,
uuid character varying(255) NOT NULL,
owner_uuid character varying(255) NOT NULL,
-- Name: keep_disks_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE keep_disks_id_seq
+CREATE SEQUENCE public.keep_disks_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: keep_disks_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE keep_disks_id_seq OWNED BY keep_disks.id;
+ALTER SEQUENCE public.keep_disks_id_seq OWNED BY public.keep_disks.id;
--
-- Name: keep_services; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE keep_services (
+CREATE TABLE public.keep_services (
id integer NOT NULL,
uuid character varying(255) NOT NULL,
owner_uuid character varying(255) NOT NULL,
-- Name: keep_services_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE keep_services_id_seq
+CREATE SEQUENCE public.keep_services_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: keep_services_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE keep_services_id_seq OWNED BY keep_services.id;
+ALTER SEQUENCE public.keep_services_id_seq OWNED BY public.keep_services.id;
--
-- Name: links; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE links (
+CREATE TABLE public.links (
id integer NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
-- Name: links_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE links_id_seq
+CREATE SEQUENCE public.links_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: links_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE links_id_seq OWNED BY links.id;
+ALTER SEQUENCE public.links_id_seq OWNED BY public.links.id;
--
-- Name: logs; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE logs (
+CREATE TABLE public.logs (
id integer NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
-- Name: logs_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE logs_id_seq
+CREATE SEQUENCE public.logs_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: logs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE logs_id_seq OWNED BY logs.id;
+ALTER SEQUENCE public.logs_id_seq OWNED BY public.logs.id;
--
-- Name: users; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE users (
+CREATE TABLE public.users (
id integer NOT NULL,
uuid character varying(255),
owner_uuid character varying(255) NOT NULL,
-- Name: materialized_permission_view; Type: MATERIALIZED VIEW; Schema: public; Owner: -
--
-CREATE MATERIALIZED VIEW materialized_permission_view AS
+CREATE MATERIALIZED VIEW public.materialized_permission_view AS
WITH RECURSIVE perm_value(name, val) AS (
VALUES ('can_read'::text,(1)::smallint), ('can_login'::text,1), ('can_write'::text,2), ('can_manage'::text,3)
), perm_edges(tail_uuid, head_uuid, val, follow, trashed) AS (
((pv.val = 3) OR (groups.uuid IS NOT NULL)) AS follow,
(0)::smallint AS trashed,
(0)::smallint AS followtrash
- FROM ((links
+ FROM ((public.links
LEFT JOIN perm_value pv ON ((pv.name = (links.name)::text)))
- LEFT JOIN groups ON (((pv.val < 3) AND ((groups.uuid)::text = (links.head_uuid)::text))))
+ LEFT JOIN public.groups ON (((pv.val < 3) AND ((groups.uuid)::text = (links.head_uuid)::text))))
WHERE ((links.link_class)::text = 'permission'::text)
UNION ALL
SELECT groups.owner_uuid,
ELSE 0
END AS "case",
1
- FROM groups
+ FROM public.groups
), perm(val, follow, user_uuid, target_uuid, trashed) AS (
SELECT (3)::smallint AS val,
true AS follow,
(users.uuid)::character varying(32) AS user_uuid,
(users.uuid)::character varying(32) AS target_uuid,
(0)::smallint AS trashed
- FROM users
+ FROM public.users
UNION
SELECT (LEAST((perm_1.val)::integer, edges.val))::smallint AS val,
edges.follow,
-- Name: nodes; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE nodes (
+CREATE TABLE public.nodes (
id integer NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
-- Name: nodes_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE nodes_id_seq
+CREATE SEQUENCE public.nodes_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: nodes_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE nodes_id_seq OWNED BY nodes.id;
+ALTER SEQUENCE public.nodes_id_seq OWNED BY public.nodes.id;
--
-- Name: permission_refresh_lock; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE permission_refresh_lock (
+CREATE TABLE public.permission_refresh_lock (
id integer NOT NULL
);
-- Name: permission_refresh_lock_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE permission_refresh_lock_id_seq
+CREATE SEQUENCE public.permission_refresh_lock_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: permission_refresh_lock_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE permission_refresh_lock_id_seq OWNED BY permission_refresh_lock.id;
+ALTER SEQUENCE public.permission_refresh_lock_id_seq OWNED BY public.permission_refresh_lock.id;
--
-- Name: pipeline_instances; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE pipeline_instances (
+CREATE TABLE public.pipeline_instances (
id integer NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
-- Name: pipeline_instances_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE pipeline_instances_id_seq
+CREATE SEQUENCE public.pipeline_instances_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: pipeline_instances_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE pipeline_instances_id_seq OWNED BY pipeline_instances.id;
+ALTER SEQUENCE public.pipeline_instances_id_seq OWNED BY public.pipeline_instances.id;
--
-- Name: pipeline_templates; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE pipeline_templates (
+CREATE TABLE public.pipeline_templates (
id integer NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
-- Name: pipeline_templates_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE pipeline_templates_id_seq
+CREATE SEQUENCE public.pipeline_templates_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: pipeline_templates_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE pipeline_templates_id_seq OWNED BY pipeline_templates.id;
+ALTER SEQUENCE public.pipeline_templates_id_seq OWNED BY public.pipeline_templates.id;
--
-- Name: repositories; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE repositories (
+CREATE TABLE public.repositories (
id integer NOT NULL,
uuid character varying(255) NOT NULL,
owner_uuid character varying(255) NOT NULL,
-- Name: repositories_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE repositories_id_seq
+CREATE SEQUENCE public.repositories_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: repositories_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE repositories_id_seq OWNED BY repositories.id;
+ALTER SEQUENCE public.repositories_id_seq OWNED BY public.repositories.id;
--
-- Name: schema_migrations; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE schema_migrations (
+CREATE TABLE public.schema_migrations (
version character varying(255) NOT NULL
);
-- Name: specimens; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE specimens (
+CREATE TABLE public.specimens (
id integer NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
-- Name: specimens_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE specimens_id_seq
+CREATE SEQUENCE public.specimens_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: specimens_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE specimens_id_seq OWNED BY specimens.id;
+ALTER SEQUENCE public.specimens_id_seq OWNED BY public.specimens.id;
--
-- Name: traits; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE traits (
+CREATE TABLE public.traits (
id integer NOT NULL,
uuid character varying(255) NOT NULL,
owner_uuid character varying(255) NOT NULL,
-- Name: traits_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE traits_id_seq
+CREATE SEQUENCE public.traits_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: traits_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE traits_id_seq OWNED BY traits.id;
+ALTER SEQUENCE public.traits_id_seq OWNED BY public.traits.id;
--
-- Name: users_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE users_id_seq
+CREATE SEQUENCE public.users_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: users_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE users_id_seq OWNED BY users.id;
+ALTER SEQUENCE public.users_id_seq OWNED BY public.users.id;
--
-- Name: virtual_machines; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE virtual_machines (
+CREATE TABLE public.virtual_machines (
id integer NOT NULL,
uuid character varying(255) NOT NULL,
owner_uuid character varying(255) NOT NULL,
-- Name: virtual_machines_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE virtual_machines_id_seq
+CREATE SEQUENCE public.virtual_machines_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: virtual_machines_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE virtual_machines_id_seq OWNED BY virtual_machines.id;
+ALTER SEQUENCE public.virtual_machines_id_seq OWNED BY public.virtual_machines.id;
--
-- Name: workflows; Type: TABLE; Schema: public; Owner: -
--
-CREATE TABLE workflows (
+CREATE TABLE public.workflows (
id integer NOT NULL,
uuid character varying(255),
owner_uuid character varying(255),
-- Name: workflows_id_seq; Type: SEQUENCE; Schema: public; Owner: -
--
-CREATE SEQUENCE workflows_id_seq
+CREATE SEQUENCE public.workflows_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
-- Name: workflows_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
--
-ALTER SEQUENCE workflows_id_seq OWNED BY workflows.id;
+ALTER SEQUENCE public.workflows_id_seq OWNED BY public.workflows.id;
--
-- Name: api_client_authorizations id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY api_client_authorizations ALTER COLUMN id SET DEFAULT nextval('api_client_authorizations_id_seq'::regclass);
+ALTER TABLE ONLY public.api_client_authorizations ALTER COLUMN id SET DEFAULT nextval('public.api_client_authorizations_id_seq'::regclass);
--
-- Name: api_clients id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY api_clients ALTER COLUMN id SET DEFAULT nextval('api_clients_id_seq'::regclass);
+ALTER TABLE ONLY public.api_clients ALTER COLUMN id SET DEFAULT nextval('public.api_clients_id_seq'::regclass);
--
-- Name: authorized_keys id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY authorized_keys ALTER COLUMN id SET DEFAULT nextval('authorized_keys_id_seq'::regclass);
+ALTER TABLE ONLY public.authorized_keys ALTER COLUMN id SET DEFAULT nextval('public.authorized_keys_id_seq'::regclass);
--
-- Name: collections id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY collections ALTER COLUMN id SET DEFAULT nextval('collections_id_seq'::regclass);
+ALTER TABLE ONLY public.collections ALTER COLUMN id SET DEFAULT nextval('public.collections_id_seq'::regclass);
--
-- Name: commit_ancestors id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY commit_ancestors ALTER COLUMN id SET DEFAULT nextval('commit_ancestors_id_seq'::regclass);
+ALTER TABLE ONLY public.commit_ancestors ALTER COLUMN id SET DEFAULT nextval('public.commit_ancestors_id_seq'::regclass);
--
-- Name: commits id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY commits ALTER COLUMN id SET DEFAULT nextval('commits_id_seq'::regclass);
+ALTER TABLE ONLY public.commits ALTER COLUMN id SET DEFAULT nextval('public.commits_id_seq'::regclass);
--
-- Name: container_requests id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY container_requests ALTER COLUMN id SET DEFAULT nextval('container_requests_id_seq'::regclass);
+ALTER TABLE ONLY public.container_requests ALTER COLUMN id SET DEFAULT nextval('public.container_requests_id_seq'::regclass);
--
-- Name: containers id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY containers ALTER COLUMN id SET DEFAULT nextval('containers_id_seq'::regclass);
+ALTER TABLE ONLY public.containers ALTER COLUMN id SET DEFAULT nextval('public.containers_id_seq'::regclass);
--
-- Name: groups id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY groups ALTER COLUMN id SET DEFAULT nextval('groups_id_seq'::regclass);
+ALTER TABLE ONLY public.groups ALTER COLUMN id SET DEFAULT nextval('public.groups_id_seq'::regclass);
--
-- Name: humans id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY humans ALTER COLUMN id SET DEFAULT nextval('humans_id_seq'::regclass);
+ALTER TABLE ONLY public.humans ALTER COLUMN id SET DEFAULT nextval('public.humans_id_seq'::regclass);
--
-- Name: job_tasks id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY job_tasks ALTER COLUMN id SET DEFAULT nextval('job_tasks_id_seq'::regclass);
+ALTER TABLE ONLY public.job_tasks ALTER COLUMN id SET DEFAULT nextval('public.job_tasks_id_seq'::regclass);
--
-- Name: jobs id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY jobs ALTER COLUMN id SET DEFAULT nextval('jobs_id_seq'::regclass);
+ALTER TABLE ONLY public.jobs ALTER COLUMN id SET DEFAULT nextval('public.jobs_id_seq'::regclass);
--
-- Name: keep_disks id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY keep_disks ALTER COLUMN id SET DEFAULT nextval('keep_disks_id_seq'::regclass);
+ALTER TABLE ONLY public.keep_disks ALTER COLUMN id SET DEFAULT nextval('public.keep_disks_id_seq'::regclass);
--
-- Name: keep_services id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY keep_services ALTER COLUMN id SET DEFAULT nextval('keep_services_id_seq'::regclass);
+ALTER TABLE ONLY public.keep_services ALTER COLUMN id SET DEFAULT nextval('public.keep_services_id_seq'::regclass);
--
-- Name: links id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY links ALTER COLUMN id SET DEFAULT nextval('links_id_seq'::regclass);
+ALTER TABLE ONLY public.links ALTER COLUMN id SET DEFAULT nextval('public.links_id_seq'::regclass);
--
-- Name: logs id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY logs ALTER COLUMN id SET DEFAULT nextval('logs_id_seq'::regclass);
+ALTER TABLE ONLY public.logs ALTER COLUMN id SET DEFAULT nextval('public.logs_id_seq'::regclass);
--
-- Name: nodes id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY nodes ALTER COLUMN id SET DEFAULT nextval('nodes_id_seq'::regclass);
+ALTER TABLE ONLY public.nodes ALTER COLUMN id SET DEFAULT nextval('public.nodes_id_seq'::regclass);
--
-- Name: permission_refresh_lock id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY permission_refresh_lock ALTER COLUMN id SET DEFAULT nextval('permission_refresh_lock_id_seq'::regclass);
+ALTER TABLE ONLY public.permission_refresh_lock ALTER COLUMN id SET DEFAULT nextval('public.permission_refresh_lock_id_seq'::regclass);
--
-- Name: pipeline_instances id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY pipeline_instances ALTER COLUMN id SET DEFAULT nextval('pipeline_instances_id_seq'::regclass);
+ALTER TABLE ONLY public.pipeline_instances ALTER COLUMN id SET DEFAULT nextval('public.pipeline_instances_id_seq'::regclass);
--
-- Name: pipeline_templates id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY pipeline_templates ALTER COLUMN id SET DEFAULT nextval('pipeline_templates_id_seq'::regclass);
+ALTER TABLE ONLY public.pipeline_templates ALTER COLUMN id SET DEFAULT nextval('public.pipeline_templates_id_seq'::regclass);
--
-- Name: repositories id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY repositories ALTER COLUMN id SET DEFAULT nextval('repositories_id_seq'::regclass);
+ALTER TABLE ONLY public.repositories ALTER COLUMN id SET DEFAULT nextval('public.repositories_id_seq'::regclass);
--
-- Name: specimens id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY specimens ALTER COLUMN id SET DEFAULT nextval('specimens_id_seq'::regclass);
+ALTER TABLE ONLY public.specimens ALTER COLUMN id SET DEFAULT nextval('public.specimens_id_seq'::regclass);
--
-- Name: traits id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY traits ALTER COLUMN id SET DEFAULT nextval('traits_id_seq'::regclass);
+ALTER TABLE ONLY public.traits ALTER COLUMN id SET DEFAULT nextval('public.traits_id_seq'::regclass);
--
-- Name: users id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY users ALTER COLUMN id SET DEFAULT nextval('users_id_seq'::regclass);
+ALTER TABLE ONLY public.users ALTER COLUMN id SET DEFAULT nextval('public.users_id_seq'::regclass);
--
-- Name: virtual_machines id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY virtual_machines ALTER COLUMN id SET DEFAULT nextval('virtual_machines_id_seq'::regclass);
+ALTER TABLE ONLY public.virtual_machines ALTER COLUMN id SET DEFAULT nextval('public.virtual_machines_id_seq'::regclass);
--
-- Name: workflows id; Type: DEFAULT; Schema: public; Owner: -
--
-ALTER TABLE ONLY workflows ALTER COLUMN id SET DEFAULT nextval('workflows_id_seq'::regclass);
+ALTER TABLE ONLY public.workflows ALTER COLUMN id SET DEFAULT nextval('public.workflows_id_seq'::regclass);
--
-- Name: api_client_authorizations api_client_authorizations_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY api_client_authorizations
+ALTER TABLE ONLY public.api_client_authorizations
ADD CONSTRAINT api_client_authorizations_pkey PRIMARY KEY (id);
-- Name: api_clients api_clients_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY api_clients
+ALTER TABLE ONLY public.api_clients
ADD CONSTRAINT api_clients_pkey PRIMARY KEY (id);
-- Name: authorized_keys authorized_keys_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY authorized_keys
+ALTER TABLE ONLY public.authorized_keys
ADD CONSTRAINT authorized_keys_pkey PRIMARY KEY (id);
-- Name: collections collections_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY collections
+ALTER TABLE ONLY public.collections
ADD CONSTRAINT collections_pkey PRIMARY KEY (id);
-- Name: commit_ancestors commit_ancestors_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY commit_ancestors
+ALTER TABLE ONLY public.commit_ancestors
ADD CONSTRAINT commit_ancestors_pkey PRIMARY KEY (id);
-- Name: commits commits_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY commits
+ALTER TABLE ONLY public.commits
ADD CONSTRAINT commits_pkey PRIMARY KEY (id);
-- Name: container_requests container_requests_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY container_requests
+ALTER TABLE ONLY public.container_requests
ADD CONSTRAINT container_requests_pkey PRIMARY KEY (id);
-- Name: containers containers_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY containers
+ALTER TABLE ONLY public.containers
ADD CONSTRAINT containers_pkey PRIMARY KEY (id);
-- Name: groups groups_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY groups
+ALTER TABLE ONLY public.groups
ADD CONSTRAINT groups_pkey PRIMARY KEY (id);
-- Name: humans humans_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY humans
+ALTER TABLE ONLY public.humans
ADD CONSTRAINT humans_pkey PRIMARY KEY (id);
-- Name: job_tasks job_tasks_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY job_tasks
+ALTER TABLE ONLY public.job_tasks
ADD CONSTRAINT job_tasks_pkey PRIMARY KEY (id);
-- Name: jobs jobs_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY jobs
+ALTER TABLE ONLY public.jobs
ADD CONSTRAINT jobs_pkey PRIMARY KEY (id);
-- Name: keep_disks keep_disks_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY keep_disks
+ALTER TABLE ONLY public.keep_disks
ADD CONSTRAINT keep_disks_pkey PRIMARY KEY (id);
-- Name: keep_services keep_services_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY keep_services
+ALTER TABLE ONLY public.keep_services
ADD CONSTRAINT keep_services_pkey PRIMARY KEY (id);
-- Name: links links_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY links
+ALTER TABLE ONLY public.links
ADD CONSTRAINT links_pkey PRIMARY KEY (id);
-- Name: logs logs_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY logs
+ALTER TABLE ONLY public.logs
ADD CONSTRAINT logs_pkey PRIMARY KEY (id);
-- Name: nodes nodes_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY nodes
+ALTER TABLE ONLY public.nodes
ADD CONSTRAINT nodes_pkey PRIMARY KEY (id);
-- Name: permission_refresh_lock permission_refresh_lock_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY permission_refresh_lock
+ALTER TABLE ONLY public.permission_refresh_lock
ADD CONSTRAINT permission_refresh_lock_pkey PRIMARY KEY (id);
-- Name: pipeline_instances pipeline_instances_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY pipeline_instances
+ALTER TABLE ONLY public.pipeline_instances
ADD CONSTRAINT pipeline_instances_pkey PRIMARY KEY (id);
-- Name: pipeline_templates pipeline_templates_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY pipeline_templates
+ALTER TABLE ONLY public.pipeline_templates
ADD CONSTRAINT pipeline_templates_pkey PRIMARY KEY (id);
-- Name: repositories repositories_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY repositories
+ALTER TABLE ONLY public.repositories
ADD CONSTRAINT repositories_pkey PRIMARY KEY (id);
-- Name: specimens specimens_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY specimens
+ALTER TABLE ONLY public.specimens
ADD CONSTRAINT specimens_pkey PRIMARY KEY (id);
-- Name: traits traits_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY traits
+ALTER TABLE ONLY public.traits
ADD CONSTRAINT traits_pkey PRIMARY KEY (id);
-- Name: users users_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY users
+ALTER TABLE ONLY public.users
ADD CONSTRAINT users_pkey PRIMARY KEY (id);
-- Name: virtual_machines virtual_machines_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY virtual_machines
+ALTER TABLE ONLY public.virtual_machines
ADD CONSTRAINT virtual_machines_pkey PRIMARY KEY (id);
-- Name: workflows workflows_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
-ALTER TABLE ONLY workflows
+ALTER TABLE ONLY public.workflows
ADD CONSTRAINT workflows_pkey PRIMARY KEY (id);
-- Name: api_client_authorizations_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX api_client_authorizations_search_index ON api_client_authorizations USING btree (api_token, created_by_ip_address, last_used_by_ip_address, default_owner_uuid, uuid);
+CREATE INDEX api_client_authorizations_search_index ON public.api_client_authorizations USING btree (api_token, created_by_ip_address, last_used_by_ip_address, default_owner_uuid, uuid);
--
-- Name: api_clients_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX api_clients_search_index ON api_clients USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, url_prefix);
+CREATE INDEX api_clients_search_index ON public.api_clients USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, url_prefix);
--
-- Name: authorized_keys_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX authorized_keys_search_index ON authorized_keys USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, key_type, authorized_user_uuid);
+CREATE INDEX authorized_keys_search_index ON public.authorized_keys USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, key_type, authorized_user_uuid);
--
-- Name: collection_index_on_properties; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX collection_index_on_properties ON collections USING gin (properties);
+CREATE INDEX collection_index_on_properties ON public.collections USING gin (properties);
--
-- Name: collections_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX collections_full_text_search_idx ON collections USING gin (to_tsvector('english'::regconfig, (((((((((((((((((COALESCE(owner_uuid, ''::character varying))::text || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(portable_data_hash, ''::character varying))::text) || ' '::text) || (COALESCE(uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || COALESCE((properties)::text, ''::text)) || ' '::text) || (COALESCE(file_names, ''::character varying))::text)));
+CREATE INDEX collections_full_text_search_idx ON public.collections USING gin (to_tsvector('english'::regconfig, (((((((((((((((((COALESCE(owner_uuid, ''::character varying))::text || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(portable_data_hash, ''::character varying))::text) || ' '::text) || (COALESCE(uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || COALESCE((properties)::text, ''::text)) || ' '::text) || COALESCE(file_names, (''::character varying)::text))));
--
-- Name: collections_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX collections_search_index ON collections USING btree (owner_uuid, modified_by_client_uuid, modified_by_user_uuid, portable_data_hash, uuid, name);
+CREATE INDEX collections_search_index ON public.collections USING btree (owner_uuid, modified_by_client_uuid, modified_by_user_uuid, portable_data_hash, uuid, name);
--
-- Name: container_requests_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX container_requests_full_text_search_idx ON container_requests USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(description, ''::text)) || ' '::text) || COALESCE((properties)::text, ''::text)) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || (COALESCE(requesting_container_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(container_uuid, ''::character varying))::text) || ' '::text) || COALESCE(runtime_constraints, ''::text)) || ' '::text) || (COALESCE(container_image, ''::character varying))::text) || ' '::text) || COALESCE(environment, ''::text)) || ' '::text) || (COALESCE(cwd, ''::character varying))::text) || ' '::text) || COALESCE(command, ''::text)) || ' '::text) || (COALESCE(output_path, ''::character varying))::text) || ' '::text) || COALESCE(filters, ''::text)) || ' '::text) || COALESCE(scheduling_parameters, ''::text)) || ' '::text) || (COALESCE(output_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(log_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(output_name, ''::character varying))::text)));
+CREATE INDEX container_requests_full_text_search_idx ON public.container_requests USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(description, ''::text)) || ' '::text) || COALESCE((properties)::text, ''::text)) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || (COALESCE(requesting_container_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(container_uuid, ''::character varying))::text) || ' '::text) || COALESCE(runtime_constraints, ''::text)) || ' '::text) || (COALESCE(container_image, ''::character varying))::text) || ' '::text) || COALESCE(environment, ''::text)) || ' '::text) || (COALESCE(cwd, ''::character varying))::text) || ' '::text) || COALESCE(command, ''::text)) || ' '::text) || (COALESCE(output_path, ''::character varying))::text) || ' '::text) || COALESCE(filters, ''::text)) || ' '::text) || COALESCE(scheduling_parameters, ''::text)) || ' '::text) || (COALESCE(output_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(log_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(output_name, ''::character varying))::text)));
--
-- Name: container_requests_index_on_properties; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX container_requests_index_on_properties ON container_requests USING gin (properties);
+CREATE INDEX container_requests_index_on_properties ON public.container_requests USING gin (properties);
--
-- Name: container_requests_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX container_requests_search_index ON container_requests USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, state, requesting_container_uuid, container_uuid, container_image, cwd, output_path, output_uuid, log_uuid, output_name);
+CREATE INDEX container_requests_search_index ON public.container_requests USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, state, requesting_container_uuid, container_uuid, container_image, cwd, output_path, output_uuid, log_uuid, output_name);
--
-- Name: containers_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX containers_search_index ON containers USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, state, log, cwd, output_path, output, container_image, auth_uuid, locked_by_uuid);
+CREATE INDEX containers_search_index ON public.containers USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, state, log, cwd, output_path, output, container_image, auth_uuid, locked_by_uuid);
--
-- Name: group_index_on_properties; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX group_index_on_properties ON groups USING gin (properties);
+CREATE INDEX group_index_on_properties ON public.groups USING gin (properties);
--
-- Name: groups_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX groups_full_text_search_idx ON groups USING gin (to_tsvector('english'::regconfig, (((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(group_class, ''::character varying))::text) || ' '::text) || COALESCE((properties)::text, ''::text))));
+CREATE INDEX groups_full_text_search_idx ON public.groups USING gin (to_tsvector('english'::regconfig, (((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(group_class, ''::character varying))::text) || ' '::text) || COALESCE((properties)::text, ''::text))));
--
-- Name: groups_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX groups_search_index ON groups USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, group_class);
+CREATE INDEX groups_search_index ON public.groups USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, group_class);
--
-- Name: humans_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX humans_search_index ON humans USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid);
+CREATE INDEX humans_search_index ON public.humans USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid);
--
-- Name: index_api_client_authorizations_on_api_client_id; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_api_client_authorizations_on_api_client_id ON api_client_authorizations USING btree (api_client_id);
+CREATE INDEX index_api_client_authorizations_on_api_client_id ON public.api_client_authorizations USING btree (api_client_id);
--
-- Name: index_api_client_authorizations_on_api_token; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_api_client_authorizations_on_api_token ON api_client_authorizations USING btree (api_token);
+CREATE UNIQUE INDEX index_api_client_authorizations_on_api_token ON public.api_client_authorizations USING btree (api_token);
--
-- Name: index_api_client_authorizations_on_expires_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_api_client_authorizations_on_expires_at ON api_client_authorizations USING btree (expires_at);
+CREATE INDEX index_api_client_authorizations_on_expires_at ON public.api_client_authorizations USING btree (expires_at);
--
-- Name: index_api_client_authorizations_on_user_id; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_api_client_authorizations_on_user_id ON api_client_authorizations USING btree (user_id);
+CREATE INDEX index_api_client_authorizations_on_user_id ON public.api_client_authorizations USING btree (user_id);
--
-- Name: index_api_client_authorizations_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_api_client_authorizations_on_uuid ON api_client_authorizations USING btree (uuid);
+CREATE UNIQUE INDEX index_api_client_authorizations_on_uuid ON public.api_client_authorizations USING btree (uuid);
--
-- Name: index_api_clients_on_created_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_api_clients_on_created_at ON api_clients USING btree (created_at);
+CREATE INDEX index_api_clients_on_created_at ON public.api_clients USING btree (created_at);
--
-- Name: index_api_clients_on_modified_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_api_clients_on_modified_at ON api_clients USING btree (modified_at);
+CREATE INDEX index_api_clients_on_modified_at ON public.api_clients USING btree (modified_at);
--
-- Name: index_api_clients_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_api_clients_on_owner_uuid ON api_clients USING btree (owner_uuid);
+CREATE INDEX index_api_clients_on_owner_uuid ON public.api_clients USING btree (owner_uuid);
--
-- Name: index_api_clients_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_api_clients_on_uuid ON api_clients USING btree (uuid);
+CREATE UNIQUE INDEX index_api_clients_on_uuid ON public.api_clients USING btree (uuid);
--
-- Name: index_authkeys_on_user_and_expires_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_authkeys_on_user_and_expires_at ON authorized_keys USING btree (authorized_user_uuid, expires_at);
+CREATE INDEX index_authkeys_on_user_and_expires_at ON public.authorized_keys USING btree (authorized_user_uuid, expires_at);
--
-- Name: index_authorized_keys_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_authorized_keys_on_owner_uuid ON authorized_keys USING btree (owner_uuid);
+CREATE INDEX index_authorized_keys_on_owner_uuid ON public.authorized_keys USING btree (owner_uuid);
--
-- Name: index_authorized_keys_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_authorized_keys_on_uuid ON authorized_keys USING btree (uuid);
+CREATE UNIQUE INDEX index_authorized_keys_on_uuid ON public.authorized_keys USING btree (uuid);
--
-- Name: index_collections_on_created_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_collections_on_created_at ON collections USING btree (created_at);
+CREATE INDEX index_collections_on_created_at ON public.collections USING btree (created_at);
--
-- Name: index_collections_on_delete_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_collections_on_delete_at ON collections USING btree (delete_at);
+CREATE INDEX index_collections_on_delete_at ON public.collections USING btree (delete_at);
--
-- Name: index_collections_on_is_trashed; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_collections_on_is_trashed ON collections USING btree (is_trashed);
+CREATE INDEX index_collections_on_is_trashed ON public.collections USING btree (is_trashed);
--
-- Name: index_collections_on_modified_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_collections_on_modified_at ON collections USING btree (modified_at);
+CREATE INDEX index_collections_on_modified_at ON public.collections USING btree (modified_at);
--
-- Name: index_collections_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_collections_on_modified_at_uuid ON collections USING btree (modified_at DESC, uuid);
+CREATE INDEX index_collections_on_modified_at_uuid ON public.collections USING btree (modified_at DESC, uuid);
--
-- Name: index_collections_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_collections_on_owner_uuid ON collections USING btree (owner_uuid);
+CREATE INDEX index_collections_on_owner_uuid ON public.collections USING btree (owner_uuid);
--
-- Name: index_collections_on_owner_uuid_and_name; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_collections_on_owner_uuid_and_name ON collections USING btree (owner_uuid, name) WHERE (is_trashed = false);
+CREATE UNIQUE INDEX index_collections_on_owner_uuid_and_name ON public.collections USING btree (owner_uuid, name) WHERE (is_trashed = false);
--
--- Name: index_collections_on_portable_data_hash; Type: INDEX; Schema: public; Owner: -
+-- Name: index_collections_on_portable_data_hash_and_trash_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_collections_on_portable_data_hash ON collections USING btree (portable_data_hash);
+CREATE INDEX index_collections_on_portable_data_hash_and_trash_at ON public.collections USING btree (portable_data_hash, trash_at);
--
-- Name: index_collections_on_trash_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_collections_on_trash_at ON collections USING btree (trash_at);
+CREATE INDEX index_collections_on_trash_at ON public.collections USING btree (trash_at);
--
-- Name: index_collections_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_collections_on_uuid ON collections USING btree (uuid);
+CREATE UNIQUE INDEX index_collections_on_uuid ON public.collections USING btree (uuid);
--
-- Name: index_commit_ancestors_on_descendant_and_ancestor; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_commit_ancestors_on_descendant_and_ancestor ON commit_ancestors USING btree (descendant, ancestor);
+CREATE UNIQUE INDEX index_commit_ancestors_on_descendant_and_ancestor ON public.commit_ancestors USING btree (descendant, ancestor);
--
-- Name: index_commits_on_repository_name_and_sha1; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_commits_on_repository_name_and_sha1 ON commits USING btree (repository_name, sha1);
+CREATE UNIQUE INDEX index_commits_on_repository_name_and_sha1 ON public.commits USING btree (repository_name, sha1);
--
-- Name: index_container_requests_on_container_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_container_requests_on_container_uuid ON container_requests USING btree (container_uuid);
+CREATE INDEX index_container_requests_on_container_uuid ON public.container_requests USING btree (container_uuid);
--
-- Name: index_container_requests_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_container_requests_on_modified_at_uuid ON container_requests USING btree (modified_at DESC, uuid);
+CREATE INDEX index_container_requests_on_modified_at_uuid ON public.container_requests USING btree (modified_at DESC, uuid);
--
-- Name: index_container_requests_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_container_requests_on_owner_uuid ON container_requests USING btree (owner_uuid);
+CREATE INDEX index_container_requests_on_owner_uuid ON public.container_requests USING btree (owner_uuid);
--
-- Name: index_container_requests_on_requesting_container_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_container_requests_on_requesting_container_uuid ON container_requests USING btree (requesting_container_uuid);
+CREATE INDEX index_container_requests_on_requesting_container_uuid ON public.container_requests USING btree (requesting_container_uuid);
--
-- Name: index_container_requests_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_container_requests_on_uuid ON container_requests USING btree (uuid);
+CREATE UNIQUE INDEX index_container_requests_on_uuid ON public.container_requests USING btree (uuid);
--
-- Name: index_containers_on_auth_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_containers_on_auth_uuid ON containers USING btree (auth_uuid);
+CREATE INDEX index_containers_on_auth_uuid ON public.containers USING btree (auth_uuid);
+
+
+--
+-- Name: index_containers_on_locked_by_uuid_and_priority; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_locked_by_uuid_and_priority ON public.containers USING btree (locked_by_uuid, priority);
+
+
+--
+-- Name: index_containers_on_locked_by_uuid_and_uuid; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_locked_by_uuid_and_uuid ON public.containers USING btree (locked_by_uuid, uuid);
--
-- Name: index_containers_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_containers_on_modified_at_uuid ON containers USING btree (modified_at DESC, uuid);
+CREATE INDEX index_containers_on_modified_at_uuid ON public.containers USING btree (modified_at DESC, uuid);
--
-- Name: index_containers_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_containers_on_owner_uuid ON containers USING btree (owner_uuid);
+CREATE INDEX index_containers_on_owner_uuid ON public.containers USING btree (owner_uuid);
+
+
+--
+-- Name: index_containers_on_queued_state; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_queued_state ON public.containers USING btree (state, ((priority > 0)));
+
+
+--
+-- Name: index_containers_on_reuse_columns; Type: INDEX; Schema: public; Owner: -
+--
+
+CREATE INDEX index_containers_on_reuse_columns ON public.containers USING btree (md5(command), cwd, md5(environment), output_path, container_image, md5(mounts), secret_mounts_md5, md5(runtime_constraints));
--
-- Name: index_containers_on_secret_mounts_md5; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_containers_on_secret_mounts_md5 ON containers USING btree (secret_mounts_md5);
+CREATE INDEX index_containers_on_secret_mounts_md5 ON public.containers USING btree (secret_mounts_md5);
--
-- Name: index_containers_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_containers_on_uuid ON containers USING btree (uuid);
+CREATE UNIQUE INDEX index_containers_on_uuid ON public.containers USING btree (uuid);
--
-- Name: index_groups_on_created_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_groups_on_created_at ON groups USING btree (created_at);
+CREATE INDEX index_groups_on_created_at ON public.groups USING btree (created_at);
--
-- Name: index_groups_on_delete_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_groups_on_delete_at ON groups USING btree (delete_at);
+CREATE INDEX index_groups_on_delete_at ON public.groups USING btree (delete_at);
--
-- Name: index_groups_on_group_class; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_groups_on_group_class ON groups USING btree (group_class);
+CREATE INDEX index_groups_on_group_class ON public.groups USING btree (group_class);
--
-- Name: index_groups_on_is_trashed; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_groups_on_is_trashed ON groups USING btree (is_trashed);
+CREATE INDEX index_groups_on_is_trashed ON public.groups USING btree (is_trashed);
--
-- Name: index_groups_on_modified_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_groups_on_modified_at ON groups USING btree (modified_at);
+CREATE INDEX index_groups_on_modified_at ON public.groups USING btree (modified_at);
--
-- Name: index_groups_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_groups_on_modified_at_uuid ON groups USING btree (modified_at DESC, uuid);
+CREATE INDEX index_groups_on_modified_at_uuid ON public.groups USING btree (modified_at DESC, uuid);
--
-- Name: index_groups_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_groups_on_owner_uuid ON groups USING btree (owner_uuid);
+CREATE INDEX index_groups_on_owner_uuid ON public.groups USING btree (owner_uuid);
--
-- Name: index_groups_on_owner_uuid_and_name; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_groups_on_owner_uuid_and_name ON groups USING btree (owner_uuid, name) WHERE (is_trashed = false);
+CREATE UNIQUE INDEX index_groups_on_owner_uuid_and_name ON public.groups USING btree (owner_uuid, name) WHERE (is_trashed = false);
--
-- Name: index_groups_on_trash_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_groups_on_trash_at ON groups USING btree (trash_at);
+CREATE INDEX index_groups_on_trash_at ON public.groups USING btree (trash_at);
--
-- Name: index_groups_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_groups_on_uuid ON groups USING btree (uuid);
+CREATE UNIQUE INDEX index_groups_on_uuid ON public.groups USING btree (uuid);
--
-- Name: index_humans_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_humans_on_owner_uuid ON humans USING btree (owner_uuid);
+CREATE INDEX index_humans_on_owner_uuid ON public.humans USING btree (owner_uuid);
--
-- Name: index_humans_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_humans_on_uuid ON humans USING btree (uuid);
+CREATE UNIQUE INDEX index_humans_on_uuid ON public.humans USING btree (uuid);
--
-- Name: index_job_tasks_on_created_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_job_tasks_on_created_at ON job_tasks USING btree (created_at);
+CREATE INDEX index_job_tasks_on_created_at ON public.job_tasks USING btree (created_at);
--
-- Name: index_job_tasks_on_created_by_job_task_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_job_tasks_on_created_by_job_task_uuid ON job_tasks USING btree (created_by_job_task_uuid);
+CREATE INDEX index_job_tasks_on_created_by_job_task_uuid ON public.job_tasks USING btree (created_by_job_task_uuid);
--
-- Name: index_job_tasks_on_job_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_job_tasks_on_job_uuid ON job_tasks USING btree (job_uuid);
+CREATE INDEX index_job_tasks_on_job_uuid ON public.job_tasks USING btree (job_uuid);
--
-- Name: index_job_tasks_on_modified_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_job_tasks_on_modified_at ON job_tasks USING btree (modified_at);
+CREATE INDEX index_job_tasks_on_modified_at ON public.job_tasks USING btree (modified_at);
--
-- Name: index_job_tasks_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_job_tasks_on_owner_uuid ON job_tasks USING btree (owner_uuid);
+CREATE INDEX index_job_tasks_on_owner_uuid ON public.job_tasks USING btree (owner_uuid);
--
-- Name: index_job_tasks_on_sequence; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_job_tasks_on_sequence ON job_tasks USING btree (sequence);
+CREATE INDEX index_job_tasks_on_sequence ON public.job_tasks USING btree (sequence);
--
-- Name: index_job_tasks_on_success; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_job_tasks_on_success ON job_tasks USING btree (success);
+CREATE INDEX index_job_tasks_on_success ON public.job_tasks USING btree (success);
--
-- Name: index_job_tasks_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_job_tasks_on_uuid ON job_tasks USING btree (uuid);
+CREATE UNIQUE INDEX index_job_tasks_on_uuid ON public.job_tasks USING btree (uuid);
--
-- Name: index_jobs_on_created_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_jobs_on_created_at ON jobs USING btree (created_at);
+CREATE INDEX index_jobs_on_created_at ON public.jobs USING btree (created_at);
--
-- Name: index_jobs_on_finished_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_jobs_on_finished_at ON jobs USING btree (finished_at);
+CREATE INDEX index_jobs_on_finished_at ON public.jobs USING btree (finished_at);
--
-- Name: index_jobs_on_modified_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_jobs_on_modified_at ON jobs USING btree (modified_at);
+CREATE INDEX index_jobs_on_modified_at ON public.jobs USING btree (modified_at);
--
-- Name: index_jobs_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_jobs_on_modified_at_uuid ON jobs USING btree (modified_at DESC, uuid);
+CREATE INDEX index_jobs_on_modified_at_uuid ON public.jobs USING btree (modified_at DESC, uuid);
--
-- Name: index_jobs_on_output; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_jobs_on_output ON jobs USING btree (output);
+CREATE INDEX index_jobs_on_output ON public.jobs USING btree (output);
--
-- Name: index_jobs_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_jobs_on_owner_uuid ON jobs USING btree (owner_uuid);
+CREATE INDEX index_jobs_on_owner_uuid ON public.jobs USING btree (owner_uuid);
--
-- Name: index_jobs_on_script; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_jobs_on_script ON jobs USING btree (script);
+CREATE INDEX index_jobs_on_script ON public.jobs USING btree (script);
--
-- Name: index_jobs_on_script_parameters_digest; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_jobs_on_script_parameters_digest ON jobs USING btree (script_parameters_digest);
+CREATE INDEX index_jobs_on_script_parameters_digest ON public.jobs USING btree (script_parameters_digest);
--
-- Name: index_jobs_on_started_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_jobs_on_started_at ON jobs USING btree (started_at);
+CREATE INDEX index_jobs_on_started_at ON public.jobs USING btree (started_at);
--
-- Name: index_jobs_on_submit_id; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_jobs_on_submit_id ON jobs USING btree (submit_id);
+CREATE UNIQUE INDEX index_jobs_on_submit_id ON public.jobs USING btree (submit_id);
--
-- Name: index_jobs_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_jobs_on_uuid ON jobs USING btree (uuid);
+CREATE UNIQUE INDEX index_jobs_on_uuid ON public.jobs USING btree (uuid);
--
-- Name: index_keep_disks_on_filesystem_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_keep_disks_on_filesystem_uuid ON keep_disks USING btree (filesystem_uuid);
+CREATE INDEX index_keep_disks_on_filesystem_uuid ON public.keep_disks USING btree (filesystem_uuid);
--
-- Name: index_keep_disks_on_last_ping_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_keep_disks_on_last_ping_at ON keep_disks USING btree (last_ping_at);
+CREATE INDEX index_keep_disks_on_last_ping_at ON public.keep_disks USING btree (last_ping_at);
--
-- Name: index_keep_disks_on_node_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_keep_disks_on_node_uuid ON keep_disks USING btree (node_uuid);
+CREATE INDEX index_keep_disks_on_node_uuid ON public.keep_disks USING btree (node_uuid);
--
-- Name: index_keep_disks_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_keep_disks_on_owner_uuid ON keep_disks USING btree (owner_uuid);
+CREATE INDEX index_keep_disks_on_owner_uuid ON public.keep_disks USING btree (owner_uuid);
--
-- Name: index_keep_disks_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_keep_disks_on_uuid ON keep_disks USING btree (uuid);
+CREATE UNIQUE INDEX index_keep_disks_on_uuid ON public.keep_disks USING btree (uuid);
--
-- Name: index_keep_services_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_keep_services_on_owner_uuid ON keep_services USING btree (owner_uuid);
+CREATE INDEX index_keep_services_on_owner_uuid ON public.keep_services USING btree (owner_uuid);
--
-- Name: index_keep_services_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_keep_services_on_uuid ON keep_services USING btree (uuid);
+CREATE UNIQUE INDEX index_keep_services_on_uuid ON public.keep_services USING btree (uuid);
--
-- Name: index_links_on_created_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_links_on_created_at ON links USING btree (created_at);
+CREATE INDEX index_links_on_created_at ON public.links USING btree (created_at);
--
-- Name: index_links_on_head_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_links_on_head_uuid ON links USING btree (head_uuid);
+CREATE INDEX index_links_on_head_uuid ON public.links USING btree (head_uuid);
--
-- Name: index_links_on_modified_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_links_on_modified_at ON links USING btree (modified_at);
+CREATE INDEX index_links_on_modified_at ON public.links USING btree (modified_at);
--
-- Name: index_links_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_links_on_modified_at_uuid ON links USING btree (modified_at DESC, uuid);
+CREATE INDEX index_links_on_modified_at_uuid ON public.links USING btree (modified_at DESC, uuid);
--
-- Name: index_links_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_links_on_owner_uuid ON links USING btree (owner_uuid);
+CREATE INDEX index_links_on_owner_uuid ON public.links USING btree (owner_uuid);
--
-- Name: index_links_on_tail_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_links_on_tail_uuid ON links USING btree (tail_uuid);
+CREATE INDEX index_links_on_tail_uuid ON public.links USING btree (tail_uuid);
--
-- Name: index_links_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_links_on_uuid ON links USING btree (uuid);
+CREATE UNIQUE INDEX index_links_on_uuid ON public.links USING btree (uuid);
--
-- Name: index_logs_on_created_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_logs_on_created_at ON logs USING btree (created_at);
+CREATE INDEX index_logs_on_created_at ON public.logs USING btree (created_at);
--
-- Name: index_logs_on_event_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_logs_on_event_at ON logs USING btree (event_at);
+CREATE INDEX index_logs_on_event_at ON public.logs USING btree (event_at);
--
-- Name: index_logs_on_event_type; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_logs_on_event_type ON logs USING btree (event_type);
+CREATE INDEX index_logs_on_event_type ON public.logs USING btree (event_type);
--
-- Name: index_logs_on_modified_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_logs_on_modified_at ON logs USING btree (modified_at);
+CREATE INDEX index_logs_on_modified_at ON public.logs USING btree (modified_at);
--
-- Name: index_logs_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_logs_on_modified_at_uuid ON logs USING btree (modified_at DESC, uuid);
+CREATE INDEX index_logs_on_modified_at_uuid ON public.logs USING btree (modified_at DESC, uuid);
--
-- Name: index_logs_on_object_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_logs_on_object_owner_uuid ON logs USING btree (object_owner_uuid);
+CREATE INDEX index_logs_on_object_owner_uuid ON public.logs USING btree (object_owner_uuid);
--
-- Name: index_logs_on_object_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_logs_on_object_uuid ON logs USING btree (object_uuid);
+CREATE INDEX index_logs_on_object_uuid ON public.logs USING btree (object_uuid);
--
-- Name: index_logs_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_logs_on_owner_uuid ON logs USING btree (owner_uuid);
+CREATE INDEX index_logs_on_owner_uuid ON public.logs USING btree (owner_uuid);
--
-- Name: index_logs_on_summary; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_logs_on_summary ON logs USING btree (summary);
+CREATE INDEX index_logs_on_summary ON public.logs USING btree (summary);
--
-- Name: index_logs_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_logs_on_uuid ON logs USING btree (uuid);
+CREATE UNIQUE INDEX index_logs_on_uuid ON public.logs USING btree (uuid);
--
-- Name: index_nodes_on_created_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_nodes_on_created_at ON nodes USING btree (created_at);
+CREATE INDEX index_nodes_on_created_at ON public.nodes USING btree (created_at);
--
-- Name: index_nodes_on_hostname; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_nodes_on_hostname ON nodes USING btree (hostname);
+CREATE INDEX index_nodes_on_hostname ON public.nodes USING btree (hostname);
--
-- Name: index_nodes_on_modified_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_nodes_on_modified_at ON nodes USING btree (modified_at);
+CREATE INDEX index_nodes_on_modified_at ON public.nodes USING btree (modified_at);
--
-- Name: index_nodes_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_nodes_on_owner_uuid ON nodes USING btree (owner_uuid);
+CREATE INDEX index_nodes_on_owner_uuid ON public.nodes USING btree (owner_uuid);
--
-- Name: index_nodes_on_slot_number; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_nodes_on_slot_number ON nodes USING btree (slot_number);
+CREATE UNIQUE INDEX index_nodes_on_slot_number ON public.nodes USING btree (slot_number);
--
-- Name: index_nodes_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_nodes_on_uuid ON nodes USING btree (uuid);
+CREATE UNIQUE INDEX index_nodes_on_uuid ON public.nodes USING btree (uuid);
--
-- Name: index_pipeline_instances_on_created_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_pipeline_instances_on_created_at ON pipeline_instances USING btree (created_at);
+CREATE INDEX index_pipeline_instances_on_created_at ON public.pipeline_instances USING btree (created_at);
--
-- Name: index_pipeline_instances_on_modified_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_pipeline_instances_on_modified_at ON pipeline_instances USING btree (modified_at);
+CREATE INDEX index_pipeline_instances_on_modified_at ON public.pipeline_instances USING btree (modified_at);
--
-- Name: index_pipeline_instances_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_pipeline_instances_on_modified_at_uuid ON pipeline_instances USING btree (modified_at DESC, uuid);
+CREATE INDEX index_pipeline_instances_on_modified_at_uuid ON public.pipeline_instances USING btree (modified_at DESC, uuid);
--
-- Name: index_pipeline_instances_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_pipeline_instances_on_owner_uuid ON pipeline_instances USING btree (owner_uuid);
+CREATE INDEX index_pipeline_instances_on_owner_uuid ON public.pipeline_instances USING btree (owner_uuid);
--
-- Name: index_pipeline_instances_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_pipeline_instances_on_uuid ON pipeline_instances USING btree (uuid);
+CREATE UNIQUE INDEX index_pipeline_instances_on_uuid ON public.pipeline_instances USING btree (uuid);
--
-- Name: index_pipeline_templates_on_created_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_pipeline_templates_on_created_at ON pipeline_templates USING btree (created_at);
+CREATE INDEX index_pipeline_templates_on_created_at ON public.pipeline_templates USING btree (created_at);
--
-- Name: index_pipeline_templates_on_modified_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_pipeline_templates_on_modified_at ON pipeline_templates USING btree (modified_at);
+CREATE INDEX index_pipeline_templates_on_modified_at ON public.pipeline_templates USING btree (modified_at);
--
-- Name: index_pipeline_templates_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_pipeline_templates_on_modified_at_uuid ON pipeline_templates USING btree (modified_at DESC, uuid);
+CREATE INDEX index_pipeline_templates_on_modified_at_uuid ON public.pipeline_templates USING btree (modified_at DESC, uuid);
--
-- Name: index_pipeline_templates_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_pipeline_templates_on_owner_uuid ON pipeline_templates USING btree (owner_uuid);
+CREATE INDEX index_pipeline_templates_on_owner_uuid ON public.pipeline_templates USING btree (owner_uuid);
--
-- Name: index_pipeline_templates_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_pipeline_templates_on_uuid ON pipeline_templates USING btree (uuid);
+CREATE UNIQUE INDEX index_pipeline_templates_on_uuid ON public.pipeline_templates USING btree (uuid);
--
-- Name: index_repositories_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_repositories_on_modified_at_uuid ON repositories USING btree (modified_at DESC, uuid);
+CREATE INDEX index_repositories_on_modified_at_uuid ON public.repositories USING btree (modified_at DESC, uuid);
--
-- Name: index_repositories_on_name; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_repositories_on_name ON repositories USING btree (name);
+CREATE UNIQUE INDEX index_repositories_on_name ON public.repositories USING btree (name);
--
-- Name: index_repositories_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_repositories_on_owner_uuid ON repositories USING btree (owner_uuid);
+CREATE INDEX index_repositories_on_owner_uuid ON public.repositories USING btree (owner_uuid);
--
-- Name: index_repositories_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_repositories_on_uuid ON repositories USING btree (uuid);
+CREATE UNIQUE INDEX index_repositories_on_uuid ON public.repositories USING btree (uuid);
--
-- Name: index_specimens_on_created_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_specimens_on_created_at ON specimens USING btree (created_at);
+CREATE INDEX index_specimens_on_created_at ON public.specimens USING btree (created_at);
--
-- Name: index_specimens_on_modified_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_specimens_on_modified_at ON specimens USING btree (modified_at);
+CREATE INDEX index_specimens_on_modified_at ON public.specimens USING btree (modified_at);
--
-- Name: index_specimens_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_specimens_on_owner_uuid ON specimens USING btree (owner_uuid);
+CREATE INDEX index_specimens_on_owner_uuid ON public.specimens USING btree (owner_uuid);
--
-- Name: index_specimens_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_specimens_on_uuid ON specimens USING btree (uuid);
+CREATE UNIQUE INDEX index_specimens_on_uuid ON public.specimens USING btree (uuid);
--
-- Name: index_traits_on_name; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_traits_on_name ON traits USING btree (name);
+CREATE INDEX index_traits_on_name ON public.traits USING btree (name);
--
-- Name: index_traits_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_traits_on_owner_uuid ON traits USING btree (owner_uuid);
+CREATE INDEX index_traits_on_owner_uuid ON public.traits USING btree (owner_uuid);
--
-- Name: index_traits_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_traits_on_uuid ON traits USING btree (uuid);
+CREATE UNIQUE INDEX index_traits_on_uuid ON public.traits USING btree (uuid);
--
-- Name: index_users_on_created_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_users_on_created_at ON users USING btree (created_at);
+CREATE INDEX index_users_on_created_at ON public.users USING btree (created_at);
--
-- Name: index_users_on_modified_at; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_users_on_modified_at ON users USING btree (modified_at);
+CREATE INDEX index_users_on_modified_at ON public.users USING btree (modified_at);
--
-- Name: index_users_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_users_on_modified_at_uuid ON users USING btree (modified_at DESC, uuid);
+CREATE INDEX index_users_on_modified_at_uuid ON public.users USING btree (modified_at DESC, uuid);
--
-- Name: index_users_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_users_on_owner_uuid ON users USING btree (owner_uuid);
+CREATE INDEX index_users_on_owner_uuid ON public.users USING btree (owner_uuid);
--
-- Name: index_users_on_username; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_users_on_username ON users USING btree (username);
+CREATE UNIQUE INDEX index_users_on_username ON public.users USING btree (username);
--
-- Name: index_users_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_users_on_uuid ON users USING btree (uuid);
+CREATE UNIQUE INDEX index_users_on_uuid ON public.users USING btree (uuid);
--
-- Name: index_virtual_machines_on_hostname; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_virtual_machines_on_hostname ON virtual_machines USING btree (hostname);
+CREATE INDEX index_virtual_machines_on_hostname ON public.virtual_machines USING btree (hostname);
--
-- Name: index_virtual_machines_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_virtual_machines_on_modified_at_uuid ON virtual_machines USING btree (modified_at DESC, uuid);
+CREATE INDEX index_virtual_machines_on_modified_at_uuid ON public.virtual_machines USING btree (modified_at DESC, uuid);
--
-- Name: index_virtual_machines_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_virtual_machines_on_owner_uuid ON virtual_machines USING btree (owner_uuid);
+CREATE INDEX index_virtual_machines_on_owner_uuid ON public.virtual_machines USING btree (owner_uuid);
--
-- Name: index_virtual_machines_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_virtual_machines_on_uuid ON virtual_machines USING btree (uuid);
+CREATE UNIQUE INDEX index_virtual_machines_on_uuid ON public.virtual_machines USING btree (uuid);
--
-- Name: index_workflows_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_workflows_on_modified_at_uuid ON workflows USING btree (modified_at DESC, uuid);
+CREATE INDEX index_workflows_on_modified_at_uuid ON public.workflows USING btree (modified_at DESC, uuid);
--
-- Name: index_workflows_on_owner_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX index_workflows_on_owner_uuid ON workflows USING btree (owner_uuid);
+CREATE INDEX index_workflows_on_owner_uuid ON public.workflows USING btree (owner_uuid);
--
-- Name: index_workflows_on_uuid; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX index_workflows_on_uuid ON workflows USING btree (uuid);
+CREATE UNIQUE INDEX index_workflows_on_uuid ON public.workflows USING btree (uuid);
--
-- Name: job_tasks_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX job_tasks_search_index ON job_tasks USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, job_uuid, created_by_job_task_uuid);
+CREATE INDEX job_tasks_search_index ON public.job_tasks USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, job_uuid, created_by_job_task_uuid);
--
-- Name: jobs_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX jobs_full_text_search_idx ON jobs USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((((((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(submit_id, ''::character varying))::text) || ' '::text) || (COALESCE(script, ''::character varying))::text) || ' '::text) || (COALESCE(script_version, ''::character varying))::text) || ' '::text) || COALESCE(script_parameters, ''::text)) || ' '::text) || (COALESCE(cancelled_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(cancelled_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(output, ''::character varying))::text) || ' '::text) || (COALESCE(is_locked_by_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(log, ''::character varying))::text) || ' '::text) || COALESCE(tasks_summary, ''::text)) || ' '::text) || COALESCE(runtime_constraints, ''::text)) || ' '::text) || (COALESCE(repository, ''::character varying))::text) || ' '::text) || (COALESCE(supplied_script_version, ''::character varying))::text) || ' '::text) || (COALESCE(docker_image_locator, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || (COALESCE(arvados_sdk_version, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text))));
+CREATE INDEX jobs_full_text_search_idx ON public.jobs USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((((((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(submit_id, ''::character varying))::text) || ' '::text) || (COALESCE(script, ''::character varying))::text) || ' '::text) || (COALESCE(script_version, ''::character varying))::text) || ' '::text) || COALESCE(script_parameters, ''::text)) || ' '::text) || (COALESCE(cancelled_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(cancelled_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(output, ''::character varying))::text) || ' '::text) || (COALESCE(is_locked_by_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(log, ''::character varying))::text) || ' '::text) || COALESCE(tasks_summary, ''::text)) || ' '::text) || COALESCE(runtime_constraints, ''::text)) || ' '::text) || (COALESCE(repository, ''::character varying))::text) || ' '::text) || (COALESCE(supplied_script_version, ''::character varying))::text) || ' '::text) || (COALESCE(docker_image_locator, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || (COALESCE(arvados_sdk_version, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text))));
--
-- Name: jobs_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX jobs_search_index ON jobs USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, submit_id, script, script_version, cancelled_by_client_uuid, cancelled_by_user_uuid, output, is_locked_by_uuid, log, repository, supplied_script_version, docker_image_locator, state, arvados_sdk_version);
+CREATE INDEX jobs_search_index ON public.jobs USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, submit_id, script, script_version, cancelled_by_client_uuid, cancelled_by_user_uuid, output, is_locked_by_uuid, log, repository, supplied_script_version, docker_image_locator, state, arvados_sdk_version);
--
-- Name: keep_disks_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX keep_disks_search_index ON keep_disks USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, ping_secret, node_uuid, filesystem_uuid, keep_service_uuid);
+CREATE INDEX keep_disks_search_index ON public.keep_disks USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, ping_secret, node_uuid, filesystem_uuid, keep_service_uuid);
--
-- Name: keep_services_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX keep_services_search_index ON keep_services USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, service_host, service_type);
+CREATE INDEX keep_services_search_index ON public.keep_services USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, service_host, service_type);
--
-- Name: links_index_on_properties; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX links_index_on_properties ON links USING gin (properties);
+CREATE INDEX links_index_on_properties ON public.links USING gin (properties);
--
-- Name: links_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX links_search_index ON links USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, tail_uuid, link_class, name, head_uuid);
+CREATE INDEX links_search_index ON public.links USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, tail_uuid, link_class, name, head_uuid);
--
-- Name: links_tail_name_unique_if_link_class_name; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX links_tail_name_unique_if_link_class_name ON links USING btree (tail_uuid, name) WHERE ((link_class)::text = 'name'::text);
+CREATE UNIQUE INDEX links_tail_name_unique_if_link_class_name ON public.links USING btree (tail_uuid, name) WHERE ((link_class)::text = 'name'::text);
--
-- Name: logs_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX logs_search_index ON logs USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, object_uuid, event_type, object_owner_uuid);
+CREATE INDEX logs_search_index ON public.logs USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, object_uuid, event_type, object_owner_uuid);
--
-- Name: nodes_index_on_info; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX nodes_index_on_info ON nodes USING gin (info);
+CREATE INDEX nodes_index_on_info ON public.nodes USING gin (info);
--
-- Name: nodes_index_on_properties; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX nodes_index_on_properties ON nodes USING gin (properties);
+CREATE INDEX nodes_index_on_properties ON public.nodes USING gin (properties);
--
-- Name: nodes_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX nodes_search_index ON nodes USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, hostname, domain, ip_address, job_uuid);
+CREATE INDEX nodes_search_index ON public.nodes USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, hostname, domain, ip_address, job_uuid);
--
-- Name: permission_target_trashed; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX permission_target_trashed ON materialized_permission_view USING btree (trashed, target_uuid);
+CREATE INDEX permission_target_trashed ON public.materialized_permission_view USING btree (trashed, target_uuid);
--
-- Name: permission_target_user_trashed_level; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX permission_target_user_trashed_level ON materialized_permission_view USING btree (user_uuid, trashed, perm_level);
+CREATE INDEX permission_target_user_trashed_level ON public.materialized_permission_view USING btree (user_uuid, trashed, perm_level);
--
-- Name: pipeline_instances_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX pipeline_instances_full_text_search_idx ON pipeline_instances USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(pipeline_template_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text)) || ' '::text) || COALESCE(properties, ''::text)) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || COALESCE(components_summary, ''::text)) || ' '::text) || (COALESCE(description, ''::character varying))::text)));
+CREATE INDEX pipeline_instances_full_text_search_idx ON public.pipeline_instances USING gin (to_tsvector('english'::regconfig, (((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(pipeline_template_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text)) || ' '::text) || COALESCE(properties, ''::text)) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || COALESCE(components_summary, ''::text)) || ' '::text) || (COALESCE(description, ''::character varying))::text)));
--
-- Name: pipeline_instances_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX pipeline_instances_search_index ON pipeline_instances USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, pipeline_template_uuid, name, state);
+CREATE INDEX pipeline_instances_search_index ON public.pipeline_instances USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, pipeline_template_uuid, name, state);
--
-- Name: pipeline_template_owner_uuid_name_unique; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX pipeline_template_owner_uuid_name_unique ON pipeline_templates USING btree (owner_uuid, name);
+CREATE UNIQUE INDEX pipeline_template_owner_uuid_name_unique ON public.pipeline_templates USING btree (owner_uuid, name);
--
-- Name: pipeline_templates_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX pipeline_templates_full_text_search_idx ON pipeline_templates USING gin (to_tsvector('english'::regconfig, (((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text)) || ' '::text) || (COALESCE(description, ''::character varying))::text)));
+CREATE INDEX pipeline_templates_full_text_search_idx ON public.pipeline_templates USING gin (to_tsvector('english'::regconfig, (((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text)) || ' '::text) || (COALESCE(description, ''::character varying))::text)));
--
-- Name: pipeline_templates_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX pipeline_templates_search_index ON pipeline_templates USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
+CREATE INDEX pipeline_templates_search_index ON public.pipeline_templates USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
--
-- Name: repositories_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX repositories_search_index ON repositories USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
+CREATE INDEX repositories_search_index ON public.repositories USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
--
-- Name: specimens_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX specimens_search_index ON specimens USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, material);
+CREATE INDEX specimens_search_index ON public.specimens USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, material);
--
-- Name: traits_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX traits_search_index ON traits USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
+CREATE INDEX traits_search_index ON public.traits USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
--
-- Name: unique_schema_migrations; Type: INDEX; Schema: public; Owner: -
--
-CREATE UNIQUE INDEX unique_schema_migrations ON schema_migrations USING btree (version);
+CREATE UNIQUE INDEX unique_schema_migrations ON public.schema_migrations USING btree (version);
--
-- Name: users_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX users_search_index ON users USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, email, first_name, last_name, identity_url, default_owner_uuid, username, redirect_to_user_uuid);
+CREATE INDEX users_search_index ON public.users USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, email, first_name, last_name, identity_url, default_owner_uuid, username, redirect_to_user_uuid);
--
-- Name: virtual_machines_search_index; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX virtual_machines_search_index ON virtual_machines USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, hostname);
+CREATE INDEX virtual_machines_search_index ON public.virtual_machines USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, hostname);
--
-- Name: workflows_full_text_search_idx; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX workflows_full_text_search_idx ON workflows USING gin (to_tsvector('english'::regconfig, (((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(description, ''::text))));
+CREATE INDEX workflows_full_text_search_idx ON public.workflows USING gin (to_tsvector('english'::regconfig, (((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(description, ''::text))));
--
-- Name: workflows_search_idx; Type: INDEX; Schema: public; Owner: -
--
-CREATE INDEX workflows_search_idx ON workflows USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
+CREATE INDEX workflows_search_idx ON public.workflows USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);
--
INSERT INTO schema_migrations (version) VALUES ('20180608123145');
+INSERT INTO schema_migrations (version) VALUES ('20180806133039');
+
+INSERT INTO schema_migrations (version) VALUES ('20180820130357');
+
+INSERT INTO schema_migrations (version) VALUES ('20180820132617');
+
+INSERT INTO schema_migrations (version) VALUES ('20180820135808');
+
+INSERT INTO schema_migrations (version) VALUES ('20180824152014');
+
+INSERT INTO schema_migrations (version) VALUES ('20180824155207');
+
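These schema_migrations rows are how Rails records which migrations have been applied. As a quick sanity check (illustrative only, assuming a Rails console on the API server), the most recently recorded versions can be listed with:

  # Illustrative only: show the newest recorded migration versions.
  ActiveRecord::Base.connection.select_values(
    "SELECT version FROM schema_migrations ORDER BY version DESC LIMIT 6")
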
# Load params[:limit], params[:offset] and params[:order]
# into @limit, @offset, @orders
- def load_limit_offset_order_params
+ def load_limit_offset_order_params(fill_table_names: true)
if params[:limit]
unless params[:limit].to_s.match(/^\d+$/)
raise ArgumentError.new("Invalid value for limit parameter")
# has used set_table_name to use an alternate table name from the Rails standard.
# I could not find a perfect way to handle this well, but ActiveRecord::Base.send(:descendants)
# would be a place to start if this ever becomes necessary.
- if attr.match(/^[a-z][_a-z0-9]+$/) and
- model_class.columns.collect(&:name).index(attr) and
- ['asc','desc'].index direction.downcase
- @orders << "#{table_name}.#{attr} #{direction.downcase}"
+ if (attr.match(/^[a-z][_a-z0-9]+$/) &&
+ model_class.columns.collect(&:name).index(attr) &&
+ ['asc','desc'].index(direction.downcase))
+ if fill_table_names
+ @orders << "#{table_name}.#{attr} #{direction.downcase}"
+ else
+ @orders << "#{attr} #{direction.downcase}"
+ end
elsif attr.match(/^([a-z][_a-z0-9]+)\.([a-z][_a-z0-9]+)$/) and
['asc','desc'].index(direction.downcase) and
ActiveRecord::Base.connection.tables.include?($1) and
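Presumably the new fill_table_names: false option exists so callers that assemble results from more than one table (such as the group contents endpoint, whose sorting tests change below) can build ORDER BY clauses without a table prefix. A minimal illustration of the two branches, assuming the model is Collection:

  # fill_table_names: true (default): the attribute is qualified with the table name.
  #   order: ["name asc"]  =>  @orders << "collections.name asc"
  # fill_table_names: false: the attribute is left bare, so the same clause can be
  # applied to result sets that are not tied to a single table.
  #   order: ["name asc"]  =>  @orders << "name asc"
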
api_token: 2p1pou8p4ls208mcbedeewlotghppenobcyrmyhq8pyf51xd8u
expires_at: 2038-01-01 00:00:00
+user_bar_in_sharing_group:
+ uuid: zzzzz-gj3su-62hryf5fht531mz
+ api_client: untrusted
+ user: user_bar_in_sharing_group
+ api_token: 5vy55akwq85vghh80wc2cuxl4p8psay73lkpqf5c2cxvp6rmm6
+ expires_at: 2038-01-01 00:00:00
+
user1_with_load:
uuid: zzzzz-gj3su-357z32aux8dg2s1
api_client: untrusted
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
name: running
state: Committed
- priority: 1
+ priority: 501
created_at: <%= 2.minute.ago.to_s(:db) %>
updated_at: <%= 1.minute.ago.to_s(:db) %>
modified_at: <%= 1.minute.ago.to_s(:db) %>
container_image: test
cwd: test
output_path: test
- command: ["echo", "hello"]
+ command: ["echo", "hello", "/bin/sh", "-c", "'cat' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/baz' '|' 'gzip' '>' '/dev/null'"]
runtime_constraints:
vcpus: 1
ram: 123
log: ea10d51bcf88862dbcc36eb292017dfd+45
output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
output_path: test
- command: ["echo", "hello"]
+ command: ["echo", "hello", "/bin/sh", "-c", "'cat' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/baz' '|' 'gzip' '>' '/dev/null'"]
runtime_constraints:
ram: 12000000000
vcpus: 4
description: Users who can share objects with each other
group_class: role
+project_owned_by_foo:
+ uuid: zzzzz-j7d0g-lsjm0ibr0ydwpzx
+ owner_uuid: zzzzz-tpzed-81hsbo6mk8nl05c
+ created_at: 2014-02-03T17:22:54Z
+ modified_at: 2014-02-03T17:22:54Z
+ name: project_owned_by_foo
+ group_class: project
+
empty_project:
uuid: zzzzz-j7d0g-9otoxmrksam74q6
owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
post :create, {
container_request: minimal_cr.merge(
- secret_mounts: {'/foo' => {'type' => 'json', 'content' => 'bar'}}),
+ secret_mounts: {'/foo' => {'kind' => 'json', 'content' => 'bar'}}),
}
assert_response :success
patch :update, {
id: req.uuid,
container_request: {
- secret_mounts: {'/foo' => {'type' => 'json', 'content' => 'bar'}},
+ secret_mounts: {'/foo' => {'kind' => 'json', 'content' => 'bar'}},
},
}
assert_response :success
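These payloads switch from 'type' to 'kind': Arvados mount definitions are keyed by target path and described with a 'kind' field, which is presumably why the fixtures are corrected here. A minimal fragment of the expected shape, mirroring these tests:

  # Hypothetical fragment; the target path and content are taken from the tests.
  secret_mounts = {'/foo' => {'kind' => 'json', 'content' => 'bar'}}
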
test "update without deleting secret_mounts" do
authorize_with :active
req = container_requests(:uncommitted)
- req.update_attributes!(secret_mounts: {'/foo' => {'type' => 'json', 'content' => 'bar'}})
+ req.update_attributes!(secret_mounts: {'/foo' => {'kind' => 'json', 'content' => 'bar'}})
patch :update, {
id: req.uuid,
end
end
+ test "list trashed collections and projects" do
+ authorize_with :active
+ get(:contents, {
+ format: :json,
+ include_trash: true,
+ filters: [
+ ['uuid', 'is_a', ['arvados#collection', 'arvados#group']],
+ ['is_trashed', '=', true],
+ ],
+ limit: 10000,
+ })
+ assert_response :success
+ found_uuids = json_response['items'].collect { |i| i['uuid'] }
+ assert_includes found_uuids, groups(:trashed_project).uuid
+ refute_includes found_uuids, groups(:aproject).uuid
+ assert_includes found_uuids, collections(:expired_collection).uuid
+ refute_includes found_uuids, collections(:w_a_z_file).uuid
+ end
+
test "list objects in home project" do
authorize_with :active
get :contents, {
assert_includes ids, collections(:baz_file_in_asubproject).uuid
end
- [['asc', :<=],
- ['desc', :>=]].each do |order, operator|
- test "user with project read permission can sort project collections #{order}" do
+ [
+ ['collections.name', 'asc', :<=, "name"],
+ ['collections.name', 'desc', :>=, "name"],
+ ['name', 'asc', :<=, "name"],
+ ['name', 'desc', :>=, "name"],
+ ['collections.created_at', 'asc', :<=, "created_at"],
+ ['collections.created_at', 'desc', :>=, "created_at"],
+ ['created_at', 'asc', :<=, "created_at"],
+ ['created_at', 'desc', :>=, "created_at"],
+ ].each do |column, order, operator, field|
+ test "user with project read permission can sort projects on #{column} #{order}" do
authorize_with :project_viewer
get :contents, {
id: groups(:asubproject).uuid,
format: :json,
filters: [['uuid', 'is_a', "arvados#collection"]],
- order: "collections.name #{order}"
+ order: "#{column} #{order}"
}
- sorted_names = json_response['items'].collect { |item| item["name"] }
- # Here we avoid assuming too much about the database
- # collation. Both "alice"<"Bob" and "alice">"Bob" can be
- # correct. Hopefully it _is_ safe to assume that if "a" comes
- # before "b" in the ascii alphabet, "aX">"bY" is never true for
- # any strings X and Y.
- reliably_sortable_names = sorted_names.select do |name|
- name[0] >= 'a' and name[0] <= 'z'
- end.uniq do |name|
- name[0]
- end
- # Preserve order of sorted_names. But do not use &=. If
- # sorted_names has out-of-order duplicates, we want to preserve
- # them here, so we can detect them and fail the test below.
- sorted_names.select! do |name|
- reliably_sortable_names.include? name
- end
- actually_checked_anything = false
- previous = nil
- sorted_names.each do |entry|
- if previous
- assert_operator(previous, operator, entry,
- "Entries sorted incorrectly.")
- actually_checked_anything = true
+ sorted_values = json_response['items'].collect { |item| item[field] }
+ if field == "name"
+ # Here we avoid assuming too much about the database
+ # collation. Both "alice"<"Bob" and "alice">"Bob" can be
+ # correct. Hopefully it _is_ safe to assume that if "a" comes
+ # before "b" in the ascii alphabet, "aX">"bY" is never true for
+ # any strings X and Y.
+ reliably_sortable_names = sorted_values.select do |name|
+ name[0] >= 'a' && name[0] <= 'z'
+ end.uniq do |name|
+ name[0]
+ end
+ # Preserve order of sorted_values. But do not use &=. If
+ # sorted_values has out-of-order duplicates, we want to preserve
+ # them here, so we can detect them and fail the test below.
+ sorted_values.select! do |name|
+ reliably_sortable_names.include? name
end
- previous = entry
end
- assert actually_checked_anything, "Didn't even find two names to compare."
+ assert_sorted(operator, sorted_values)
end
end
- test 'list objects across multiple projects' do
- authorize_with :project_viewer
- get :contents, {
- format: :json,
- filters: [['uuid', 'is_a', 'arvados#specimen']]
- }
- assert_response :success
- found_uuids = json_response['items'].collect { |i| i['uuid'] }
- [[:in_aproject, true],
- [:in_asubproject, true],
- [:owned_by_private_group, false]].each do |specimen_fixture, should_find|
- if should_find
- assert_includes found_uuids, specimens(specimen_fixture).uuid, "did not find specimen fixture '#{specimen_fixture}'"
- else
- refute_includes found_uuids, specimens(specimen_fixture).uuid, "found specimen fixture '#{specimen_fixture}'"
+ def assert_sorted(operator, sorted_items)
+ actually_checked_anything = false
+ previous = nil
+ sorted_items.each do |entry|
+ if !previous.nil?
+ assert_operator(previous, operator, entry,
+ "Entries sorted incorrectly.")
+ actually_checked_anything = true
end
+ previous = entry
end
+ assert actually_checked_anything, "Didn't even find two items to compare."
end
# Even though the project_viewer tests go through other controllers,
assert_not_nil Group.readable_by(users(auth)).where(uuid: groups(:trashed_subproject).uuid).first
end
end
+
+ test 'get shared owned by another user' do
+ authorize_with :user_bar_in_sharing_group
+
+ act_as_system_user do
+ Link.create!(
+ tail_uuid: users(:user_bar_in_sharing_group).uuid,
+ link_class: 'permission',
+ name: 'can_read',
+ head_uuid: groups(:project_owned_by_foo).uuid)
+ end
+
+ get :shared, {:filters => [["group_class", "=", "project"]], :include => "owner_uuid"}
+
+ assert_equal 1, json_response['items'].length
+ assert_equal json_response['items'][0]["uuid"], groups(:project_owned_by_foo).uuid
+
+ assert_equal 1, json_response['included'].length
+ assert_equal json_response['included'][0]["uuid"], users(:user_foo_in_sharing_group).uuid
+ end
+
+ test 'get shared, owned by unreadable project' do
+ authorize_with :user_bar_in_sharing_group
+
+ act_as_system_user do
+ Group.find_by_uuid(groups(:project_owned_by_foo).uuid).update!(owner_uuid: groups(:aproject).uuid)
+ Link.create!(
+ tail_uuid: users(:user_bar_in_sharing_group).uuid,
+ link_class: 'permission',
+ name: 'can_read',
+ head_uuid: groups(:project_owned_by_foo).uuid)
+ end
+
+ get :shared, {:filters => [["group_class", "=", "project"]], :include => "owner_uuid"}
+
+ assert_equal 1, json_response['items'].length
+ assert_equal json_response['items'][0]["uuid"], groups(:project_owned_by_foo).uuid
+
+ assert_equal 0, json_response['included'].length
+ end
+
+ test 'get shared, owned by non-project' do
+ authorize_with :user_bar_in_sharing_group
+
+ act_as_system_user do
+ Group.find_by_uuid(groups(:project_owned_by_foo).uuid).update!(owner_uuid: groups(:group_for_sharing_tests).uuid)
+ end
+
+ get :shared, {:filters => [["group_class", "=", "project"]], :include => "owner_uuid"}
+
+ assert_equal 1, json_response['items'].length
+ assert_equal json_response['items'][0]["uuid"], groups(:project_owned_by_foo).uuid
+
+ assert_equal 1, json_response['included'].length
+ assert_equal json_response['included'][0]["uuid"], groups(:group_for_sharing_tests).uuid
+ end
+
end
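
For reference, the three tests above pin down how groups#shared behaves with include=owner_uuid: shared projects come back in "items", their owners in a separate "included" list, and owners the caller cannot read are simply omitted. A minimal Go sketch of a client consuming that payload (this assumes the usual GET /arvados/v1/groups/shared route and ARVADOS_API_HOST / ARVADOS_API_TOKEN in the environment; nothing below is part of the patch):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"os"
)

type obj struct {
	UUID      string `json:"uuid"`
	Name      string `json:"name"`
	OwnerUUID string `json:"owner_uuid"`
}

func main() {
	q := url.Values{}
	q.Set("filters", `[["group_class","=","project"]]`)
	q.Set("include", "owner_uuid")
	u := "https://" + os.Getenv("ARVADOS_API_HOST") + "/arvados/v1/groups/shared?" + q.Encode()

	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+os.Getenv("ARVADOS_API_TOKEN"))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var body struct {
		Items    []obj `json:"items"`    // projects shared with the caller
		Included []obj `json:"included"` // their owners, when readable
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		panic(err)
	}
	owners := map[string]obj{}
	for _, o := range body.Included {
		owners[o.UUID] = o
	}
	for _, p := range body.Items {
		fmt.Printf("%s owned by %q\n", p.UUID, owners[p.OwnerUUID].Name)
	}
}
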
cr.container_image = "img3"
cr.cwd = "/tmp3"
cr.environment = {"BUP" => "BOP"}
- cr.mounts = {"BAR" => "BAZ"}
+ cr.mounts = {"BAR" => {"kind" => "BAZ"}}
cr.output_path = "/tmp4"
cr.priority = 2
cr.runtime_constraints = {"vcpus" => 4}
end
[
- {"vcpus" => 1},
- {"vcpus" => 1, "ram" => nil},
- {"vcpus" => 0, "ram" => 123},
- {"vcpus" => "1", "ram" => "123"}
- ].each do |invalid_constraints|
- test "Create with #{invalid_constraints}" do
+ {"runtime_constraints" => {"vcpus" => 1}},
+ {"runtime_constraints" => {"vcpus" => 1, "ram" => nil}},
+ {"runtime_constraints" => {"vcpus" => 0, "ram" => 123}},
+ {"runtime_constraints" => {"vcpus" => "1", "ram" => "123"}},
+ {"mounts" => {"FOO" => "BAR"}},
+ {"mounts" => {"FOO" => {}}},
+ {"mounts" => {"FOO" => {"kind" => "tmp", "capacity" => 42.222}}},
+ {"command" => ["echo", 55]},
+ {"environment" => {"FOO" => 55}}
+ ].each do |value|
+ test "Create with invalid #{value}" do
set_user_from_auth :active
assert_raises(ActiveRecord::RecordInvalid) do
- cr = create_minimal_req!(state: "Committed",
- priority: 1,
- runtime_constraints: invalid_constraints)
+ cr = create_minimal_req!({state: "Committed",
+ priority: 1}.merge(value))
cr.save!
end
end
- test "Update with #{invalid_constraints}" do
+ test "Update with invalid #{value}" do
set_user_from_auth :active
cr = create_minimal_req!(state: "Uncommitted", priority: 1)
cr.save!
assert_raises(ActiveRecord::RecordInvalid) do
cr = ContainerRequest.find_by_uuid cr.uuid
- cr.update_attributes!(state: "Committed",
- runtime_constraints: invalid_constraints)
+ cr.update_attributes!({state: "Committed",
+ priority: 1}.merge(value))
end
end
end
end
[
- ['running_container_auth', 'zzzzz-dz642-runningcontainr', 1],
+ ['running_container_auth', 'zzzzz-dz642-runningcontainr', 501],
['active_no_prefs', nil, 0],
].each do |token, expected, expected_priority|
test "create as #{token} and expect requesting_container_uuid to be #{expected}" do
test "Container create" do
act_as_system_user do
c, _ = minimal_new(environment: {},
- mounts: {"BAR" => "FOO"},
+ mounts: {"BAR" => {"kind" => "FOO"}},
output_path: "/tmp",
priority: 1,
runtime_constraints: {"vcpus" => 1, "ram" => 1})
test "Container valid priority" do
act_as_system_user do
c, _ = minimal_new(environment: {},
- mounts: {"BAR" => "FOO"},
+ mounts: {"BAR" => {"kind" => "FOO"}},
output_path: "/tmp",
priority: 1,
runtime_constraints: {"vcpus" => 1, "ram" => 1})
test "Container serialized hash attributes sorted before save" do
- env = {"C" => 3, "B" => 2, "A" => 1}
- m = {"F" => {"kind" => 3}, "E" => {"kind" => 2}, "D" => {"kind" => 1}}
+ env = {"C" => "3", "B" => "2", "A" => "1"}
+ m = {"F" => {"kind" => "3"}, "E" => {"kind" => "2"}, "D" => {"kind" => "1"}}
rc = {"vcpus" => 1, "ram" => 1, "keep_cache_ram" => 1}
c, _ = minimal_new(environment: env, mounts: m, runtime_constraints: rc)
assert_equal c.environment.to_json, Container.deep_sort_hash(env).to_json
"context"
"flag"
"fmt"
- "log"
"os"
"os/exec"
"os/signal"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/dispatch"
+ "github.com/Sirupsen/logrus"
)
var version = "dev"
func main() {
err := doMain()
if err != nil {
- log.Fatalf("%q", err)
+ logrus.Fatalf("%q", err)
}
}
)
func doMain() error {
+ logger := logrus.StandardLogger()
+ if os.Getenv("DEBUG") != "" {
+ logger.SetLevel(logrus.DebugLevel)
+ }
+ logger.Formatter = &logrus.JSONFormatter{
+ TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
+ }
+
flags := flag.NewFlagSet("crunch-dispatch-local", flag.ExitOnError)
pollInterval := flags.Int(
return nil
}
- log.Printf("crunch-dispatch-local %s started", version)
+ logger.Printf("crunch-dispatch-local %s started", version)
runningCmds = make(map[string]*exec.Cmd)
arv, err := arvadosclient.MakeArvadosClient()
if err != nil {
- log.Printf("Error making Arvados client: %v", err)
+ logger.Errorf("error making Arvados client: %v", err)
return err
}
arv.Retries = 25
dispatcher := dispatch.Dispatcher{
+ Logger: logger,
Arv: arv,
RunContainer: run,
PollPeriod: time.Duration(*pollInterval) * time.Second,
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
sig := <-c
- log.Printf("Received %s, shutting down", sig)
+ logger.Printf("Received %s, shutting down", sig)
signal.Stop(c)
cancel()
cmd.Stderr = os.Stderr
cmd.Stdout = os.Stderr
- log.Printf("Starting container %v", uuid)
+ dispatcher.Logger.Printf("starting container %v", uuid)
// Add this crunch job to the list of runningCmds only if we
// succeed in starting crunch-run.
runningCmdsMutex.Lock()
if err := startCmd(container, cmd); err != nil {
runningCmdsMutex.Unlock()
- log.Printf("Error starting %v for %v: %q", *crunchRunCommand, uuid, err)
+ dispatcher.Logger.Warnf("error starting %q for %s: %s", *crunchRunCommand, uuid, err)
dispatcher.UpdateState(uuid, dispatch.Cancelled)
} else {
runningCmds[uuid] = cmd
go func() {
if _, err := cmd.Process.Wait(); err != nil {
- log.Printf("Error while waiting for crunch job to finish for %v: %q", uuid, err)
+ dispatcher.Logger.Warnf("error while waiting for crunch job to finish for %v: %q", uuid, err)
}
- log.Printf("sending done")
+ dispatcher.Logger.Debugf("sending done")
done <- struct{}{}
}()
case c := <-status:
// Interrupt the child process if priority changes to 0
if (c.State == dispatch.Locked || c.State == dispatch.Running) && c.Priority == 0 {
- log.Printf("Sending SIGINT to pid %d to cancel container %v", cmd.Process.Pid, uuid)
+ dispatcher.Logger.Printf("sending SIGINT to pid %d to cancel container %v", cmd.Process.Pid, uuid)
cmd.Process.Signal(os.Interrupt)
}
}
}
close(done)
- log.Printf("Finished container run for %v", uuid)
+ dispatcher.Logger.Printf("finished container run for %v", uuid)
// Remove the crunch job from runningCmds
runningCmdsMutex.Lock()
// If the container is not finalized, then change it to "Cancelled".
err := dispatcher.Arv.Get("containers", uuid, nil, &container)
if err != nil {
- log.Printf("Error getting final container state: %v", err)
+ dispatcher.Logger.Warnf("error getting final container state: %v", err)
}
if container.State == dispatch.Locked || container.State == dispatch.Running {
- log.Printf("After %s process termination, container state for %v is %q. Updating it to %q",
- *crunchRunCommand, container.State, uuid, dispatch.Cancelled)
+ dispatcher.Logger.Warnf("after %q process termination, container state for %v is %q; updating it to %q",
+ *crunchRunCommand, uuid, container.State, dispatch.Cancelled)
dispatcher.UpdateState(uuid, dispatch.Cancelled)
}
for range status {
}
- log.Printf("Finalized container %v", uuid)
+ dispatcher.Logger.Printf("finalized container %v", uuid)
}
"bytes"
"context"
"io"
- "log"
"net/http"
"net/http/httptest"
"os"
"os/exec"
- "strings"
+ "regexp"
"testing"
"time"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/arvadostest"
"git.curoverse.com/arvados.git/sdk/go/dispatch"
+ "github.com/Sirupsen/logrus"
. "gopkg.in/check.v1"
)
initialArgs = os.Args
arvadostest.StartAPI()
runningCmds = make(map[string]*exec.Cmd)
+ logrus.SetFormatter(&logrus.TextFormatter{DisableColors: true})
}
func (s *TestSuite) TearDownSuite(c *C) {
apiStubResponses := make(map[string]arvadostest.StubResponse)
apiStubResponses["/arvados/v1/containers"] = arvadostest.StubResponse{500, string(`{}`)}
- testWithServerStub(c, apiStubResponses, "echo", "Error getting list of containers")
+ testWithServerStub(c, apiStubResponses, "echo", "error getting count of containers")
}
func (s *MockArvadosServerSuite) Test_APIErrorUpdatingContainerState(c *C) {
arvadostest.StubResponse{200, string(`{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx2", "state":"Running", "priority":1, "locked_by_uuid": "` + arvadostest.Dispatch1AuthUUID + `"}`)}
testWithServerStub(c, apiStubResponses, "echo",
- `After echo process termination, container state for Running is "zzzzz-dz642-xxxxxxxxxxxxxx2". Updating it to "Cancelled"`)
+ `after \\"echo\\" process termination, container state for zzzzz-dz642-xxxxxxxxxxxxxx2 is \\"Running\\"; updating it to \\"Cancelled\\"`)
}
func (s *MockArvadosServerSuite) Test_ErrorRunningContainer(c *C) {
apiStubResponses["/arvados/v1/containers/zzzzz-dz642-xxxxxxxxxxxxxx3/lock"] =
arvadostest.StubResponse{200, string(`{"uuid":"zzzzz-dz642-xxxxxxxxxxxxxx3", "state":"Locked", "priority":1}`)}
- testWithServerStub(c, apiStubResponses, "nosuchcommand", "Error starting nosuchcommand for zzzzz-dz642-xxxxxxxxxxxxxx3")
+ testWithServerStub(c, apiStubResponses, "nosuchcommand", `error starting \\"nosuchcommand\\" for zzzzz-dz642-xxxxxxxxxxxxxx3`)
}
func testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubResponse, crunchCmd string, expected string) {
}
buf := bytes.NewBuffer(nil)
- log.SetOutput(io.MultiWriter(buf, os.Stderr))
- defer log.SetOutput(os.Stderr)
+ logrus.SetOutput(io.MultiWriter(buf, os.Stderr))
+ defer logrus.SetOutput(os.Stderr)
*crunchRunCommand = crunchCmd
ctx, cancel := context.WithCancel(context.Background())
dispatcher := dispatch.Dispatcher{
Arv: arv,
- PollPeriod: time.Duration(1) * time.Second,
+ PollPeriod: time.Second / 20,
RunContainer: func(d *dispatch.Dispatcher, c arvados.Container, s <-chan arvados.Container) {
run(d, c, s)
cancel()
return cmd.Start()
}
+ re := regexp.MustCompile(`(?ms).*` + expected + `.*`)
go func() {
- for i := 0; i < 80 && !strings.Contains(buf.String(), expected); i++ {
+ for i := 0; i < 80 && !re.MatchString(buf.String()); i++ {
time.Sleep(100 * time.Millisecond)
}
cancel()
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/config"
"git.curoverse.com/arvados.git/sdk/go/dispatch"
+ "github.com/Sirupsen/logrus"
"github.com/coreos/go-systemd/daemon"
)
+type logger interface {
+ dispatch.Logger
+ Fatalf(string, ...interface{})
+}
+
const initialNiceValue int64 = 10000
var (
type Dispatcher struct {
*dispatch.Dispatcher
+ logger logrus.FieldLogger
cluster *arvados.Cluster
sqCheck *SqueueChecker
slurm Slurm
// Minimum time between two attempts to run the same container
MinRetryPeriod arvados.Duration
+
+ // Batch size for container queries
+ BatchSize int64
}
func main() {
- disp := &Dispatcher{}
+ logger := logrus.StandardLogger()
+ if os.Getenv("DEBUG") != "" {
+ logger.SetLevel(logrus.DebugLevel)
+ }
+ logger.Formatter = &logrus.JSONFormatter{
+ TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
+ }
+ disp := &Dispatcher{logger: logger}
err := disp.Run(os.Args[0], os.Args[1:])
if err != nil {
- log.Fatal(err)
+ logrus.Fatalf("%s", err)
}
}
return nil
}
- log.Printf("crunch-dispatch-slurm %s started", version)
+ disp.logger.Printf("crunch-dispatch-slurm %s started", version)
err := disp.readConfig(*configPath)
if err != nil {
os.Setenv("ARVADOS_KEEP_SERVICES", strings.Join(disp.Client.KeepServiceURIs, " "))
os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
} else {
- log.Printf("warning: Client credentials missing from config, so falling back on environment variables (deprecated).")
+ disp.logger.Warnf("Client credentials missing from config, so falling back on environment variables (deprecated).")
}
if *dumpConfig {
siteConfig, err := arvados.GetConfig(arvados.DefaultConfigFile)
if os.IsNotExist(err) {
- log.Printf("warning: no cluster config (%s), proceeding with no node types defined", err)
+ disp.logger.Warnf("no cluster config (%s), proceeding with no node types defined", err)
} else if err != nil {
return fmt.Errorf("error loading config: %s", err)
} else if disp.cluster, err = siteConfig.GetCluster(""); err != nil {
// setup() initializes private fields after configure().
func (disp *Dispatcher) setup() {
+ if disp.logger == nil {
+ disp.logger = logrus.StandardLogger()
+ }
arv, err := arvadosclient.MakeArvadosClient()
if err != nil {
- log.Fatalf("Error making Arvados client: %v", err)
+ disp.logger.Fatalf("Error making Arvados client: %v", err)
}
arv.Retries = 25
- disp.slurm = &slurmCLI{}
+ disp.slurm = NewSlurmCLI()
disp.sqCheck = &SqueueChecker{
+ Logger: disp.logger,
Period: time.Duration(disp.PollPeriod),
PrioritySpread: disp.PrioritySpread,
Slurm: disp.slurm,
}
disp.Dispatcher = &dispatch.Dispatcher{
Arv: arv,
+ Logger: disp.logger,
+ BatchSize: disp.BatchSize,
RunContainer: disp.runContainer,
PollPeriod: time.Duration(disp.PollPeriod),
MinRetryPeriod: time.Duration(disp.MinRetryPeriod),
crArgs = append(crArgs, container.UUID)
crScript := strings.NewReader(execScript(crArgs))
- disp.sqCheck.L.Lock()
- defer disp.sqCheck.L.Unlock()
-
sbArgs, err := disp.sbatchArgs(container)
if err != nil {
return err
case <-ctx.Done():
// Disappeared from squeue
if err := disp.Arv.Get("containers", ctr.UUID, nil, &ctr); err != nil {
- log.Printf("Error getting final container state for %s: %s", ctr.UUID, err)
+ log.Printf("error getting final container state for %s: %s", ctr.UUID, err)
}
switch ctr.State {
case dispatch.Running:
}
}
func (disp *Dispatcher) scancel(ctr arvados.Container) {
- disp.sqCheck.L.Lock()
err := disp.slurm.Cancel(ctr.UUID)
- disp.sqCheck.L.Unlock()
-
if err != nil {
log.Printf("scancel: %s", err)
time.Sleep(time.Second)
"fmt"
"io"
"io/ioutil"
- "log"
"net/http"
"net/http/httptest"
"os"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/arvadostest"
"git.curoverse.com/arvados.git/sdk/go/dispatch"
+ "github.com/Sirupsen/logrus"
. "gopkg.in/check.v1"
)
var containers arvados.ContainerList
err = arv.List("containers", params, &containers)
c.Check(err, IsNil)
- c.Check(len(containers.Items), Equals, 1)
+ c.Assert(len(containers.Items), Equals, 1)
s.disp.CrunchRunCommand = []string{"echo"}
}
s.disp.slurm = &s.slurm
- s.disp.sqCheck = &SqueueChecker{Period: 500 * time.Millisecond, Slurm: s.disp.slurm}
+ s.disp.sqCheck = &SqueueChecker{
+ Logger: logrus.StandardLogger(),
+ Period: 500 * time.Millisecond,
+ Slurm: s.disp.slurm,
+ }
err = s.disp.Dispatcher.Run(ctx)
<-doneRun
apiStubResponses["/arvados/v1/api_client_authorizations/current"] = arvadostest.StubResponse{200, `{"uuid":"` + arvadostest.Dispatch1AuthUUID + `"}`}
apiStubResponses["/arvados/v1/containers"] = arvadostest.StubResponse{500, string(`{}`)}
- s.testWithServerStub(c, apiStubResponses, "echo", "Error getting list of containers")
+ s.testWithServerStub(c, apiStubResponses, "echo", "error getting count of containers")
}
func (s *StubbedSuite) testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubResponse, crunchCmd string, expected string) {
}
buf := bytes.NewBuffer(nil)
- log.SetOutput(io.MultiWriter(buf, os.Stderr))
- defer log.SetOutput(os.Stderr)
+ logrus.SetOutput(io.MultiWriter(buf, os.Stderr))
+ defer logrus.SetOutput(os.Stderr)
s.disp.CrunchRunCommand = []string{crunchCmd}
Renice(name string, nice int64) error
}
-type slurmCLI struct{}
+type slurmCLI struct {
+ runSemaphore chan bool
+}
+
+func NewSlurmCLI() *slurmCLI {
+ return &slurmCLI{
+ runSemaphore: make(chan bool, 3),
+ }
+}
func (scli *slurmCLI) Batch(script io.Reader, args []string) error {
return scli.run(script, "sbatch", args)
}
func (scli *slurmCLI) run(stdin io.Reader, prog string, args []string) error {
+ scli.runSemaphore <- true
+ defer func() { <-scli.runSemaphore }()
cmd := exec.Command(prog, args...)
cmd.Stdin = stdin
out, err := cmd.CombinedOutput()
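
The runSemaphore channel added above is a counting semaphore: run() blocks whenever three slurm commands are already in flight, so sbatch/scancel/scontrol invocations can no longer pile up. A standalone sketch of the idiom (the capacity of 3 mirrors the patch; the worker loop is purely illustrative):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	sem := make(chan bool, 3) // at most 3 concurrent "slurm commands"
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			sem <- true              // acquire: blocks while all 3 slots are taken
			defer func() { <-sem }() // release the slot when done
			fmt.Println("running command", i)
			time.Sleep(50 * time.Millisecond)
		}(i)
	}
	wg.Wait()
}
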
import (
"bytes"
"fmt"
- "log"
"sort"
"strings"
"sync"
// Squeue implements asynchronous polling monitor of the SLURM queue using the
// command 'squeue'.
type SqueueChecker struct {
+ Logger logger
Period time.Duration
PrioritySpread int64
Slurm Slurm
queue map[string]*slurmJob
startOnce sync.Once
done chan struct{}
- sync.Cond
+ lock sync.RWMutex
+ notify sync.Cond
}
// HasUUID checks if a given container UUID is in the slurm queue.
func (sqc *SqueueChecker) HasUUID(uuid string) bool {
sqc.startOnce.Do(sqc.start)
- sqc.L.Lock()
- defer sqc.L.Unlock()
+ sqc.lock.RLock()
+ defer sqc.lock.RUnlock()
// block until next squeue broadcast signaling an update.
- sqc.Wait()
+ sqc.notify.Wait()
_, exists := sqc.queue[uuid]
return exists
}
// container.
func (sqc *SqueueChecker) SetPriority(uuid string, want int64) {
sqc.startOnce.Do(sqc.start)
- sqc.L.Lock()
- defer sqc.L.Unlock()
- job, ok := sqc.queue[uuid]
- if !ok {
+
+ sqc.lock.RLock()
+ job := sqc.queue[uuid]
+ if job == nil {
// Wait in case the slurm job was just submitted and
// will appear in the next squeue update.
- sqc.Wait()
- if job, ok = sqc.queue[uuid]; !ok {
- return
- }
+ sqc.notify.Wait()
+ job = sqc.queue[uuid]
+ }
+ needUpdate := job != nil && job.wantPriority != want
+ sqc.lock.RUnlock()
+
+ if needUpdate {
+ sqc.lock.Lock()
+ job.wantPriority = want
+ sqc.lock.Unlock()
}
- job.wantPriority = want
}
// adjust slurm job nice values as needed to ensure slurm priority
// order matches Arvados priority order.
func (sqc *SqueueChecker) reniceAll() {
- sqc.L.Lock()
- defer sqc.L.Unlock()
-
+ // This is slow (it shells out to scontrol many times) and no
+ // other goroutines update sqc.queue or any of the job fields
+ // we use here, so we don't acquire a lock.
jobs := make([]*slurmJob, 0, len(sqc.queue))
for _, j := range sqc.queue {
if j.wantPriority == 0 {
}
err := sqc.Slurm.Renice(job.uuid, niceNew)
if err != nil && niceNew > slurm15NiceLimit && strings.Contains(err.Error(), "Invalid nice value") {
- log.Printf("container %q clamping nice values at %d, priority order will not be correct -- see https://dev.arvados.org/projects/arvados/wiki/SLURM_integration#Limited-nice-values-SLURM-15", job.uuid, slurm15NiceLimit)
+ sqc.Logger.Warnf("container %q clamping nice values at %d, priority order will not be correct -- see https://dev.arvados.org/projects/arvados/wiki/SLURM_integration#Limited-nice-values-SLURM-15", job.uuid, slurm15NiceLimit)
job.hitNiceLimit = true
}
}
// queued). If it succeeds, it updates sqc.queue and wakes up any
// goroutines that are waiting in HasUUID() or All().
func (sqc *SqueueChecker) check() {
- // Mutex between squeue sync and running sbatch or scancel. This
- // establishes a sequence so that squeue doesn't run concurrently with
- // sbatch or scancel; the next update of squeue will occur only after
- // sbatch or scancel has completed.
- sqc.L.Lock()
- defer sqc.L.Unlock()
-
cmd := sqc.Slurm.QueueCommand([]string{"--all", "--noheader", "--format=%j %y %Q %T %r"})
stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
cmd.Stdout, cmd.Stderr = stdout, stderr
if err := cmd.Run(); err != nil {
- log.Printf("Error running %q %q: %s %q", cmd.Path, cmd.Args, err, stderr.String())
+ sqc.Logger.Warnf("Error running %q %q: %s %q", cmd.Path, cmd.Args, err, stderr.String())
return
}
var uuid, state, reason string
var n, p int64
if _, err := fmt.Sscan(line, &uuid, &n, &p, &state, &reason); err != nil {
- log.Printf("warning: ignoring unparsed line in squeue output: %q", line)
+ sqc.Logger.Warnf("ignoring unparsed line in squeue output: %q", line)
continue
}
+
+ // No other goroutines write to jobs' priority or nice
+ // fields, so we can read and write them without
+ // locks.
replacing, ok := sqc.queue[uuid]
if !ok {
replacing = &slurmJob{uuid: uuid}
// "launch failed requeued held" seems to be
// another manifestation of this problem,
// resolved the same way.
- log.Printf("releasing held job %q (priority=%d, state=%q, reason=%q)", uuid, p, state, reason)
+ sqc.Logger.Printf("releasing held job %q (priority=%d, state=%q, reason=%q)", uuid, p, state, reason)
sqc.Slurm.Release(uuid)
- } else if p < 1<<20 && replacing.wantPriority > 0 {
- log.Printf("warning: job %q has low priority %d, nice %d, state %q, reason %q", uuid, p, n, state, reason)
+ } else if state != "RUNNING" && p <= 2*slurm15NiceLimit && replacing.wantPriority > 0 {
+ sqc.Logger.Warnf("job %q has low priority %d, nice %d, state %q, reason %q", uuid, p, n, state, reason)
}
}
+ sqc.lock.Lock()
sqc.queue = newq
- sqc.Broadcast()
+ sqc.lock.Unlock()
+ sqc.notify.Broadcast()
}
// Initialize, and start a goroutine to call check() once per
// squeue.Period until terminated by calling Stop().
func (sqc *SqueueChecker) start() {
- sqc.L = &sync.Mutex{}
+ sqc.notify.L = sqc.lock.RLocker()
sqc.done = make(chan struct{})
go func() {
ticker := time.NewTicker(sqc.Period)
case <-ticker.C:
sqc.check()
sqc.reniceAll()
+ select {
+ case <-ticker.C:
+ // If this iteration took
+ // longer than sqc.Period,
+ // consume the next tick and
+ // wait. Otherwise we would
+ // starve other goroutines.
+ default:
+ }
}
}
}()
// names reported by squeue.
func (sqc *SqueueChecker) All() []string {
sqc.startOnce.Do(sqc.start)
- sqc.L.Lock()
- defer sqc.L.Unlock()
- sqc.Wait()
+ sqc.lock.RLock()
+ defer sqc.lock.RUnlock()
+ sqc.notify.Wait()
var uuids []string
for u := range sqc.queue {
uuids = append(uuids, u)
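
The SqueueChecker now pairs one sync.RWMutex with a sync.Cond built on lock.RLocker(): callers such as HasUUID() and All() sit in Wait() under the read lock until the next poll, while check() takes the write lock only long enough to swap in the fresh queue before broadcasting. A self-contained sketch of that pattern (the poller type and its fields are illustrative, not taken from the patch):

package main

import (
	"fmt"
	"sync"
	"time"
)

type poller struct {
	lock   sync.RWMutex
	notify *sync.Cond
	queue  map[string]bool
}

func newPoller() *poller {
	p := &poller{queue: map[string]bool{}}
	p.notify = sync.NewCond(p.lock.RLocker())
	go func() {
		for i := 0; ; i++ {
			time.Sleep(100 * time.Millisecond)
			newq := map[string]bool{fmt.Sprintf("job-%d", i): true}
			p.lock.Lock()
			p.queue = newq // hold the write lock only to swap the snapshot
			p.lock.Unlock()
			p.notify.Broadcast()
		}
	}()
	return p
}

// Has blocks until the next poll completes, then reports whether uuid
// is present -- the same contract as HasUUID() above.
func (p *poller) Has(uuid string) bool {
	p.lock.RLock()
	defer p.lock.RUnlock()
	p.notify.Wait() // releases the read lock while waiting, re-acquires on wakeup
	return p.queue[uuid]
}

func main() {
	p := newPoller()
	fmt.Println(p.Has("job-0"))
}
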
import (
"time"
+ "github.com/Sirupsen/logrus"
. "gopkg.in/check.v1"
)
queue: uuids[0] + " 10000 4294000000 PENDING Resources\n" + uuids[1] + " 10000 4294000111 PENDING Resources\n" + uuids[2] + " 10000 0 PENDING BadConstraints\n",
}
sqc := &SqueueChecker{
+ Logger: logrus.StandardLogger(),
Slurm: slurm,
Period: time.Hour,
}
queue: test.squeue,
}
sqc := &SqueueChecker{
+ Logger: logrus.StandardLogger(),
Slurm: slurm,
PrioritySpread: test.spread,
Period: time.Hour,
rejectNice10K: true,
}
sqc := &SqueueChecker{
+ Logger: logrus.StandardLogger(),
Slurm: slurm,
PrioritySpread: 1,
Period: time.Hour,
slurm := &slurmFake{}
sqc := &SqueueChecker{
+ Logger: logrus.StandardLogger(),
Slurm: slurm,
Period: time.Hour,
}
"PollPeriod": "10s",
"SbatchArguments": ["--partition=foo", "--exclude=node13"],
"ReserveExtraRAM": 268435456,
+ "BatchSize": 10000
}`)
func usage(fs *flag.FlagSet) {
with llfuse.lock_released:
if not self._current_user:
self._current_user = self.api.users().current().execute(num_retries=self.num_retries)
- return self._current_user["uuid"] in self.project_object["writable_by"]
+ return self._current_user["uuid"] in self.project_object.get("writable_by", [])
def persisted(self):
return True
if not self.stale():
return
- all_projects = arvados.util.list_all(
- self.api.groups().list, self.num_retries,
- filters=[['group_class','=','project']],
- select=["uuid", "owner_uuid"])
- objects = {}
- for ob in all_projects:
- objects[ob['uuid']] = ob
-
+ contents = {}
roots = []
root_owners = set()
- current_uuid = self.current_user['uuid']
- for ob in all_projects:
- if ob['owner_uuid'] != current_uuid and ob['owner_uuid'] not in objects:
- roots.append(ob['uuid'])
- root_owners.add(ob['owner_uuid'])
-
- lusers = arvados.util.list_all(
- self.api.users().list, self.num_retries,
- filters=[['uuid','in', list(root_owners)]])
- lgroups = arvados.util.list_all(
- self.api.groups().list, self.num_retries,
- filters=[['uuid','in', list(root_owners)+roots]])
-
- for l in lusers:
- objects[l["uuid"]] = l
- for l in lgroups:
- objects[l["uuid"]] = l
+ objects = {}
+
+ methods = self.api._rootDesc.get('resources')["groups"]['methods']
+ if 'httpMethod' in methods.get('shared', {}):
+ page = []
+ while True:
+ resp = self.api.groups().shared(filters=[['group_class', '=', 'project']]+page,
+ order="uuid",
+ limit=10000,
+ count="none",
+ include="owner_uuid").execute()
+ if not resp["items"]:
+ break
+ page = [["uuid", ">", resp["items"][len(resp["items"])-1]["uuid"]]]
+ for r in resp["items"]:
+ objects[r["uuid"]] = r
+ roots.append(r["uuid"])
+ for r in resp["included"]:
+ objects[r["uuid"]] = r
+ root_owners.add(r["uuid"])
+ else:
+ all_projects = arvados.util.list_all(
+ self.api.groups().list, self.num_retries,
+ filters=[['group_class','=','project']],
+ select=["uuid", "owner_uuid"])
+ for ob in all_projects:
+ objects[ob['uuid']] = ob
+
+ current_uuid = self.current_user['uuid']
+ for ob in all_projects:
+ if ob['owner_uuid'] != current_uuid and ob['owner_uuid'] not in objects:
+ roots.append(ob['uuid'])
+ root_owners.add(ob['owner_uuid'])
+
+ lusers = arvados.util.list_all(
+ self.api.users().list, self.num_retries,
+ filters=[['uuid','in', list(root_owners)]])
+ lgroups = arvados.util.list_all(
+ self.api.groups().list, self.num_retries,
+ filters=[['uuid','in', list(root_owners)+roots]])
+
+ for l in lusers:
+ objects[l["uuid"]] = l
+ for l in lgroups:
+ objects[l["uuid"]] = l
- contents = {}
for r in root_owners:
if r in objects:
obr = objects[r]
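
The groups().shared loop above pages with a keyset cursor (order by uuid, then filter uuid > last-seen, count="none") instead of offset paging, so each page costs the same no matter how many projects are shared. A generic sketch of that cursor loop, with listPage standing in for the API call (a hypothetical helper, not part of the patch):

package main

import "fmt"

type record struct{ UUID string }

// listPage is a stand-in for an API call that returns up to limit
// records with UUID strictly greater than afterUUID, ordered by UUID.
func listPage(afterUUID string, limit int) []record {
	return nil // a real implementation would query the server here
}

func listAll() []record {
	var all []record
	after := ""
	for {
		page := listPage(after, 10000)
		if len(page) == 0 {
			break // an empty page means the cursor is exhausted
		}
		all = append(all, page...)
		after = page[len(page)-1].UUID // resume strictly after the last UUID seen
	}
	return all
}

func main() {
	fmt.Println("fetched", len(listAll()), "records")
}
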
import (
"sync"
- "sync/atomic"
"time"
"git.curoverse.com/arvados.git/sdk/go/arvados"
MaxUUIDEntries int
registry *prometheus.Registry
- stats cacheStats
metrics cacheMetrics
pdhs *lru.TwoQueueCache
collections *lru.TwoQueueCache
setupOnce sync.Once
}
-// cacheStats is EOL - add new metrics to cacheMetrics instead
-type cacheStats struct {
- Requests uint64 `json:"Cache.Requests"`
- CollectionBytes uint64 `json:"Cache.CollectionBytes"`
- CollectionEntries int `json:"Cache.CollectionEntries"`
- CollectionHits uint64 `json:"Cache.CollectionHits"`
- PDHHits uint64 `json:"Cache.UUIDHits"`
- PermissionHits uint64 `json:"Cache.PermissionHits"`
- APICalls uint64 `json:"Cache.APICalls"`
-}
-
type cacheMetrics struct {
requests prometheus.Counter
collectionBytes prometheus.Gauge
"select": []string{"portable_data_hash"},
}
-func (c *cache) Stats() cacheStats {
- c.setupOnce.Do(c.setup)
- return cacheStats{
- Requests: atomic.LoadUint64(&c.stats.Requests),
- CollectionBytes: c.collectionBytes(),
- CollectionEntries: c.collections.Len(),
- CollectionHits: atomic.LoadUint64(&c.stats.CollectionHits),
- PDHHits: atomic.LoadUint64(&c.stats.PDHHits),
- PermissionHits: atomic.LoadUint64(&c.stats.PermissionHits),
- APICalls: atomic.LoadUint64(&c.stats.APICalls),
- }
-}
-
// Update saves a modified version (fs) to an existing collection
// (coll) and, if successful, updates the relevant cache entries so
// subsequent calls to Get() reflect the modifications.
func (c *cache) Get(arv *arvadosclient.ArvadosClient, targetID string, forceReload bool) (*arvados.Collection, error) {
c.setupOnce.Do(c.setup)
-
- atomic.AddUint64(&c.stats.Requests, 1)
c.metrics.requests.Inc()
permOK := false
c.permissions.Remove(permKey)
} else {
permOK = true
- atomic.AddUint64(&c.stats.PermissionHits, 1)
c.metrics.permissionHits.Inc()
}
}
c.pdhs.Remove(targetID)
} else {
pdh = ent.pdh
- atomic.AddUint64(&c.stats.PDHHits, 1)
c.metrics.pdhHits.Inc()
}
}
// likely, the cached PDH is still correct; if so,
// _and_ the current token has permission, we can
// use our cached manifest.
- atomic.AddUint64(&c.stats.APICalls, 1)
c.metrics.apiCalls.Inc()
var current arvados.Collection
err := arv.Get("collections", targetID, selectPDH, ¤t)
}
// Collection manifest is not cached.
- atomic.AddUint64(&c.stats.APICalls, 1)
c.metrics.apiCalls.Inc()
err := arv.Get("collections", targetID, nil, &collection)
if err != nil {
c.collections.Remove(key)
return nil
}
- atomic.AddUint64(&c.stats.CollectionHits, 1)
c.metrics.collectionHits.Inc()
return ent.collection
}
package main
import (
+ "bytes"
+
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/arvadostest"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/expfmt"
"gopkg.in/check.v1"
)
+func (s *UnitSuite) checkCacheMetrics(c *check.C, reg *prometheus.Registry, regs ...string) {
+ mfs, err := reg.Gather()
+ c.Check(err, check.IsNil)
+ buf := &bytes.Buffer{}
+ enc := expfmt.NewEncoder(buf, expfmt.FmtText)
+ for _, mf := range mfs {
+ c.Check(enc.Encode(mf), check.IsNil)
+ }
+ mm := buf.String()
+ for _, reg := range regs {
+ c.Check(mm, check.Matches, `(?ms).*collectioncache_`+reg+`\n.*`)
+ }
+}
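
checkCacheMetrics asserts against the registry's text exposition format rather than reading counters directly, which keeps the test independent of the cache's internal fields. A minimal sketch of the same Gather-and-encode round trip on its own (the example metric name is invented; only the Gather and expfmt calls mirror the helper above):

package main

import (
	"bytes"
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/expfmt"
)

func main() {
	reg := prometheus.NewRegistry()
	requests := prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "example",
		Subsystem: "cache",
		Name:      "requests",
		Help:      "Number of cache lookups.",
	})
	reg.MustRegister(requests)
	requests.Add(5)

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	buf := &bytes.Buffer{}
	enc := expfmt.NewEncoder(buf, expfmt.FmtText)
	for _, mf := range mfs {
		if err := enc.Encode(mf); err != nil {
			panic(err)
		}
	}
	fmt.Print(buf.String()) // includes a line like "example_cache_requests 5"
}
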
+
func (s *UnitSuite) TestCache(c *check.C) {
arv, err := arvadosclient.MakeArvadosClient()
c.Assert(err, check.Equals, nil)
cache := DefaultConfig().Cache
+ cache.registry = prometheus.NewRegistry()
// Hit the same collection 5 times using the same token. Only
// the first req should cause an API call; the next 4 should
c.Check(coll.PortableDataHash, check.Equals, arvadostest.FooPdh)
c.Check(coll.ManifestText[:2], check.Equals, ". ")
}
- c.Check(cache.Stats().Requests, check.Equals, uint64(5))
- c.Check(cache.Stats().CollectionHits, check.Equals, uint64(4))
- c.Check(cache.Stats().PermissionHits, check.Equals, uint64(4))
- c.Check(cache.Stats().PDHHits, check.Equals, uint64(4))
- c.Check(cache.Stats().APICalls, check.Equals, uint64(1))
+ s.checkCacheMetrics(c, cache.registry,
+ "requests 5",
+ "hits 4",
+ "permission_hits 4",
+ "pdh_hits 4",
+ "api_calls 1")
// Hit the same collection 2 more times, this time requesting
// it by PDH and using a different token. The first req should
c.Check(coll2.ManifestText[:2], check.Equals, ". ")
c.Check(coll2.ManifestText, check.Not(check.Equals), coll.ManifestText)
- c.Check(cache.Stats().Requests, check.Equals, uint64(5+1))
- c.Check(cache.Stats().CollectionHits, check.Equals, uint64(4+0))
- c.Check(cache.Stats().PermissionHits, check.Equals, uint64(4+0))
- c.Check(cache.Stats().PDHHits, check.Equals, uint64(4+0))
- c.Check(cache.Stats().APICalls, check.Equals, uint64(1+1))
+ s.checkCacheMetrics(c, cache.registry,
+ "requests 6",
+ "hits 4",
+ "permission_hits 4",
+ "pdh_hits 4",
+ "api_calls 2")
coll2, err = cache.Get(arv, arvadostest.FooPdh, false)
c.Check(err, check.Equals, nil)
c.Check(coll2.PortableDataHash, check.Equals, arvadostest.FooPdh)
c.Check(coll2.ManifestText[:2], check.Equals, ". ")
- c.Check(cache.Stats().Requests, check.Equals, uint64(5+2))
- c.Check(cache.Stats().CollectionHits, check.Equals, uint64(4+1))
- c.Check(cache.Stats().PermissionHits, check.Equals, uint64(4+1))
- c.Check(cache.Stats().PDHHits, check.Equals, uint64(4+0))
- c.Check(cache.Stats().APICalls, check.Equals, uint64(1+1))
+ s.checkCacheMetrics(c, cache.registry,
+ "requests 7",
+ "hits 5",
+ "permission_hits 5",
+ "pdh_hits 4",
+ "api_calls 2")
// Alternating between two collections N times should produce
// only 2 more API calls.
_, err := cache.Get(arv, target, false)
c.Check(err, check.Equals, nil)
}
- c.Check(cache.Stats().Requests, check.Equals, uint64(5+2+20))
- c.Check(cache.Stats().CollectionHits, check.Equals, uint64(4+1+18))
- c.Check(cache.Stats().PermissionHits, check.Equals, uint64(4+1+18))
- c.Check(cache.Stats().PDHHits, check.Equals, uint64(4+0+18))
- c.Check(cache.Stats().APICalls, check.Equals, uint64(1+1+2))
+ s.checkCacheMetrics(c, cache.registry,
+ "requests 27",
+ "hits 23",
+ "permission_hits 23",
+ "pdh_hits 22",
+ "api_calls 4")
}
func (s *UnitSuite) TestCacheForceReloadByPDH(c *check.C) {
c.Assert(err, check.Equals, nil)
cache := DefaultConfig().Cache
+ cache.registry = prometheus.NewRegistry()
for _, forceReload := range []bool{false, true, false, true} {
_, err := cache.Get(arv, arvadostest.FooPdh, forceReload)
c.Check(err, check.Equals, nil)
}
- c.Check(cache.Stats().Requests, check.Equals, uint64(4))
- c.Check(cache.Stats().CollectionHits, check.Equals, uint64(3))
- c.Check(cache.Stats().PermissionHits, check.Equals, uint64(1))
- c.Check(cache.Stats().PDHHits, check.Equals, uint64(0))
- c.Check(cache.Stats().APICalls, check.Equals, uint64(3))
+ s.checkCacheMetrics(c, cache.registry,
+ "requests 4",
+ "hits 3",
+ "permission_hits 1",
+ "pdh_hits 0",
+ "api_calls 3")
}
func (s *UnitSuite) TestCacheForceReloadByUUID(c *check.C) {
c.Assert(err, check.Equals, nil)
cache := DefaultConfig().Cache
+ cache.registry = prometheus.NewRegistry()
for _, forceReload := range []bool{false, true, false, true} {
_, err := cache.Get(arv, arvadostest.FooCollection, forceReload)
c.Check(err, check.Equals, nil)
}
- c.Check(cache.Stats().Requests, check.Equals, uint64(4))
- c.Check(cache.Stats().CollectionHits, check.Equals, uint64(3))
- c.Check(cache.Stats().PermissionHits, check.Equals, uint64(1))
- c.Check(cache.Stats().PDHHits, check.Equals, uint64(3))
- c.Check(cache.Stats().APICalls, check.Equals, uint64(3))
+ s.checkCacheMetrics(c, cache.registry,
+ "requests 4",
+ "hits 3",
+ "permission_hits 1",
+ "pdh_hits 3",
+ "api_calls 3")
}
}
func (h *handler) serveStatus(w http.ResponseWriter, r *http.Request) {
- status := struct {
- cacheStats
- Version string
- }{
- cacheStats: h.Config.Cache.Stats(),
- Version: version,
- }
- json.NewEncoder(w).Encode(status)
+ json.NewEncoder(w).Encode(struct{ Version string }{version})
}
// updateOnSuccess wraps httpserver.ResponseWriter. If the handler
}
var (
+ corsAllowHeadersHeader = strings.Join([]string{
+ "Authorization", "Content-Type", "Range",
+ // WebDAV request headers:
+ "Depth", "Destination", "If", "Lock-Token", "Overwrite", "Timeout",
+ }, ", ")
writeMethod = map[string]bool{
"COPY": true,
"DELETE": true,
statusCode = http.StatusMethodNotAllowed
return
}
- w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type, Range")
+ w.Header().Set("Access-Control-Allow-Headers", corsAllowHeadersHeader)
w.Header().Set("Access-Control-Allow-Methods", "COPY, DELETE, GET, MKCOL, MOVE, OPTIONS, POST, PROPFIND, PUT, RMCOL")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Max-Age", "86400")
c.Check(resp.Body.String(), check.Equals, "")
c.Check(resp.Header().Get("Access-Control-Allow-Origin"), check.Equals, "*")
c.Check(resp.Header().Get("Access-Control-Allow-Methods"), check.Equals, "COPY, DELETE, GET, MKCOL, MOVE, OPTIONS, POST, PROPFIND, PUT, RMCOL")
- c.Check(resp.Header().Get("Access-Control-Allow-Headers"), check.Equals, "Authorization, Content-Type, Range")
+ c.Check(resp.Header().Get("Access-Control-Allow-Headers"), check.Equals, "Authorization, Content-Type, Range, Depth, Destination, If, Lock-Token, Overwrite, Timeout")
// Check preflight for a disallowed request
resp = httptest.NewRecorder()
var status map[string]interface{}
err := json.NewDecoder(resp.Body).Decode(&status)
c.Check(err, check.IsNil)
- c.Check(status["Cache.Requests"], check.Equals, float64(0))
c.Check(status["Version"], check.Not(check.Equals), "")
}
}
client := s3.New(auth, region)
+ if region.EC2Endpoint.Signer == aws.V4Signature {
+ // Currently affects only eu-central-1
+ client.Signature = aws.V4Signature
+ }
client.ConnectTimeout = time.Duration(v.ConnectTimeout)
client.ReadTimeout = time.Duration(v.ReadTimeout)
v.bucket = &s3bucket{
s.files = ["bin/arvados-login-sync", "agpl-3.0.txt"]
s.executables << "arvados-login-sync"
s.required_ruby_version = '>= 2.1.0'
- s.add_runtime_dependency 'arvados', '~> 1.1.0', '>= 1.1.4'
+ s.add_runtime_dependency 'arvados', '~> 1.2.0', '>= 1.2.0'
s.homepage =
'https://arvados.org'
end