gem 'httpclient', '~> 2.5'
# This fork has Rails 4 compatible routes
-gem 'themes_for_rails', git: 'https://github.com/holtkampw/themes_for_rails', ref: '1fd2d7897d75ae0d6375f4c390df87b8e91ad417'
+gem 'themes_for_rails', git: 'https://github.com/curoverse/themes_for_rails'
gem "deep_merge", :require => 'deep_merge/rails_compat'
GIT
- remote: https://github.com/holtkampw/themes_for_rails
- revision: 1fd2d7897d75ae0d6375f4c390df87b8e91ad417
- ref: 1fd2d7897d75ae0d6375f4c390df87b8e91ad417
+ remote: https://github.com/curoverse/themes_for_rails
+ revision: 61154877047d2346890bda0b7be5827cf51a6a76
specs:
themes_for_rails (0.5.1)
rails (>= 3.0.0)
remote: https://rubygems.org/
specs:
RedCloth (4.2.9)
- actionmailer (4.1.9)
- actionpack (= 4.1.9)
- actionview (= 4.1.9)
+ actionmailer (4.1.12)
+ actionpack (= 4.1.12)
+ actionview (= 4.1.12)
mail (~> 2.5, >= 2.5.4)
- actionpack (4.1.9)
- actionview (= 4.1.9)
- activesupport (= 4.1.9)
+ actionpack (4.1.12)
+ actionview (= 4.1.12)
+ activesupport (= 4.1.12)
rack (~> 1.5.2)
rack-test (~> 0.6.2)
- actionview (4.1.9)
- activesupport (= 4.1.9)
+ actionview (4.1.12)
+ activesupport (= 4.1.12)
builder (~> 3.1)
erubis (~> 2.7.0)
- activemodel (4.1.9)
- activesupport (= 4.1.9)
+ activemodel (4.1.12)
+ activesupport (= 4.1.12)
builder (~> 3.1)
- activerecord (4.1.9)
- activemodel (= 4.1.9)
- activesupport (= 4.1.9)
+ activerecord (4.1.12)
+ activemodel (= 4.1.12)
+ activesupport (= 4.1.12)
arel (~> 5.0.0)
activerecord-nulldb-adapter (0.3.1)
activerecord (>= 2.0.0)
- activesupport (4.1.9)
+ activesupport (4.1.12)
i18n (~> 0.6, >= 0.6.9)
json (~> 1.7, >= 1.7.7)
minitest (~> 5.1)
uuidtools (>= 2.1.0)
headless (1.0.2)
highline (1.6.21)
- hike (1.2.3)
httpclient (2.6.0.1)
i18n (0.7.0)
jquery-rails (3.1.2)
railties (>= 3.0, < 5.0)
thor (>= 0.14, < 2.0)
- json (1.8.2)
+ json (1.8.3)
jwt (0.1.13)
multi_json (>= 1.5)
launchy (2.4.3)
mail (2.6.3)
mime-types (>= 1.16, < 3)
metaclass (0.0.4)
- mime-types (2.4.3)
+ mime-types (2.6.1)
mini_portile (0.6.2)
- minitest (5.5.1)
+ minitest (5.7.0)
mocha (1.1.0)
metaclass (~> 0.0.1)
morrisjs-rails (0.5.1)
railties (> 3.1, < 5)
- multi_json (1.10.1)
+ multi_json (1.11.1)
multipart-post (1.2.0)
net-scp (1.2.1)
net-ssh (>= 2.6.5)
cliver (~> 0.3.1)
multi_json (~> 1.0)
websocket-driver (>= 0.2.0)
- rack (1.5.2)
+ rack (1.5.5)
rack-mini-profiler (0.9.2)
rack (>= 1.1.3)
rack-test (0.6.3)
rack (>= 1.0)
- rails (4.1.9)
- actionmailer (= 4.1.9)
- actionpack (= 4.1.9)
- actionview (= 4.1.9)
- activemodel (= 4.1.9)
- activerecord (= 4.1.9)
- activesupport (= 4.1.9)
+ rails (4.1.12)
+ actionmailer (= 4.1.12)
+ actionpack (= 4.1.12)
+ actionview (= 4.1.12)
+ activemodel (= 4.1.12)
+ activerecord (= 4.1.12)
+ activesupport (= 4.1.12)
bundler (>= 1.3.0, < 2.0)
- railties (= 4.1.9)
+ railties (= 4.1.12)
sprockets-rails (~> 2.0)
rails-perftest (0.0.5)
- railties (4.1.9)
- actionpack (= 4.1.9)
- activesupport (= 4.1.9)
+ railties (4.1.12)
+ actionpack (= 4.1.12)
+ activesupport (= 4.1.12)
rake (>= 0.8.7)
thor (>= 0.18.1, < 2.0)
rake (10.4.2)
simplecov-rcov (0.2.3)
simplecov (>= 0.4.1)
slop (3.6.0)
- sprockets (2.12.3)
- hike (~> 1.2)
- multi_json (~> 1.0)
+ sprockets (3.2.0)
rack (~> 1.0)
- tilt (~> 1.1, != 1.3.0)
- sprockets-rails (2.2.2)
+ sprockets-rails (2.3.2)
actionpack (>= 3.0)
activesupport (>= 3.0)
sprockets (>= 2.8, < 4.0)
libv8 (~> 3.16.14.0)
ref
thor (0.19.1)
- thread_safe (0.3.4)
+ thread_safe (0.3.5)
tilt (1.4.1)
tzinfo (1.2.2)
thread_safe (~> 0.1)
@@notification_tests = []
@@notification_tests.push lambda { |controller, current_user|
+ return nil if Rails.configuration.shell_in_a_box_url
AuthorizedKey.limit(1).where(authorized_user_uuid: current_user.uuid).each do
return nil
end
end
def public # Yes 'public' is the name of the action for public projects
- return render_not_found if not Rails.configuration.anonymous_user_token
+ return render_not_found if not Rails.configuration.anonymous_user_token or not Rails.configuration.enable_public_projects_page
@objects = using_specific_api_token Rails.configuration.anonymous_user_token do
Group.where(group_class: 'project').order("updated_at DESC")
end
if params['openid_prefix'] && params['openid_prefix'].size>0
setup_params[:openid_prefix] = params['openid_prefix']
end
- if params['repo_name'] && params['repo_name'].size>0
- setup_params[:repo_name] = params['repo_name']
- end
if params['vm_uuid'] && params['vm_uuid'].size>0
setup_params[:vm_uuid] = params['vm_uuid']
end
end
super
end
+
+  # Render the in-browser terminal (ShellInABox) page for this VM.
+  # Responds 404 unless the operator has configured shell_in_a_box_url.
+  def webshell
+    return render_not_found if not Rails.configuration.shell_in_a_box_url
+    # shell_in_a_box_url is a format string, e.g.
+    # "https://%{hostname}.webshell.example.com/" -- fill in this VM's
+    # uuid and hostname via String#% named references.
+    @webshell_url = Rails.configuration.shell_in_a_box_url % {
+      uuid: @object.uuid,
+      hostname: @object.hostname,
+    }
+    # Standalone page (opened in its own tab from the VM list); no layout.
+    render layout: false
+  end
+
end
else
s = ""
if days > 0
- s += "#{days}<span class='time-label-divider'>d</span> "
+ s += "#{days}<span class='time-label-divider'>d</span>"
end
if (hours > 0)
s += "#{minutes}<span class='time-label-divider'>m</span>"
- if not round_to_min
+ if not round_to_min or (days == 0 and hours == 0 and minutes == 0)
s += "#{seconds}<span class='time-label-divider'>s</span>"
end
end
</li>
<% end %>
<% else %>
- <% if Rails.configuration.anonymous_user_token %>
+ <% if Rails.configuration.anonymous_user_token and Rails.configuration.enable_public_projects_page %>
<li><%= link_to 'Browse public projects', "/projects/public" %></li>
<% end %>
<li class="dropdown hover-dropdown login-menu">
<span class="caret"></span>
</a>
<ul class="dropdown-menu" style="min-width: 20em" role="menu">
- <% if Rails.configuration.anonymous_user_token %>
+ <% if Rails.configuration.anonymous_user_token and Rails.configuration.enable_public_projects_page %>
<li><%= link_to 'Browse public projects', "/projects/public", class: 'btn btn-xs btn-default pull-right' %></li>
<% end %>
<li>
<div class="col-md-3">
<% if current_job[:started_at] %>
<% walltime = ((if current_job[:finished_at] then current_job[:finished_at] else Time.now() end) - current_job[:started_at]) %>
- <% cputime = tasks.map { |task|
- if task.started_at and task.job_uuid == current_job[:uuid]
- (if task.finished_at then task.finished_at else Time.now() end) - task.started_at
- else
- 0
- end
- }.reduce(:+) || 0 %>
- <%= render_runtime(walltime, false, false) %>
- <% if cputime > 0 %> / <%= render_runtime(cputime, false, false) %> (<%= (cputime/walltime).round(1) %>⨯)<% end %>
+ <% cputime = (current_job[:runtime_constraints].andand[:min_nodes] || 1) *
+ ((current_job[:finished_at] || Time.now()) - current_job[:started_at]) %>
+ <%= render_runtime(walltime, false) %>
+ <% if cputime > 0 %> / <%= render_runtime(cputime, false) %> (<%= (cputime/walltime).round(1) %>⨯)<% end %>
<% end %>
</div>
<% end %>
<%# column offset 5 %>
<div class="col-md-6">
<% queuetime = Time.now - Time.parse(current_job[:created_at].to_s) %>
- Queued for <%= render_runtime(queuetime, true) %>.
+ Queued for <%= render_runtime(queuetime, false) %>.
<% begin %>
<% if current_job[:queue_position] == 0 %>
This job is next in the queue to run.
</p>
<% end %>
-<% tasks = JobTask.filter([['job_uuid', 'in', job_uuids]]).results %>
<% runningtime = determine_wallclock_runtime(pipeline_jobs.map {|j| j[:job]}.compact) %>
<p>
end %>
<%= if walltime > runningtime
- render_runtime(walltime, true, false)
+ render_runtime(walltime, false)
else
- render_runtime(runningtime, true, false)
+ render_runtime(runningtime, false)
end %><% if @object.finished_at %> at <%= render_localized_date(@object.finished_at) %><% end %>.
<% else %>
This pipeline is <%= if @object.state.start_with? 'Running' then 'active' else @object.state.downcase end %>.
ran
<% end %>
for
- <% cputime = tasks.map { |task|
- if task.started_at
- (if task.finished_at then task.finished_at else Time.now() end) - task.started_at
- else
- 0
- end
+ <%
+ cputime = pipeline_jobs.map { |j|
+ if j[:job][:started_at]
+ (j[:job][:runtime_constraints].andand[:min_nodes] || 1) * ((j[:job][:finished_at] || Time.now()) - j[:job][:started_at])
+ else
+ 0
+ end
}.reduce(:+) || 0 %>
- <%= render_runtime(runningtime, true, false) %><% if (walltime - runningtime) > 0 %>
- (<%= render_runtime(walltime - runningtime, true, false) %> queued)<% end %><% if cputime == 0 %>.<% else %>
+ <%= render_runtime(runningtime, false) %><% if (walltime - runningtime) > 0 %>
+ (<%= render_runtime(walltime - runningtime, false) %> queued)<% end %><% if cputime == 0 %>.<% else %>
and used
- <%= render_runtime(cputime, true, false) %>
- of CPU time (<%= (cputime/runningtime).round(1) %>⨯ scaling).
+ <%= render_runtime(cputime, false) %>
+ of node allocation time (<%= (cputime/runningtime).round(1) %>⨯ scaling).
<% end %>
</p>
%>
<% pipeline_jobs.each_with_index do |pj, i| %>
- <%= render partial: 'running_component', locals: {tasks: tasks, pj: pj, i: i, expanded: false} %>
+ <%= render partial: 'running_component', locals: {pj: pj, i: i, expanded: false} %>
<% end %>
<%= writable ? 'writable' : 'read-only' %>
</td>
<td style="word-break:break-all;">
+ <code><%= repo.http_fetch_url %></code><br/>
<code><%= writable ? repo.push_url : repo.fetch_url %></code>
</td>
<td>
<div id="manage_virtual_machines" class="panel-body">
<p>
- For more information see <%= link_to raw('Arvados Docs → User Guide → SSH access'),
- "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html",
+ For more information see <%= link_to raw('Arvados Docs → User Guide → VM access'),
+ "#{Rails.configuration.arvados_docsite}/user/getting_started/vm-login-with-webshell.html",
target: "_blank"%>.
- <% if @my_virtual_machines.any? or true %>
- A sample <code>~/.ssh/config</code> entry is provided below.
- <% end %>
</p>
<% if !@my_virtual_machines.any? %>
<th> Host name </th>
<th> Login name </th>
<th> Command line </th>
+ <% if Rails.configuration.shell_in_a_box_url %>
+ <th> Web shell <span class="label label-info">beta</span></th>
+ <% end %>
</tr>
</thead>
<tbody>
<% end %>
<% end %>
</td>
+ <% if Rails.configuration.shell_in_a_box_url %>
+ <td>
+ <% @my_vm_logins[vm[:uuid]].andand.each do |login| %>
+ <%= link_to webshell_virtual_machine_path(vm, login: login), title: "Open a terminal session in your browser", class: 'btn btn-xs btn-default', target: "_blank" do %>
+ Log in as <%= login %><br />
+ <% end %>
+ <% end %>
+ </td>
+ <% end %>
</tr>
<% end %>
</tbody>
</table>
- <p><i>~/.ssh/config:</i></p>
+    <p>Sample SSH config entry for <i>~/.ssh/config</i>:</p>
<pre>Host *.arvados
TCPKeepAlive yes
ServerAliveInterval 60
<div class="modal-header">
<button type="button" class="close" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">×</button>
<div>
- <div class="col-sm-6"> <h4 class="modal-title">Setup User</h4> </div>
+ <div class="col-sm-6"> <h4 class="modal-title">Setup Shell Account</h4> </div>
<div class="spinner spinner-32px spinner-h-center col-sm-1" hidden="true"></div>
</div>
<br/>
<% disable_email = uuid != nil %>
<% identity_url_prefix = @current_selections[:identity_url_prefix] %>
<% disable_url_prefix = identity_url_prefix != nil %>
- <% selected_repo = @current_selections[:repo_name] %>
<% selected_vm = @current_selections[:vm_uuid] %>
<% groups = @current_selections[:groups] %>
value="<%= Rails.configuration.default_openid_prefix %>">
<% end %>
</div>
- <div class="form-group">
- <label for="repo_name">Repository Name and Shell Login</label>
- <input class="form-control" id="repo_name" maxlength="250" name="repo_name" type="text" value="<%=selected_repo%>">
- </div>
<div class="form-group">
<label for="vm_uuid">Virtual Machine</label>
<select class="form-control" name="vm_uuid">
</blockquote>
<p>
- As an admin, you can setup this user. Please input a VM and
- repository for the user. If you had previously provided any of
- these items, they are pre-filled for you and you can leave them
- as is if you would like to reuse them.
+ As an admin, you can setup a shell account for this user.
+ The login name is automatically generated from the user's e-mail address.
</p>
<blockquote>
- <%= link_to "Setup #{@object.full_name}", setup_popup_user_url(id: @object.uuid), {class: 'btn btn-primary', :remote => true, 'data-toggle' => "modal", 'data-target' => '#user-setup-modal-window'} %>
+ <%= link_to "Setup shell account #{'for ' if @object.full_name.present?} #{@object.full_name}", setup_popup_user_url(id: @object.uuid), {class: 'btn btn-primary', :remote => true, 'data-toggle' => "modal", 'data-target' => '#user-setup-modal-window'} %>
</blockquote>
<p>
--- /dev/null
+<html>
+  <%# Minimal standalone page hosting a ShellInABox terminal for one VM.
+      Rendered with "layout: false" by VirtualMachinesController#webshell. %>
+  <head>
+    <title><%= @object.hostname %> / <%= Rails.configuration.site_name %></title>
+    <link rel="stylesheet" href="<%= asset_path 'webshell/styles.css' %>" type="text/css">
+    <link rel="icon" href="<%= asset_path 'favicon.ico' %>" type="image/x-icon">
+    <style type="text/css">
+      body {
+        margin: 0px;
+      }
+    </style>
+    <script type="text/javascript"><!--
+      (function() {
+        // We would like to hide overflowing lines as this can lead to
+        // visually jarring results if the browser substitutes oversized
+        // Unicode characters from different fonts. Unfortunately, a bug
+        // in Firefox prevents it from allowing multi-line text
+        // selections whenever we change the "overflow" style. So, only
+        // do so for non-Netscape browsers.
+        if (typeof navigator.appName == 'undefined' ||
+            navigator.appName != 'Netscape') {
+          document.write('<style type="text/css">' +
+                         '#vt100 #console div, #vt100 #alt_console div {' +
+                         '  overflow: hidden;' +
+                         '}' +
+                         '</style>');
+        }
+      })();
+
+      // Open the ShellInABox session, then auto-type the login name and --
+      // after the password prompt has had time to appear -- the API token.
+      // The username/token parameters are unused: the values are
+      // interpolated server-side by ERB.
+      function login(username, token) {
+        var sh = new ShellInABox("<%= j @webshell_url %>");
+        setTimeout(function() {
+          sh.keysPressed("<%= j params[:login] %>\n");
+          setTimeout(function() {
+            sh.keysPressed("<%= j Thread.current[:arvados_api_token] %>\n");
+            sh.vt100('(sent authentication token)\n');
+          }, 2000);
+        }, 2000);
+      }
+      // -->
+    </script>
+    <script type="text/javascript" src="<%= asset_path 'webshell/shell_in_a_box.js' %>"></script>
+  </head>
+  <!-- Load ShellInABox from a timer as Konqueror sometimes fails to
+       correctly deal with the enclosing frameset (if any), if we do not
+       do this.
+  -->
+  <body onload="setTimeout(login, 1000)"
+        scroll="no"><noscript>JavaScript must be enabled for ShellInABox</noscript>
+  </body>
+</html>
# in the directory where your API server is running.
anonymous_user_token: false
+  # When anonymous_user_token is configured, show the public projects page.
+ enable_public_projects_page: true
+
# Ask Arvados API server to compress its response payloads.
api_response_compression: true
+
+ # ShellInABox service endpoint URL for a given VM. If false, do not
+ # offer web shell logins.
+ #
+ # E.g., using a path-based proxy server to forward connections to shell hosts:
+ # https://webshell.uuid_prefix.arvadosapi.com/%{hostname}
+ #
+ # E.g., using a name-based proxy server to forward connections to shell hosts:
+ # https://%{hostname}.webshell.uuid_prefix.arvadosapi.com/
+ shell_in_a_box_url: false
ks.each do |kk|
cfg = cfg.send(kk)
end
- if cfg.respond_to?(k.to_sym) and !cfg.send(k).nil?
- # Config must have been set already in environments/*.rb.
+ if v.nil? and cfg.respond_to?(k) and !cfg.send(k).nil?
+ # Config is nil in *.yml, but has been set already in
+ # environments/*.rb (or has a Rails default). Don't overwrite
+ # the default/upstream config with nil.
#
# After config files have been migrated, this mechanism should
- # be deprecated, then removed.
+ # be removed.
+ Rails.logger.warn <<EOS
+DEPRECATED: Inheriting config.#{ks.join '.'} from Rails config.
+ Please move this config into config/application.yml.
+EOS
elsif v.nil?
# Config variables are not allowed to be nil. Make a "naughty"
# list, and present it below.
resources :traits
resources :api_client_authorizations
resources :virtual_machines
+ get '/virtual_machines/:id/webshell/:login' => 'virtual_machines#webshell', :as => :webshell_virtual_machine
resources :authorized_keys
resources :job_tasks
resources :jobs do
--- /dev/null
+See also
+* VirtualMachinesController#webshell
+* https://code.google.com/p/shellinabox/source/browse/#git%2Fshellinabox
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml" xml:lang="en" lang="en">
+<head>
+</head>
+<body><pre class="box"><div
+ ><i id="27">Esc</i><i id="112">F1</i><i id="113">F2</i><i id="114">F3</i
+ ><i id="115">F4</i><i id="116">F5</i><i id="117">F6</i><i id="118">F7</i
+ ><i id="119">F8</i><i id="120">F9</i><i id="121">F10</i><i id="122">F11</i
+ ><i id="123">F12</i><br
+ /><b><span class="unshifted">`</span><span class="shifted">~</span></b
+ ><b><span class="unshifted">1</span><span class="shifted">!</span></b
+ ><b><span class="unshifted">2</span><span class="shifted">@</span></b
+ ><b><span class="unshifted">3</span><span class="shifted">#</span></b
+ ><b><span class="unshifted">4</span><span class="shifted">$</span></b
+ ><b><span class="unshifted">5</span><span class="shifted">%</span></b
+ ><b><span class="unshifted">6</span><span class="shifted">^</span></b
+ ><b><span class="unshifted">7</span><span class="shifted">&</span></b
+ ><b><span class="unshifted">8</span><span class="shifted">*</span></b
+ ><b><span class="unshifted">9</span><span class="shifted">(</span></b
+ ><b><span class="unshifted">0</span><span class="shifted">)</span></b
+ ><b><span class="unshifted">-</span><span class="shifted">_</span></b
+ ><b><span class="unshifted">=</span><span class="shifted">+</span></b
+ ><i id="8"> ← </i
+ ><br
+ /><i id="9">Tab</i
+ ><b>Q</b><b>W</b><b>E</b><b>R</b><b>T</b><b>Y</b><b>U</b><b>I</b><b>O</b
+ ><b>P</b
+ ><b><span class="unshifted">[</span><span class="shifted">{</span></b
+ ><b><span class="unshifted">]</span><span class="shifted">}</span></b
+ ><b><span class="unshifted">\</span><span class="shifted">|</span></b
+ ><br
+ /><u>Tab </u
+ ><b>A</b><b>S</b><b>D</b><b>F</b><b>G</b><b>H</b><b>J</b><b>K</b><b>L</b
+ ><b><span class="unshifted">;</span><span class="shifted">:</span></b
+ ><b><span class="unshifted">'</span><span class="shifted">"</span></b
+ ><i id="13">Enter</i
+ ><br
+ /><u> </u
+ ><i id="16">Shift</i
+ ><b>Z</b><b>X</b><b>C</b><b>V</b><b>B</b><b>N</b><b>M</b
+ ><b><span class="unshifted">,</span><span class="shifted"><</span></b
+ ><b><span class="unshifted">.</span><span class="shifted">></span></b
+ ><b><span class="unshifted">/</span><span class="shifted">?</span></b
+ ><i id="16">Shift</i
+ ><br
+ /><u>XXX</u
+ ><i id="17">Ctrl</i
+ ><i id="18">Alt</i
+ ><i style="width: 25ex"> </i
+ ></div
+ > <div
+ ><i id="45">Ins</i><i id="46">Del</i><i id="36">Home</i><i id="35">End</i
+ ><br
+ /><u> </u><br
+ /><u> </u><br
+ /><u>Ins</u><s> </s><b id="38">↑</b><s> </s><u> </u
+ ><b id="33">⇑</b><br
+ /><u>Ins</u><b id="37">←</b><b id="40">↓</b
+ ><b id="39">→</b><u> </u><b id="34">⇓</b
+ ></div
+></pre></body></html>
--- /dev/null
+// This file contains code from shell_in_a_box.js and vt100.js
+
+
+// ShellInABox.js -- Use XMLHttpRequest to provide an AJAX terminal emulator.
+// Copyright (C) 2008-2010 Markus Gutschke <markus@shellinabox.com>
+//
+// This program is free software; you can redistribute it and/or modify
+// it under the terms of the GNU General Public License version 2 as
+// published by the Free Software Foundation.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// In addition to these license terms, the author grants the following
+// additional rights:
+//
+// If you modify this program, or any covered work, by linking or
+// combining it with the OpenSSL project's OpenSSL library (or a
+// modified version of that library), containing parts covered by the
+// terms of the OpenSSL or SSLeay licenses, the author
+// grants you additional permission to convey the resulting work.
+// Corresponding Source for a non-source form of such a combination
+// shall include the source code for the parts of OpenSSL used as well
+// as that of the covered work.
+//
+// You may at your option choose to remove this additional permission from
+// the work, or from any part of it.
+//
+// It is possible to build this program in a way that it loads OpenSSL
+// libraries at run-time. If doing so, the following notices are required
+// by the OpenSSL and SSLeay licenses:
+//
+// This product includes software developed by the OpenSSL Project
+// for use in the OpenSSL Toolkit. (http://www.openssl.org/)
+//
+// This product includes cryptographic software written by Eric Young
+// (eay@cryptsoft.com)
+//
+//
+// The most up-to-date version of this program is always available from
+// http://shellinabox.com
+//
+//
+// Notes:
+//
+// The author believes that for the purposes of this license, you meet the
+// requirements for publishing the source code, if your web server publishes
+// the source in unmodified form (i.e. with licensing information, comments,
+// formatting, and identifier names intact). If there are technical reasons
+// that require you to make changes to the source code when serving the
+// JavaScript (e.g to remove pre-processor directives from the source), these
+// changes should be done in a reversible fashion.
+//
+// The author does not consider websites that reference this script in
+// unmodified form, and web servers that serve this script in unmodified form
+// to be derived works. As such, they are believed to be outside of the
+// scope of this license and not subject to the rights or restrictions of the
+// GNU General Public License.
+//
+// If in doubt, consult a legal professional familiar with the laws that
+// apply in your country.
+
+// #define XHR_UNITIALIZED 0
+// #define XHR_OPEN 1
+// #define XHR_SENT 2
+// #define XHR_RECEIVING 3
+// #define XHR_LOADED 4
+
+// IE does not define XMLHttpRequest by default, so we provide a suitable
+// wrapper.
+if (typeof XMLHttpRequest == 'undefined') {
+ XMLHttpRequest = function() {
+ try { return new ActiveXObject('Msxml2.XMLHTTP.6.0');} catch (e) { }
+ try { return new ActiveXObject('Msxml2.XMLHTTP.3.0');} catch (e) { }
+ try { return new ActiveXObject('Msxml2.XMLHTTP'); } catch (e) { }
+ try { return new ActiveXObject('Microsoft.XMLHTTP'); } catch (e) { }
+ throw new Error('');
+ };
+}
+
+function extend(subClass, baseClass) {
+ function inheritance() { }
+ inheritance.prototype = baseClass.prototype;
+ subClass.prototype = new inheritance();
+ subClass.prototype.constructor = subClass;
+ subClass.prototype.superClass = baseClass.prototype;
+};
+
+function ShellInABox(url, container) {
+ if (url == undefined) {
+ this.rooturl = document.location.href;
+ this.url = document.location.href.replace(/[?#].*/, '');
+ } else {
+ this.rooturl = url;
+ this.url = url;
+ }
+ if (document.location.hash != '') {
+ var hash = decodeURIComponent(document.location.hash).
+ replace(/^#/, '');
+ this.nextUrl = hash.replace(/,.*/, '');
+ this.session = hash.replace(/[^,]*,/, '');
+ } else {
+ this.nextUrl = this.url;
+ this.session = null;
+ }
+ this.pendingKeys = '';
+ this.keysInFlight = false;
+ this.connected = false;
+ this.superClass.constructor.call(this, container);
+
+ // We have to initiate the first XMLHttpRequest from a timer. Otherwise,
+ // Chrome never realizes that the page has loaded.
+ setTimeout(function(shellInABox) {
+ return function() {
+ shellInABox.sendRequest();
+ };
+ }(this), 1);
+};
+extend(ShellInABox, VT100);
+
+ShellInABox.prototype.sessionClosed = function() {
+ try {
+ this.connected = false;
+ if (this.session) {
+ this.session = undefined;
+ if (this.cursorX > 0) {
+ this.vt100('\r\n');
+ }
+ this.vt100('Session closed.');
+ }
+ // Revealing the "reconnect" button is commented out until we hook
+ // up the username+token auto-login mechanism to the new session:
+ //this.showReconnect(true);
+ } catch (e) {
+ }
+};
+
+ShellInABox.prototype.reconnect = function() {
+ this.showReconnect(false);
+ if (!this.session) {
+ if (document.location.hash != '') {
+ // A shellinaboxd daemon launched from a CGI only allows a single
+ // session. In order to reconnect, we must reload the frame definition
+ // and obtain a new port number. As this is a different origin, we
+ // need to get enclosing page to help us.
+ parent.location = this.nextUrl;
+ } else {
+ if (this.url != this.nextUrl) {
+ document.location.replace(this.nextUrl);
+ } else {
+ this.pendingKeys = '';
+ this.keysInFlight = false;
+ this.reset(true);
+ this.sendRequest();
+ }
+ }
+ }
+ return false;
+};
+
+ShellInABox.prototype.sendRequest = function(request) {
+ if (request == undefined) {
+ request = new XMLHttpRequest();
+ }
+ request.open('POST', this.url + '?', true);
+ request.setRequestHeader('Cache-Control', 'no-cache');
+ request.setRequestHeader('Content-Type',
+ 'application/x-www-form-urlencoded; charset=utf-8');
+ var content = 'width=' + this.terminalWidth +
+ '&height=' + this.terminalHeight +
+ (this.session ? '&session=' +
+ encodeURIComponent(this.session) : '&rooturl='+
+ encodeURIComponent(this.rooturl));
+
+ request.onreadystatechange = function(shellInABox) {
+ return function() {
+ try {
+ return shellInABox.onReadyStateChange(request);
+ } catch (e) {
+ shellInABox.sessionClosed();
+ }
+ }
+ }(this);
+ ShellInABox.lastRequestSent = Date.now();
+ request.send(content);
+};
+
+// Completion handler for the long-poll request started by sendRequest():
+// paint any terminal output, then immediately re-issue the poll to keep
+// the session alive.
+ShellInABox.prototype.onReadyStateChange = function(request) {
+  if (request.readyState == 4 /* XHR_LOADED */) {
+    if (request.status == 200) {
+      this.connected = true;
+      // NOTE(review): eval() of the HTTP response body implies complete
+      // trust in the shellinaboxd endpoint (operator-configured via
+      // shell_in_a_box_url). This is vendored upstream code; JSON.parse
+      // would be the safer modern equivalent.
+      var response = eval('(' + request.responseText + ')');
+      if (response.data) {
+        this.vt100(response.data);
+      }
+
+      // A missing or changed session id means the server ended our session.
+      if (!response.session ||
+          this.session && this.session != response.session) {
+        this.sessionClosed();
+      } else {
+        this.session = response.session;
+        this.sendRequest(request);
+      }
+    } else if (request.status == 0) {
+      // status 0: network-level failure or abort. Retry only when the
+      // request had been outstanding for >2s (i.e. looks like a timeout).
+      if (ShellInABox.lastRequestSent + 2000 < Date.now()) {
+        // Timeout, try again
+        this.sendRequest(request);
+      } else {
+        this.vt100('\r\n\r\nRequest failed.');
+        this.sessionClosed();
+      }
+    } else {
+      this.sessionClosed();
+    }
+  }
+};
+
+ShellInABox.prototype.sendKeys = function(keys) {
+ if (!this.connected) {
+ return;
+ }
+ if (this.keysInFlight || this.session == undefined) {
+ this.pendingKeys += keys;
+ } else {
+ this.keysInFlight = true;
+ keys = this.pendingKeys + keys;
+ this.pendingKeys = '';
+ var request = new XMLHttpRequest();
+ request.open('POST', this.url + '?', true);
+ request.setRequestHeader('Cache-Control', 'no-cache');
+ request.setRequestHeader('Content-Type',
+ 'application/x-www-form-urlencoded; charset=utf-8');
+ var content = 'width=' + this.terminalWidth +
+ '&height=' + this.terminalHeight +
+ '&session=' +encodeURIComponent(this.session)+
+ '&keys=' + encodeURIComponent(keys);
+ request.onreadystatechange = function(shellInABox) {
+ return function() {
+ try {
+ return shellInABox.keyPressReadyStateChange(request);
+ } catch (e) {
+ }
+ }
+ }(this);
+ request.send(content);
+ }
+};
+
+ShellInABox.prototype.keyPressReadyStateChange = function(request) {
+ if (request.readyState == 4 /* XHR_LOADED */) {
+ this.keysInFlight = false;
+ if (this.pendingKeys) {
+ this.sendKeys('');
+ }
+ }
+};
+
+// Encode pressed keys as hex digit pairs of their UTF-8 bytes -- the
+// wire format shellinaboxd expects in its "keys" POST parameter -- then
+// hand them to sendKeys() for transmission.
+// NOTE(review): charCodeAt() yields UTF-16 code units, so astral-plane
+// characters arrive as surrogate pairs and take the 3-byte branch twice
+// (CESU-8); the 4-byte branch is effectively unreachable.
+ShellInABox.prototype.keysPressed = function(ch) {
+  var hex = '0123456789ABCDEF';
+  var s = '';
+  for (var i = 0; i < ch.length; i++) {
+    var c = ch.charCodeAt(i);
+    if (c < 128) {
+      // 1-byte sequence (ASCII): two hex digits.
+      s += hex.charAt(c >> 4) + hex.charAt(c & 0xF);
+    } else if (c < 0x800) {
+      // 2-byte sequence: 110xxxxx 10xxxxxx, emitted nibble by nibble.
+      s += hex.charAt(0xC + (c >> 10) ) +
+           hex.charAt( (c >> 6) & 0xF ) +
+           hex.charAt(0x8 + ((c >> 4) & 0x3)) +
+           hex.charAt( c & 0xF );
+    } else if (c < 0x10000) {
+      // 3-byte sequence: 1110xxxx 10xxxxxx 10xxxxxx.
+      s += 'E' +
+           hex.charAt( (c >> 12) ) +
+           hex.charAt(0x8 + ((c >> 10) & 0x3)) +
+           hex.charAt( (c >> 6) & 0xF ) +
+           hex.charAt(0x8 + ((c >> 4) & 0x3)) +
+           hex.charAt( c & 0xF );
+    } else if (c < 0x110000) {
+      // 4-byte sequence: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx.
+      s += 'F' +
+           hex.charAt( (c >> 18) ) +
+           hex.charAt(0x8 + ((c >> 16) & 0x3)) +
+           hex.charAt( (c >> 12) & 0xF ) +
+           hex.charAt(0x8 + ((c >> 10) & 0x3)) +
+           hex.charAt( (c >> 6) & 0xF ) +
+           hex.charAt(0x8 + ((c >> 4) & 0x3)) +
+           hex.charAt( c & 0xF );
+    }
+  }
+  this.sendKeys(s);
+};
+
+ShellInABox.prototype.resized = function(w, h) {
+ // Do not send a resize request until we are fully initialized.
+ if (this.session) {
+ // sendKeys() always transmits the current terminal size. So, flush all
+ // pending keys.
+ this.sendKeys('');
+ }
+};
+
+ShellInABox.prototype.toggleSSL = function() {
+ if (document.location.hash != '') {
+ if (this.nextUrl.match(/\?plain$/)) {
+ this.nextUrl = this.nextUrl.replace(/\?plain$/, '');
+ } else {
+ this.nextUrl = this.nextUrl.replace(/[?#].*/, '') + '?plain';
+ }
+ if (!this.session) {
+ parent.location = this.nextUrl;
+ }
+ } else {
+ this.nextUrl = this.nextUrl.match(/^https:/)
+ ? this.nextUrl.replace(/^https:/, 'http:').replace(/\/*$/, '/plain')
+ : this.nextUrl.replace(/^http/, 'https').replace(/\/*plain$/, '');
+ }
+ if (this.nextUrl.match(/^[:]*:\/\/[^/]*$/)) {
+ this.nextUrl += '/';
+ }
+ if (this.session && this.nextUrl != this.url) {
+ alert('This change will take effect the next time you login.');
+ }
+};
+
+ShellInABox.prototype.extendContextMenu = function(entries, actions) {
+ // Modify the entries and actions in place, adding any locally defined
+ // menu entries.
+ var oldActions = [ ];
+ for (var i = 0; i < actions.length; i++) {
+ oldActions[i] = actions[i];
+ }
+ for (var node = entries.firstChild, i = 0, j = 0; node;
+ node = node.nextSibling) {
+ if (node.tagName == 'LI') {
+ actions[i++] = oldActions[j++];
+ if (node.id == "endconfig") {
+ node.id = '';
+ if (typeof serverSupportsSSL != 'undefined' && serverSupportsSSL &&
+ !(typeof disableSSLMenu != 'undefined' && disableSSLMenu)) {
+ // If the server supports both SSL and plain text connections,
+ // provide a menu entry to switch between the two.
+ var newNode = document.createElement('li');
+ var isSecure;
+ if (document.location.hash != '') {
+ isSecure = !this.nextUrl.match(/\?plain$/);
+ } else {
+ isSecure = this.nextUrl.match(/^https:/);
+ }
+ newNode.innerHTML = (isSecure ? '✔ ' : '') + 'Secure';
+ if (node.nextSibling) {
+ entries.insertBefore(newNode, node.nextSibling);
+ } else {
+ entries.appendChild(newNode);
+ }
+ actions[i++] = this.toggleSSL;
+ node = newNode;
+ }
+ node.id = 'endconfig';
+ }
+ }
+ }
+
+};
+
+ShellInABox.prototype.about = function() {
+ alert("Shell In A Box version " + "2.10 (revision 239)" +
+ "\nCopyright 2008-2010 by Markus Gutschke\n" +
+ "For more information check http://shellinabox.com" +
+ (typeof serverSupportsSSL != 'undefined' && serverSupportsSSL ?
+ "\n\n" +
+ "This product includes software developed by the OpenSSL Project\n" +
+ "for use in the OpenSSL Toolkit. (http://www.openssl.org/)\n" +
+ "\n" +
+ "This product includes cryptographic software written by " +
+ "Eric Young\n(eay@cryptsoft.com)" :
+ ""));
+};
+
+
+// VT100.js -- JavaScript based terminal emulator
+// Copyright (C) 2008-2010 Markus Gutschke <markus@shellinabox.com>
+//
+// This program is free software; you can redistribute it and/or modify
+// it under the terms of the GNU General Public License version 2 as
+// published by the Free Software Foundation.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this program; if not, write to the Free Software Foundation, Inc.,
+// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// In addition to these license terms, the author grants the following
+// additional rights:
+//
+// If you modify this program, or any covered work, by linking or
+// combining it with the OpenSSL project's OpenSSL library (or a
+// modified version of that library), containing parts covered by the
+// terms of the OpenSSL or SSLeay licenses, the author
+// grants you additional permission to convey the resulting work.
+// Corresponding Source for a non-source form of such a combination
+// shall include the source code for the parts of OpenSSL used as well
+// as that of the covered work.
+//
+// You may at your option choose to remove this additional permission from
+// the work, or from any part of it.
+//
+// It is possible to build this program in a way that it loads OpenSSL
+// libraries at run-time. If doing so, the following notices are required
+// by the OpenSSL and SSLeay licenses:
+//
+// This product includes software developed by the OpenSSL Project
+// for use in the OpenSSL Toolkit. (http://www.openssl.org/)
+//
+// This product includes cryptographic software written by Eric Young
+// (eay@cryptsoft.com)
+//
+//
+// The most up-to-date version of this program is always available from
+// http://shellinabox.com
+//
+//
+// Notes:
+//
+// The author believes that for the purposes of this license, you meet the
+// requirements for publishing the source code, if your web server publishes
+// the source in unmodified form (i.e. with licensing information, comments,
+// formatting, and identifier names intact). If there are technical reasons
+// that require you to make changes to the source code when serving the
+// JavaScript (e.g to remove pre-processor directives from the source), these
+// changes should be done in a reversible fashion.
+//
+// The author does not consider websites that reference this script in
+// unmodified form, and web servers that serve this script in unmodified form
+// to be derived works. As such, they are believed to be outside of the
+// scope of this license and not subject to the rights or restrictions of the
+// GNU General Public License.
+//
+// If in doubt, consult a legal professional familiar with the laws that
+// apply in your country.
+
+// #define ESnormal 0
+// #define ESesc 1
+// #define ESsquare 2
+// #define ESgetpars 3
+// #define ESgotpars 4
+// #define ESdeviceattr 5
+// #define ESfunckey 6
+// #define EShash 7
+// #define ESsetG0 8
+// #define ESsetG1 9
+// #define ESsetG2 10
+// #define ESsetG3 11
+// #define ESbang 12
+// #define ESpercent 13
+// #define ESignore 14
+// #define ESnonstd 15
+// #define ESpalette 16
+// #define EStitle 17
+// #define ESss2 18
+// #define ESss3 19
+
+// #define ATTR_DEFAULT 0x00F0
+// #define ATTR_REVERSE 0x0100
+// #define ATTR_UNDERLINE 0x0200
+// #define ATTR_DIM 0x0400
+// #define ATTR_BRIGHT 0x0800
+// #define ATTR_BLINK 0x1000
+
+// #define MOUSE_DOWN 0
+// #define MOUSE_UP 1
+// #define MOUSE_CLICK 2
+
+function VT100(container) {
+ if (typeof linkifyURLs == 'undefined' || linkifyURLs <= 0) {
+ this.urlRE = null;
+ } else {
+ this.urlRE = new RegExp(
+ // Known URL protocols are "http", "https", and "ftp".
+ '(?:http|https|ftp)://' +
+
+ // Optionally allow username and passwords.
+ '(?:[^:@/ \u00A0]*(?::[^@/ \u00A0]*)?@)?' +
+
+ // Hostname.
+ '(?:[1-9][0-9]{0,2}(?:[.][1-9][0-9]{0,2}){3}|' +
+ '[0-9a-fA-F]{0,4}(?::{1,2}[0-9a-fA-F]{1,4})+|' +
+ '(?!-)[^[!"#$%&\'()*+,/:;<=>?@\\^_`{|}~\u0000- \u007F-\u00A0]+)' +
+
+ // Port
+ '(?::[1-9][0-9]*)?' +
+
+ // Path.
+ '(?:/(?:(?![/ \u00A0]|[,.)}"\u0027!]+[ \u00A0]|[,.)}"\u0027!]+$).)*)*|' +
+
+ (linkifyURLs <= 1 ? '' :
+ // Also support URLs without a protocol (assume "http").
+ // Optional username and password.
+ '(?:[^:@/ \u00A0]*(?::[^@/ \u00A0]*)?@)?' +
+
+ // Hostnames must end with a well-known top-level domain or must be
+ // numeric.
+ '(?:[1-9][0-9]{0,2}(?:[.][1-9][0-9]{0,2}){3}|' +
+ 'localhost|' +
+ '(?:(?!-)' +
+ '[^.[!"#$%&\'()*+,/:;<=>?@\\^_`{|}~\u0000- \u007F-\u00A0]+[.]){2,}' +
+ '(?:(?:com|net|org|edu|gov|aero|asia|biz|cat|coop|info|int|jobs|mil|mobi|'+
+ 'museum|name|pro|tel|travel|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|' +
+ 'au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|' +
+ 'ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cu|cv|cx|cy|cz|de|dj|dk|dm|do|' +
+ 'dz|ec|ee|eg|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|' +
+ 'gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|' +
+ 'ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|' +
+ 'lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|' +
+ 'mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|' +
+ 'pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|' +
+ 'sj|sk|sl|sm|sn|so|sr|st|su|sv|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|' +
+ 'tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|' +
+ 'yu|za|zm|zw|arpa)(?![a-zA-Z0-9])|[Xx][Nn]--[-a-zA-Z0-9]+))' +
+
+ // Port
+ '(?::[1-9][0-9]{0,4})?' +
+
+ // Path.
+ '(?:/(?:(?![/ \u00A0]|[,.)}"\u0027!]+[ \u00A0]|[,.)}"\u0027!]+$).)*)*|') +
+
+ // In addition, support e-mail address. Optionally, recognize "mailto:"
+ '(?:mailto:)' + (linkifyURLs <= 1 ? '' : '?') +
+
+ // Username:
+ '[-_.+a-zA-Z0-9]+@' +
+
+ // Hostname.
+ '(?!-)[-a-zA-Z0-9]+(?:[.](?!-)[-a-zA-Z0-9]+)?[.]' +
+ '(?:(?:com|net|org|edu|gov|aero|asia|biz|cat|coop|info|int|jobs|mil|mobi|'+
+ 'museum|name|pro|tel|travel|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|' +
+ 'au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|' +
+ 'ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cu|cv|cx|cy|cz|de|dj|dk|dm|do|' +
+ 'dz|ec|ee|eg|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|' +
+ 'gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|' +
+ 'ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|' +
+ 'lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|' +
+ 'mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|' +
+ 'pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|' +
+ 'sj|sk|sl|sm|sn|so|sr|st|su|sv|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|' +
+ 'tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|' +
+ 'yu|za|zm|zw|arpa)(?![a-zA-Z0-9])|[Xx][Nn]--[-a-zA-Z0-9]+)' +
+
+ // Optional arguments
+ '(?:[?](?:(?![ \u00A0]|[,.)}"\u0027!]+[ \u00A0]|[,.)}"\u0027!]+$).)*)?');
+ }
+ this.getUserSettings();
+ this.initializeElements(container);
+ this.maxScrollbackLines = 500;
+ this.npar = 0;
+ this.par = [ ];
+ this.isQuestionMark = false;
+ this.savedX = [ ];
+ this.savedY = [ ];
+ this.savedAttr = [ ];
+ this.savedUseGMap = 0;
+ this.savedGMap = [ this.Latin1Map, this.VT100GraphicsMap,
+ this.CodePage437Map, this.DirectToFontMap ];
+ this.savedValid = [ ];
+ this.respondString = '';
+ this.titleString = '';
+ this.internalClipboard = undefined;
+ this.reset(true);
+}
+
+VT100.prototype.reset = function(clearHistory) {
+ this.isEsc = 0 /* ESnormal */;
+ this.needWrap = false;
+ this.autoWrapMode = true;
+ this.dispCtrl = false;
+ this.toggleMeta = false;
+ this.insertMode = false;
+ this.applKeyMode = false;
+ this.cursorKeyMode = false;
+ this.crLfMode = false;
+ this.offsetMode = false;
+ this.mouseReporting = false;
+ this.printing = false;
+ if (typeof this.printWin != 'undefined' &&
+ this.printWin && !this.printWin.closed) {
+ this.printWin.close();
+ }
+ this.printWin = null;
+ this.utfEnabled = this.utfPreferred;
+ this.utfCount = 0;
+ this.utfChar = 0;
+ this.color = 'ansi0 bgAnsi15';
+ this.style = '';
+ this.attr = 0x00F0 /* ATTR_DEFAULT */;
+ this.useGMap = 0;
+ this.GMap = [ this.Latin1Map,
+ this.VT100GraphicsMap,
+ this.CodePage437Map,
+ this.DirectToFontMap];
+ this.translate = this.GMap[this.useGMap];
+ this.top = 0;
+ this.bottom = this.terminalHeight;
+ this.lastCharacter = ' ';
+ this.userTabStop = [ ];
+
+ if (clearHistory) {
+ for (var i = 0; i < 2; i++) {
+ while (this.console[i].firstChild) {
+ this.console[i].removeChild(this.console[i].firstChild);
+ }
+ }
+ }
+
+ this.enableAlternateScreen(false);
+
+ var wasCompressed = false;
+ var transform = this.getTransformName();
+ if (transform) {
+ for (var i = 0; i < 2; ++i) {
+ wasCompressed |= this.console[i].style[transform] != '';
+ this.console[i].style[transform] = '';
+ }
+ this.cursor.style[transform] = '';
+ this.space.style[transform] = '';
+ if (transform == 'filter') {
+ this.console[this.currentScreen].style.width = '';
+ }
+ }
+ this.scale = 1.0;
+ if (wasCompressed) {
+ this.resizer();
+ }
+
+ this.gotoXY(0, 0);
+ this.showCursor();
+ this.isInverted = false;
+ this.refreshInvertedState();
+ this.clearRegion(0, 0, this.terminalWidth, this.terminalHeight,
+ this.color, this.style);
+};
+
+VT100.prototype.addListener = function(elem, event, listener) {
+ try {
+ if (elem.addEventListener) {
+ elem.addEventListener(event, listener, false);
+ } else {
+ elem.attachEvent('on' + event, listener);
+ }
+ } catch (e) {
+ }
+};
+
+VT100.prototype.getUserSettings = function() {
+ // Compute hash signature to identify the entries in the userCSS menu.
+ // If the menu is unchanged from last time, default values can be
+ // looked up in a cookie associated with this page.
+ this.signature = 3;
+ this.utfPreferred = true;
+ this.visualBell = typeof suppressAllAudio != 'undefined' &&
+ suppressAllAudio;
+ this.autoprint = true;
+ this.softKeyboard = false;
+ this.blinkingCursor = true;
+ if (this.visualBell) {
+ this.signature = Math.floor(16807*this.signature + 1) %
+ ((1 << 31) - 1);
+ }
+ if (typeof userCSSList != 'undefined') {
+ for (var i = 0; i < userCSSList.length; ++i) {
+ var label = userCSSList[i][0];
+ for (var j = 0; j < label.length; ++j) {
+ this.signature = Math.floor(16807*this.signature+
+ label.charCodeAt(j)) %
+ ((1 << 31) - 1);
+ }
+ if (userCSSList[i][1]) {
+ this.signature = Math.floor(16807*this.signature + 1) %
+ ((1 << 31) - 1);
+ }
+ }
+ }
+
+ var key = 'shellInABox=' + this.signature + ':';
+ var settings = document.cookie.indexOf(key);
+ if (settings >= 0) {
+ settings = document.cookie.substr(settings + key.length).
+ replace(/([0-1]*).*/, "$1");
+ if (settings.length == 5 + (typeof userCSSList == 'undefined' ?
+ 0 : userCSSList.length)) {
+ this.utfPreferred = settings.charAt(0) != '0';
+ this.visualBell = settings.charAt(1) != '0';
+ this.autoprint = settings.charAt(2) != '0';
+ this.softKeyboard = settings.charAt(3) != '0';
+ this.blinkingCursor = settings.charAt(4) != '0';
+ if (typeof userCSSList != 'undefined') {
+ for (var i = 0; i < userCSSList.length; ++i) {
+ userCSSList[i][2] = settings.charAt(i + 5) != '0';
+ }
+ }
+ }
+ }
+ this.utfEnabled = this.utfPreferred;
+};
+
+VT100.prototype.storeUserSettings = function() {
+ var settings = 'shellInABox=' + this.signature + ':' +
+ (this.utfEnabled ? '1' : '0') +
+ (this.visualBell ? '1' : '0') +
+ (this.autoprint ? '1' : '0') +
+ (this.softKeyboard ? '1' : '0') +
+ (this.blinkingCursor ? '1' : '0');
+ if (typeof userCSSList != 'undefined') {
+ for (var i = 0; i < userCSSList.length; ++i) {
+ settings += userCSSList[i][2] ? '1' : '0';
+ }
+ }
+ var d = new Date();
+ d.setDate(d.getDate() + 3653);
+ document.cookie = settings + ';expires=' + d.toGMTString();
+};
+
+VT100.prototype.initializeUserCSSStyles = function() {
+ this.usercssActions = [];
+ if (typeof userCSSList != 'undefined') {
+ var menu = '';
+ var group = '';
+ var wasSingleSel = 1;
+ var beginOfGroup = 0;
+ for (var i = 0; i <= userCSSList.length; ++i) {
+ if (i < userCSSList.length) {
+ var label = userCSSList[i][0];
+ var newGroup = userCSSList[i][1];
+ var enabled = userCSSList[i][2];
+
+ // Add user style sheet to document
+ var style = document.createElement('link');
+ var id = document.createAttribute('id');
+ id.nodeValue = 'usercss-' + i;
+ style.setAttributeNode(id);
+ var rel = document.createAttribute('rel');
+ rel.nodeValue = 'stylesheet';
+ style.setAttributeNode(rel);
+ var href = document.createAttribute('href');
+ href.nodeValue = 'usercss-' + i + '.css';
+ style.setAttributeNode(href);
+ var type = document.createAttribute('type');
+ type.nodeValue = 'text/css';
+ style.setAttributeNode(type);
+ document.getElementsByTagName('head')[0].appendChild(style);
+ style.disabled = !enabled;
+ }
+
+ // Add entry to menu
+ if (newGroup || i == userCSSList.length) {
+ if (beginOfGroup != 0 && (i - beginOfGroup > 1 || !wasSingleSel)) {
+ // The last group had multiple entries that are mutually exclusive;
+ // or the previous to last group did. In either case, we need to
+ // append a "<hr />" before we can add the last group to the menu.
+ menu += '<hr />';
+ }
+ wasSingleSel = i - beginOfGroup < 1;
+ menu += group;
+ group = '';
+
+ for (var j = beginOfGroup; j < i; ++j) {
+ this.usercssActions[this.usercssActions.length] =
+ function(vt100, current, begin, count) {
+
+ // Deselect all other entries in the group, then either select
+ // (for multiple entries in group) or toggle (for on/off entry)
+ // the current entry.
+ return function() {
+ var entry = vt100.getChildById(vt100.menu,
+ 'beginusercss');
+ var i = -1;
+ var j = -1;
+ for (var c = count; c > 0; ++j) {
+ if (entry.tagName == 'LI') {
+ if (++i >= begin) {
+ --c;
+ var label = vt100.usercss.childNodes[j];
+
+ // Restore label to just the text content
+ if (typeof label.textContent == 'undefined') {
+ var s = label.innerText;
+ label.innerHTML = '';
+ label.appendChild(document.createTextNode(s));
+ } else {
+ label.textContent= label.textContent;
+ }
+
+ // User style sheets are numbered sequentially
+ var sheet = document.getElementById(
+ 'usercss-' + i);
+ if (i == current) {
+ if (count == 1) {
+ sheet.disabled = !sheet.disabled;
+ } else {
+ sheet.disabled = false;
+ }
+ if (!sheet.disabled) {
+ label.innerHTML= '<img src="/webshell/enabled.gif" />' +
+ label.innerHTML;
+ }
+ } else {
+ sheet.disabled = true;
+ }
+ userCSSList[i][2] = !sheet.disabled;
+ }
+ }
+ entry = entry.nextSibling;
+ }
+
+ // If the font size changed, adjust cursor and line dimensions
+ this.cursor.style.cssText= '';
+ this.cursorWidth = this.cursor.clientWidth;
+ this.cursorHeight = this.lineheight.clientHeight;
+ for (i = 0; i < this.console.length; ++i) {
+ for (var line = this.console[i].firstChild; line;
+ line = line.nextSibling) {
+ line.style.height = this.cursorHeight + 'px';
+ }
+ }
+ vt100.resizer();
+ };
+ }(this, j, beginOfGroup, i - beginOfGroup);
+ }
+
+ if (i == userCSSList.length) {
+ break;
+ }
+
+ beginOfGroup = i;
+ }
+ // Collect all entries in a group, before attaching them to the menu.
+ // This is necessary as we don't know whether this is a group of
+ // mutually exclusive options (which should be separated by "<hr />" on
+ // both ends), or whether this is a on/off toggle, which can be grouped
+ // together with other on/off options.
+ group +=
+ '<li>' + (enabled ? '<img src="/webshell/enabled.gif" />' : '') +
+ label +
+ '</li>';
+ }
+ this.usercss.innerHTML = menu;
+ }
+};
+
+VT100.prototype.resetLastSelectedKey = function(e) {
+ var key = this.lastSelectedKey;
+ if (!key) {
+ return false;
+ }
+
+ var position = this.mousePosition(e);
+
+ // We don't get all the necessary events to reliably reselect a key
+ // if we moved away from it and then back onto it. We approximate the
+ // behavior by remembering the key until either we release the mouse
+ // button (we might never get this event if the mouse has since left
+ // the window), or until we move away too far.
+ var box = this.keyboard.firstChild;
+ if (position[0] < box.offsetLeft + key.offsetWidth ||
+ position[1] < box.offsetTop + key.offsetHeight ||
+ position[0] >= box.offsetLeft + box.offsetWidth - key.offsetWidth ||
+ position[1] >= box.offsetTop + box.offsetHeight - key.offsetHeight ||
+ position[0] < box.offsetLeft + key.offsetLeft - key.offsetWidth ||
+ position[1] < box.offsetTop + key.offsetTop - key.offsetHeight ||
+ position[0] >= box.offsetLeft + key.offsetLeft + 2*key.offsetWidth ||
+ position[1] >= box.offsetTop + key.offsetTop + 2*key.offsetHeight) {
+ if (this.lastSelectedKey.className) log.console('reset: deselecting');
+ this.lastSelectedKey.className = '';
+ this.lastSelectedKey = undefined;
+ }
+ return false;
+};
+
+VT100.prototype.showShiftState = function(state) {
+ var style = document.getElementById('shift_state');
+ if (state) {
+ this.setTextContentRaw(style,
+ '#vt100 #keyboard .shifted {' +
+ 'display: inline }' +
+ '#vt100 #keyboard .unshifted {' +
+ 'display: none }');
+ } else {
+ this.setTextContentRaw(style, '');
+ }
+ var elems = this.keyboard.getElementsByTagName('I');
+ for (var i = 0; i < elems.length; ++i) {
+ if (elems[i].id == '16') {
+ elems[i].className = state ? 'selected' : '';
+ }
+ }
+};
+
+VT100.prototype.showCtrlState = function(state) {
+ var ctrl = this.getChildById(this.keyboard, '17' /* Ctrl */);
+ if (ctrl) {
+ ctrl.className = state ? 'selected' : '';
+ }
+};
+
+VT100.prototype.showAltState = function(state) {
+ var alt = this.getChildById(this.keyboard, '18' /* Alt */);
+ if (alt) {
+ alt.className = state ? 'selected' : '';
+ }
+};
+
+VT100.prototype.clickedKeyboard = function(e, elem, ch, key, shift, ctrl, alt){
+ var fake = [ ];
+ fake.charCode = ch;
+ fake.keyCode = key;
+ fake.ctrlKey = ctrl;
+ fake.shiftKey = shift;
+ fake.altKey = alt;
+ fake.metaKey = alt;
+ return this.handleKey(fake);
+};
+
+VT100.prototype.addKeyBinding = function(elem, ch, key, CH, KEY) {
+ if (elem == undefined) {
+ return;
+ }
+ if (ch == '\u00A0') {
+ // A non-breaking space (U+00A0) should be treated as a regular space character.
+ ch = ' ';
+ }
+ if (ch != undefined && CH == undefined) {
+ // For letter keys, we automatically compute the uppercase character code
+ // from the lowercase one.
+ CH = ch.toUpperCase();
+ }
+ if (KEY == undefined && key != undefined) {
+ // Most keys have identical key codes for both lowercase and uppercase
+ // keypresses. Normally, only function keys would have distinct key codes,
+ // whereas regular keys have character codes.
+ KEY = key;
+ } else if (KEY == undefined && CH != undefined) {
+ // For regular keys, copy the character code to the key code.
+ KEY = CH.charCodeAt(0);
+ }
+ if (key == undefined && ch != undefined) {
+ // For regular keys, copy the character code to the key code.
+ key = ch.charCodeAt(0);
+ }
+ // Convert characters to numeric character codes. If the character code
+ // is undefined (i.e. this is a function key), set it to zero.
+ ch = ch ? ch.charCodeAt(0) : 0;
+ CH = CH ? CH.charCodeAt(0) : 0;
+
+ // Mouse down events highlight the key. We also set lastSelectedKey. This
+ // is needed so that mouseout/mouseover can keep track of the key that
+ // is currently being clicked.
+ this.addListener(elem, 'mousedown',
+ function(vt100, elem, key) { return function(e) {
+ if ((e.which || e.button) == 1) {
+ if (vt100.lastSelectedKey) {
+ vt100.lastSelectedKey.className= '';
+ }
+ // Highlight the key while the mouse button is held down.
+ if (key == 16 /* Shift */) {
+ if (!elem.className != vt100.isShift) {
+ vt100.showShiftState(!vt100.isShift);
+ }
+ } else if (key == 17 /* Ctrl */) {
+ if (!elem.className != vt100.isCtrl) {
+ vt100.showCtrlState(!vt100.isCtrl);
+ }
+ } else if (key == 18 /* Alt */) {
+ if (!elem.className != vt100.isAlt) {
+ vt100.showAltState(!vt100.isAlt);
+ }
+ } else {
+ elem.className = 'selected';
+ }
+ vt100.lastSelectedKey = elem;
+ }
+ return false; }; }(this, elem, key));
+ var clicked =
+ // Modifier keys update the state of the keyboard, but do not generate
+ // any key clicks that get forwarded to the application.
+ key >= 16 /* Shift */ && key <= 18 /* Alt */ ?
+ function(vt100, elem) { return function(e) {
+ if (elem == vt100.lastSelectedKey) {
+ if (key == 16 /* Shift */) {
+ // The user clicked the Shift key
+ vt100.isShift = !vt100.isShift;
+ vt100.showShiftState(vt100.isShift);
+ } else if (key == 17 /* Ctrl */) {
+ vt100.isCtrl = !vt100.isCtrl;
+ vt100.showCtrlState(vt100.isCtrl);
+ } else if (key == 18 /* Alt */) {
+ vt100.isAlt = !vt100.isAlt;
+ vt100.showAltState(vt100.isAlt);
+ }
+ vt100.lastSelectedKey = undefined;
+ }
+ if (vt100.lastSelectedKey) {
+ vt100.lastSelectedKey.className = '';
+ vt100.lastSelectedKey = undefined;
+ }
+ return false; }; }(this, elem) :
+ // Regular keys generate key clicks, when the mouse button is released or
+ // when a mouse click event is received.
+ function(vt100, elem, ch, key, CH, KEY) { return function(e) {
+ if (vt100.lastSelectedKey) {
+ if (elem == vt100.lastSelectedKey) {
+ // The user clicked a key.
+ if (vt100.isShift) {
+ vt100.clickedKeyboard(e, elem, CH, KEY,
+ true, vt100.isCtrl, vt100.isAlt);
+ } else {
+ vt100.clickedKeyboard(e, elem, ch, key,
+ false, vt100.isCtrl, vt100.isAlt);
+ }
+ vt100.isShift = false;
+ vt100.showShiftState(false);
+ vt100.isCtrl = false;
+ vt100.showCtrlState(false);
+ vt100.isAlt = false;
+ vt100.showAltState(false);
+ }
+ vt100.lastSelectedKey.className = '';
+ vt100.lastSelectedKey = undefined;
+ }
+ elem.className = '';
+ return false; }; }(this, elem, ch, key, CH, KEY);
+ this.addListener(elem, 'mouseup', clicked);
+ this.addListener(elem, 'click', clicked);
+
+ // When moving the mouse away from a key, check if any keys need to be
+ // deselected.
+ this.addListener(elem, 'mouseout',
+ function(vt100, elem, key) { return function(e) {
+ if (key == 16 /* Shift */) {
+ if (!elem.className == vt100.isShift) {
+ vt100.showShiftState(vt100.isShift);
+ }
+ } else if (key == 17 /* Ctrl */) {
+ if (!elem.className == vt100.isCtrl) {
+ vt100.showCtrlState(vt100.isCtrl);
+ }
+ } else if (key == 18 /* Alt */) {
+ if (!elem.className == vt100.isAlt) {
+ vt100.showAltState(vt100.isAlt);
+ }
+ } else if (elem.className) {
+ elem.className = '';
+ vt100.lastSelectedKey = elem;
+ } else if (vt100.lastSelectedKey) {
+ vt100.resetLastSelectedKey(e);
+ }
+ return false; }; }(this, elem, key));
+
+ // When moving the mouse over a key, select it if the user is still holding
+ // the mouse button down (i.e. elem == lastSelectedKey)
+ this.addListener(elem, 'mouseover',
+ function(vt100, elem, key) { return function(e) {
+ if (elem == vt100.lastSelectedKey) {
+ if (key == 16 /* Shift */) {
+ if (!elem.className != vt100.isShift) {
+ vt100.showShiftState(!vt100.isShift);
+ }
+ } else if (key == 17 /* Ctrl */) {
+ if (!elem.className != vt100.isCtrl) {
+ vt100.showCtrlState(!vt100.isCtrl);
+ }
+ } else if (key == 18 /* Alt */) {
+ if (!elem.className != vt100.isAlt) {
+ vt100.showAltState(!vt100.isAlt);
+ }
+ } else if (!elem.className) {
+ elem.className = 'selected';
+ }
+ } else {
+ vt100.resetLastSelectedKey(e);
+ }
+ return false; }; }(this, elem, key));
+};
+
+VT100.prototype.initializeKeyBindings = function(elem) {
+ if (elem) {
+ if (elem.nodeName == "I" || elem.nodeName == "B") {
+ if (elem.id) {
+ // Function keys. The JavaScript keycode is part of the "id"
+ var i = parseInt(elem.id);
+ if (i) {
+ // If the id does not parse as a number, it is not a keycode.
+ this.addKeyBinding(elem, undefined, i);
+ }
+ } else {
+ var child = elem.firstChild;
+ if (child) {
+ if (child.nodeName == "#text") {
+ // If the key only has a text node as a child, then it is a letter.
+ // Automatically compute the lower and upper case version of the
+ // key.
+ var text = this.getTextContent(child) ||
+ this.getTextContent(elem);
+ this.addKeyBinding(elem, text.toLowerCase());
+ } else if (child.nextSibling) {
+ // If the key has two children, they are the lower and upper case
+ // character code, respectively.
+ this.addKeyBinding(elem, this.getTextContent(child), undefined,
+ this.getTextContent(child.nextSibling));
+ }
+ }
+ }
+ }
+ }
+ // Recursively parse all other child nodes.
+ for (elem = elem.firstChild; elem; elem = elem.nextSibling) {
+ this.initializeKeyBindings(elem);
+ }
+};
+
+VT100.prototype.initializeKeyboardButton = function() {
+ // Configure mouse event handlers for button that displays/hides keyboard
+ this.addListener(this.keyboardImage, 'click',
+ function(vt100) { return function(e) {
+ if (vt100.keyboard.style.display != '') {
+ if (vt100.reconnectBtn.style.visibility != '') {
+ vt100.initializeKeyboard();
+ vt100.showSoftKeyboard();
+ }
+ } else {
+ vt100.hideSoftKeyboard();
+ vt100.input.focus();
+ }
+ return false; }; }(this));
+
+ // Enable button that displays keyboard
+ if (this.softKeyboard) {
+ this.keyboardImage.style.visibility = 'visible';
+ }
+};
+
+VT100.prototype.initializeKeyboard = function() {
+ // Only need to initialize the keyboard the very first time. When doing so,
+ // copy the keyboard layout from the iframe.
+ if (this.keyboard.firstChild) {
+ return;
+ }
+ this.keyboard.innerHTML =
+ this.layout.contentDocument.body.innerHTML;
+ var box = this.keyboard.firstChild;
+ this.hideSoftKeyboard();
+
+ // Configure mouse event handlers for on-screen keyboard
+ this.addListener(this.keyboard, 'click',
+ function(vt100) { return function(e) {
+ vt100.hideSoftKeyboard();
+ vt100.input.focus();
+ return false; }; }(this));
+ this.addListener(this.keyboard, 'selectstart', this.cancelEvent);
+ this.addListener(box, 'click', this.cancelEvent);
+ this.addListener(box, 'mouseup',
+ function(vt100) { return function(e) {
+ if (vt100.lastSelectedKey) {
+ vt100.lastSelectedKey.className = '';
+ vt100.lastSelectedKey = undefined;
+ }
+ return false; }; }(this));
+ this.addListener(box, 'mouseout',
+ function(vt100) { return function(e) {
+ return vt100.resetLastSelectedKey(e); }; }(this));
+ this.addListener(box, 'mouseover',
+ function(vt100) { return function(e) {
+ return vt100.resetLastSelectedKey(e); }; }(this));
+
+ // Configure SHIFT key behavior
+ var style = document.createElement('style');
+ var id = document.createAttribute('id');
+ id.nodeValue = 'shift_state';
+ style.setAttributeNode(id);
+ var type = document.createAttribute('type');
+ type.nodeValue = 'text/css';
+ style.setAttributeNode(type);
+ document.getElementsByTagName('head')[0].appendChild(style);
+
+ // Set up key bindings
+ this.initializeKeyBindings(box);
+};
+
+VT100.prototype.initializeElements = function(container) {
+ // If the necessary objects have not already been defined in the HTML
+ // page, create them now.
+ if (container) {
+ this.container = container;
+ } else if (!(this.container = document.getElementById('vt100'))) {
+ this.container = document.createElement('div');
+ this.container.id = 'vt100';
+ document.body.appendChild(this.container);
+ }
+
+ if (!this.getChildById(this.container, 'reconnect') ||
+ !this.getChildById(this.container, 'menu') ||
+ !this.getChildById(this.container, 'keyboard') ||
+ !this.getChildById(this.container, 'kbd_button') ||
+ !this.getChildById(this.container, 'kbd_img') ||
+ !this.getChildById(this.container, 'layout') ||
+ !this.getChildById(this.container, 'scrollable') ||
+ !this.getChildById(this.container, 'console') ||
+ !this.getChildById(this.container, 'alt_console') ||
+ !this.getChildById(this.container, 'ieprobe') ||
+ !this.getChildById(this.container, 'padding') ||
+ !this.getChildById(this.container, 'cursor') ||
+ !this.getChildById(this.container, 'lineheight') ||
+ !this.getChildById(this.container, 'usercss') ||
+ !this.getChildById(this.container, 'space') ||
+ !this.getChildById(this.container, 'input') ||
+ !this.getChildById(this.container, 'cliphelper')) {
+ // Only enable the "embed" object, if we have a suitable plugin. Otherwise,
+ // we might get a pointless warning that a suitable plugin is not yet
+ // installed. If in doubt, we'd rather just stay silent.
+ var embed = '';
+ try {
+ if (typeof navigator.mimeTypes["audio/x-wav"].enabledPlugin.name !=
+ 'undefined') {
+ embed = typeof suppressAllAudio != 'undefined' &&
+ suppressAllAudio ? "" :
+ '<embed classid="clsid:02BF25D5-8C17-4B23-BC80-D3488ABDDC6B" ' +
+ 'id="beep_embed" ' +
+ 'src="beep.wav" ' +
+ 'autostart="false" ' +
+ 'volume="100" ' +
+ 'enablejavascript="true" ' +
+ 'type="audio/x-wav" ' +
+ 'height="16" ' +
+ 'width="200" ' +
+ 'style="position:absolute;left:-1000px;top:-1000px" />';
+ }
+ } catch (e) {
+ }
+
+ this.container.innerHTML =
+ '<div id="reconnect" style="visibility: hidden">' +
+ '<input type="button" value="Connect" ' +
+ 'onsubmit="return false" />' +
+ '</div>' +
+ '<div id="cursize" style="visibility: hidden">' +
+ '</div>' +
+ '<div id="menu"></div>' +
+ '<div id="keyboard" unselectable="on">' +
+ '</div>' +
+ '<div id="scrollable">' +
+ '<table id="kbd_button">' +
+ '<tr><td width="100%"> </td>' +
+ '<td><img id="kbd_img" src="/webshell/keyboard.png" /></td>' +
+ '<td> </td></tr>' +
+ '</table>' +
+ '<pre id="lineheight"> </pre>' +
+ '<pre id="console">' +
+ '<pre></pre>' +
+ '<div id="ieprobe"><span> </span></div>' +
+ '</pre>' +
+ '<pre id="alt_console" style="display: none"></pre>' +
+ '<div id="padding"></div>' +
+ '<pre id="cursor"> </pre>' +
+ '</div>' +
+ '<div class="hidden">' +
+ '<div id="usercss"></div>' +
+ '<pre><div><span id="space"></span></div></pre>' +
+ '<input type="textfield" id="input" autocorrect="off" autocapitalize="off" />' +
+ '<input type="textfield" id="cliphelper" />' +
+ (typeof suppressAllAudio != 'undefined' &&
+ suppressAllAudio ? "" :
+ embed + '<bgsound id="beep_bgsound" loop=1 />') +
+ '<iframe id="layout" src="/webshell/keyboard.html" />' +
+ '</div>';
+ }
+
+ // Find the object used for playing the "beep" sound, if any.
+ if (typeof suppressAllAudio != 'undefined' && suppressAllAudio) {
+ this.beeper = undefined;
+ } else {
+ this.beeper = this.getChildById(this.container,
+ 'beep_embed');
+ if (!this.beeper || !this.beeper.Play) {
+ this.beeper = this.getChildById(this.container,
+ 'beep_bgsound');
+ if (!this.beeper || typeof this.beeper.src == 'undefined') {
+ this.beeper = undefined;
+ }
+ }
+ }
+
+ // Initialize the variables for finding the text console and the
+ // cursor.
+ this.reconnectBtn = this.getChildById(this.container,'reconnect');
+ this.curSizeBox = this.getChildById(this.container, 'cursize');
+ this.menu = this.getChildById(this.container, 'menu');
+ this.keyboard = this.getChildById(this.container, 'keyboard');
+ this.keyboardImage = this.getChildById(this.container, 'kbd_img');
+ this.layout = this.getChildById(this.container, 'layout');
+ this.scrollable = this.getChildById(this.container,
+ 'scrollable');
+ this.lineheight = this.getChildById(this.container,
+ 'lineheight');
+ this.console =
+ [ this.getChildById(this.container, 'console'),
+ this.getChildById(this.container, 'alt_console') ];
+ var ieProbe = this.getChildById(this.container, 'ieprobe');
+ this.padding = this.getChildById(this.container, 'padding');
+ this.cursor = this.getChildById(this.container, 'cursor');
+ this.usercss = this.getChildById(this.container, 'usercss');
+ this.space = this.getChildById(this.container, 'space');
+ this.input = this.getChildById(this.container, 'input');
+ this.cliphelper = this.getChildById(this.container,
+ 'cliphelper');
+
+ // Add any user selectable style sheets to the menu
+ this.initializeUserCSSStyles();
+
+ // Remember the dimensions of a standard character glyph. We would
+ // expect that we could just check cursor.clientWidth/Height at any time,
+ // but it turns out that browsers sometimes invalidate these values
+ // (e.g. while displaying a print preview screen).
+ this.cursorWidth = this.cursor.clientWidth;
+ this.cursorHeight = this.lineheight.clientHeight;
+
+ // IE has a slightly different boxing model, that we need to compensate for
+ this.isIE = ieProbe.offsetTop > 1;
+ ieProbe = undefined;
+ this.console.innerHTML = '';
+
+ // Determine if the terminal window is positioned at the beginning of the
+ // page, or if it is embedded somewhere else in the page. For full-screen
+ // terminals, automatically resize whenever the browser window changes.
+ var marginTop = parseInt(this.getCurrentComputedStyle(
+ document.body, 'marginTop'));
+ var marginLeft = parseInt(this.getCurrentComputedStyle(
+ document.body, 'marginLeft'));
+ var marginRight = parseInt(this.getCurrentComputedStyle(
+ document.body, 'marginRight'));
+ var x = this.container.offsetLeft;
+ var y = this.container.offsetTop;
+ for (var parent = this.container; parent = parent.offsetParent; ) {
+ x += parent.offsetLeft;
+ y += parent.offsetTop;
+ }
+ this.isEmbedded = marginTop != y ||
+ marginLeft != x ||
+ (window.innerWidth ||
+ document.documentElement.clientWidth ||
+ document.body.clientWidth) -
+ marginRight != x + this.container.offsetWidth;
+ if (!this.isEmbedded) {
+ // Some browsers generate resize events when the terminal is first
+ // shown. Disable showing the size indicator until a little bit after
+ // the terminal has been rendered the first time.
+ this.indicateSize = false;
+ setTimeout(function(vt100) {
+ return function() {
+ vt100.indicateSize = true;
+ };
+ }(this), 100);
+ this.addListener(window, 'resize',
+ function(vt100) {
+ return function() {
+ vt100.hideContextMenu();
+ vt100.resizer();
+ vt100.showCurrentSize();
+ }
+ }(this));
+
+ // Hide extra scrollbars attached to window
+ document.body.style.margin = '0px';
+ try { document.body.style.overflow ='hidden'; } catch (e) { }
+ try { document.body.oncontextmenu = function() {return false;};} catch(e){}
+ }
+
+ // Set up onscreen soft keyboard
+ this.initializeKeyboardButton();
+
+ // Hide context menu
+ this.hideContextMenu();
+
+ // Add listener to reconnect button
+ this.addListener(this.reconnectBtn.firstChild, 'click',
+ function(vt100) {
+ return function() {
+ var rc = vt100.reconnect();
+ vt100.input.focus();
+ return rc;
+ }
+ }(this));
+
+ // Add input listeners
+ this.addListener(this.input, 'blur',
+ function(vt100) {
+ return function() { vt100.blurCursor(); } }(this));
+ this.addListener(this.input, 'focus',
+ function(vt100) {
+ return function() { vt100.focusCursor(); } }(this));
+ this.addListener(this.input, 'keydown',
+ function(vt100) {
+ return function(e) {
+ if (!e) e = window.event;
+ return vt100.keyDown(e); } }(this));
+ this.addListener(this.input, 'keypress',
+ function(vt100) {
+ return function(e) {
+ if (!e) e = window.event;
+ return vt100.keyPressed(e); } }(this));
+ this.addListener(this.input, 'keyup',
+ function(vt100) {
+ return function(e) {
+ if (!e) e = window.event;
+ return vt100.keyUp(e); } }(this));
+
+ // Attach listeners that move the focus to the <input> field. This way we
+ // can make sure that we can receive keyboard input.
+ var mouseEvent = function(vt100, type) {
+ return function(e) {
+ if (!e) e = window.event;
+ return vt100.mouseEvent(e, type);
+ };
+ };
+ this.addListener(this.scrollable,'mousedown',mouseEvent(this, 0 /* MOUSE_DOWN */));
+ this.addListener(this.scrollable,'mouseup', mouseEvent(this, 1 /* MOUSE_UP */));
+ this.addListener(this.scrollable,'click', mouseEvent(this, 2 /* MOUSE_CLICK */));
+
+ // Check that browser supports drag and drop
+ if ('draggable' in document.createElement('span')) {
+ var dropEvent = function (vt100) {
+ return function(e) {
+ if (!e) e = window.event;
+ if (e.preventDefault) e.preventDefault();
+ vt100.keysPressed(e.dataTransfer.getData('Text'));
+ return false;
+ };
+ };
+ // Tell the browser that we *can* drop on this target
+ this.addListener(this.scrollable, 'dragover', cancel);
+ this.addListener(this.scrollable, 'dragenter', cancel);
+
+ // Add a listener for the drop event
+ this.addListener(this.scrollable, 'drop', dropEvent(this));
+ }
+
+ // Initialize the blank terminal window.
+ this.currentScreen = 0;
+ this.cursorX = 0;
+ this.cursorY = 0;
+ this.numScrollbackLines = 0;
+ this.top = 0;
+ this.bottom = 0x7FFFFFFF;
+ this.scale = 1.0;
+ this.resizer();
+ this.focusCursor();
+ this.input.focus();
+};
+
+function cancel(event) {
+ if (event.preventDefault) {
+ event.preventDefault();
+ }
+ return false;
+}
+
+VT100.prototype.getChildById = function(parent, id) {
+ var nodeList = parent.all || parent.getElementsByTagName('*');
+ if (typeof nodeList.namedItem == 'undefined') {
+ for (var i = 0; i < nodeList.length; i++) {
+ if (nodeList[i].id == id) {
+ return nodeList[i];
+ }
+ }
+ return null;
+ } else {
+ var elem = (parent.all || parent.getElementsByTagName('*')).namedItem(id);
+ return elem ? elem[0] || elem : null;
+ }
+};
+
+VT100.prototype.getCurrentComputedStyle = function(elem, style) {
+ if (typeof elem.currentStyle != 'undefined') {
+ return elem.currentStyle[style];
+ } else {
+ return document.defaultView.getComputedStyle(elem, null)[style];
+ }
+};
+
+// Overridable hook invoked by the "reconnect" button. The base
+// implementation has no transport to re-establish and reports failure;
+// subclasses that can reconnect override this and return true on success.
+VT100.prototype.reconnect = function() {
+ return false;
+};
+
+VT100.prototype.showReconnect = function(state) {
+ if (state) {
+ this.hideSoftKeyboard();
+ this.reconnectBtn.style.visibility = '';
+ } else {
+ this.reconnectBtn.style.visibility = 'hidden';
+ }
+};
+
+// Rebuild any child lines of "console" that the browser has corrupted
+// (observed while displaying a print preview): a line reporting a zero
+// clientHeight no longer takes up layout space even though it is still in
+// the DOM, so it is replaced by a freshly created clone carrying the same
+// tag, inline style, class, and text content.
+VT100.prototype.repairElements = function(console) {
+ for (var line = console.firstChild; line; line = line.nextSibling) {
+ if (!line.clientHeight) {
+ var newLine = document.createElement(line.tagName);
+ newLine.style.cssText = line.style.cssText;
+ newLine.className = line.className;
+ if (line.tagName == 'DIV') {
+ // Non-blank lines are <div>s holding one <span> per style run;
+ // clone each span together with its style and text.
+ for (var span = line.firstChild; span; span = span.nextSibling) {
+ var newSpan = document.createElement(span.tagName);
+ newSpan.style.cssText = span.style.cssText;
+ newSpan.className = span.className;
+ this.setTextContent(newSpan, this.getTextContent(span));
+ newLine.appendChild(newSpan);
+ }
+ } else {
+ // Blank lines are single elements (e.g. <pre>); copy the text.
+ this.setTextContent(newLine, this.getTextContent(line));
+ }
+ line.parentNode.replaceChild(newLine, line);
+ line = newLine;
+ }
+ }
+};
+
+// Notification hook invoked at the end of resizer(); "w" and "h" are the
+// new terminal dimensions in character cells. The default implementation
+// does nothing; embedders may override it to learn about size changes.
+VT100.prototype.resized = function(w, h) {
+};
+
+// Recompute the entire terminal layout. Called after anything that can
+// change geometry: window resizes, screen switches (enableAlternateScreen),
+// or to repair state after a print preview. Rebuilds the cursor element,
+// repairs damaged lines, resizes the scroll area, clips the cursor and the
+// scroll region to the new bounds, refreshes scrollback classNames,
+// re-centers the reconnect button, and finally calls this.resized(w, h).
+VT100.prototype.resizer = function() {
+ // Hide onscreen soft keyboard
+ this.hideSoftKeyboard();
+
+ // The cursor can get corrupted if the print-preview is displayed in Firefox.
+ // Recreating it, will repair it.
+ var newCursor = document.createElement('pre');
+ this.setTextContent(newCursor, ' ');
+ newCursor.id = 'cursor';
+ newCursor.style.cssText = this.cursor.style.cssText;
+ this.cursor.parentNode.insertBefore(newCursor, this.cursor);
+ if (!newCursor.clientHeight) {
+ // Things are broken right now. This is probably because we are
+ // displaying the print-preview. Just don't change any of our settings
+ // until the print dialog is closed again.
+ newCursor.parentNode.removeChild(newCursor);
+ return;
+ } else {
+ // Swap the old broken cursor for the newly created one.
+ this.cursor.parentNode.removeChild(this.cursor);
+ this.cursor = newCursor;
+ }
+
+ // Really horrible things happen if the contents of the terminal changes
+ // while the print-preview is showing. We get HTML elements that show up
+ // in the DOM, but that do not take up any space. Find these elements and
+ // try to fix them.
+ this.repairElements(this.console[0]);
+ this.repairElements(this.console[1]);
+
+ // Lock the cursor size to the size of a normal character. This helps with
+ // characters that are taller/shorter than normal. Unfortunately, we will
+ // still get confused if somebody enters a character that is wider/narrower
+ // than normal. This can happen if the browser tries to substitute a
+ // characters from a different font.
+ this.cursor.style.width = this.cursorWidth + 'px';
+ this.cursor.style.height = this.cursorHeight + 'px';
+
+ // Adjust height for one pixel padding of the #vt100 element.
+ // The latter is necessary to properly display the inactive cursor.
+ var console = this.console[this.currentScreen];
+ var height = (this.isEmbedded ? this.container.clientHeight
+ : (window.innerHeight ||
+ document.documentElement.clientHeight ||
+ document.body.clientHeight))-1;
+ var partial = height % this.cursorHeight;
+ this.scrollable.style.height = (height > 0 ? height : 0) + 'px';
+ this.padding.style.height = (partial > 0 ? partial : 0) + 'px';
+ var oldTerminalHeight = this.terminalHeight;
+ this.updateWidth();
+ this.updateHeight();
+
+ // Clip the cursor to the visible screen.
+ var cx = this.cursorX;
+ var cy = this.cursorY + this.numScrollbackLines;
+
+ // The alternate screen never keeps a scroll back buffer.
+ this.updateNumScrollbackLines();
+ while (this.currentScreen && this.numScrollbackLines > 0) {
+ console.removeChild(console.firstChild);
+ this.numScrollbackLines--;
+ }
+ cy -= this.numScrollbackLines;
+ if (cx < 0) {
+ cx = 0;
+ } else if (cx > this.terminalWidth) {
+ cx = this.terminalWidth - 1;
+ if (cx < 0) {
+ cx = 0;
+ }
+ }
+ if (cy < 0) {
+ cy = 0;
+ } else if (cy > this.terminalHeight) {
+ cy = this.terminalHeight - 1;
+ if (cy < 0) {
+ cy = 0;
+ }
+ }
+
+ // Clip the scroll region to the visible screen. A bottom equal to the
+ // pre-resize height means the region extended to the old last line;
+ // keep it pinned to the new last line.
+ if (this.bottom > this.terminalHeight ||
+ this.bottom == oldTerminalHeight) {
+ this.bottom = this.terminalHeight;
+ }
+ if (this.top >= this.bottom) {
+ this.top = this.bottom-1;
+ if (this.top < 0) {
+ this.top = 0;
+ }
+ }
+
+ // Truncate lines, if necessary. Explicitly reposition cursor (this is
+ // particularly important after changing the screen number), and reset
+ // the scroll region to the default.
+ this.truncateLines(this.terminalWidth);
+ this.putString(cx, cy, '', undefined);
+ this.scrollable.scrollTop = this.numScrollbackLines *
+ this.cursorHeight + 1;
+
+ // Update classNames for lines in the scrollback buffer
+ var line = console.firstChild;
+ for (var i = 0; i < this.numScrollbackLines; i++) {
+ line.className = 'scrollback';
+ line = line.nextSibling;
+ }
+ while (line) {
+ line.className = '';
+ line = line.nextSibling;
+ }
+
+ // Reposition the reconnect button
+ this.reconnectBtn.style.left = (this.terminalWidth*this.cursorWidth/
+ this.scale -
+ this.reconnectBtn.clientWidth)/2 + 'px';
+ this.reconnectBtn.style.top = (this.terminalHeight*this.cursorHeight-
+ this.reconnectBtn.clientHeight)/2 + 'px';
+
+ // Send notification that the window size has been changed
+ this.resized(this.terminalWidth, this.terminalHeight);
+};
+
+// Briefly display a "COLSxROWS" overlay centered in the terminal while
+// the user resizes the window. Suppressed until this.indicateSize has
+// been set (avoids spurious resize events during startup); hides itself
+// again one second after the last resize event.
+VT100.prototype.showCurrentSize = function() {
+ if (!this.indicateSize) {
+ return;
+ }
+ this.curSizeBox.innerHTML = '' + this.terminalWidth + 'x' +
+ this.terminalHeight;
+ // Center the box; horizontal math divides by this.scale to compensate
+ // for the current character-pitch scaling.
+ this.curSizeBox.style.left =
+ (this.terminalWidth*this.cursorWidth/
+ this.scale -
+ this.curSizeBox.clientWidth)/2 + 'px';
+ this.curSizeBox.style.top =
+ (this.terminalHeight*this.cursorHeight -
+ this.curSizeBox.clientHeight)/2 + 'px';
+ this.curSizeBox.style.visibility = '';
+ if (this.curSizeTimeout) {
+ clearTimeout(this.curSizeTimeout);
+ }
+
+ // Only show the terminal size for a short amount of time after resizing.
+ // Then hide this information, again. Some browsers generate resize events
+ // throughout the entire resize operation. This is nice, and we will show
+ // the terminal size while the user is dragging the window borders.
+ // Other browsers only generate a single event when the user releases the
+ // mouse. In those cases, we can only show the terminal size once at the
+ // end of the resize operation.
+ this.curSizeTimeout = setTimeout(function(vt100) {
+ return function() {
+ vt100.curSizeTimeout = null;
+ vt100.curSizeBox.style.visibility = 'hidden';
+ };
+ }(this), 1000);
+};
+
+VT100.prototype.selection = function() {
+ try {
+ return '' + (window.getSelection && window.getSelection() ||
+ document.selection && document.selection.type == 'Text' &&
+ document.selection.createRange().text || '');
+ } catch (e) {
+ }
+ return '';
+};
+
+// Stop an event from doing anything further: block propagation and the
+// default action on standards-compliant browsers, then neutralize the
+// event object for legacy IE. Always returns false so callers can use it
+// directly as the return value of an event handler.
+VT100.prototype.cancelEvent = function(event) {
+ try {
+ // For non-IE browsers
+ event.stopPropagation();
+ event.preventDefault();
+ } catch (e) {
+ }
+ try {
+ // For IE
+ event.cancelBubble = true;
+ event.returnValue = false;
+ event.button = 0;
+ event.keyCode = 0;
+ } catch (e) {
+ }
+ return false;
+};
+
+VT100.prototype.mousePosition = function(event) {
+ var offsetX = this.container.offsetLeft;
+ var offsetY = this.container.offsetTop;
+ for (var e = this.container; e = e.offsetParent; ) {
+ offsetX += e.offsetLeft;
+ offsetY += e.offsetTop;
+ }
+ return [ event.clientX - offsetX,
+ event.clientY - offsetY ];
+};
+
+// Central mouse handler for the scrollable area; "type" is 0=MOUSE_DOWN,
+// 1=MOUSE_UP, 2=MOUSE_CLICK. Moves keyboard focus to the hidden <input>
+// (unless text is selected), converts the event position into character
+// coordinates, optionally reports the event to the host using the xterm
+// mouse escape sequence, and brings up the context menu for button 2.
+VT100.prototype.mouseEvent = function(event, type) {
+ // If any text is currently selected, do not move the focus as that would
+ // invalidate the selection.
+ var selection = this.selection();
+ if ((type == 1 /* MOUSE_UP */ || type == 2 /* MOUSE_CLICK */) && !selection.length) {
+ this.input.focus();
+ }
+
+ // Compute mouse position in characters.
+ var position = this.mousePosition(event);
+ var x = Math.floor(position[0] / this.cursorWidth);
+ var y = Math.floor((position[1] + this.scrollable.scrollTop) /
+ this.cursorHeight) - this.numScrollbackLines;
+ var inside = true;
+ if (x >= this.terminalWidth) {
+ x = this.terminalWidth - 1;
+ inside = false;
+ }
+ if (x < 0) {
+ x = 0;
+ inside = false;
+ }
+ if (y >= this.terminalHeight) {
+ y = this.terminalHeight - 1;
+ inside = false;
+ }
+ if (y < 0) {
+ y = 0;
+ inside = false;
+ }
+
+ // Compute button number and modifier keys. Browsers exposing pageX use
+ // the standard button numbering; otherwise the lookup table maps IE's
+ // bitmask-style event.button values. Button 3 means "release".
+ var button = type != 0 /* MOUSE_DOWN */ ? 3 :
+ typeof event.pageX != 'undefined' ? event.button :
+ [ undefined, 0, 2, 0, 1, 0, 1, 0 ][event.button];
+ if (button != undefined) {
+ if (event.shiftKey) {
+ button |= 0x04;
+ }
+ if (event.altKey || event.metaKey) {
+ button |= 0x08;
+ }
+ if (event.ctrlKey) {
+ button |= 0x10;
+ }
+ }
+
+ // Report mouse events if they happen inside of the current screen and
+ // with the SHIFT key unpressed. Both of these restrictions do not apply
+ // for button releases, as we always want to report those.
+ if (this.mouseReporting && !selection.length &&
+ (type != 0 /* MOUSE_DOWN */ || !event.shiftKey)) {
+ if (inside || type != 0 /* MOUSE_DOWN */) {
+ if (button != undefined) {
+ // X10/xterm encoding: ESC [ M followed by button+32, column+33,
+ // row+33 (coordinates are one-based on the wire).
+ var report = '\u001B[M' + String.fromCharCode(button + 32) +
+ String.fromCharCode(x + 33) +
+ String.fromCharCode(y + 33);
+ if (type != 2 /* MOUSE_CLICK */) {
+ this.keysPressed(report);
+ }
+
+ // If we reported the event, stop propagating it (not sure, if this
+ // actually works on most browsers; blocking the global "oncontextmenu"
+ // even is still necessary).
+ return this.cancelEvent(event);
+ }
+ }
+ }
+
+ // Bring up context menu.
+ if (button == 2 && !event.shiftKey) {
+ if (type == 0 /* MOUSE_DOWN */) {
+ this.showContextMenu(position[0], position[1]);
+ }
+ return this.cancelEvent(event);
+ }
+
+ if (this.mouseReporting) {
+ try {
+ event.shiftKey = false;
+ } catch (e) {
+ }
+ }
+
+ return true;
+};
+
+VT100.prototype.replaceChar = function(s, ch, repl) {
+ for (var i = -1;;) {
+ i = s.indexOf(ch, i + 1);
+ if (i < 0) {
+ break;
+ }
+ s = s.substr(0, i) + repl + s.substr(i + 1);
+ }
+ return s;
+};
+
+VT100.prototype.htmlEscape = function(s) {
+ return this.replaceChar(this.replaceChar(this.replaceChar(this.replaceChar(
+ s, '&', '&'), '<', '<'), '"', '"'), ' ', '\u00A0');
+};
+
+VT100.prototype.getTextContent = function(elem) {
+ return elem.textContent ||
+ (typeof elem.textContent == 'undefined' ? elem.innerText : '');
+};
+
+// Set the plain text of "elem" to "s" without any markup or URL
+// interpretation. Skips the (expensive) DOM update when the content is
+// already identical, and copes with very old IE versions that reject
+// assignments to innerText.
+VT100.prototype.setTextContentRaw = function(elem, s) {
+ // Updating the content of an element is an expensive operation. It actually
+ // pays off to first check whether the element is still unchanged.
+ if (typeof elem.textContent == 'undefined') {
+ if (elem.innerText != s) {
+ try {
+ elem.innerText = s;
+ } catch (e) {
+ // Very old versions of IE do not allow setting innerText. Instead,
+ // remove all children, by setting innerHTML and then set the text
+ // using DOM methods.
+ elem.innerHTML = '';
+ elem.appendChild(document.createTextNode(
+ this.replaceChar(s, ' ', '\u00A0')));
+ }
+ }
+ } else {
+ if (elem.textContent != s) {
+ elem.textContent = s;
+ }
+ }
+};
+
+// Set the text of "elem". Anything matching this.urlRE is converted into
+// a clickable <a target="vt100Link"> element (guessing a mailto:/ftp://
+// or http:// protocol when none is given); text without URLs is stored
+// verbatim via setTextContentRaw(). NOTE: relies on the static
+// RegExp.leftContext/lastMatch/rightContext properties left behind by
+// this.urlRE.test(), so no other regex may run between test() calls.
+VT100.prototype.setTextContent = function(elem, s) {
+ // Check if we find any URLs in the text. If so, automatically convert them
+ // to links.
+ if (this.urlRE && this.urlRE.test(s)) {
+ var inner = '';
+ for (;;) {
+ var consumed = 0;
+ if (RegExp.leftContext != null) {
+ inner += this.htmlEscape(RegExp.leftContext);
+ consumed += RegExp.leftContext.length;
+ }
+ var url = this.htmlEscape(RegExp.lastMatch);
+ var fullUrl = url;
+
+ // If no protocol was specified, try to guess a reasonable one.
+ if (url.indexOf('http://') < 0 && url.indexOf('https://') < 0 &&
+ url.indexOf('ftp://') < 0 && url.indexOf('mailto:') < 0) {
+ var slash = url.indexOf('/');
+ var at = url.indexOf('@');
+ var question = url.indexOf('?');
+ if (at > 0 &&
+ (at < question || question < 0) &&
+ (slash < 0 || (question > 0 && slash > question))) {
+ fullUrl = 'mailto:' + url;
+ } else {
+ fullUrl = (url.indexOf('ftp.') == 0 ? 'ftp://' : 'http://') +
+ url;
+ }
+ }
+
+ inner += '<a target="vt100Link" href="' + fullUrl +
+ '">' + url + '</a>';
+ consumed += RegExp.lastMatch.length;
+ s = s.substr(consumed);
+ if (!this.urlRE.test(s)) {
+ if (RegExp.rightContext != null) {
+ inner += this.htmlEscape(RegExp.rightContext);
+ }
+ break;
+ }
+ }
+ elem.innerHTML = inner;
+ return;
+ }
+
+ this.setTextContentRaw(elem, s);
+};
+
+VT100.prototype.insertBlankLine = function(y, color, style) {
+ // Insert a blank line a position y. This method ignores the scrollback
+ // buffer. The caller has to add the length of the scrollback buffer to
+ // the position, if necessary.
+ // If the position is larger than the number of current lines, this
+ // method just adds a new line right after the last existing one. It does
+ // not add any missing lines in between. It is the caller's responsibility
+ // to do so.
+ if (!color) {
+ color = 'ansi0 bgAnsi15';
+ }
+ if (!style) {
+ style = '';
+ }
+ var line;
+ if (color != 'ansi0 bgAnsi15' && !style) {
+ line = document.createElement('pre');
+ this.setTextContent(line, '\n');
+ } else {
+ line = document.createElement('div');
+ var span = document.createElement('span');
+ span.style.cssText = style;
+ span.className = color;
+ this.setTextContent(span, this.spaces(this.terminalWidth));
+ line.appendChild(span);
+ }
+ line.style.height = this.cursorHeight + 'px';
+ var console = this.console[this.currentScreen];
+ if (console.childNodes.length > y) {
+ console.insertBefore(line, console.childNodes[y]);
+ } else {
+ console.appendChild(line);
+ }
+};
+
+VT100.prototype.updateWidth = function() {
+ this.terminalWidth = Math.floor(this.console[this.currentScreen].offsetWidth/
+ this.cursorWidth*this.scale);
+ return this.terminalWidth;
+};
+
+VT100.prototype.updateHeight = function() {
+ // We want to be able to display either a terminal window that fills the
+ // entire browser window, or a terminal window that is contained in a
+ // <div> which is embededded somewhere in the web page.
+ if (this.isEmbedded) {
+ // Embedded terminal. Use size of the containing <div> (id="vt100").
+ this.terminalHeight = Math.floor((this.container.clientHeight-1) /
+ this.cursorHeight);
+ } else {
+ // Use the full browser window.
+ this.terminalHeight = Math.floor(((window.innerHeight ||
+ document.documentElement.clientHeight ||
+ document.body.clientHeight)-1)/
+ this.cursorHeight);
+ }
+ return this.terminalHeight;
+};
+
+VT100.prototype.updateNumScrollbackLines = function() {
+ var scrollback = Math.floor(
+ this.console[this.currentScreen].offsetHeight /
+ this.cursorHeight) -
+ this.terminalHeight;
+ this.numScrollbackLines = scrollback < 0 ? 0 : scrollback;
+ return this.numScrollbackLines;
+};
+
+VT100.prototype.truncateLines = function(width) {
+ if (width < 0) {
+ width = 0;
+ }
+ for (var line = this.console[this.currentScreen].firstChild; line;
+ line = line.nextSibling) {
+ if (line.tagName == 'DIV') {
+ var x = 0;
+
+ // Traverse current line and truncate it once we saw "width" characters
+ for (var span = line.firstChild; span;
+ span = span.nextSibling) {
+ var s = this.getTextContent(span);
+ var l = s.length;
+ if (x + l > width) {
+ this.setTextContent(span, s.substr(0, width - x));
+ while (span.nextSibling) {
+ line.removeChild(line.lastChild);
+ }
+ break;
+ }
+ x += l;
+ }
+ // Prune white space from the end of the current line
+ var span = line.lastChild;
+ while (span &&
+ span.className == 'ansi0 bgAnsi15' &&
+ !span.style.cssText.length) {
+ // Scan backwards looking for first non-space character
+ var s = this.getTextContent(span);
+ for (var i = s.length; i--; ) {
+ if (s.charAt(i) != ' ' && s.charAt(i) != '\u00A0') {
+ if (i+1 != s.length) {
+ this.setTextContent(s.substr(0, i+1));
+ }
+ span = null;
+ break;
+ }
+ }
+ if (span) {
+ var sibling = span;
+ span = span.previousSibling;
+ if (span) {
+ // Remove blank <span>'s from end of line
+ line.removeChild(sibling);
+ } else {
+ // Remove entire line (i.e. <div>), if empty
+ var blank = document.createElement('pre');
+ blank.style.height = this.cursorHeight + 'px';
+ this.setTextContent(blank, '\n');
+ line.parentNode.replaceChild(blank, line);
+ }
+ }
+ }
+ }
+ }
+};
+
+VT100.prototype.putString = function(x, y, text, color, style) {
+ if (!color) {
+ color = 'ansi0 bgAnsi15';
+ }
+ if (!style) {
+ style = '';
+ }
+ var yIdx = y + this.numScrollbackLines;
+ var line;
+ var sibling;
+ var s;
+ var span;
+ var xPos = 0;
+ var console = this.console[this.currentScreen];
+ if (!text.length && (yIdx >= console.childNodes.length ||
+ console.childNodes[yIdx].tagName != 'DIV')) {
+ // Positioning cursor to a blank location
+ span = null;
+ } else {
+ // Create missing blank lines at end of page
+ while (console.childNodes.length <= yIdx) {
+ // In order to simplify lookups, we want to make sure that each line
+ // is represented by exactly one element (and possibly a whole bunch of
+ // children).
+ // For non-blank lines, we can create a <div> containing one or more
+ // <span>s. For blank lines, this fails as browsers tend to optimize them
+ // away. But fortunately, a <pre> tag containing a newline character
+ // appears to work for all browsers (a would also work, but then
+ // copying from the browser window would insert superfluous spaces into
+ // the clipboard).
+ this.insertBlankLine(yIdx);
+ }
+ line = console.childNodes[yIdx];
+
+ // If necessary, promote blank '\n' line to a <div> tag
+ if (line.tagName != 'DIV') {
+ var div = document.createElement('div');
+ div.style.height = this.cursorHeight + 'px';
+ div.innerHTML = '<span></span>';
+ console.replaceChild(div, line);
+ line = div;
+ }
+
+ // Scan through list of <span>'s until we find the one where our text
+ // starts
+ span = line.firstChild;
+ var len;
+ while (span.nextSibling && xPos < x) {
+ len = this.getTextContent(span).length;
+ if (xPos + len > x) {
+ break;
+ }
+ xPos += len;
+ span = span.nextSibling;
+ }
+
+ if (text.length) {
+ // If current <span> is not long enough, pad with spaces or add new
+ // span
+ s = this.getTextContent(span);
+ var oldColor = span.className;
+ var oldStyle = span.style.cssText;
+ if (xPos + s.length < x) {
+ if (oldColor != 'ansi0 bgAnsi15' || oldStyle != '') {
+ span = document.createElement('span');
+ line.appendChild(span);
+ span.className = 'ansi0 bgAnsi15';
+ span.style.cssText = '';
+ oldColor = 'ansi0 bgAnsi15';
+ oldStyle = '';
+ xPos += s.length;
+ s = '';
+ }
+ do {
+ s += ' ';
+ } while (xPos + s.length < x);
+ }
+
+ // If styles do not match, create a new <span>
+ var del = text.length - s.length + x - xPos;
+ if (oldColor != color ||
+ (oldStyle != style && (oldStyle || style))) {
+ if (xPos == x) {
+ // Replacing text at beginning of existing <span>
+ if (text.length >= s.length) {
+ // New text is equal or longer than existing text
+ s = text;
+ } else {
+ // Insert new <span> before the current one, then remove leading
+ // part of existing <span>, adjust style of new <span>, and finally
+ // set its contents
+ sibling = document.createElement('span');
+ line.insertBefore(sibling, span);
+ this.setTextContent(span, s.substr(text.length));
+ span = sibling;
+ s = text;
+ }
+ } else {
+ // Replacing text some way into the existing <span>
+ var remainder = s.substr(x + text.length - xPos);
+ this.setTextContent(span, s.substr(0, x - xPos));
+ xPos = x;
+ sibling = document.createElement('span');
+ if (span.nextSibling) {
+ line.insertBefore(sibling, span.nextSibling);
+ span = sibling;
+ if (remainder.length) {
+ sibling = document.createElement('span');
+ sibling.className = oldColor;
+ sibling.style.cssText = oldStyle;
+ this.setTextContent(sibling, remainder);
+ line.insertBefore(sibling, span.nextSibling);
+ }
+ } else {
+ line.appendChild(sibling);
+ span = sibling;
+ if (remainder.length) {
+ sibling = document.createElement('span');
+ sibling.className = oldColor;
+ sibling.style.cssText = oldStyle;
+ this.setTextContent(sibling, remainder);
+ line.appendChild(sibling);
+ }
+ }
+ s = text;
+ }
+ span.className = color;
+ span.style.cssText = style;
+ } else {
+ // Overwrite (partial) <span> with new text
+ s = s.substr(0, x - xPos) +
+ text +
+ s.substr(x + text.length - xPos);
+ }
+ this.setTextContent(span, s);
+
+
+ // Delete all subsequent <span>'s that have just been overwritten
+ sibling = span.nextSibling;
+ while (del > 0 && sibling) {
+ s = this.getTextContent(sibling);
+ len = s.length;
+ if (len <= del) {
+ line.removeChild(sibling);
+ del -= len;
+ sibling = span.nextSibling;
+ } else {
+ this.setTextContent(sibling, s.substr(del));
+ break;
+ }
+ }
+
+ // Merge <span> with next sibling, if styles are identical
+ if (sibling && span.className == sibling.className &&
+ span.style.cssText == sibling.style.cssText) {
+ this.setTextContent(span,
+ this.getTextContent(span) +
+ this.getTextContent(sibling));
+ line.removeChild(sibling);
+ }
+ }
+ }
+
+ // Position cursor
+ this.cursorX = x + text.length;
+ if (this.cursorX >= this.terminalWidth) {
+ this.cursorX = this.terminalWidth - 1;
+ if (this.cursorX < 0) {
+ this.cursorX = 0;
+ }
+ }
+ var pixelX = -1;
+ var pixelY = -1;
+ if (!this.cursor.style.visibility) {
+ var idx = this.cursorX - xPos;
+ if (span) {
+ // If we are in a non-empty line, take the cursor Y position from the
+ // other elements in this line. If dealing with broken, non-proportional
+ // fonts, this is likely to yield better results.
+ pixelY = span.offsetTop +
+ span.offsetParent.offsetTop;
+ s = this.getTextContent(span);
+ var nxtIdx = idx - s.length;
+ if (nxtIdx < 0) {
+ this.setTextContent(this.cursor, s.charAt(idx));
+ pixelX = span.offsetLeft +
+ idx*span.offsetWidth / s.length;
+ } else {
+ if (nxtIdx == 0) {
+ pixelX = span.offsetLeft + span.offsetWidth;
+ }
+ if (span.nextSibling) {
+ s = this.getTextContent(span.nextSibling);
+ this.setTextContent(this.cursor, s.charAt(nxtIdx));
+ if (pixelX < 0) {
+ pixelX = span.nextSibling.offsetLeft +
+ nxtIdx*span.offsetWidth / s.length;
+ }
+ } else {
+ this.setTextContent(this.cursor, ' ');
+ }
+ }
+ } else {
+ this.setTextContent(this.cursor, ' ');
+ }
+ }
+ if (pixelX >= 0) {
+ this.cursor.style.left = (pixelX + (this.isIE ? 1 : 0))/
+ this.scale + 'px';
+ } else {
+ this.setTextContent(this.space, this.spaces(this.cursorX));
+ this.cursor.style.left = (this.space.offsetWidth +
+ console.offsetLeft)/this.scale + 'px';
+ }
+ this.cursorY = yIdx - this.numScrollbackLines;
+ if (pixelY >= 0) {
+ this.cursor.style.top = pixelY + 'px';
+ } else {
+ this.cursor.style.top = yIdx*this.cursorHeight +
+ console.offsetTop + 'px';
+ }
+
+ if (text.length) {
+ // Merge <span> with previous sibling, if styles are identical
+ if ((sibling = span.previousSibling) &&
+ span.className == sibling.className &&
+ span.style.cssText == sibling.style.cssText) {
+ this.setTextContent(span,
+ this.getTextContent(sibling) +
+ this.getTextContent(span));
+ line.removeChild(sibling);
+ }
+
+ // Prune white space from the end of the current line
+ span = line.lastChild;
+ while (span &&
+ span.className == 'ansi0 bgAnsi15' &&
+ !span.style.cssText.length) {
+ // Scan backwards looking for first non-space character
+ s = this.getTextContent(span);
+ for (var i = s.length; i--; ) {
+ if (s.charAt(i) != ' ' && s.charAt(i) != '\u00A0') {
+ if (i+1 != s.length) {
+ this.setTextContent(s.substr(0, i+1));
+ }
+ span = null;
+ break;
+ }
+ }
+ if (span) {
+ sibling = span;
+ span = span.previousSibling;
+ if (span) {
+ // Remove blank <span>'s from end of line
+ line.removeChild(sibling);
+ } else {
+ // Remove entire line (i.e. <div>), if empty
+ var blank = document.createElement('pre');
+ blank.style.height = this.cursorHeight + 'px';
+ this.setTextContent(blank, '\n');
+ line.parentNode.replaceChild(blank, line);
+ }
+ }
+ }
+ }
+};
+
+VT100.prototype.gotoXY = function(x, y) {
+ if (x >= this.terminalWidth) {
+ x = this.terminalWidth - 1;
+ }
+ if (x < 0) {
+ x = 0;
+ }
+ var minY, maxY;
+ if (this.offsetMode) {
+ minY = this.top;
+ maxY = this.bottom;
+ } else {
+ minY = 0;
+ maxY = this.terminalHeight;
+ }
+ if (y >= maxY) {
+ y = maxY - 1;
+ }
+ if (y < minY) {
+ y = minY;
+ }
+ this.putString(x, y, '', undefined);
+ this.needWrap = false;
+};
+
+VT100.prototype.gotoXaY = function(x, y) {
+ this.gotoXY(x, this.offsetMode ? (this.top + y) : y);
+};
+
+VT100.prototype.refreshInvertedState = function() {
+ if (this.isInverted) {
+ this.scrollable.className += ' inverted';
+ } else {
+ this.scrollable.className = this.scrollable.className.
+ replace(/ *inverted/, '');
+ }
+};
+
+// Switch between the normal screen (state falsy) and the alternate screen
+// (state truthy) as used by full-screen applications. The normal screen's
+// cursor state is saved on the way out and restored on the way back; the
+// alternate screen is always reset. Also re-applies the character-pitch
+// CSS transform and recomputes the layout via resizer().
+VT100.prototype.enableAlternateScreen = function(state) {
+ // Don't do anything, if we are already on the desired screen
+ if ((state ? 1 : 0) == this.currentScreen) {
+ // Calling the resizer is not actually necessary. But it is a good way
+ // of resetting state that might have gotten corrupted.
+ this.resizer();
+ return;
+ }
+
+ // We save the full state of the normal screen, when we switch away from it.
+ // But for the alternate screen, no saving is necessary. We always reset
+ // it when we switch to it.
+ if (state) {
+ this.saveCursor();
+ }
+
+ // Display new screen, and initialize state (the resizer does that for us).
+ this.currentScreen = state ? 1 : 0;
+ this.console[1-this.currentScreen].style.display = 'none';
+ this.console[this.currentScreen].style.display = '';
+
+ // Select appropriate character pitch.
+ var transform = this.getTransformName();
+ if (transform) {
+ if (state) {
+ // Upon enabling the alternate screen, we switch to 80 column mode. But
+ // upon returning to the regular screen, we restore the mode that was
+ // in effect previously.
+ this.console[1].style[transform] = '';
+ }
+ var style =
+ this.console[this.currentScreen].style[transform];
+ this.cursor.style[transform] = style;
+ this.space.style[transform] = style;
+ // NOTE(review): 1.65 appears to be the horizontal scale that goes with
+ // the non-empty pitch transform -- confirm against getTransformName()
+ // and the code that sets the transform, which are outside this chunk.
+ this.scale = style == '' ? 1.0:1.65;
+ if (transform == 'filter') {
+ this.console[this.currentScreen].style.width = style == '' ? '165%':'';
+ }
+ }
+ this.resizer();
+
+ // If we switched to the alternate screen, reset it completely. Otherwise,
+ // restore the saved state.
+ if (state) {
+ this.gotoXY(0, 0);
+ this.clearRegion(0, 0, this.terminalWidth, this.terminalHeight);
+ } else {
+ this.restoreCursor();
+ }
+};
+
+VT100.prototype.hideCursor = function() {
+ var hidden = this.cursor.style.visibility == 'hidden';
+ if (!hidden) {
+ this.cursor.style.visibility = 'hidden';
+ return true;
+ }
+ return false;
+};
+
+VT100.prototype.showCursor = function(x, y) {
+ if (this.cursor.style.visibility) {
+ this.cursor.style.visibility = '';
+ this.putString(x == undefined ? this.cursorX : x,
+ y == undefined ? this.cursorY : y,
+ '', undefined);
+ return true;
+ }
+ return false;
+};
+
+VT100.prototype.scrollBack = function() {
+ var i = this.scrollable.scrollTop -
+ this.scrollable.clientHeight;
+ this.scrollable.scrollTop = i < 0 ? 0 : i;
+};
+
+VT100.prototype.scrollFore = function() {
+ var i = this.scrollable.scrollTop +
+ this.scrollable.clientHeight;
+ this.scrollable.scrollTop = i > this.numScrollbackLines *
+ this.cursorHeight + 1
+ ? this.numScrollbackLines *
+ this.cursorHeight + 1
+ : i;
+};
+
+VT100.prototype.spaces = function(i) {
+ var s = '';
+ while (i-- > 0) {
+ s += ' ';
+ }
+ return s;
+};
+
+// Fill the w-by-h rectangle whose top-left corner is (x, y) with spaces
+// in the given color/style, after clipping the rectangle to the visible
+// screen. The cursor position is preserved across the operation.
+VT100.prototype.clearRegion = function(x, y, w, h, color, style) {
+ // Clip horizontally: convert width to a right edge, clamp both sides,
+ // and bail out if nothing remains.
+ w += x;
+ if (x < 0) {
+ x = 0;
+ }
+ if (w > this.terminalWidth) {
+ w = this.terminalWidth;
+ }
+ if ((w -= x) <= 0) {
+ return;
+ }
+ // Clip vertically, same scheme.
+ h += y;
+ if (y < 0) {
+ y = 0;
+ }
+ if (h > this.terminalHeight) {
+ h = this.terminalHeight;
+ }
+ if ((h -= y) <= 0) {
+ return;
+ }
+
+ // Special case the situation where we clear the entire screen, and we do
+ // not have a scrollback buffer. In that case, we should just remove all
+ // child nodes.
+ if (!this.numScrollbackLines &&
+ w == this.terminalWidth && h == this.terminalHeight &&
+ (color == undefined || color == 'ansi0 bgAnsi15') && !style) {
+ var console = this.console[this.currentScreen];
+ while (console.lastChild) {
+ console.removeChild(console.lastChild);
+ }
+ this.putString(this.cursorX, this.cursorY, '', undefined);
+ } else {
+ // General case: overwrite each affected row with a run of spaces,
+ // hiding the cursor during the update and restoring it afterwards.
+ var hidden = this.hideCursor();
+ var cx = this.cursorX;
+ var cy = this.cursorY;
+ var s = this.spaces(w);
+ for (var i = y+h; i-- > y; ) {
+ this.putString(x, i, s, color, style);
+ }
+ hidden ? this.showCursor(cx, cy) : this.putString(cx, cy, '', undefined);
+ }
+};
+
+// Copy a run of "w" characters from console row sY (an absolute row
+// index that includes scrollback lines) starting at column sX, to
+// column dX of row dY. The source spans' CSS class and inline style
+// are carried along; any part of the source beyond the stored line is
+// treated as blank spaces. Used by scrollRegion for partial-line moves.
+VT100.prototype.copyLineSegment = function(dX, dY, sX, sY, w) {
+  // text/className/style are parallel arrays: one entry per source span.
+  var text      = [ ];
+  var className = [ ];
+  var style     = [ ];
+  var console   = this.console[this.currentScreen];
+  if (sY >= console.childNodes.length) {
+    // Source row does not exist yet: copy blanks.
+    text[0]      = this.spaces(w);
+    className[0] = undefined;
+    style[0]     = undefined;
+  } else {
+    var line = console.childNodes[sY];
+    if (line.tagName != 'DIV' || !line.childNodes.length) {
+      // Row holds no renderable spans: copy blanks.
+      text[0]      = this.spaces(w);
+      className[0] = undefined;
+      style[0]     = undefined;
+    } else {
+      // Walk the row's spans, collecting the parts that overlap
+      // columns [sX, sX+w).
+      var x = 0;
+      for (var span = line.firstChild; span && w > 0; span = span.nextSibling){
+        var s   = this.getTextContent(span);
+        var len = s.length;
+        if (x + len > sX) {
+          var o = sX > x ? sX - x : 0;
+          text[text.length]           = s.substr(o, w);
+          className[className.length] = span.className;
+          style[style.length]         = span.style.cssText;
+          w                          -= len - o;
+        }
+        x                            += len;
+      }
+      // Pad with blanks if the row was shorter than the requested width.
+      if (w > 0) {
+        text[text.length]           = this.spaces(w);
+        className[className.length] = undefined;
+        style[style.length]         = undefined;
+      }
+    }
+  }
+  // Re-draw the collected segments at the destination, keeping the
+  // cursor hidden meanwhile. Note that putString takes a screen row, so
+  // the scrollback offset is subtracted from dY.
+  var hidden = this.hideCursor();
+  var cx     = this.cursorX;
+  var cy     = this.cursorY;
+  for (var i = 0; i < text.length; i++) {
+    var color;
+    if (className[i]) {
+      color = className[i];
+    } else {
+      color = 'ansi0 bgAnsi15';
+    }
+    this.putString(dX, dY - this.numScrollbackLines, text[i], color, style[i]);
+    dX += text[i].length;
+  }
+  hidden ? this.showCursor(cx, cy) : this.putString(cx, cy, '', undefined);
+};
+
+// Scroll the contents of the rectangle (x, y, w, h) by (incX, incY)
+// cells, filling the vacated area with "color"/"style". Full-width
+// vertical scrolls are handled by adding/removing whole line nodes
+// (possibly feeding the scrollback buffer); everything else falls back
+// to per-line segment copies via copyLineSegment.
+VT100.prototype.scrollRegion = function(x, y, w, h, incX, incY,
+                                        color, style) {
+  var left  = incX < 0 ? -incX : 0;
+  var right = incX > 0 ?  incX : 0;
+  var up    = incY < 0 ? -incY : 0;
+  var down  = incY > 0 ?  incY : 0;
+
+  // Clip region against terminal size
+  var dontScroll = null;
+  w += x;
+  if (x < left) {
+    x = left;
+  }
+  if (w > this.terminalWidth - right) {
+    w = this.terminalWidth - right;
+  }
+  if ((w -= x) <= 0) {
+    dontScroll = 1;
+  }
+  h += y;
+  if (y < up) {
+    y = up;
+  }
+  if (h > this.terminalHeight - down) {
+    h = this.terminalHeight - down;
+  }
+  if ((h -= y) < 0) {
+    dontScroll = 1;
+  }
+  if (!dontScroll) {
+    // NOTE(review): indexOf() is used here as a truth value. It is only
+    // falsy when 'underline' occurs at position 0, and -1 ("not found")
+    // is truthy — so the underline is stripped even when absent, and
+    // kept when the style string starts with it. Probably meant
+    // indexOf(...) >= 0 — confirm against upstream shellinabox.
+    if (style && style.indexOf('underline')) {
+      // Different terminal emulators disagree on the attributes that
+      // are used for scrolling. The consensus seems to be, never to
+      // fill with underlined spaces. N.B. this is different from the
+      // cases when the user blanks a region. User-initiated blanking
+      // always fills with all of the current attributes.
+      style = style.replace(/text-decoration:underline;/, '');
+    }
+
+    // Compute current scroll position
+    var scrollPos = this.numScrollbackLines -
+                    (this.scrollable.scrollTop-1) / this.cursorHeight;
+
+    // Determine original cursor position. Hide cursor temporarily to avoid
+    // visual artifacts.
+    var hidden = this.hideCursor();
+    var cx     = this.cursorX;
+    var cy     = this.cursorY;
+    var console= this.console[this.currentScreen];
+
+    if (!incX && !x && w == this.terminalWidth) {
+      // Scrolling entire lines
+      if (incY < 0) {
+        // Scrolling up
+        if (!this.currentScreen && y == -incY &&
+            h == this.terminalHeight + incY) {
+          // Scrolling up with adding to the scrollback buffer. This is only
+          // possible if there are at least as many lines in the console,
+          // as the terminal is high
+          while (console.childNodes.length < this.terminalHeight) {
+            this.insertBlankLine(this.terminalHeight);
+          }
+
+          // Add new lines at bottom in order to force scrolling
+          // (y == -incY in this branch, so one line per scrolled row).
+          for (var i = 0; i < y; i++) {
+            this.insertBlankLine(console.childNodes.length, color, style);
+          }
+
+          // Adjust the number of lines in the scrollback buffer by
+          // removing excess entries.
+          this.updateNumScrollbackLines();
+          while (this.numScrollbackLines >
+                 (this.currentScreen ? 0 : this.maxScrollbackLines)) {
+            console.removeChild(console.firstChild);
+            this.numScrollbackLines--;
+          }
+
+          // Mark lines in the scrollback buffer, so that they do not get
+          // printed.
+          for (var i = this.numScrollbackLines, j = -incY;
+               i-- > 0 && j-- > 0; ) {
+            console.childNodes[i].className = 'scrollback';
+          }
+        } else {
+          // Scrolling up without adding to the scrollback buffer.
+          for (var i = -incY;
+               i-- > 0 &&
+               console.childNodes.length >
+               this.numScrollbackLines + y + incY; ) {
+            console.removeChild(console.childNodes[
+                                          this.numScrollbackLines + y + incY]);
+          }
+
+          // If we used to have a scrollback buffer, then we must make sure
+          // that we add back blank lines at the bottom of the terminal.
+          // Similarly, if we are scrolling in the middle of the screen,
+          // we must add blank lines to ensure that the bottom of the screen
+          // does not move up.
+          if (this.numScrollbackLines > 0 ||
+              console.childNodes.length > this.numScrollbackLines+y+h+incY) {
+            for (var i = -incY; i-- > 0; ) {
+              this.insertBlankLine(this.numScrollbackLines + y + h + incY,
+                                   color, style);
+            }
+          }
+        }
+      } else {
+        // Scrolling down
+        for (var i = incY;
+             i-- > 0 &&
+             console.childNodes.length > this.numScrollbackLines + y + h; ) {
+          console.removeChild(console.childNodes[this.numScrollbackLines+y+h]);
+        }
+        for (var i = incY; i--; ) {
+          this.insertBlankLine(this.numScrollbackLines + y, color, style);
+        }
+      }
+    } else {
+      // Scrolling partial lines
+      if (incY <= 0) {
+        // Scrolling up or horizontally within a line
+        for (var i = y + this.numScrollbackLines;
+             i < y + this.numScrollbackLines + h;
+             i++) {
+          this.copyLineSegment(x + incX, i + incY, x, i, w);
+        }
+      } else {
+        // Scrolling down
+        for (var i = y + this.numScrollbackLines + h;
+             i-- > y + this.numScrollbackLines; ) {
+          this.copyLineSegment(x + incX, i + incY, x, i, w);
+        }
+      }
+
+      // Clear blank regions
+      if (incX > 0) {
+        this.clearRegion(x, y, incX, h, color, style);
+      } else if (incX < 0) {
+        this.clearRegion(x + w + incX, y, -incX, h, color, style);
+      }
+      if (incY > 0) {
+        this.clearRegion(x, y, w, incY, color, style);
+      } else if (incY < 0) {
+        this.clearRegion(x, y + h + incY, w, -incY, color, style);
+      }
+    }
+
+    // Reset scroll position
+    this.scrollable.scrollTop = (this.numScrollbackLines-scrollPos) *
+                                this.cursorHeight + 1;
+
+    // Move cursor back to its original position
+    hidden ? this.showCursor(cx, cy) : this.putString(cx, cy, '', undefined);
+  }
+};
+
+// Copy "selection" (defaults to the current browser selection) to the
+// clipboard. Uses IE's createTextRange()/execCommand('copy'); on
+// browsers where that throws, the text is stashed in internalClipboard
+// so pasteFnc() can still retrieve it.
+VT100.prototype.copy = function(selection) {
+  if (selection == undefined) {
+    selection          = this.selection();
+  }
+  this.internalClipboard = undefined;
+  if (selection.length) {
+    try {
+      // IE
+      this.cliphelper.value = selection;
+      this.cliphelper.select();
+      this.cliphelper.createTextRange().execCommand('copy');
+    } catch (e) {
+      this.internalClipboard = selection;
+    }
+    this.cliphelper.value = '';
+  }
+};
+
+// Copy the selection that was captured just before the context menu was
+// shown (see showContextMenu, which fills in lastSelection).
+VT100.prototype.copyLast = function() {
+  // Opening the context menu can remove the selection. We try to prevent this
+  // from happening, but that is not possible for all browsers. So, instead,
+  // we compute the selection before showing the menu.
+  this.copy(this.lastSelection);
+};
+
+// Return a closure that types the clipboard contents into the terminal,
+// or undefined if nothing can be pasted. Prefers our internal clipboard
+// (filled by copy() on non-IE browsers), then falls back to IE's
+// execCommand('paste').
+VT100.prototype.pasteFnc = function() {
+  var clipboard     = undefined;
+  if (this.internalClipboard != undefined) {
+    clipboard       = this.internalClipboard;
+  } else {
+    try {
+      this.cliphelper.value = '';
+      this.cliphelper.createTextRange().execCommand('paste');
+      clipboard     = this.cliphelper.value;
+    } catch (e) {
+    }
+  }
+  this.cliphelper.value = '';
+  // Only paste while the context menu is not showing.
+  if (clipboard && this.menu.style.visibility == 'hidden') {
+    return function() {
+      this.keysPressed('' + clipboard);
+    };
+  } else {
+    return undefined;
+  }
+};
+
+// Fallback paste path: ask the user to paste text into a prompt()
+// dialog, then feed it to the terminal as keystrokes.
+VT100.prototype.pasteBrowserFnc = function() {
+  var clipboard     = prompt("Paste into this box:","");
+  if (clipboard != undefined) {
+    return this.keysPressed('' + clipboard);
+  }
+};
+
+// Context-menu action: toggle UTF-8 handling on or off.
+VT100.prototype.toggleUTF = function() {
+  this.utfEnabled   = !this.utfEnabled;
+
+  // We always persist the last value that the user selected. Not necessarily
+  // the last value that a random program requested.
+  this.utfPreferred = this.utfEnabled;
+};
+
+// Context-menu action: toggle between audible and visual bell.
+VT100.prototype.toggleBell = function() {
+  this.visualBell = !this.visualBell;
+};
+
+// Context-menu action: toggle the onscreen keyboard, and show/hide the
+// small keyboard icon accordingly.
+VT100.prototype.toggleSoftKeyboard = function() {
+  this.softKeyboard = !this.softKeyboard;
+  this.keyboardImage.style.visibility = this.softKeyboard ? 'visible' : '';
+};
+
+// Recursively clear the 'selected' CSS class from "elem" and all of its
+// descendants (used to reset the soft keyboard's visual state).
+// NOTE(review): the for-loop dereferences elem.firstChild without the
+// null guard used above it — callers must never pass null; confirm.
+VT100.prototype.deselectKeys = function(elem) {
+  if (elem && elem.className == 'selected') {
+    elem.className = '';
+  }
+  for (elem = elem.firstChild; elem; elem = elem.nextSibling) {
+    this.deselectKeys(elem);
+  }
+};
+
+// Display the onscreen keyboard: reset all modifier/selection state,
+// stretch the overlay over the whole container, then scale and center
+// the keyboard so it fits within 90% of the container.
+VT100.prototype.showSoftKeyboard = function() {
+  // Make sure no key is currently selected
+  this.lastSelectedKey           = undefined;
+  this.deselectKeys(this.keyboard);
+  this.isShift                   = false;
+  this.showShiftState(false);
+  this.isCtrl                    = false;
+  this.showCtrlState(false);
+  this.isAlt                     = false;
+  this.showAltState(false);
+
+  // Cover the entire container with the (still invisible) overlay so we
+  // can measure the keyboard before showing it.
+  this.keyboard.style.left       = '0px';
+  this.keyboard.style.top        = '0px';
+  this.keyboard.style.width      = this.container.offsetWidth  + 'px';
+  this.keyboard.style.height     = this.container.offsetHeight + 'px';
+  this.keyboard.style.visibility = 'hidden';
+  this.keyboard.style.display    = '';
+
+  var kbd       = this.keyboard.firstChild;
+  var scale     = 1.0;
+  var transform = this.getTransformName();
+  if (transform) {
+    kbd.style[transform] = '';
+    // Shrink the keyboard if it overflows 90% of the container in
+    // either dimension.
+    if (kbd.offsetWidth > 0.9 * this.container.offsetWidth) {
+      scale = (kbd.offsetWidth/
+               this.container.offsetWidth)/0.9;
+    }
+    // NOTE(review): Math.max() below is called with a single argument
+    // and so has no effect; presumably Math.max(scale, ...) was
+    // intended to keep the larger of the two scale factors — confirm.
+    if (kbd.offsetHeight > 0.9 * this.container.offsetHeight) {
+      scale = Math.max((kbd.offsetHeight/
+                        this.container.offsetHeight)/0.9);
+    }
+    var style = this.getTransformStyle(transform,
+                                       scale > 1.0 ? scale : undefined);
+    kbd.style[transform] = style;
+  }
+  // IE's 'filter' transform does not affect layout metrics, so do not
+  // compensate for scale when centering.
+  if (transform == 'filter') {
+    scale = 1.0;
+  }
+  kbd.style.left = ((this.container.offsetWidth -
+                     kbd.offsetWidth/scale)/2) + 'px';
+  kbd.style.top  = ((this.container.offsetHeight -
+                     kbd.offsetHeight/scale)/2) + 'px';
+
+  this.keyboard.style.visibility = 'visible';
+};
+
+// Remove the onscreen keyboard from the layout entirely.
+VT100.prototype.hideSoftKeyboard = function() {
+  this.keyboard.style.display    = 'none';
+};
+
+// Context-menu action: toggle cursor blinking (see animateCursor).
+VT100.prototype.toggleCursorBlinking = function() {
+  this.blinkingCursor = !this.blinkingCursor;
+};
+
+// Context-menu action: show the version/copyright dialog.
+VT100.prototype.about = function() {
+  alert("VT100 Terminal Emulator " + "2.10 (revision 239)" +
+        "\nCopyright 2008-2010 by Markus Gutschke\n" +
+        "For more information check http://shellinabox.com");
+};
+
+// Dismiss the context menu by hiding it, moving it offscreen, and
+// collapsing it to zero size.
+VT100.prototype.hideContextMenu = function() {
+  this.menu.style.visibility = 'hidden';
+  this.menu.style.top        = '-100px';
+  this.menu.style.left       = '-100px';
+  this.menu.style.width      = '0px';
+  this.menu.style.height     = '0px';
+};
+
+// Intentionally empty hook: subclasses override this to append their
+// own entries/actions to the context menu (see showContextMenu).
+VT100.prototype.extendContextMenu = function(entries, actions) {
+};
+
+// Build and display the right-click context menu at container
+// coordinates (x, y). Menu entries are matched positionally to the
+// "actions" array; disabled entries (no selection to copy, nothing to
+// paste) are skipped when wiring handlers via the className check.
+VT100.prototype.showContextMenu = function(x, y) {
+  this.menu.innerHTML =
+    '<table class="popup" ' +
+           'cellpadding="0" cellspacing="0">' +
+      '<tr><td>' +
+        '<ul id="menuentries">' +
+          '<li id="beginclipboard">Copy</li>' +
+          '<li id="endclipboard">Paste</li>' +
+          '<li id="browserclipboard">Paste from browser</li>' +
+          '<hr />' +
+          '<li id="reset">Reset</li>' +
+          '<hr />' +
+          '<li id="beginconfig">' +
+             (this.utfEnabled ? '<img src="/webshell/enabled.gif" />' : '') +
+             'Unicode</li>' +
+          '<li>' +
+             (this.visualBell ? '<img src="/webshell/enabled.gif" />' : '') +
+             'Visual Bell</li>'+
+          '<li>' +
+             (this.softKeyboard ? '<img src="/webshell/enabled.gif" />' : '') +
+             'Onscreen Keyboard</li>' +
+          '<li id="endconfig">' +
+             (this.blinkingCursor ? '<img src="/webshell/enabled.gif" />' : '') +
+             'Blinking Cursor</li>'+
+          (this.usercss.firstChild ?
+           '<hr id="beginusercss" />' +
+           this.usercss.innerHTML +
+           '<hr id="endusercss" />' :
+           '<hr />') +
+          '<li id="about">About...</li>' +
+        '</ul>' +
+      '</td></tr>' +
+    '</table>';
+
+  var popup       = this.menu.firstChild;
+  var menuentries = this.getChildById(popup, 'menuentries');
+
+  // Determine menu entries that should be disabled
+  this.lastSelection = this.selection();
+  if (!this.lastSelection.length) {
+    menuentries.firstChild.className
+                    = 'disabled';
+  }
+  var p = this.pasteFnc();
+  if (!p) {
+    menuentries.childNodes[1].className
+                    = 'disabled';
+  }
+
+  // Actions for default items
+  var actions     = [ this.copyLast, p, this.pasteBrowserFnc, this.reset,
+                      this.toggleUTF, this.toggleBell,
+                      this.toggleSoftKeyboard,
+                      this.toggleCursorBlinking ];
+
+  // Actions for user CSS styles (if any)
+  for (var i = 0; i < this.usercssActions.length; ++i) {
+    actions[actions.length] = this.usercssActions[i];
+  }
+  actions[actions.length] = this.about;
+
+  // Allow subclasses to dynamically add entries to the context menu
+  this.extendContextMenu(menuentries, actions);
+
+  // Hook up event listeners. Each closure-factory IIFE captures the
+  // current node/action for its handler.
+  for (var node = menuentries.firstChild, i = 0; node;
+       node = node.nextSibling) {
+    if (node.tagName == 'LI') {
+      if (node.className != 'disabled') {
+        this.addListener(node, 'mouseover',
+                         function(vt100, node) {
+                           return function() {
+                             node.className = 'hover';
+                           }
+                         }(this, node));
+        this.addListener(node, 'mouseout',
+                         function(vt100, node) {
+                           return function() {
+                             node.className = '';
+                           }
+                         }(this, node));
+        this.addListener(node, 'mousedown',
+                         function(vt100, action) {
+                           return function(event) {
+                             vt100.hideContextMenu();
+                             action.call(vt100);
+                             vt100.storeUserSettings();
+                             return vt100.cancelEvent(event || window.event);
+                           }
+                         }(this, actions[i]));
+        this.addListener(node, 'mouseup',
+                         function(vt100) {
+                           return function(event) {
+                             return vt100.cancelEvent(event || window.event);
+                           }
+                         }(this));
+        // NOTE(review): unlike the handlers above, this IIFE is invoked
+        // with no arguments ("}()" instead of "}(this)"), leaving vt100
+        // undefined inside the closure. Also 'mouseclick' is not a
+        // standard DOM event name ('click' is) — confirm both.
+        this.addListener(node, 'mouseclick',
+                         function(vt100) {
+                           return function(event) {
+                             return vt100.cancelEvent(event || window.event);
+                           }
+                         }());
+      }
+      i++;
+    }
+  }
+
+  // Position menu next to the mouse pointer
+  this.menu.style.left    = '0px';
+  this.menu.style.top     = '0px';
+  this.menu.style.width   =  this.container.offsetWidth  + 'px';
+  this.menu.style.height  =  this.container.offsetHeight + 'px';
+  popup.style.left        = '0px';
+  popup.style.top         = '0px';
+
+  var margin              = 2;
+  if (x + popup.clientWidth >= this.container.offsetWidth - margin) {
+    x              = this.container.offsetWidth-popup.clientWidth - margin - 1;
+  }
+  if (x < margin) {
+    x              = margin;
+  }
+  if (y + popup.clientHeight >= this.container.offsetHeight - margin) {
+    y              = this.container.offsetHeight-popup.clientHeight - margin - 1;
+  }
+  if (y < margin) {
+    y              = margin;
+  }
+  popup.style.left        = x + 'px';
+  popup.style.top         = y + 'px';
+
+  // Block all other interactions with the terminal emulator
+  this.addListener(this.menu, 'click', function(vt100) {
+    return function() {
+      vt100.hideContextMenu();
+    }
+  }(this));
+
+  // Show the menu
+  this.menu.style.visibility = '';
+};
+
+// Feed each character of "ch" to the terminal. Printable characters
+// (and a small allowed set of control codes: BEL..SI, CAN, SUB, ESC)
+// are sent as-is; any other control code is rendered as '<n>'.
+VT100.prototype.keysPressed = function(ch) {
+  for (var i = 0; i < ch.length; i++) {
+    var c = ch.charCodeAt(i);
+    this.vt100(c >= 7 && c <= 15 ||
+               c == 24 || c == 26 || c == 27 || c >= 32
+               ? String.fromCharCode(c) : '<' + c + '>');
+  }
+};
+
+// Translate character code "ch" according to the event's modifier keys
+// and return it as a one-character string, or undefined if ch is falsy.
+// With CTRL held, printable characters map to their control-character
+// equivalents (mostly ch & 31, with a few historic special cases).
+VT100.prototype.applyModifiers = function(ch, event) {
+  if (ch) {
+    if (event.ctrlKey) {
+      if (ch >= 32 && ch <= 127) {
+        // For historic reasons, some control characters are treated specially
+        switch (ch) {
+        case /* 3 */ 51: ch  =  27; break;
+        case /* 4 */ 52: ch  =  28; break;
+        case /* 5 */ 53: ch  =  29; break;
+        case /* 6 */ 54: ch  =  30; break;
+        case /* 7 */ 55: ch  =  31; break;
+        case /* 8 */ 56: ch  = 127; break;
+        case /* ? */ 63: ch  = 127; break;
+        default:         ch &=  31; break;
+        }
+      }
+    }
+    return String.fromCharCode(ch);
+  } else {
+    return undefined;
+  }
+};
+
+// Central key dispatcher. Normalizes the (possibly fake) event into
+// either a character code "ch" or a function-key code "key", maps
+// function keys to VT100/xterm escape sequences, folds modifier keys
+// into parametrized CSI sequences, and finally sends the result to the
+// terminal (unless the context menu is open).
+VT100.prototype.handleKey = function(event) {
+  // this.vt100('H: c=' + event.charCode + ', k=' + event.keyCode +
+  //            (event.shiftKey || event.ctrlKey || event.altKey ||
+  //             event.metaKey ? ', ' +
+  //             (event.shiftKey ? 'S' : '') + (event.ctrlKey ? 'C' : '') +
+  //             (event.altKey ? 'A' : '') + (event.metaKey ? 'M' : '') : '') +
+  //            '\r\n');
+  var ch, key;
+  if (typeof event.charCode != 'undefined') {
+    // non-IE keypress events have a translated charCode value. Also, our
+    // fake events generated when receiving keydown events include this data
+    // on all browsers.
+    ch                            = event.charCode;
+    key                           = event.keyCode;
+  } else {
+    // When sending a keypress event, IE includes the translated character
+    // code in the keyCode field.
+    ch                            = event.keyCode;
+    key                           = undefined;
+  }
+
+  // Apply modifier keys (ctrl and shift)
+  if (ch) {
+    key                           = undefined;
+  }
+  ch                              = this.applyModifiers(ch, event);
+
+  // By this point, "ch" is either defined and contains the character code, or
+  // it is undefined and "key" defines the code of a function key
+  if (ch != undefined) {
+    // Typing a character snaps the view back to the live screen.
+    this.scrollable.scrollTop     = this.numScrollbackLines *
+                                    this.cursorHeight + 1;
+  } else {
+    if ((event.altKey || event.metaKey) && !event.shiftKey && !event.ctrlKey) {
+      // Many programs have difficulties dealing with parametrized escape
+      // sequences for function keys. Thus, if ALT is the only modifier
+      // key, return Emacs-style keycodes for commonly used keys.
+      switch (key) {
+      case  33: /* Page Up */ ch = '\u001B<'; break;
+      case  34: /* Page Down */ ch = '\u001B>'; break;
+      case  37: /* Left */ ch = '\u001Bb'; break;
+      case  38: /* Up */ ch = '\u001Bp'; break;
+      case  39: /* Right */ ch = '\u001Bf'; break;
+      case  40: /* Down */ ch = '\u001Bn'; break;
+      case  46: /* Delete */ ch = '\u001Bd'; break;
+      default: break;
+      }
+    } else if (event.shiftKey && !event.ctrlKey &&
+               !event.altKey && !event.metaKey) {
+      // Shift+PageUp/PageDown scroll the local view instead of sending
+      // anything to the host.
+      switch (key) {
+      case  33: /* Page Up */ this.scrollBack(); return;
+      case  34: /* Page Down */ this.scrollFore(); return;
+      default: break;
+      }
+    }
+    if (ch == undefined) {
+      switch (key) {
+      case   8: /* Backspace */ ch = '\u007f'; break;
+      case   9: /* Tab */ ch = '\u0009'; break;
+      case  10: /* Return */ ch = '\u000A'; break;
+      case  13: /* Enter */ ch = this.crLfMode ?
+                                 '\r\n' : '\r'; break;
+      case  16: /* Shift */ return;
+      case  17: /* Ctrl */ return;
+      case  18: /* Alt */ return;
+      case  19: /* Break */ return;
+      case  20: /* Caps Lock */ return;
+      case  27: /* Escape */ ch = '\u001B'; break;
+      case  33: /* Page Up */ ch = '\u001B[5~'; break;
+      case  34: /* Page Down */ ch = '\u001B[6~'; break;
+      case  35: /* End */ ch = '\u001BOF'; break;
+      case  36: /* Home */ ch = '\u001BOH'; break;
+      case  37: /* Left */ ch = this.cursorKeyMode ?
+                                '\u001BOD' : '\u001B[D'; break;
+      case  38: /* Up */ ch = this.cursorKeyMode ?
+                                '\u001BOA' : '\u001B[A'; break;
+      case  39: /* Right */ ch = this.cursorKeyMode ?
+                                '\u001BOC' : '\u001B[C'; break;
+      case  40: /* Down */ ch = this.cursorKeyMode ?
+                                '\u001BOB' : '\u001B[B'; break;
+      case  45: /* Insert */ ch = '\u001B[2~'; break;
+      case  46: /* Delete */ ch = '\u001B[3~'; break;
+      case  91: /* Left Window */ return;
+      case  92: /* Right Window */ return;
+      case  93: /* Select */ return;
+      case  96: /* 0 */ ch = this.applyModifiers(48, event); break;
+      case  97: /* 1 */ ch = this.applyModifiers(49, event); break;
+      case  98: /* 2 */ ch = this.applyModifiers(50, event); break;
+      case  99: /* 3 */ ch = this.applyModifiers(51, event); break;
+      case 100: /* 4 */ ch = this.applyModifiers(52, event); break;
+      case 101: /* 5 */ ch = this.applyModifiers(53, event); break;
+      case 102: /* 6 */ ch = this.applyModifiers(54, event); break;
+      case 103: /* 7 */ ch = this.applyModifiers(55, event); break;
+      case 104: /* 8 */ ch = this.applyModifiers(56, event); break;
+      // NOTE(review): 58 is ':' but keypad key 105 is '9' (code 57);
+      // looks like a typo — confirm against upstream vt100.js.
+      case 105: /* 9 */ ch = this.applyModifiers(58, event); break;
+      case 106: /* * */ ch = this.applyModifiers(42, event); break;
+      case 107: /* + */ ch = this.applyModifiers(43, event); break;
+      case 109: /* - */ ch = this.applyModifiers(45, event); break;
+      case 110: /* . */ ch = this.applyModifiers(46, event); break;
+      case 111: /* / */ ch = this.applyModifiers(47, event); break;
+      case 112: /* F1 */ ch = '\u001BOP'; break;
+      case 113: /* F2 */ ch = '\u001BOQ'; break;
+      case 114: /* F3 */ ch = '\u001BOR'; break;
+      case 115: /* F4 */ ch = '\u001BOS'; break;
+      case 116: /* F5 */ ch = '\u001B[15~'; break;
+      case 117: /* F6 */ ch = '\u001B[17~'; break;
+      case 118: /* F7 */ ch = '\u001B[18~'; break;
+      case 119: /* F8 */ ch = '\u001B[19~'; break;
+      case 120: /* F9 */ ch = '\u001B[20~'; break;
+      case 121: /* F10 */ ch = '\u001B[21~'; break;
+      case 122: /* F11 */ ch = '\u001B[23~'; break;
+      case 123: /* F12 */ ch = '\u001B[24~'; break;
+      case 144: /* Num Lock */ return;
+      case 145: /* Scroll Lock */ return;
+      case 186: /* ; */ ch = this.applyModifiers(59, event); break;
+      case 187: /* = */ ch = this.applyModifiers(61, event); break;
+      case 188: /* , */ ch = this.applyModifiers(44, event); break;
+      case 189: /* - */ ch = this.applyModifiers(45, event); break;
+      case 173: /* - */ ch = this.applyModifiers(45, event); break; // FF15 Patch
+      case 190: /* . */ ch = this.applyModifiers(46, event); break;
+      case 191: /* / */ ch = this.applyModifiers(47, event); break;
+      // Conflicts with dead key " on Swiss keyboards
+      //case 192: /* ` */ ch = this.applyModifiers(96, event); break;
+      // Conflicts with dead key " on Swiss keyboards
+      //case 219: /* [ */ ch = this.applyModifiers(91, event); break;
+      case 220: /* \ */ ch = this.applyModifiers(92, event); break;
+      // Conflicts with dead key ^ and ` on Swiss keaboards
+      //                 ^ and " on French keyboards
+      //case 221: /* ] */ ch = this.applyModifiers(93, event); break;
+      case 222: /* ' */ ch = this.applyModifiers(39, event); break;
+      default: return;
+      }
+      this.scrollable.scrollTop = this.numScrollbackLines *
+                                  this.cursorHeight + 1;
+    }
+  }
+
+  // "ch" now contains the sequence of keycodes to send. But we might still
+  // have to apply the effects of modifier keys.
+  if (event.shiftKey || event.ctrlKey || event.altKey || event.metaKey) {
+    var start, digit, part1, part2;
+    if ((start = ch.substr(0, 2)) == '\u001B[') {
+      // CSI sequence: insert the xterm modifier parameter before the
+      // final byte (e.g. ESC[5~ -> ESC[5;2~ for shift).
+      for (part1 = start;
+           part1.length < ch.length &&
+           (digit = ch.charCodeAt(part1.length)) >= 48 && digit <= 57; ) {
+        part1                     = ch.substr(0, part1.length + 1);
+      }
+      part2                       = ch.substr(part1.length);
+      if (part1.length > 2) {
+        part1                    += ';';
+      }
+    } else if (start == '\u001BO') {
+      part1                       = start;
+      part2                       = ch.substr(2);
+    }
+    if (part1 != undefined) {
+      ch                          = part1                                 +
+                                    ((event.shiftKey             ? 1 : 0) +
+                                     (event.altKey|event.metaKey ? 2 : 0) +
+                                     (event.ctrlKey              ? 4 : 0)) +
+                                    part2;
+    } else if (ch.length == 1 && (event.altKey || event.metaKey)) {
+      // Prefix regular characters with ESC for ALT/META ("meta sends
+      // escape").
+      ch                          = '\u001B' + ch;
+    }
+  }
+
+  if (this.menu.style.visibility == 'hidden') {
+    // this.vt100('R: c=');
+    // for (var i = 0; i < ch.length; i++)
+    //   this.vt100((i != 0 ? ', ' : '') + ch.charCodeAt(i));
+    // this.vt100('\r\n');
+    this.keysPressed(ch);
+  }
+};
+
+// Debugging helper: format object "o" as a string, recursing one level
+// deep into object properties ("d" tracks the current depth). Returns
+// CR/LF-terminated text suitable for printing into the terminal.
+VT100.prototype.inspect = function(o, d) {
+  if (d == undefined) {
+    d       = 0;
+  }
+  var rc    = '';
+  if (typeof o == 'object' && ++d < 2) {
+    rc      = '[\r\n';
+    for (i in o) {
+      rc   += this.spaces(d * 2) + i + ' -> ';
+      try {
+        rc += this.inspect(o[i], d);
+      } catch (e) {
+        rc += '?' + '?' + '?\r\n';
+      }
+    }
+    rc     += ']\r\n';
+  } else {
+    rc     += ('' + o).replace(/\n/g, ' ').replace(/  +/g, ' ') + '\r\n';
+  }
+  return rc;
+};
+
+// Drain any text that accumulated in the hidden input field (composed
+// keys, pasted text) and forward it to the terminal as keystrokes.
+VT100.prototype.checkComposedKeys = function(event) {
+  // Composed keys (at least on Linux) do not generate normal events.
+  // Instead, they get entered into the text field. We normally catch
+  // this on the next keyup event.
+  var s              = this.input.value;
+  if (s.length) {
+    this.input.value = '';
+    if (this.menu.style.visibility == 'hidden') {
+      this.keysPressed(s);
+    }
+  }
+};
+
+// Work around browser quirks by returning a cleaned-up copy of "event":
+// strips CTRL+ALT when both are set (AltGr), and synthesizes the
+// shifted character for SHIFT combinations some browsers fail to
+// translate (US keyboard layout only). Returns the original event when
+// no fixup applies.
+VT100.prototype.fixEvent = function(event) {
+  // Some browsers report AltGR as a combination of ALT and CTRL. As AltGr
+  // is used as a second-level selector, clear the modifier bits before
+  // handling the event.
+  if (event.ctrlKey && event.altKey) {
+    var fake        = [ ];
+    fake.charCode   = event.charCode;
+    fake.keyCode    = event.keyCode;
+    fake.ctrlKey    = false;
+    fake.shiftKey   = event.shiftKey;
+    fake.altKey     = false;
+    fake.metaKey    = event.metaKey;
+    return fake;
+  }
+
+  // Some browsers fail to translate keys, if both shift and alt/meta is
+  // pressed at the same time. We try to translate those cases, but that
+  // only works for US keyboard layouts.
+  if (event.shiftKey) {
+    var u           = undefined;
+    var s           = undefined;
+    switch (this.lastNormalKeyDownEvent.keyCode) {
+    case  39: /* ' -> " */ u = 39; s =  34; break;
+    case  44: /* , -> < */ u = 44; s =  60; break;
+    case  45: /* - -> _ */ u = 45; s =  95; break;
+    case  46: /* . -> > */ u = 46; s =  62; break;
+    case  47: /* / -> ? */ u = 47; s =  63; break;
+
+    case  48: /* 0 -> ) */ u = 48; s =  41; break;
+    case  49: /* 1 -> ! */ u = 49; s =  33; break;
+    case  50: /* 2 -> @ */ u = 50; s =  64; break;
+    case  51: /* 3 -> # */ u = 51; s =  35; break;
+    case  52: /* 4 -> $ */ u = 52; s =  36; break;
+    case  53: /* 5 -> % */ u = 53; s =  37; break;
+    case  54: /* 6 -> ^ */ u = 54; s =  94; break;
+    case  55: /* 7 -> & */ u = 55; s =  38; break;
+    case  56: /* 8 -> * */ u = 56; s =  42; break;
+    case  57: /* 9 -> ( */ u = 57; s =  40; break;
+
+    case  59: /* ; -> : */ u = 59; s =  58; break;
+    case  61: /* = -> + */ u = 61; s =  43; break;
+    case  91: /* [ -> { */ u = 91; s = 123; break;
+    case  92: /* \ -> | */ u = 92; s = 124; break;
+    case  93: /* ] -> } */ u = 93; s = 125; break;
+    case  96: /* ` -> ~ */ u = 96; s = 126; break;
+
+    case 109: /* - -> _ */ u = 45; s =  95; break;
+    case 111: /* / -> ? */ u = 47; s =  63; break;
+
+    case 186: /* ; -> : */ u = 59; s =  58; break;
+    case 187: /* = -> + */ u = 61; s =  43; break;
+    case 188: /* , -> < */ u = 44; s =  60; break;
+    case 189: /* - -> _ */ u = 45; s =  95; break;
+    case 173: /* - -> _ */ u = 45; s =  95; break; // FF15 Patch
+    case 190: /* . -> > */ u = 46; s =  62; break;
+    case 191: /* / -> ? */ u = 47; s =  63; break;
+    case 192: /* ` -> ~ */ u = 96; s = 126; break;
+    case 219: /* [ -> { */ u = 91; s = 123; break;
+    case 220: /* \ -> | */ u = 92; s = 124; break;
+    case 221: /* ] -> } */ u = 93; s = 125; break;
+    case 222: /* ' -> " */ u = 39; s =  34; break;
+    default:                                break;
+    }
+    // Substitute the shifted character only if the browser reported the
+    // unshifted one (or none at all).
+    if (s && (event.charCode == u || event.charCode == 0)) {
+      var fake      = [ ];
+      fake.charCode = s;
+      fake.keyCode  = event.keyCode;
+      fake.ctrlKey  = event.ctrlKey;
+      fake.shiftKey = event.shiftKey;
+      fake.altKey   = event.altKey;
+      fake.metaKey  = event.metaKey;
+      return fake;
+    }
+  }
+  return event;
+};
+
+// keydown handler. Classifies the key (ascii / alphanumeric / "normal")
+// and decides whether to intercept it here — building a fake event and
+// calling handleKey directly — or to let the browser deliver a keypress
+// event with the locale-translated charCode. Returns false (and cancels
+// the event) when intercepted, true otherwise.
+VT100.prototype.keyDown = function(event) {
+  // this.vt100('D: c=' + event.charCode + ', k=' + event.keyCode +
+  //            (event.shiftKey || event.ctrlKey || event.altKey ||
+  //             event.metaKey ? ', ' +
+  //             (event.shiftKey ? 'S' : '') + (event.ctrlKey ? 'C' : '') +
+  //             (event.altKey ? 'A' : '') + (event.metaKey ? 'M' : '') : '') +
+  //            '\r\n');
+  this.checkComposedKeys(event);
+  this.lastKeyPressedEvent      = undefined;
+  this.lastKeyDownEvent         = undefined;
+  this.lastNormalKeyDownEvent   = event;
+
+  // Swiss keyboard conflicts:
+  // [ 59
+  // ] 192
+  // ' 219 (dead key)
+  // { 220
+  // ~ 221 (dead key)
+  // } 223
+  // French keyoard conflicts:
+  // ~ 50 (dead key)
+  // } 107
+  var asciiKey =
+    event.keyCode ==  32                         ||
+    event.keyCode >=  48 && event.keyCode <=  57 ||
+    event.keyCode >=  65 && event.keyCode <=  90;
+  var alphNumKey =
+    asciiKey                                     ||
+    event.keyCode ==  59 ||
+    event.keyCode >=  96 && event.keyCode <= 105 ||
+    event.keyCode == 107 ||
+    event.keyCode == 192 ||
+    event.keyCode >= 219 && event.keyCode <= 221 ||
+    event.keyCode == 223 ||
+    event.keyCode == 226;
+  var normalKey =
+    alphNumKey                                   ||
+    event.keyCode ==  61 ||
+    event.keyCode == 106 ||
+    event.keyCode >= 109 && event.keyCode <= 111 ||
+    event.keyCode >= 186 && event.keyCode <= 191 ||
+    event.keyCode == 222 ||
+    event.keyCode == 252;
+  try {
+    if (navigator.appName == 'Konqueror') {
+      normalKey                |= event.keyCode < 128;
+    }
+  } catch (e) {
+  }
+
+  // We normally prefer to look at keypress events, as they perform the
+  // translation from keyCode to charCode. This is important, as the
+  // translation is locale-dependent.
+  // But for some keys, we must intercept them during the keydown event,
+  // as they would otherwise get interpreted by the browser.
+  // Even, when doing all of this, there are some keys that we can never
+  // intercept. This applies to some of the menu navigation keys in IE.
+  // In fact, we see them, but we cannot stop IE from seeing them, too.
+  if ((event.charCode || event.keyCode) &&
+      ((alphNumKey && (event.ctrlKey || event.altKey || event.metaKey) &&
+        !event.shiftKey &&
+        // Some browsers signal AltGR as both CTRL and ALT. Do not try to
+        // interpret this sequence ourselves, as some keyboard layouts use
+        // it for second-level layouts.
+        !(event.ctrlKey && event.altKey)) ||
+       this.catchModifiersEarly && normalKey && !alphNumKey &&
+       (event.ctrlKey || event.altKey || event.metaKey) ||
+       !normalKey)) {
+    this.lastKeyDownEvent       = event;
+    var fake                    = [ ];
+    fake.ctrlKey                = event.ctrlKey;
+    fake.shiftKey               = event.shiftKey;
+    fake.altKey                 = event.altKey;
+    fake.metaKey                = event.metaKey;
+    if (asciiKey) {
+      fake.charCode             = event.keyCode;
+      fake.keyCode              = 0;
+    } else {
+      fake.charCode             = 0;
+      fake.keyCode              = event.keyCode;
+      if (!alphNumKey && event.shiftKey) {
+        fake                    = this.fixEvent(fake);
+      }
+    }
+
+    this.handleKey(fake);
+    this.lastNormalKeyDownEvent = undefined;
+
+    try {
+      // For non-IE browsers
+      event.stopPropagation();
+      event.preventDefault();
+    } catch (e) {
+    }
+    try {
+      // For IE
+      event.cancelBubble = true;
+      event.returnValue  = false;
+      event.keyCode      = 0;
+    } catch (e) {
+    }
+
+    return false;
+  }
+  return true;
+};
+
+// keypress handler. Skips keys that keyDown already handled; otherwise
+// dispatches to handleKey (after fixEvent for ALT/META combinations).
+// Always cancels the browser's default action and returns false.
+VT100.prototype.keyPressed = function(event) {
+  // this.vt100('P: c=' + event.charCode + ', k=' + event.keyCode +
+  //            (event.shiftKey || event.ctrlKey || event.altKey ||
+  //             event.metaKey ? ', ' +
+  //             (event.shiftKey ? 'S' : '') + (event.ctrlKey ? 'C' : '') +
+  //             (event.altKey ? 'A' : '') + (event.metaKey ? 'M' : '') : '') +
+  //            '\r\n');
+  if (this.lastKeyDownEvent) {
+    // If we already processed the key on keydown, do not process it
+    // again here. Ideally, the browser should not even have generated a
+    // keypress event in this case. But that does not appear to always work.
+    this.lastKeyDownEvent     = undefined;
+  } else {
+    this.handleKey(event.altKey || event.metaKey
+                   ? this.fixEvent(event) : event);
+  }
+
+  try {
+    // For non-IE browsers
+    event.preventDefault();
+  } catch (e) {
+  }
+
+  try {
+    // For IE
+    event.cancelBubble = true;
+    event.returnValue  = false;
+    event.keyCode      = 0;
+  } catch (e) {
+  }
+
+  this.lastNormalKeyDownEvent = undefined;
+  this.lastKeyPressedEvent    = event;
+  return false;
+};
+
+// keyup handler. Cleans up after the compose-key quirk, picks up any
+// composed text, and — when a keydown arrived without a matching
+// keypress (some browsers with ctrl/alt on non-alphanumeric keys) —
+// synthesizes the missing event and enables early modifier catching for
+// subsequent keydowns.
+VT100.prototype.keyUp = function(event) {
+  // this.vt100('U: c=' + event.charCode + ', k=' + event.keyCode +
+  //            (event.shiftKey || event.ctrlKey || event.altKey ||
+  //             event.metaKey ? ', ' +
+  //             (event.shiftKey ? 'S' : '') + (event.ctrlKey ? 'C' : '') +
+  //             (event.altKey ? 'A' : '') + (event.metaKey ? 'M' : '') : '') +
+  //            '\r\n');
+  if (this.lastKeyPressedEvent) {
+    // The compose key on Linux occasionally confuses the browser and keeps
+    // inserting bogus characters into the input field, even if just a regular
+    // key has been pressed. Detect this case and drop the bogus characters.
+    (event.target ||
+     event.srcElement).value      = '';
+  } else {
+    // This is usually were we notice that a key has been composed and
+    // thus failed to generate normal events.
+    this.checkComposedKeys(event);
+
+    // Some browsers don't report keypress events if ctrl or alt is pressed
+    // for non-alphanumerical keys. Patch things up for now, but in the
+    // future we will catch these keys earlier (in the keydown handler).
+    if (this.lastNormalKeyDownEvent) {
+      // this.vt100('ENABLING EARLY CATCHING OF MODIFIER KEYS\r\n');
+      this.catchModifiersEarly    = true;
+      var asciiKey =
+        event.keyCode ==  32                         ||
+        // Conflicts with dead key ~ (code 50) on French keyboards
+        //event.keyCode >=  48 && event.keyCode <=  57 ||
+        event.keyCode >=  48 && event.keyCode <=  49 ||
+        event.keyCode >=  51 && event.keyCode <=  57 ||
+        event.keyCode >=  65 && event.keyCode <=  90;
+      var alphNumKey =
+        asciiKey                                     ||
+        event.keyCode ==  50 ||
+        event.keyCode >=  96 && event.keyCode <= 105;
+      var normalKey =
+        alphNumKey                                   ||
+        event.keyCode ==  59 || event.keyCode ==  61 ||
+        event.keyCode == 106 || event.keyCode == 107 ||
+        event.keyCode >= 109 && event.keyCode <= 111 ||
+        event.keyCode >= 186 && event.keyCode <= 192 ||
+        event.keyCode >= 219 && event.keyCode <= 223 ||
+        event.keyCode == 252;
+      var fake                    = [ ];
+      fake.ctrlKey                = event.ctrlKey;
+      fake.shiftKey               = event.shiftKey;
+      fake.altKey                 = event.altKey;
+      fake.metaKey                = event.metaKey;
+      if (asciiKey) {
+        fake.charCode             = event.keyCode;
+        fake.keyCode              = 0;
+      } else {
+        fake.charCode             = 0;
+        fake.keyCode              = event.keyCode;
+        if (!alphNumKey && (event.ctrlKey || event.altKey || event.metaKey)) {
+          fake                    = this.fixEvent(fake);
+        }
+      }
+      this.lastNormalKeyDownEvent = undefined;
+      this.handleKey(fake);
+    }
+  }
+
+  try {
+    // For IE
+    event.cancelBubble            = true;
+    event.returnValue             = false;
+    event.keyCode                 = 0;
+  } catch (e) {
+  }
+
+  this.lastKeyDownEvent           = undefined;
+  this.lastKeyPressedEvent        = undefined;
+  return false;
+};
+
+// Drive the cursor blink animation. Lazily installs a 500ms interval timer
+// (also reused to poll for composed keys / pasted text). Calling with
+// inactive=true freezes the cursor in its 'inactive' style (terminal lost
+// focus); inactive=false or no argument resumes normal rendering.
+VT100.prototype.animateCursor = function(inactive) {
+  if (!this.cursorInterval) {
+    this.cursorInterval = setInterval(
+      function(vt100) {
+        return function() {
+          vt100.animateCursor();
+
+          // Use this opportunity to check whether the user entered a composed
+          // key, or whether somebody pasted text into the textfield.
+          vt100.checkComposedKeys();
+        }
+      }(this), 500);
+  }
+  if (inactive != undefined || this.cursor.className != 'inactive') {
+    if (inactive) {
+      this.cursor.className = 'inactive';
+    } else {
+      if (this.blinkingCursor) {
+        // Toggle between the two blink phases on every tick.
+        this.cursor.className = this.cursor.className == 'bright'
+                                ? 'dim' : 'bright';
+      } else {
+        this.cursor.className = 'bright';
+      }
+    }
+  }
+};
+
+// Focus-loss hook: render the cursor in its 'inactive' style.
+VT100.prototype.blurCursor = function() {
+  this.animateCursor(true);
+};
+
+// Focus-gain hook: resume normal (possibly blinking) cursor rendering.
+VT100.prototype.focusCursor = function() {
+  this.animateCursor(false);
+};
+
+// Visual bell: invert the whole screen for 100ms, then restore it.
+// The isInverted flag is toggled back immediately so that the delayed
+// refresh repaints the terminal in its original state.
+VT100.prototype.flashScreen = function() {
+  this.isInverted = !this.isInverted;
+  this.refreshInvertedState();
+  this.isInverted = !this.isInverted;
+  setTimeout(function(vt100) {
+    return function() {
+      vt100.refreshInvertedState();
+    };
+  }(this), 100);
+};
+
+// BEL handler. Uses the visual bell if configured; otherwise tries to play
+// the audio beeper, falling back to re-setting its source (which restarts
+// playback in some browsers). All failures are silently ignored.
+VT100.prototype.beep = function() {
+  if (this.visualBell) {
+    this.flashScreen();
+  } else {
+    try {
+      this.beeper.Play();
+    } catch (e) {
+      try {
+        this.beeper.src = 'beep.wav';
+      } catch (e) {
+      }
+    }
+  }
+};
+
+// Backspace (BS): move the cursor one column left, stopping at the margin,
+// and cancel any pending auto-wrap.
+VT100.prototype.bs = function() {
+  if (this.cursorX > 0) {
+    this.gotoXY(this.cursorX - 1, this.cursorY);
+    this.needWrap = false;
+  }
+};
+
+// Horizontal tab (HT / CHT): advance the cursor by "count" tab stops
+// (default 1). userTabStop[x] semantics: false = explicitly cleared,
+// truthy = explicitly set, undefined = default stop at every 8th column.
+// The cursor is clamped to the last column.
+VT100.prototype.ht = function(count) {
+  if (count == undefined) {
+    count = 1;
+  }
+  var cx = this.cursorX;
+  while (count-- > 0) {
+    while (cx++ < this.terminalWidth) {
+      var tabState = this.userTabStop[cx];
+      if (tabState == false) {
+        // Explicitly cleared tab stop
+        continue;
+      } else if (tabState) {
+        // Explicitly set tab stop
+        break;
+      } else {
+        // Default tab stop at each eighth column
+        if (cx % 8 == 0) {
+          break;
+        }
+      }
+    }
+  }
+  if (cx > this.terminalWidth - 1) {
+    cx = this.terminalWidth - 1;
+  }
+  if (cx != this.cursorX) {
+    this.gotoXY(cx, this.cursorY);
+  }
+};
+
+// Reverse tab (CBT): move the cursor backwards by "count" tab stops
+// (default 1), using the same userTabStop semantics as ht(). The cursor is
+// clamped to column 0.
+VT100.prototype.rt = function(count) {
+  if (count == undefined) {
+    count = 1 ;
+  }
+  var cx = this.cursorX;
+  while (count-- > 0) {
+    while (cx-- > 0) {
+      var tabState = this.userTabStop[cx];
+      if (tabState == false) {
+        // Explicitly cleared tab stop
+        continue;
+      } else if (tabState) {
+        // Explicitly set tab stop
+        break;
+      } else {
+        // Default tab stop at each eighth column
+        if (cx % 8 == 0) {
+          break;
+        }
+      }
+    }
+  }
+  if (cx < 0) {
+    cx = 0;
+  }
+  if (cx != this.cursorX) {
+    this.gotoXY(cx, this.cursorY);
+  }
+};
+
+// Carriage return (CR): move the cursor to column 0 and cancel any pending
+// auto-wrap.
+VT100.prototype.cr = function() {
+  this.gotoXY(0, this.cursorY);
+  this.needWrap = false;
+};
+
+VT100.prototype.lf = function(count) {
+ if (count == undefined) {
+ count = 1;
+ } else {
+ if (count > this.terminalHeight) {
+ count = this.terminalHeight;
+ }
+ if (count < 1) {
+ count = 1;
+ }
+ }
+ while (count-- > 0) {
+ if (this.cursorY == this.bottom - 1) {
+ this.scrollRegion(0, this.top + 1,
+ this.terminalWidth, this.bottom - this.top - 1,
+ 0, -1, this.color, this.style);
+ offset = undefined;
+ } else if (this.cursorY < this.terminalHeight - 1) {
+ this.gotoXY(this.cursorX, this.cursorY + 1);
+ }
+ }
+};
+
+// Reverse index (RI): move the cursor up "count" lines (default 1, clamped
+// to [1, terminalHeight]). When the cursor is on the top line of the
+// scrolling region, scroll the region's contents down instead. Always
+// cancels any pending auto-wrap.
+VT100.prototype.ri = function(count) {
+  if (count == undefined) {
+    count = 1;
+  } else {
+    if (count > this.terminalHeight) {
+      count = this.terminalHeight;
+    }
+    if (count < 1) {
+      count = 1;
+    }
+  }
+  while (count-- > 0) {
+    if (this.cursorY == this.top) {
+      this.scrollRegion(0, this.top,
+                        this.terminalWidth, this.bottom - this.top - 1,
+                        0, 1, this.color, this.style);
+    } else if (this.cursorY > 0) {
+      this.gotoXY(this.cursorX, this.cursorY - 1);
+    }
+  }
+  this.needWrap = false;
+};
+
+// Primary Device Attributes response: queue "ESC [ ? 6 c" (VT102) to be
+// sent back to the host.
+VT100.prototype.respondID = function() {
+  this.respondString += '\u001B[?6c';
+};
+
+// Secondary Device Attributes response: queue "ESC [ > 0 ; 0 ; 0 c" to be
+// sent back to the host.
+VT100.prototype.respondSecondaryDA = function() {
+  this.respondString += '\u001B[>0;0;0c';
+};
+
+
+VT100.prototype.updateStyle = function() {
+ this.style = '';
+ if (this.attr & 0x0200 /* ATTR_UNDERLINE */) {
+ this.style = 'text-decoration: underline;';
+ }
+ var bg = (this.attr >> 4) & 0xF;
+ var fg = this.attr & 0xF;
+ if (this.attr & 0x0100 /* ATTR_REVERSE */) {
+ var tmp = bg;
+ bg = fg;
+ fg = tmp;
+ }
+ if ((this.attr & (0x0100 /* ATTR_REVERSE */ | 0x0400 /* ATTR_DIM */)) == 0x0400 /* ATTR_DIM */) {
+ fg = 8; // Dark grey
+ } else if (this.attr & 0x0800 /* ATTR_BRIGHT */) {
+ fg |= 8;
+ this.style = 'font-weight: bold;';
+ }
+ if (this.attr & 0x1000 /* ATTR_BLINK */) {
+ this.style = 'text-decoration: blink;';
+ }
+ this.color = 'ansi' + fg + ' bgAnsi' + bg;
+};
+
+VT100.prototype.setAttrColors = function(attr) {
+ if (attr != this.attr) {
+ this.attr = attr;
+ this.updateStyle();
+ }
+};
+
+// DECSC (save cursor): record the cursor position, attributes, and the
+// character-set (G-map) selection for the currently active screen, and mark
+// the saved state as valid. Note that savedUseGMap/savedGMap are shared
+// between the two screens, while position/attributes are per-screen.
+VT100.prototype.saveCursor = function() {
+  this.savedX[this.currentScreen] = this.cursorX;
+  this.savedY[this.currentScreen] = this.cursorY;
+  this.savedAttr[this.currentScreen] = this.attr;
+  this.savedUseGMap = this.useGMap;
+  for (var i = 0; i < 4; i++) {
+    this.savedGMap[i] = this.GMap[i];
+  }
+  this.savedValid[this.currentScreen] = true;
+};
+
+// DECRC (restore cursor): restore the state recorded by saveCursor() for
+// the active screen. A no-op if nothing was saved for this screen yet.
+VT100.prototype.restoreCursor = function() {
+  if (!this.savedValid[this.currentScreen]) {
+    return;
+  }
+  this.attr = this.savedAttr[this.currentScreen];
+  this.updateStyle();
+  this.useGMap = this.savedUseGMap;
+  for (var i = 0; i < 4; i++) {
+    this.GMap[i] = this.savedGMap[i];
+  }
+  // Reactivate the restored character-set translation table.
+  this.translate = this.GMap[this.useGMap];
+  this.needWrap = false;
+  this.gotoXY(this.savedX[this.currentScreen],
+              this.savedY[this.currentScreen]);
+};
+
+VT100.prototype.getTransformName = function() {
+ var styles = [ 'transform', 'WebkitTransform', 'MozTransform', 'filter' ];
+ for (var i = 0; i < styles.length; ++i) {
+ if (typeof this.console[0].style[styles[i]] != 'undefined') {
+ return styles[i];
+ }
+ }
+ return undefined;
+};
+
+VT100.prototype.getTransformStyle = function(transform, scale) {
+ return scale && scale != 1.0
+ ? transform == 'filter'
+ ? 'progid:DXImageTransform.Microsoft.Matrix(' +
+ 'M11=' + (1.0/scale) + ',M12=0,M21=0,M22=1,' +
+ "sizingMethod='auto expand')"
+ : 'translateX(-50%) ' +
+ 'scaleX(' + (1.0/scale) + ') ' +
+ 'translateX(50%)'
+ : '';
+};
+
+// DECCOLM emulation: switch between 80- and 132-column mode by applying a
+// horizontal CSS scale of 1/1.65 to the console, cursor, and measuring
+// element, then re-running the resizer. A no-op when the browser supports
+// no transform or when the requested state is already active.
+VT100.prototype.set80_132Mode = function(state) {
+  var transform = this.getTransformName();
+  if (transform) {
+    if ((this.console[this.currentScreen].style[transform] != '') == state) {
+      return;
+    }
+    var style = state ?
+                this.getTransformStyle(transform, 1.65):'';
+    this.console[this.currentScreen].style[transform] = style;
+    this.cursor.style[transform] = style;
+    this.space.style[transform] = style;
+    this.scale = state ? 1.65 : 1.0;
+    if (transform == 'filter') {
+      // Legacy IE's filter does not grow the element; widen it explicitly.
+      this.console[this.currentScreen].style.width = state ? '165%' : '';
+    }
+    this.resizer();
+  }
+};
+
+// CSI h / CSI l (SM/RM): set (state=true) or reset (state=false) each mode
+// listed in the collected parameters. isQuestionMark selects DEC private
+// modes (DECSET/DECRST: cursor keys, 80/132 columns, inverse video, origin
+// mode, auto-wrap, mouse reporting, cursor visibility, alternate screen)
+// versus ANSI modes (display control chars, insert mode, CR/LF mode).
+VT100.prototype.setMode = function(state) {
+  for (var i = 0; i <= this.npar; i++) {
+    if (this.isQuestionMark) {
+      switch (this.par[i]) {
+      case 1: this.cursorKeyMode = state; break;
+      case 3: this.set80_132Mode(state); break;
+      case 5: this.isInverted = state; this.refreshInvertedState(); break;
+      case 6: this.offsetMode = state; break;
+      case 7: this.autoWrapMode = state; break;
+      case 1000:
+      case 9: this.mouseReporting = state; break;
+      case 25: this.cursorNeedsShowing = state;
+               if (state) { this.showCursor(); }
+               else { this.hideCursor(); } break;
+      case 1047:
+      case 1049:
+      case 47: this.enableAlternateScreen(state); break;
+      default: break;
+      }
+    } else {
+      switch (this.par[i]) {
+      case 3: this.dispCtrl = state; break;
+      case 4: this.insertMode = state; break;
+      case 20:this.crLfMode = state; break;
+      default: break;
+      }
+    }
+  }
+};
+
+// DSR 5 response: report "ready, no malfunctions" back to the host.
+VT100.prototype.statusReport = function() {
+  // Ready and operational.
+  this.respondString += '\u001B[0n';
+};
+
+// DSR 6 (CPR) response: report the 1-based cursor position; in origin mode
+// the row is reported relative to the top of the scrolling region.
+VT100.prototype.cursorReport = function() {
+  this.respondString += '\u001B[' +
+                        (this.cursorY + (this.offsetMode ? this.top + 1 : 1)) +
+                        ';' +
+                        (this.cursorX + 1) +
+                        'R';
+};
+
+// Stub for the cursor-attribute escape sequence; intentionally a no-op.
+VT100.prototype.setCursorAttr = function(setAttr, xorAttr) {
+  // Changing of cursor color is not implemented.
+};
+
+// Open (or reuse) the popup window that receives line-printer output.
+// Returns a truthy value when the window is open and usable. Popup blockers
+// are detected heuristically (window missing, closed, or ~zero width); on
+// the first failure of a print job (this.printing == 100) the user is
+// alerted once, after a 2s grace period to avoid false positives.
+VT100.prototype.openPrinterWindow = function() {
+  var rc = true;
+  try {
+    if (!this.printWin || this.printWin.closed) {
+      this.printWin = window.open('', 'print-output',
+        'width=800,height=600,directories=no,location=no,menubar=yes,' +
+        'status=no,toolbar=no,titlebar=yes,scrollbars=yes,resizable=yes');
+      this.printWin.document.body.innerHTML =
+        '<link rel="stylesheet" href="' +
+        document.location.protocol + '//' + document.location.host +
+        document.location.pathname.replace(/[^/]*$/, '') +
+        'print-styles.css" type="text/css">\n' +
+        '<div id="options"><input id="autoprint" type="checkbox"' +
+        (this.autoprint ? ' checked' : '') + '>' +
+        'Automatically, print page(s) when job is ready' +
+        '</input></div>\n' +
+        '<div id="spacer"><input type="checkbox">&nbsp;</input></div>' +
+        '<pre id="print"></pre>\n';
+      var autoprint = this.printWin.document.getElementById('autoprint');
+      this.addListener(autoprint, 'click',
+                       (function(vt100, autoprint) {
+                         return function() {
+                           vt100.autoprint = autoprint.checked;
+                           vt100.storeUserSettings();
+                           return false;
+                         };
+                       })(this, autoprint));
+      this.printWin.document.title = 'ShellInABox Printer Output';
+    }
+  } catch (e) {
+    // Maybe, a popup blocker prevented us from working. Better catch the
+    // exception, so that we won't break the entire terminal session. The
+    // user probably needs to disable the blocker first before retrying the
+    // operation.
+    rc = false;
+  }
+  rc &= this.printWin && !this.printWin.closed &&
+        (this.printWin.innerWidth ||
+         this.printWin.document.documentElement.clientWidth ||
+         this.printWin.document.body.clientWidth) > 1;
+
+  if (!rc && this.printing == 100) {
+    // Different popup blockers work differently. We try to detect a couple
+    // of common methods. And then we retry again a brief amount later, as
+    // false positives are otherwise possible. If we are sure that there is
+    // a popup blocker in effect, we alert the user to it. This is helpful
+    // as some popup blockers have minimal or no UI, and the user might not
+    // notice that they are missing the popup. In any case, we only show at
+    // most one message per print job.
+    this.printing = true;
+    setTimeout((function(win) {
+                  return function() {
+                    if (!win || win.closed ||
+                        (win.innerWidth ||
+                         win.document.documentElement.clientWidth ||
+                         win.document.body.clientWidth) <= 1) {
+                      alert('Attempted to print, but a popup blocker ' +
+                            'prevented the printer window from opening');
+                    }
+                  };
+                })(this.printWin), 2000);
+  }
+  return rc;
+};
+
+// Append the string "s" to the printer window's <pre id="print"> element,
+// replacing spaces with non-breaking spaces so runs of blanks survive HTML
+// rendering. Coalesces into the trailing text node when one exists. Fails
+// silently if a popup blocker denied access to the window.
+VT100.prototype.sendToPrinter = function(s) {
+  this.openPrinterWindow();
+  try {
+    var doc = this.printWin.document;
+    var print = doc.getElementById('print');
+    if (print.lastChild && print.lastChild.nodeName == '#text') {
+      print.lastChild.textContent += this.replaceChar(s, ' ', '\u00A0');
+    } else {
+      print.appendChild(doc.createTextNode(this.replaceChar(s, ' ','\u00A0')));
+    }
+  } catch (e) {
+    // There probably was a more aggressive popup blocker that prevented us
+    // from accessing the printer windows.
+  }
+};
+
+// Interpret one control character for the line-printer emulation. Only a
+// tiny subset is handled (HT, LF/FF as page breaks, CR as line break) plus a
+// minimal CSI parser whose single purpose is recognizing "ESC [ 4 i" so the
+// print job can be stopped via csii(). All other sequences are swallowed.
+VT100.prototype.sendControlToPrinter = function(ch) {
+  // We get called whenever doControl() is active. But for the printer, we
+  // only implement a basic line printer that doesn't understand most of
+  // the escape sequences of the VT100 terminal. In fact, the only escape
+  // sequence that we really need to recognize is '^[[5i' for turning the
+  // printer off.
+  try {
+    switch (ch) {
+    case 9:
+      // HT
+      this.openPrinterWindow();
+      var doc = this.printWin.document;
+      var print = doc.getElementById('print');
+      // Pad with spaces up to the next multiple-of-8 column of the
+      // trailing text node.
+      var chars = print.lastChild &&
+                  print.lastChild.nodeName == '#text' ?
+                  print.lastChild.textContent.length : 0;
+      this.sendToPrinter(this.spaces(8 - (chars % 8)));
+      break;
+    case 10:
+      // CR
+      break;
+    case 12:
+      // FF
+      this.openPrinterWindow();
+      var pageBreak = this.printWin.document.createElement('div');
+      pageBreak.className = 'pagebreak';
+      pageBreak.innerHTML = '<hr />';
+      this.printWin.document.getElementById('print').appendChild(pageBreak);
+      break;
+    case 13:
+      // LF
+      this.openPrinterWindow();
+      var lineBreak = this.printWin.document.createElement('br');
+      this.printWin.document.getElementById('print').appendChild(lineBreak);
+      break;
+    case 27:
+      // ESC
+      this.isEsc = 1 /* ESesc */;
+      break;
+    default:
+      // Drive the reduced escape-sequence state machine.
+      switch (this.isEsc) {
+      case 1 /* ESesc */:
+        this.isEsc = 0 /* ESnormal */;
+        switch (ch) {
+        case 0x5B /*[*/:
+          this.isEsc = 2 /* ESsquare */;
+          break;
+        default:
+          break;
+        }
+        break;
+      case 2 /* ESsquare */:
+        this.npar = 0;
+        this.par = [ 0, 0, 0, 0, 0, 0, 0, 0,
+                     0, 0, 0, 0, 0, 0, 0, 0 ];
+        this.isEsc = 3 /* ESgetpars */;
+        this.isQuestionMark = ch == 0x3F /*?*/;
+        if (this.isQuestionMark) {
+          break;
+        }
+        // Fall through
+      case 3 /* ESgetpars */:
+        if (ch == 0x3B /*;*/) {
+          this.npar++;
+          break;
+        } else if (ch >= 0x30 /*0*/ && ch <= 0x39 /*9*/) {
+          // Accumulate a decimal parameter digit by digit.
+          var par = this.par[this.npar];
+          if (par == undefined) {
+            par = 0;
+          }
+          this.par[this.npar] = 10*par + (ch & 0xF);
+          break;
+        } else {
+          this.isEsc = 4 /* ESgotpars */;
+        }
+        // Fall through
+      case 4 /* ESgotpars */:
+        this.isEsc = 0 /* ESnormal */;
+        if (this.isQuestionMark) {
+          break;
+        }
+        switch (ch) {
+        case 0x69 /*i*/:
+          this.csii(this.par[0]);
+          break;
+        default:
+          break;
+        }
+        break;
+      default:
+        this.isEsc = 0 /* ESnormal */;
+        break;
+      }
+      break;
+    }
+  } catch (e) {
+    // There probably was a more aggressive popup blocker that prevented us
+    // from accessing the printer windows.
+  }
+};
+
+// CSI @ (ICH): insert "number" blank characters at the cursor (default 1,
+// clamped to the remaining line width) by shifting the rest of the line
+// right. Cancels any pending auto-wrap.
+VT100.prototype.csiAt = function(number) {
+  // Insert spaces
+  if (number == 0) {
+    number = 1;
+  }
+  if (number > this.terminalWidth - this.cursorX) {
+    number = this.terminalWidth - this.cursorX;
+  }
+  this.scrollRegion(this.cursorX, this.cursorY,
+                    this.terminalWidth - this.cursorX - number, 1,
+                    number, 0, this.color, this.style);
+  this.needWrap = false;
+};
+
+// CSI i (Media Copy): printer control. 0 = print the screen via the
+// browser, 4 = end the current print job (trim trailing page breaks and
+// optionally auto-print), 5 = start a new job (this.printing is set to the
+// sentinel 100 so openPrinterWindow() knows a job just started).
+VT100.prototype.csii = function(number) {
+  // Printer control
+  switch (number) {
+  case 0: // Print Screen
+    window.print();
+    break;
+  case 4: // Stop printing
+    try {
+      if (this.printing && this.printWin && !this.printWin.closed) {
+        var print = this.printWin.document.getElementById('print');
+        while (print.lastChild &&
+               print.lastChild.tagName == 'DIV' &&
+               print.lastChild.className == 'pagebreak') {
+          // Remove trailing blank pages
+          print.removeChild(print.lastChild);
+        }
+        if (this.autoprint) {
+          this.printWin.print();
+        }
+      }
+    } catch (e) {
+    }
+    this.printing = false;
+    break;
+  case 5: // Start printing
+    if (!this.printing && this.printWin && !this.printWin.closed) {
+      this.printWin.document.getElementById('print').innerHTML = '';
+    }
+    this.printing = 100;
+    break;
+  default:
+    break;
+  }
+};
+
+VT100.prototype.csiJ = function(number) {
+ switch (number) {
+ case 0: // Erase from cursor to end of display
+ this.clearRegion(this.cursorX, this.cursorY,
+ this.terminalWidth - this.cursorX, 1,
+ this.color, this.style);
+ if (this.cursorY < this.terminalHeight-2) {
+ this.clearRegion(0, this.cursorY+1,
+ this.terminalWidth, this.terminalHeight-this.cursorY-1,
+ this.color, this.style);
+ }
+ break;
+ case 1: // Erase from start to cursor
+ if (this.cursorY > 0) {
+ this.clearRegion(0, 0,
+ this.terminalWidth, this.cursorY,
+ this.color, this.style);
+ }
+ this.clearRegion(0, this.cursorY, this.cursorX + 1, 1,
+ this.color, this.style);
+ break;
+ case 2: // Erase whole display
+ this.clearRegion(0, 0, this.terminalWidth, this.terminalHeight,
+ this.color, this.style);
+ break;
+ default:
+ return;
+ }
+ needWrap = false;
+};
+
+VT100.prototype.csiK = function(number) {
+ switch (number) {
+ case 0: // Erase from cursor to end of line
+ this.clearRegion(this.cursorX, this.cursorY,
+ this.terminalWidth - this.cursorX, 1,
+ this.color, this.style);
+ break;
+ case 1: // Erase from start of line to cursor
+ this.clearRegion(0, this.cursorY, this.cursorX + 1, 1,
+ this.color, this.style);
+ break;
+ case 2: // Erase whole line
+ this.clearRegion(0, this.cursorY, this.terminalWidth, 1,
+ this.color, this.style);
+ break;
+ default:
+ return;
+ }
+ needWrap = false;
+};
+
+VT100.prototype.csiL = function(number) {
+ // Open line by inserting blank line(s)
+ if (this.cursorY >= this.bottom) {
+ return;
+ }
+ if (number == 0) {
+ number = 1;
+ }
+ if (number > this.bottom - this.cursorY) {
+ number = this.bottom - this.cursorY;
+ }
+ this.scrollRegion(0, this.cursorY,
+ this.terminalWidth, this.bottom - this.cursorY - number,
+ 0, number, this.color, this.style);
+ needWrap = false;
+};
+
+VT100.prototype.csiM = function(number) {
+ // Delete line(s), scrolling up the bottom of the screen.
+ if (this.cursorY >= this.bottom) {
+ return;
+ }
+ if (number == 0) {
+ number = 1;
+ }
+ if (number > this.bottom - this.cursorY) {
+ number = bottom - cursorY;
+ }
+ this.scrollRegion(0, this.cursorY + number,
+ this.terminalWidth, this.bottom - this.cursorY - number,
+ 0, -number, this.color, this.style);
+ needWrap = false;
+};
+
+// CSI m (SGR — select graphic rendition): apply every collected parameter
+// to the packed attribute word, then refresh the cached CSS strings.
+// 0 reset, 1 bright, 2 dim, 4 underline, 5 blink, 7 reverse, 10-12 charset
+// selection, 21/22/24/25/27 attribute resets, 30-37/40-47 colors,
+// 38/39/49 default-color variants.
+VT100.prototype.csim = function() {
+  for (var i = 0; i <= this.npar; i++) {
+    switch (this.par[i]) {
+    case 0: this.attr = 0x00F0 /* ATTR_DEFAULT */; break;
+    case 1: this.attr = (this.attr & ~0x0400 /* ATTR_DIM */)|0x0800 /* ATTR_BRIGHT */; break;
+    case 2: this.attr = (this.attr & ~0x0800 /* ATTR_BRIGHT */)|0x0400 /* ATTR_DIM */; break;
+    case 4: this.attr |= 0x0200 /* ATTR_UNDERLINE */; break;
+    case 5: this.attr |= 0x1000 /* ATTR_BLINK */; break;
+    case 7: this.attr |= 0x0100 /* ATTR_REVERSE */; break;
+    case 10:
+      // Select the active G-map and plain control-character handling.
+      this.translate = this.GMap[this.useGMap];
+      this.dispCtrl = false;
+      this.toggleMeta = false;
+      break;
+    case 11:
+      // Display control characters using the CP437 glyph table.
+      this.translate = this.CodePage437Map;
+      this.dispCtrl = true;
+      this.toggleMeta = false;
+      break;
+    case 12:
+      // Same as 11, but additionally set the high bit before translating.
+      this.translate = this.CodePage437Map;
+      this.dispCtrl = true;
+      this.toggleMeta = true;
+      break;
+    case 21:
+    case 22: this.attr &= ~(0x0800 /* ATTR_BRIGHT */|0x0400 /* ATTR_DIM */); break;
+    case 24: this.attr &= ~ 0x0200 /* ATTR_UNDERLINE */; break;
+    case 25: this.attr &= ~ 0x1000 /* ATTR_BLINK */; break;
+    case 27: this.attr &= ~ 0x0100 /* ATTR_REVERSE */; break;
+    case 38: this.attr = (this.attr & ~(0x0400 /* ATTR_DIM */|0x0800 /* ATTR_BRIGHT */|0x0F))|
+                         0x0200 /* ATTR_UNDERLINE */; break;
+    case 39: this.attr &= ~(0x0400 /* ATTR_DIM */|0x0800 /* ATTR_BRIGHT */|0x0200 /* ATTR_UNDERLINE */|0x0F); break;
+    case 49: this.attr |= 0xF0; break;
+    default:
+      if (this.par[i] >= 30 && this.par[i] <= 37) {
+        var fg = this.par[i] - 30;
+        this.attr = (this.attr & ~0x0F) | fg;
+      } else if (this.par[i] >= 40 && this.par[i] <= 47) {
+        var bg = this.par[i] - 40;
+        this.attr = (this.attr & ~0xF0) | (bg << 4);
+      }
+      break;
+    }
+  }
+  this.updateStyle();
+};
+
+VT100.prototype.csiP = function(number) {
+ // Delete character(s) following cursor
+ if (number == 0) {
+ number = 1;
+ }
+ if (number > this.terminalWidth - this.cursorX) {
+ number = this.terminalWidth - this.cursorX;
+ }
+ this.scrollRegion(this.cursorX + number, this.cursorY,
+ this.terminalWidth - this.cursorX - number, 1,
+ -number, 0, this.color, this.style);
+ needWrap = false;
+};
+
+VT100.prototype.csiX = function(number) {
+ // Clear characters following cursor
+ if (number == 0) {
+ number++;
+ }
+ if (number > this.terminalWidth - this.cursorX) {
+ number = this.terminalWidth - this.cursorX;
+ }
+ this.clearRegion(this.cursorX, this.cursorY, number, 1,
+ this.color, this.style);
+ needWrap = false;
+};
+
+// Stub for Linux console "setterm" private sequences; intentionally a no-op.
+VT100.prototype.settermCommand = function() {
+  // Setterm commands are not implemented
+};
+
+VT100.prototype.doControl = function(ch) {
+ if (this.printing) {
+ this.sendControlToPrinter(ch);
+ return '';
+ }
+ var lineBuf = '';
+ switch (ch) {
+ case 0x00: /* ignored */ break;
+ case 0x08: this.bs(); break;
+ case 0x09: this.ht(); break;
+ case 0x0A:
+ case 0x0B:
+ case 0x0C:
+ case 0x84: this.lf(); if (!this.crLfMode) break;
+ case 0x0D: this.cr(); break;
+ case 0x85: this.cr(); this.lf(); break;
+ case 0x0E: this.useGMap = 1;
+ this.translate = this.GMap[1];
+ this.dispCtrl = true; break;
+ case 0x0F: this.useGMap = 0;
+ this.translate = this.GMap[0];
+ this.dispCtrl = false; break;
+ case 0x18:
+ case 0x1A: this.isEsc = 0 /* ESnormal */; break;
+ case 0x1B: this.isEsc = 1 /* ESesc */; break;
+ case 0x7F: /* ignored */ break;
+ case 0x88: this.userTabStop[this.cursorX] = true; break;
+ case 0x8D: this.ri(); break;
+ case 0x8E: this.isEsc = 18 /* ESss2 */; break;
+ case 0x8F: this.isEsc = 19 /* ESss3 */; break;
+ case 0x9A: this.respondID(); break;
+ case 0x9B: this.isEsc = 2 /* ESsquare */; break;
+ case 0x07: if (this.isEsc != 17 /* EStitle */) {
+ this.beep(); break;
+ }
+ /* fall thru */
+ default: switch (this.isEsc) {
+ case 1 /* ESesc */:
+ this.isEsc = 0 /* ESnormal */;
+ switch (ch) {
+/*%*/ case 0x25: this.isEsc = 13 /* ESpercent */; break;
+/*(*/ case 0x28: this.isEsc = 8 /* ESsetG0 */; break;
+/*-*/ case 0x2D:
+/*)*/ case 0x29: this.isEsc = 9 /* ESsetG1 */; break;
+/*.*/ case 0x2E:
+/***/ case 0x2A: this.isEsc = 10 /* ESsetG2 */; break;
+/*/*/ case 0x2F:
+/*+*/ case 0x2B: this.isEsc = 11 /* ESsetG3 */; break;
+/*#*/ case 0x23: this.isEsc = 7 /* EShash */; break;
+/*7*/ case 0x37: this.saveCursor(); break;
+/*8*/ case 0x38: this.restoreCursor(); break;
+/*>*/ case 0x3E: this.applKeyMode = false; break;
+/*=*/ case 0x3D: this.applKeyMode = true; break;
+/*D*/ case 0x44: this.lf(); break;
+/*E*/ case 0x45: this.cr(); this.lf(); break;
+/*M*/ case 0x4D: this.ri(); break;
+/*N*/ case 0x4E: this.isEsc = 18 /* ESss2 */; break;
+/*O*/ case 0x4F: this.isEsc = 19 /* ESss3 */; break;
+/*H*/ case 0x48: this.userTabStop[this.cursorX] = true; break;
+/*Z*/ case 0x5A: this.respondID(); break;
+/*[*/ case 0x5B: this.isEsc = 2 /* ESsquare */; break;
+/*]*/ case 0x5D: this.isEsc = 15 /* ESnonstd */; break;
+/*c*/ case 0x63: this.reset(); break;
+/*g*/ case 0x67: this.flashScreen(); break;
+ default: break;
+ }
+ break;
+ case 15 /* ESnonstd */:
+ switch (ch) {
+/*0*/ case 0x30:
+/*1*/ case 0x31:
+/*2*/ case 0x32: this.isEsc = 17 /* EStitle */; this.titleString = ''; break;
+/*P*/ case 0x50: this.npar = 0; this.par = [ 0, 0, 0, 0, 0, 0, 0 ];
+ this.isEsc = 16 /* ESpalette */; break;
+/*R*/ case 0x52: // Palette support is not implemented
+ this.isEsc = 0 /* ESnormal */; break;
+ default: this.isEsc = 0 /* ESnormal */; break;
+ }
+ break;
+ case 16 /* ESpalette */:
+ if ((ch >= 0x30 /*0*/ && ch <= 0x39 /*9*/) ||
+ (ch >= 0x41 /*A*/ && ch <= 0x46 /*F*/) ||
+ (ch >= 0x61 /*a*/ && ch <= 0x66 /*f*/)) {
+ this.par[this.npar++] = ch > 0x39 /*9*/ ? (ch & 0xDF) - 55
+ : (ch & 0xF);
+ if (this.npar == 7) {
+ // Palette support is not implemented
+ this.isEsc = 0 /* ESnormal */;
+ }
+ } else {
+ this.isEsc = 0 /* ESnormal */;
+ }
+ break;
+ case 2 /* ESsquare */:
+ this.npar = 0;
+ this.par = [ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 ];
+ this.isEsc = 3 /* ESgetpars */;
+/*[*/ if (ch == 0x5B) { // Function key
+ this.isEsc = 6 /* ESfunckey */;
+ break;
+ } else {
+/*?*/ this.isQuestionMark = ch == 0x3F;
+ if (this.isQuestionMark) {
+ break;
+ }
+ }
+ // Fall through
+ case 5 /* ESdeviceattr */:
+ case 3 /* ESgetpars */:
+/*;*/ if (ch == 0x3B) {
+ this.npar++;
+ break;
+ } else if (ch >= 0x30 /*0*/ && ch <= 0x39 /*9*/) {
+ var par = this.par[this.npar];
+ if (par == undefined) {
+ par = 0;
+ }
+ this.par[this.npar] = 10*par + (ch & 0xF);
+ break;
+ } else if (this.isEsc == 5 /* ESdeviceattr */) {
+ switch (ch) {
+/*c*/ case 0x63: if (this.par[0] == 0) this.respondSecondaryDA(); break;
+/*m*/ case 0x6D: /* (re)set key modifier resource values */ break;
+/*n*/ case 0x6E: /* disable key modifier resource values */ break;
+/*p*/ case 0x70: /* set pointer mode resource value */ break;
+ default: break;
+ }
+ this.isEsc = 0 /* ESnormal */;
+ break;
+ } else {
+ this.isEsc = 4 /* ESgotpars */;
+ }
+ // Fall through
+ case 4 /* ESgotpars */:
+ this.isEsc = 0 /* ESnormal */;
+ if (this.isQuestionMark) {
+ switch (ch) {
+/*h*/ case 0x68: this.setMode(true); break;
+/*l*/ case 0x6C: this.setMode(false); break;
+/*c*/ case 0x63: this.setCursorAttr(this.par[2], this.par[1]); break;
+ default: break;
+ }
+ this.isQuestionMark = false;
+ break;
+ }
+ switch (ch) {
+/*!*/ case 0x21: this.isEsc = 12 /* ESbang */; break;
+/*>*/ case 0x3E: if (!this.npar) this.isEsc = 5 /* ESdeviceattr */; break;
+/*G*/ case 0x47:
+/*`*/ case 0x60: this.gotoXY(this.par[0] - 1, this.cursorY); break;
+/*A*/ case 0x41: this.gotoXY(this.cursorX,
+ this.cursorY - (this.par[0] ? this.par[0] : 1));
+ break;
+/*B*/ case 0x42:
+/*e*/ case 0x65: this.gotoXY(this.cursorX,
+ this.cursorY + (this.par[0] ? this.par[0] : 1));
+ break;
+/*C*/ case 0x43:
+/*a*/ case 0x61: this.gotoXY(this.cursorX + (this.par[0] ? this.par[0] : 1),
+ this.cursorY); break;
+/*D*/ case 0x44: this.gotoXY(this.cursorX - (this.par[0] ? this.par[0] : 1),
+ this.cursorY); break;
+/*E*/ case 0x45: this.gotoXY(0, this.cursorY + (this.par[0] ? this.par[0] :1));
+ break;
+/*F*/ case 0x46: this.gotoXY(0, this.cursorY - (this.par[0] ? this.par[0] :1));
+ break;
+/*d*/ case 0x64: this.gotoXaY(this.cursorX, this.par[0] - 1); break;
+/*H*/ case 0x48:
+/*f*/ case 0x66: this.gotoXaY(this.par[1] - 1, this.par[0] - 1); break;
+/*I*/ case 0x49: this.ht(this.par[0] ? this.par[0] : 1); break;
+/*@*/ case 0x40: this.csiAt(this.par[0]); break;
+/*i*/ case 0x69: this.csii(this.par[0]); break;
+/*J*/ case 0x4A: this.csiJ(this.par[0]); break;
+/*K*/ case 0x4B: this.csiK(this.par[0]); break;
+/*L*/ case 0x4C: this.csiL(this.par[0]); break;
+/*M*/ case 0x4D: this.csiM(this.par[0]); break;
+/*m*/ case 0x6D: this.csim(); break;
+/*P*/ case 0x50: this.csiP(this.par[0]); break;
+/*X*/ case 0x58: this.csiX(this.par[0]); break;
+/*S*/ case 0x53: this.lf(this.par[0] ? this.par[0] : 1); break;
+/*T*/ case 0x54: this.ri(this.par[0] ? this.par[0] : 1); break;
+/*c*/ case 0x63: if (!this.par[0]) this.respondID(); break;
+/*g*/ case 0x67: if (this.par[0] == 0) {
+ this.userTabStop[this.cursorX] = false;
+ } else if (this.par[0] == 2 || this.par[0] == 3) {
+ this.userTabStop = [ ];
+ for (var i = 0; i < this.terminalWidth; i++) {
+ this.userTabStop[i] = false;
+ }
+ }
+ break;
+/*h*/ case 0x68: this.setMode(true); break;
+/*l*/ case 0x6C: this.setMode(false); break;
+/*n*/ case 0x6E: switch (this.par[0]) {
+ case 5: this.statusReport(); break;
+ case 6: this.cursorReport(); break;
+ default: break;
+ }
+ break;
+/*q*/ case 0x71: // LED control not implemented
+ break;
+/*r*/ case 0x72: var t = this.par[0] ? this.par[0] : 1;
+ var b = this.par[1] ? this.par[1]
+ : this.terminalHeight;
+ if (t < b && b <= this.terminalHeight) {
+ this.top = t - 1;
+ this.bottom= b;
+ this.gotoXaY(0, 0);
+ }
+ break;
+/*b*/ case 0x62: var c = this.par[0] ? this.par[0] : 1;
+ if (c > this.terminalWidth * this.terminalHeight) {
+ c = this.terminalWidth * this.terminalHeight;
+ }
+ while (c-- > 0) {
+ lineBuf += this.lastCharacter;
+ }
+ break;
+/*s*/ case 0x73: this.saveCursor(); break;
+/*u*/ case 0x75: this.restoreCursor(); break;
+/*Z*/ case 0x5A: this.rt(this.par[0] ? this.par[0] : 1); break;
+/*]*/ case 0x5D: this.settermCommand(); break;
+ default: break;
+ }
+ break;
+ case 12 /* ESbang */:
+ if (ch == 'p') {
+ this.reset();
+ }
+ this.isEsc = 0 /* ESnormal */;
+ break;
+ case 13 /* ESpercent */:
+ this.isEsc = 0 /* ESnormal */;
+ switch (ch) {
+/*@*/ case 0x40: this.utfEnabled = false; break;
+/*G*/ case 0x47:
+/*8*/ case 0x38: this.utfEnabled = true; break;
+ default: break;
+ }
+ break;
+ case 6 /* ESfunckey */:
+ this.isEsc = 0 /* ESnormal */; break;
+ case 7 /* EShash */:
+ this.isEsc = 0 /* ESnormal */;
+/*8*/ if (ch == 0x38) {
+ // Screen alignment test not implemented
+ }
+ break;
+ case 8 /* ESsetG0 */:
+ case 9 /* ESsetG1 */:
+ case 10 /* ESsetG2 */:
+ case 11 /* ESsetG3 */:
+ var g = this.isEsc - 8 /* ESsetG0 */;
+ this.isEsc = 0 /* ESnormal */;
+ switch (ch) {
+/*0*/ case 0x30: this.GMap[g] = this.VT100GraphicsMap; break;
+/*A*/ case 0x42:
+/*B*/ case 0x42: this.GMap[g] = this.Latin1Map; break;
+/*U*/ case 0x55: this.GMap[g] = this.CodePage437Map; break;
+/*K*/ case 0x4B: this.GMap[g] = this.DirectToFontMap; break;
+ default: break;
+ }
+ if (this.useGMap == g) {
+ this.translate = this.GMap[g];
+ }
+ break;
+ case 17 /* EStitle */:
+ if (ch == 0x07) {
+ if (this.titleString && this.titleString.charAt(0) == ';') {
+ this.titleString = this.titleString.substr(1);
+ if (this.titleString != '') {
+ this.titleString += ' - ';
+ }
+ this.titleString += 'Shell In A Box'
+ }
+ try {
+ window.document.title = this.titleString;
+ } catch (e) {
+ }
+ this.isEsc = 0 /* ESnormal */;
+ } else {
+ this.titleString += String.fromCharCode(ch);
+ }
+ break;
+ case 18 /* ESss2 */:
+ case 19 /* ESss3 */:
+ if (ch < 256) {
+ ch = this.GMap[this.isEsc - 18 /* ESss2 */ + 2]
+ [this.toggleMeta ? (ch | 0x80) : ch];
+ if ((ch & 0xFF00) == 0xF000) {
+ ch = ch & 0xFF;
+ } else if (ch == 0xFEFF || (ch >= 0x200A && ch <= 0x200F)) {
+ this.isEsc = 0 /* ESnormal */; break;
+ }
+ }
+ this.lastCharacter = String.fromCharCode(ch);
+ lineBuf += this.lastCharacter;
+ this.isEsc = 0 /* ESnormal */; break;
+ default:
+ this.isEsc = 0 /* ESnormal */; break;
+ }
+ break;
+ }
+ return lineBuf;
+};
+
+// Write the literal string "s" to the screen at the cursor position (or to
+// the printer while a job is active). The string is pre-truncated to the
+// space left on the line, keeping the final character (so the last column
+// shows the most recent glyph, matching hardware-terminal behavior).
+VT100.prototype.renderString = function(s, showCursor) {
+  if (this.printing) {
+    this.sendToPrinter(s);
+    if (showCursor) {
+      this.showCursor();
+    }
+    return;
+  }
+
+  // We try to minimize the number of DOM operations by coalescing individual
+  // characters into strings. This is a significant performance improvement.
+  var incX = s.length;
+  if (incX > this.terminalWidth - this.cursorX) {
+    incX = this.terminalWidth - this.cursorX;
+    if (incX <= 0) {
+      return;
+    }
+    s = s.substr(0, incX - 1) + s.charAt(s.length - 1);
+  }
+  if (showCursor) {
+    // Minimize the number of calls to putString(), by avoiding a direct
+    // call to this.showCursor()
+    this.cursor.style.visibility = '';
+  }
+  this.putString(this.cursorX, this.cursorY, s, this.color, this.style);
+};
+
+// Main interpreter entry point: feed a chunk of raw terminal output through
+// the emulator. Handles incremental UTF-8 decoding (state carried in
+// utfCount/utfChar across calls), batches printable runs into lineBuf for
+// efficient rendering, and routes control characters through doControl().
+// Returns the response string (e.g. cursor reports) to send to the host.
+VT100.prototype.vt100 = function(s) {
+  this.cursorNeedsShowing = this.hideCursor();
+  this.respondString = '';
+  var lineBuf = '';
+  for (var i = 0; i < s.length; i++) {
+    var ch = s.charCodeAt(i);
+    if (this.utfEnabled) {
+      // Decode UTF8 encoded character
+      if (ch > 0x7F) {
+        if (this.utfCount > 0 && (ch & 0xC0) == 0x80) {
+          // Continuation byte: fold 6 more bits into the pending code point.
+          this.utfChar = (this.utfChar << 6) | (ch & 0x3F);
+          if (--this.utfCount <= 0) {
+            if (this.utfChar > 0xFFFF || this.utfChar < 0) {
+              // Outside the BMP (or invalid): substitute U+FFFD.
+              ch = 0xFFFD;
+            } else {
+              ch = this.utfChar;
+            }
+          } else {
+            continue;
+          }
+        } else {
+          // Lead byte: its high bits encode how many continuation bytes
+          // follow and contribute the initial code-point bits.
+          if ((ch & 0xE0) == 0xC0) {
+            this.utfCount = 1;
+            this.utfChar = ch & 0x1F;
+          } else if ((ch & 0xF0) == 0xE0) {
+            this.utfCount = 2;
+            this.utfChar = ch & 0x0F;
+          } else if ((ch & 0xF8) == 0xF0) {
+            this.utfCount = 3;
+            this.utfChar = ch & 0x07;
+          } else if ((ch & 0xFC) == 0xF8) {
+            this.utfCount = 4;
+            this.utfChar = ch & 0x03;
+          } else if ((ch & 0xFE) == 0xFC) {
+            this.utfCount = 5;
+            this.utfChar = ch & 0x01;
+          } else {
+            this.utfCount = 0;
+          }
+          continue;
+        }
+      } else {
+        this.utfCount = 0;
+      }
+    }
+    // A character is "normal" (printable) unless it is an active control
+    // character per the ctrlAction/ctrlAlways tables, or DEL without
+    // display-control mode.
+    var isNormalCharacter =
+      (ch >= 32 && ch <= 127 || ch >= 160 ||
+       this.utfEnabled && ch >= 128 ||
+       !(this.dispCtrl ? this.ctrlAlways : this.ctrlAction)[ch & 0x1F]) &&
+      (ch != 0x7F || this.dispCtrl);
+
+    if (isNormalCharacter && this.isEsc == 0 /* ESnormal */) {
+      if (ch < 256) {
+        ch = this.translate[this.toggleMeta ? (ch | 0x80) : ch];
+      }
+      if ((ch & 0xFF00) == 0xF000) {
+        ch = ch & 0xFF;
+      } else if (ch == 0xFEFF || (ch >= 0x200A && ch <= 0x200F)) {
+        // Drop BOM and zero-width/direction marks.
+        continue;
+      }
+      if (!this.printing) {
+        if (this.needWrap || this.insertMode) {
+          // Flush the batched run before any operation that moves content.
+          if (lineBuf) {
+            this.renderString(lineBuf);
+            lineBuf = '';
+          }
+        }
+        if (this.needWrap) {
+          this.cr(); this.lf();
+        }
+        if (this.insertMode) {
+          this.scrollRegion(this.cursorX, this.cursorY,
+                            this.terminalWidth - this.cursorX - 1, 1,
+                            1, 0, this.color, this.style);
+        }
+      }
+      this.lastCharacter = String.fromCharCode(ch);
+      lineBuf += this.lastCharacter;
+      if (!this.printing &&
+          this.cursorX + lineBuf.length >= this.terminalWidth) {
+        this.needWrap = this.autoWrapMode;
+      }
+    } else {
+      if (lineBuf) {
+        this.renderString(lineBuf);
+        lineBuf = '';
+      }
+      var expand = this.doControl(ch);
+      if (expand.length) {
+        // Recursively interpret expanded text (e.g. CSI b repeats).
+        var r = this.respondString;
+        this.respondString= r + this.vt100(expand);
+      }
+    }
+  }
+  if (lineBuf) {
+    this.renderString(lineBuf, this.cursorNeedsShowing);
+  } else if (this.cursorNeedsShowing) {
+    this.showCursor();
+  }
+  return this.respondString;
+};
+
+VT100.prototype.Latin1Map = [
+0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
+0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F,
+0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
+0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F,
+0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
+0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F,
+0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
+0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F,
+0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
+0x0048, 0x0049, 0x004A, 0x004B, 0x004C, 0x004D, 0x004E, 0x004F,
+0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
+0x0058, 0x0059, 0x005A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F,
+0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
+0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F,
+0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
+0x0078, 0x0079, 0x007A, 0x007B, 0x007C, 0x007D, 0x007E, 0x007F,
+0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,
+0x0088, 0x0089, 0x008A, 0x008B, 0x008C, 0x008D, 0x008E, 0x008F,
+0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,
+0x0098, 0x0099, 0x009A, 0x009B, 0x009C, 0x009D, 0x009E, 0x009F,
+0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7,
+0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF,
+0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x00B5, 0x00B6, 0x00B7,
+0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF,
+0x00C0, 0x00C1, 0x00C2, 0x00C3, 0x00C4, 0x00C5, 0x00C6, 0x00C7,
+0x00C8, 0x00C9, 0x00CA, 0x00CB, 0x00CC, 0x00CD, 0x00CE, 0x00CF,
+0x00D0, 0x00D1, 0x00D2, 0x00D3, 0x00D4, 0x00D5, 0x00D6, 0x00D7,
+0x00D8, 0x00D9, 0x00DA, 0x00DB, 0x00DC, 0x00DD, 0x00DE, 0x00DF,
+0x00E0, 0x00E1, 0x00E2, 0x00E3, 0x00E4, 0x00E5, 0x00E6, 0x00E7,
+0x00E8, 0x00E9, 0x00EA, 0x00EB, 0x00EC, 0x00ED, 0x00EE, 0x00EF,
+0x00F0, 0x00F1, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F6, 0x00F7,
+0x00F8, 0x00F9, 0x00FA, 0x00FB, 0x00FC, 0x00FD, 0x00FE, 0x00FF
+];
+
+VT100.prototype.VT100GraphicsMap = [
+0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
+0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F,
+0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
+0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F,
+0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
+0x0028, 0x0029, 0x002A, 0x2192, 0x2190, 0x2191, 0x2193, 0x002F,
+0x2588, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
+0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F,
+0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
+0x0048, 0x0049, 0x004A, 0x004B, 0x004C, 0x004D, 0x004E, 0x004F,
+0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
+0x0058, 0x0059, 0x005A, 0x005B, 0x005C, 0x005D, 0x005E, 0x00A0,
+0x25C6, 0x2592, 0x2409, 0x240C, 0x240D, 0x240A, 0x00B0, 0x00B1,
+0x2591, 0x240B, 0x2518, 0x2510, 0x250C, 0x2514, 0x253C, 0xF800,
+0xF801, 0x2500, 0xF803, 0xF804, 0x251C, 0x2524, 0x2534, 0x252C,
+0x2502, 0x2264, 0x2265, 0x03C0, 0x2260, 0x00A3, 0x00B7, 0x007F,
+0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,
+0x0088, 0x0089, 0x008A, 0x008B, 0x008C, 0x008D, 0x008E, 0x008F,
+0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,
+0x0098, 0x0099, 0x009A, 0x009B, 0x009C, 0x009D, 0x009E, 0x009F,
+0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7,
+0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF,
+0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x00B5, 0x00B6, 0x00B7,
+0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF,
+0x00C0, 0x00C1, 0x00C2, 0x00C3, 0x00C4, 0x00C5, 0x00C6, 0x00C7,
+0x00C8, 0x00C9, 0x00CA, 0x00CB, 0x00CC, 0x00CD, 0x00CE, 0x00CF,
+0x00D0, 0x00D1, 0x00D2, 0x00D3, 0x00D4, 0x00D5, 0x00D6, 0x00D7,
+0x00D8, 0x00D9, 0x00DA, 0x00DB, 0x00DC, 0x00DD, 0x00DE, 0x00DF,
+0x00E0, 0x00E1, 0x00E2, 0x00E3, 0x00E4, 0x00E5, 0x00E6, 0x00E7,
+0x00E8, 0x00E9, 0x00EA, 0x00EB, 0x00EC, 0x00ED, 0x00EE, 0x00EF,
+0x00F0, 0x00F1, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F6, 0x00F7,
+0x00F8, 0x00F9, 0x00FA, 0x00FB, 0x00FC, 0x00FD, 0x00FE, 0x00FF
+];
+
+VT100.prototype.CodePage437Map = [
+0x0000, 0x263A, 0x263B, 0x2665, 0x2666, 0x2663, 0x2660, 0x2022,
+0x25D8, 0x25CB, 0x25D9, 0x2642, 0x2640, 0x266A, 0x266B, 0x263C,
+0x25B6, 0x25C0, 0x2195, 0x203C, 0x00B6, 0x00A7, 0x25AC, 0x21A8,
+0x2191, 0x2193, 0x2192, 0x2190, 0x221F, 0x2194, 0x25B2, 0x25BC,
+0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
+0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F,
+0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
+0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F,
+0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
+0x0048, 0x0049, 0x004A, 0x004B, 0x004C, 0x004D, 0x004E, 0x004F,
+0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
+0x0058, 0x0059, 0x005A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F,
+0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
+0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F,
+0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
+0x0078, 0x0079, 0x007A, 0x007B, 0x007C, 0x007D, 0x007E, 0x2302,
+0x00C7, 0x00FC, 0x00E9, 0x00E2, 0x00E4, 0x00E0, 0x00E5, 0x00E7,
+0x00EA, 0x00EB, 0x00E8, 0x00EF, 0x00EE, 0x00EC, 0x00C4, 0x00C5,
+0x00C9, 0x00E6, 0x00C6, 0x00F4, 0x00F6, 0x00F2, 0x00FB, 0x00F9,
+0x00FF, 0x00D6, 0x00DC, 0x00A2, 0x00A3, 0x00A5, 0x20A7, 0x0192,
+0x00E1, 0x00ED, 0x00F3, 0x00FA, 0x00F1, 0x00D1, 0x00AA, 0x00BA,
+0x00BF, 0x2310, 0x00AC, 0x00BD, 0x00BC, 0x00A1, 0x00AB, 0x00BB,
+0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556,
+0x2555, 0x2563, 0x2551, 0x2557, 0x255D, 0x255C, 0x255B, 0x2510,
+0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x255E, 0x255F,
+0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x2567,
+0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256B,
+0x256A, 0x2518, 0x250C, 0x2588, 0x2584, 0x258C, 0x2590, 0x2580,
+0x03B1, 0x00DF, 0x0393, 0x03C0, 0x03A3, 0x03C3, 0x00B5, 0x03C4,
+0x03A6, 0x0398, 0x03A9, 0x03B4, 0x221E, 0x03C6, 0x03B5, 0x2229,
+0x2261, 0x00B1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00F7, 0x2248,
+0x00B0, 0x2219, 0x00B7, 0x221A, 0x207F, 0x00B2, 0x25A0, 0x00A0
+];
+
+VT100.prototype.DirectToFontMap = [
+0xF000, 0xF001, 0xF002, 0xF003, 0xF004, 0xF005, 0xF006, 0xF007,
+0xF008, 0xF009, 0xF00A, 0xF00B, 0xF00C, 0xF00D, 0xF00E, 0xF00F,
+0xF010, 0xF011, 0xF012, 0xF013, 0xF014, 0xF015, 0xF016, 0xF017,
+0xF018, 0xF019, 0xF01A, 0xF01B, 0xF01C, 0xF01D, 0xF01E, 0xF01F,
+0xF020, 0xF021, 0xF022, 0xF023, 0xF024, 0xF025, 0xF026, 0xF027,
+0xF028, 0xF029, 0xF02A, 0xF02B, 0xF02C, 0xF02D, 0xF02E, 0xF02F,
+0xF030, 0xF031, 0xF032, 0xF033, 0xF034, 0xF035, 0xF036, 0xF037,
+0xF038, 0xF039, 0xF03A, 0xF03B, 0xF03C, 0xF03D, 0xF03E, 0xF03F,
+0xF040, 0xF041, 0xF042, 0xF043, 0xF044, 0xF045, 0xF046, 0xF047,
+0xF048, 0xF049, 0xF04A, 0xF04B, 0xF04C, 0xF04D, 0xF04E, 0xF04F,
+0xF050, 0xF051, 0xF052, 0xF053, 0xF054, 0xF055, 0xF056, 0xF057,
+0xF058, 0xF059, 0xF05A, 0xF05B, 0xF05C, 0xF05D, 0xF05E, 0xF05F,
+0xF060, 0xF061, 0xF062, 0xF063, 0xF064, 0xF065, 0xF066, 0xF067,
+0xF068, 0xF069, 0xF06A, 0xF06B, 0xF06C, 0xF06D, 0xF06E, 0xF06F,
+0xF070, 0xF071, 0xF072, 0xF073, 0xF074, 0xF075, 0xF076, 0xF077,
+0xF078, 0xF079, 0xF07A, 0xF07B, 0xF07C, 0xF07D, 0xF07E, 0xF07F,
+0xF080, 0xF081, 0xF082, 0xF083, 0xF084, 0xF085, 0xF086, 0xF087,
+0xF088, 0xF089, 0xF08A, 0xF08B, 0xF08C, 0xF08D, 0xF08E, 0xF08F,
+0xF090, 0xF091, 0xF092, 0xF093, 0xF094, 0xF095, 0xF096, 0xF097,
+0xF098, 0xF099, 0xF09A, 0xF09B, 0xF09C, 0xF09D, 0xF09E, 0xF09F,
+0xF0A0, 0xF0A1, 0xF0A2, 0xF0A3, 0xF0A4, 0xF0A5, 0xF0A6, 0xF0A7,
+0xF0A8, 0xF0A9, 0xF0AA, 0xF0AB, 0xF0AC, 0xF0AD, 0xF0AE, 0xF0AF,
+0xF0B0, 0xF0B1, 0xF0B2, 0xF0B3, 0xF0B4, 0xF0B5, 0xF0B6, 0xF0B7,
+0xF0B8, 0xF0B9, 0xF0BA, 0xF0BB, 0xF0BC, 0xF0BD, 0xF0BE, 0xF0BF,
+0xF0C0, 0xF0C1, 0xF0C2, 0xF0C3, 0xF0C4, 0xF0C5, 0xF0C6, 0xF0C7,
+0xF0C8, 0xF0C9, 0xF0CA, 0xF0CB, 0xF0CC, 0xF0CD, 0xF0CE, 0xF0CF,
+0xF0D0, 0xF0D1, 0xF0D2, 0xF0D3, 0xF0D4, 0xF0D5, 0xF0D6, 0xF0D7,
+0xF0D8, 0xF0D9, 0xF0DA, 0xF0DB, 0xF0DC, 0xF0DD, 0xF0DE, 0xF0DF,
+0xF0E0, 0xF0E1, 0xF0E2, 0xF0E3, 0xF0E4, 0xF0E5, 0xF0E6, 0xF0E7,
+0xF0E8, 0xF0E9, 0xF0EA, 0xF0EB, 0xF0EC, 0xF0ED, 0xF0EE, 0xF0EF,
+0xF0F0, 0xF0F1, 0xF0F2, 0xF0F3, 0xF0F4, 0xF0F5, 0xF0F6, 0xF0F7,
+0xF0F8, 0xF0F9, 0xF0FA, 0xF0FB, 0xF0FC, 0xF0FD, 0xF0FE, 0xF0FF
+];
+
+VT100.prototype.ctrlAction = [
+ true, false, false, false, false, false, false, true,
+ true, true, true, true, true, true, true, true,
+ false, false, false, false, false, false, false, false,
+ true, false, true, true, false, false, false, false
+];
+
+VT100.prototype.ctrlAlways = [
+ true, false, false, false, false, false, false, false,
+ true, false, true, false, true, true, true, true,
+ false, false, false, false, false, false, false, false,
+ false, false, false, true, false, false, false, false
+];
+
+
--- /dev/null
+#vt100 a {
+ text-decoration: none;
+ color: inherit;
+}
+
+#vt100 a:hover {
+ text-decoration: underline;
+}
+
+#vt100 #reconnect {
+ position: absolute;
+ z-index: 2;
+}
+
+#vt100 #reconnect input {
+ padding: 1ex;
+ font-weight: bold;
+ font-size: x-large;
+}
+
+#vt100 #cursize {
+ background: #EEEEEE;
+ border: 1px solid black;
+ font-family: sans-serif;
+ font-size: large;
+ font-weight: bold;
+ padding: 1ex;
+ position: absolute;
+ z-index: 2;
+}
+
+#vt100 pre {
+ margin: 0px;
+}
+
+#vt100 pre pre {
+ overflow: hidden;
+}
+
+#vt100 #scrollable {
+ overflow-x: hidden;
+ overflow-y: scroll;
+ position: relative;
+ padding: 1px;
+}
+
+#vt100 #console, #vt100 #alt_console, #vt100 #cursor, #vt100 #lineheight, #vt100 .hidden pre {
+ font-family: "DejaVu Sans Mono", "Everson Mono", FreeMono, "Andale Mono", monospace;
+}
+
+#vt100 #lineheight {
+ position: absolute;
+ visibility: hidden;
+}
+
+#vt100 #cursor {
+ position: absolute;
+ left: 0px;
+ top: 0px;
+ overflow: hidden;
+ z-index: 1;
+}
+
+#vt100 #cursor.bright {
+ background-color: black;
+ color: white;
+}
+
+#vt100 #cursor.dim {
+ visibility: hidden;
+}
+
+#vt100 #cursor.inactive {
+ border: 1px solid;
+ margin: -1px;
+}
+
+#vt100 #padding {
+ visibility: hidden;
+ width: 1px;
+ height: 0px;
+ overflow: hidden;
+}
+
+#vt100 .hidden {
+ position: absolute;
+ top: -10000px;
+ left: -10000px;
+ width: 0px;
+ height: 0px;
+}
+
+#vt100 #menu {
+ overflow: visible;
+ position: absolute;
+ z-index: 3;
+}
+
+#vt100 #menu .popup {
+ background-color: #EEEEEE;
+ border: 1px solid black;
+ font-family: sans-serif;
+ position: absolute;
+}
+
+#vt100 #menu .popup ul {
+ list-style-type: none;
+ padding: 0px;
+ margin: 0px;
+ min-width: 10em;
+}
+
+#vt100 #menu .popup li {
+ padding: 3px 0.5ex 3px 0.5ex;
+}
+
+#vt100 #menu .popup li.hover {
+ background-color: #444444;
+ color: white;
+}
+
+#vt100 #menu .popup li.disabled {
+ color: #AAAAAA;
+}
+
+#vt100 #menu .popup hr {
+ margin: 0.5ex 0px 0.5ex 0px;
+}
+
+#vt100 #menu img {
+ margin-right: 0.5ex;
+ width: 1ex;
+ height: 1ex;
+}
+
+#vt100 #scrollable.inverted { color: #ffffff;
+ background-color: #000000; }
+
+#vt100 #kbd_button {
+ float: left;
+ position: fixed;
+ z-index: 0;
+ visibility: hidden;
+}
+
+#vt100 #keyboard {
+ z-index: 3;
+ position: absolute;
+}
+
+#vt100 #keyboard .box {
+ font-family: sans-serif;
+ background-color: #cccccc;
+ padding: .8em;
+ float: left;
+ position: absolute;
+ border-radius: 10px;
+ -moz-border-radius: 10px;
+ box-shadow: 4px 4px 6px #222222;
+ -webkit-box-shadow: 4px 4px 6px #222222;
+ /* Don't set the -moz-box-shadow. It doesn't properly scale when CSS
+ * transforms are in effect. Once Firefox supports box-shadow, it should
+ * automatically do the right thing. Until then, leave shadows disabled
+ * for Firefox.
+ */
+ opacity: 0.85;
+ -moz-opacity: 0.85;
+ filter: alpha(opacity=85);
+}
+
+#vt100 #keyboard .box * {
+ vertical-align: top;
+ display: inline-block;
+}
+
+#vt100 #keyboard b, #vt100 #keyboard i, #vt100 #keyboard s, #vt100 #keyboard u {
+ font-style: normal;
+ font-weight: bold;
+ border-radius: 5px;
+ -moz-border-radius: 5px;
+ background-color: #555555;
+ color: #eeeeee;
+ box-shadow: 2px 2px 3px #222222;
+ -webkit-box-shadow: 2px 2px 3px #222222;
+ padding: 4px;
+ margin: 2px;
+ height: 2ex;
+ display: inline-block;
+ text-align: center;
+ text-decoration: none;
+}
+
+#vt100 #keyboard b, #vt100 #keyboard s {
+ width: 2ex;
+}
+
+#vt100 #keyboard u, #vt100 #keyboard s {
+ visibility: hidden;
+}
+
+#vt100 #keyboard .shifted {
+ display: none;
+}
+
+#vt100 #keyboard .selected {
+ color: #888888;
+ background-color: #eeeeee;
+ box-shadow: 0px 0px 3px #222222;
+ -webkit-box-shadow: 0px 0px 3px #222222;
+ position: relative;
+ top: 1px;
+ left: 1px;
+}
+
+[if DEFINES_COLORS]
+/* IE cannot properly handle "inherit" properties. So, the monochrome.css/
+ * color.css style sheets cannot work, if we define colors in styles.css.
+ */
+[else DEFINES_COLORS]
+#vt100 .ansi0 { }
+#vt100 .ansi1 { color: #cd0000; }
+#vt100 .ansi2 { color: #00cd00; }
+#vt100 .ansi3 { color: #cdcd00; }
+#vt100 .ansi4 { color: #0000ee; }
+#vt100 .ansi5 { color: #cd00cd; }
+#vt100 .ansi6 { color: #00cdcd; }
+#vt100 .ansi7 { color: #e5e5e5; }
+#vt100 .ansi8 { color: #7f7f7f; }
+#vt100 .ansi9 { color: #ff0000; }
+#vt100 .ansi10 { color: #00ff00; }
+#vt100 .ansi11 { color: #e8e800; }
+#vt100 .ansi12 { color: #5c5cff; }
+#vt100 .ansi13 { color: #ff00ff; }
+#vt100 .ansi14 { color: #00ffff; }
+#vt100 .ansi15 { color: #ffffff; }
+
+#vt100 .bgAnsi0 { background-color: #000000; }
+#vt100 .bgAnsi1 { background-color: #cd0000; }
+#vt100 .bgAnsi2 { background-color: #00cd00; }
+#vt100 .bgAnsi3 { background-color: #cdcd00; }
+#vt100 .bgAnsi4 { background-color: #0000ee; }
+#vt100 .bgAnsi5 { background-color: #cd00cd; }
+#vt100 .bgAnsi6 { background-color: #00cdcd; }
+#vt100 .bgAnsi7 { background-color: #e5e5e5; }
+#vt100 .bgAnsi8 { background-color: #7f7f7f; }
+#vt100 .bgAnsi9 { background-color: #ff0000; }
+#vt100 .bgAnsi10 { background-color: #00ff00; }
+#vt100 .bgAnsi11 { background-color: #e8e800; }
+#vt100 .bgAnsi12 { background-color: #5c5cff; }
+#vt100 .bgAnsi13 { background-color: #ff00ff; }
+#vt100 .bgAnsi14 { background-color: #00ffff; }
+#vt100 .bgAnsi15 { }
+[endif DEFINES_COLORS]
+
+@media print {
+ #vt100 .scrollback {
+ display: none;
+ }
+
+ #vt100 #reconnect, #vt100 #cursor, #vt100 #menu, #vt100 #kbd_button, #vt100 #keyboard {
+ visibility: hidden;
+ }
+
+ #vt100 #scrollable {
+ overflow: hidden;
+ }
+
+ #vt100 #console, #vt100 #alt_console {
+ overflow: hidden;
+ width: 1000000ex;
+ }
+}
assert_equal([['.', 'foo', 3]], assigns(:object).files)
end
end
+
+ test 'Edit name and verify that a duplicate is not created' do
+ @controller = ProjectsController.new
+ project = api_fixture("groups")["aproject"]
+ post :update, {
+ id: project["uuid"],
+ project: {
+ name: 'test name'
+ },
+ format: :json
+ }, session_for(:active)
+ assert_includes @response.body, 'test name'
+ updated = assigns(:object)
+ assert_equal updated.uuid, project["uuid"]
+ assert_equal 'test name', updated.name
+ end
end
assert_equal files.sort, disabled.sort, "Expected to see all collection files in disabled list of files"
end
+
+ test "anonymous user accesses collection in shared project" do
+ Rails.configuration.anonymous_user_token =
+ api_fixture('api_client_authorizations')['anonymous']['api_token']
+ collection = api_fixture('collections')['public_text_file']
+ get(:show, {id: collection['uuid']})
+
+ response_object = assigns(:object)
+ assert_equal collection['name'], response_object['name']
+ assert_equal collection['uuid'], response_object['uuid']
+ assert_includes @response.body, 'Hello world'
+ assert_includes @response.body, 'Content address'
+ refute_nil css_select('[href="#Advanced"]')
+ end
+
+ test "can view empty collection" do
+ get :show, {id: 'd41d8cd98f00b204e9800998ecf8427e+0'}, session_for(:active)
+ assert_includes @response.body, 'The following collections have this content'
+ end
+
+ test "collection portable data hash redirect" do
+ di = api_fixture('collections')['docker_image']
+ get :show, {id: di['portable_data_hash']}, session_for(:active)
+ assert_match /\/collections\/#{di['uuid']}/, @response.redirect_url
+ end
+
+ test "collection portable data hash with multiple matches" do
+ pdh = api_fixture('collections')['foo_file']['portable_data_hash']
+ get :show, {id: pdh}, session_for(:admin)
+ matches = api_fixture('collections').select {|k,v| v["portable_data_hash"] == pdh}
+ assert matches.size > 1
+
+ matches.each do |k,v|
+ assert_match /href="\/collections\/#{v['uuid']}">.*#{v['name']}<\/a>/, @response.body
+ end
+
+ assert_includes @response.body, 'The following collections have this content:'
+ assert_not_includes @response.body, 'more results are not shown'
+ assert_not_includes @response.body, 'Activity'
+ assert_not_includes @response.body, 'Sharing and permissions'
+ end
+
+ test "collection page renders name" do
+ collection = api_fixture('collections')['foo_file']
+ get :show, {id: collection['uuid']}, session_for(:active)
+ assert_includes @response.body, collection['name']
+ assert_match /href="#{collection['uuid']}\/foo" ><\/i> foo</, @response.body
+ end
+
+ test "No Upload tab on non-writable collection" do
+ get :show, {id: api_fixture('collections')['user_agreement']['uuid']}, session_for(:active)
+ assert_not_includes @response.body, '<a href="#Upload"'
+ end
end
require 'test_helper'
class JobsControllerTest < ActionController::TestCase
+ test "visit jobs index page" do
+ get :index, {}, session_for(:active)
+ assert_response :success
+ end
end
project_names = assigns(:objects).collect(&:name)
assert_includes project_names, 'Unrestricted public data'
assert_not_includes project_names, 'A Project'
+ refute_empty css_select('[href="/projects/public"]')
end
end
assert_response 404
end
+ test "visit public projects page when anon config is enabled but public projects page is disabled as active user and expect 404" do
+ Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+ Rails.configuration.enable_public_projects_page = false
+ get :public, {}, session_for(:active)
+ assert_response 404
+ end
+
test "visit public projects page when anon config is not enabled as anonymous and expect login page" do
get :public
assert_response :redirect
assert_match /\/users\/welcome/, @response.redirect_url
+ assert_empty css_select('[href="/projects/public"]')
+ end
+
+ test "visit public projects page when anon config is enabled and public projects page is disabled and expect login page" do
+ Rails.configuration.anonymous_user_token = api_fixture('api_client_authorizations')['anonymous']['api_token']
+ Rails.configuration.enable_public_projects_page = false
+ get :index
+ assert_response :redirect
+ assert_match /\/users\/welcome/, @response.redirect_url
+ assert_empty css_select('[href="/projects/public"]')
+ end
+
+ test "visit public projects page when anon config is not enabled and public projects page is enabled and expect login page" do
+ Rails.configuration.enable_public_projects_page = true
+ get :index
+ assert_response :redirect
+ assert_match /\/users\/welcome/, @response.redirect_url
+ assert_empty css_select('[href="/projects/public"]')
+ end
+
+ test "find a project and edit its description" do
+ project = api_fixture('groups')['aproject']
+ use_token :active
+ found = Group.find(project['uuid'])
+ found.description = 'test description update'
+ found.save!
+ get(:show, {id: project['uuid']}, session_for(:active))
+ assert_includes @response.body, 'test description update'
+ end
+
+ test "find a project and edit description to textile description" do
+ project = api_fixture('groups')['aproject']
+ use_token :active
+ found = Group.find(project['uuid'])
+ found.description = '*test bold description for textile formatting*'
+ found.save!
+ get(:show, {id: project['uuid']}, session_for(:active))
+ assert_includes @response.body, '<strong>test bold description for textile formatting</strong>'
+ end
+
+ test "find a project and edit description to html description" do
+ project = api_fixture('groups')['aproject']
+ use_token :active
+ found = Group.find(project['uuid'])
+ found.description = 'Textile description with link to home page <a href="/">take me home</a>.'
+ found.save!
+ get(:show, {id: project['uuid']}, session_for(:active))
+ assert_includes @response.body, 'Textile description with link to home page <a href="/">take me home</a>.'
+ end
+
+ test "find a project and edit description to textile description with link to object" do
+ project = api_fixture('groups')['aproject']
+ use_token :active
+ found = Group.find(project['uuid'])
+
+ # uses 'Link to object' as a hyperlink for the object
+ found.description = '"Link to object":' + api_fixture('groups')['asubproject']['uuid']
+ found.save!
+ get(:show, {id: project['uuid']}, session_for(:active))
+
+ # check that input was converted to textile, not staying as inputted
+ refute_includes @response.body,'"Link to object"'
+ refute_empty css_select('[href="/groups/zzzzz-j7d0g-axqo7eu9pwvna1x"]')
+ end
+
+ test "project viewer can't see project sharing tab" do
+ project = api_fixture('groups')['aproject']
+ get(:show, {id: project['uuid']}, session_for(:project_viewer))
+ refute_includes @response.body, '<div id="Sharing"'
+ assert_includes @response.body, '<div id="Data_collections"'
+ end
+
+ [
+ 'admin',
+ 'active',
+ ].each do |username|
+ test "#{username} can see project sharing tab" do
+ project = api_fixture('groups')['aproject']
+ get(:show, {id: project['uuid']}, session_for(username))
+ assert_includes @response.body, '<div id="Sharing"'
+ assert_includes @response.body, '<div id="Data_collections"'
+ end
+ end
+
+ [
+ ['admin',true],
+ ['active',true],
+ ['project_viewer',false],
+ ].each do |user, can_move|
+ test "#{user} can move subproject from project #{can_move}" do
+ get(:show, {id: api_fixture('groups')['aproject']['uuid']}, session_for(user))
+ if can_move
+ assert_includes @response.body, 'Move project...'
+ else
+ refute_includes @response.body, 'Move project...'
+ end
+ end
+ end
+
+ [
+ ["jobs", "/jobs"],
+ ["pipelines", "/pipeline_instances"],
+ ["collections", "/collections"],
+ ].each do |target,path|
+ test "test dashboard button all #{target}" do
+ get :index, {}, session_for(:active)
+ assert_includes @response.body, "href=\"#{path}\""
+ assert_includes @response.body, "All #{target}"
+ end
end
end
require 'test_helper'
class UsersControllerTest < ActionController::TestCase
+
test "valid token works in controller test" do
get :index, {}, session_for(:active)
assert_response :success
end
assert_equal 1, found_email, "Expected 1 email after requesting shell access"
end
+
+ [
+ 'admin',
+ 'active',
+ ].each do |username|
+ test "access users page as #{username} and verify show button is available" do
+ admin_user = api_fixture('users','admin')
+ active_user = api_fixture('users','active')
+ get :index, {}, session_for(username)
+ if username == 'admin'
+ assert_match /<a href="\/projects\/#{admin_user['uuid']}">Home<\/a>/, @response.body
+ assert_match /<a href="\/projects\/#{active_user['uuid']}">Home<\/a>/, @response.body
+ assert_match /href="\/users\/#{admin_user['uuid']}" title="show user"><i class="fa fa-fw fa-user"><\/i> Show<\/a/, @response.body
+ assert_match /href="\/users\/#{active_user['uuid']}" title="show user"><i class="fa fa-fw fa-user"><\/i> Show<\/a/, @response.body
+ assert_includes @response.body, admin_user['email']
+ assert_includes @response.body, active_user['email']
+ else
+ refute_match /Home<\/a>/, @response.body
+ refute_match /href="\/users\/#{admin_user['uuid']}" title="show user"><i class="fa fa-fw fa-user"><\/i> Show<\/a/, @response.body
+ assert_match /href="\/users\/#{active_user['uuid']}" title="show user"><i class="fa fa-fw fa-user"><\/i> Show<\/a/, @response.body
+ assert_includes @response.body, active_user['email']
+ end
+ end
+ end
+
+ [
+ 'admin',
+ 'active',
+ ].each do |username|
+ test "access settings drop down menu as #{username}" do
+ admin_user = api_fixture('users','admin')
+ active_user = api_fixture('users','active')
+ get :show, {
+ id: api_fixture('users')[username]['uuid']
+ }, session_for(username)
+ if username == 'admin'
+ assert_includes @response.body, admin_user['email']
+ refute_empty css_select('[id="system-menu"]')
+ else
+ assert_includes @response.body, active_user['email']
+ assert_empty css_select('[id="system-menu"]')
+ end
+ end
+ end
end
["filename.yml", true],
["filename.bam", false],
+ ["filename.tar", false],
["filename", false],
].each do |file_name, preview_allowed|
test "verify '#{file_name}' is allowed for preview #{preview_allowed}" do
end
end
end
-
- test "anonymous user accesses collection in shared project" do
- visit "/collections/#{api_fixture('collections')['public_text_file']['uuid']}"
-
- # in collection page
- assert_text 'Public Projects Unrestricted public data'
- assert_text 'Hello world'
- assert_text 'Content address'
- assert_selector 'a', text: 'Provenance graph'
- end
end
assert page.has_link?('Report a problem ...'), 'No link - Report a problem'
end
end
+
+ test "no SSH public key notification when shell_in_a_box_url is configured" do
+ Rails.configuration.shell_in_a_box_url = 'example.com'
+ visit page_with_token('job_reader')
+ click_link 'notifications-menu'
+ assert_no_selector 'a', text:'Click here to set up an SSH public key for use with Arvados.'
+ assert_selector 'a', text:'Click here to learn how to run an Arvados Crunch pipeline'
+ end
+
+ [
+ ['Repositories','repository','Attributes'],
+ ['Virtual machines','virtual machine','current_user_logins'],
+ ['SSH keys','authorized key','public_key'],
+ ['Links','link','link_class'],
+ ['Groups','group','group_class'],
+ ['Compute nodes','node','info[ping_secret'],
+ ['Keep services','keep service','service_ssl_flag'],
+ ['Keep disks', 'keep disk','bytes_free'],
+ ].each do |page_name, add_button_text, look_for|
+ test "test system menu #{page_name} link" do
+ skip 'Skip repositories test until #6652 is fixed.' if page_name == 'Repositories'
+
+ visit page_with_token('admin')
+ within('.navbar-fixed-top') do
+ page.find("#system-menu").click
+ within('.dropdown-menu') do
+ assert_selector 'a', text: page_name
+ find('a', text: page_name).click
+ end
+ end
+
+ # click the add button
+ assert_selector 'button', text: "Add a new #{add_button_text}"
+ find('button', text: "Add a new #{add_button_text}").click
+
+ # look for unique property in the created object page
+ assert page.has_text? look_for
+ end
+ end
end
assert_selector 'div#Upload.active div.panel'
end
- test "No Upload tab on non-writable collection" do
- need_javascript
- visit(page_with_token 'active',
- '/collections/'+api_fixture('collections')['user_agreement']['uuid'])
- assert_no_selector '.nav-tabs Upload'
- end
-
test "Upload two empty files with the same name" do
need_selenium "to make file uploads work"
visit page_with_token 'active', sandbox_path
assert_text "Copy of #{collection_name}"
end
- test "Collection page renders name" do
- Capybara.current_driver = :rack_test
- uuid = api_fixture('collections')['foo_file']['uuid']
- coll_name = api_fixture('collections')['foo_file']['name']
- visit page_with_token('active', "/collections/#{uuid}")
- assert(page.has_text?(coll_name), "Collection page did not include name")
- # Now check that the page is otherwise normal, and the collection name
- # isn't only showing up in an error message.
- assert(page.has_link?('foo'), "Collection page did not include file link")
- end
-
def check_sharing(want_state, link_regexp)
# We specifically want to click buttons. See #4291.
if want_state == :off
end
end
- test "can view empty collection" do
- Capybara.current_driver = :rack_test
- uuid = 'd41d8cd98f00b204e9800998ecf8427e+0'
- visit page_with_token('active', "/collections/#{uuid}")
- assert page.has_text?(/This collection is empty|The following collections have this content/)
- end
-
test "combine selected collections into new collection" do
foo_collection = api_fixture('collections')['foo_file']
bar_collection = api_fixture('collections')['bar_file']
assert(page.has_text?('file2_in_subdir4.txt'), 'file not found - file1_in_subdir4.txt')
end
- test "Collection portable data hash redirect" do
- di = api_fixture('collections')['docker_image']
- visit page_with_token('active', "/collections/#{di['portable_data_hash']}")
-
- # check redirection
- assert current_path.end_with?("/collections/#{di['uuid']}")
- assert page.has_text?("docker_image")
- assert page.has_text?("Activity")
- assert page.has_text?("Sharing and permissions")
- end
-
- test "Collection portable data hash with multiple matches" do
- pdh = api_fixture('collections')['foo_file']['portable_data_hash']
- visit page_with_token('admin', "/collections/#{pdh}")
-
- matches = api_fixture('collections').select {|k,v| v["portable_data_hash"] == pdh}
- assert matches.size > 1
-
- matches.each do |k,v|
- assert page.has_link?(v["name"]), "Page /collections/#{pdh} should contain link '#{v['name']}'"
- end
- assert_text 'The following collections have this content:'
- assert_no_text 'more results are not shown'
- assert_no_text 'Activity'
- assert_no_text 'Sharing and permissions'
- end
-
test "Collection portable data hash with multiple matches with more than one page of results" do
pdh = api_fixture('collections')['baz_file']['portable_data_hash']
visit page_with_token('admin', "/collections/#{pdh}")
page_text = page.text
if run_time
- match = /This pipeline started at (.*)\. It failed after (.*) seconds at (.*)\. Check the Log/.match page_text
+ match = /This pipeline started at (.*)\. It failed after (.*) at (.*)\. Check the Log/.match page_text
else
match = /This pipeline started at (.*). It has been active for(.*)/.match page_text
end
"Description update did not survive page refresh")
end
- test 'Find a project and edit description to textile description' do
- visit page_with_token 'active', '/'
- find("#projects-menu").click
- find(".dropdown-menu a", text: "A Project").click
- within('.container-fluid', text: api_fixture('groups')['aproject']['name']) do
- find('span', text: api_fixture('groups')['aproject']['name']).click
- within('.arv-description-as-subtitle') do
- find('.fa-pencil').click
- find('.editable-input textarea').set('<p>*Textile description for A project* - "take me home":/ </p><p>And a new paragraph in description.</p>')
- find('.editable-submit').click
- end
- wait_for_ajax
- end
-
- # visit project page
- visit current_path
- assert_no_text '*Textile description for A project*'
- assert(find?('.container-fluid', text: 'Textile description for A project'),
- "Description update did not survive page refresh")
- assert(find?('.container-fluid', text: 'And a new paragraph in description'),
- "Description did not contain the expected new paragraph")
- assert(page.has_link?("take me home"), "link not found in description")
-
- click_link 'take me home'
-
- # now in dashboard
- assert(page.has_text?('Active pipelines'), 'Active pipelines - not found on dashboard')
- end
-
- test 'Find a project and edit description to html description' do
- visit page_with_token 'active', '/'
- find("#projects-menu").click
- find(".dropdown-menu a", text: "A Project").click
- within('.container-fluid', text: api_fixture('groups')['aproject']['name']) do
- find('span', text: api_fixture('groups')['aproject']['name']).click
- within('.arv-description-as-subtitle') do
- find('.fa-pencil').click
- find('.editable-input textarea').set('<br>Textile description for A project</br> - <a href="/">take me home</a>')
- find('.editable-submit').click
- end
- wait_for_ajax
- end
- visit current_path
- assert(find?('.container-fluid', text: 'Textile description for A project'),
- "Description update did not survive page refresh")
- assert(!find?('.container-fluid', text: '<br>Textile description for A project</br>'),
- "Textile description is displayed with uninterpreted formatting characters")
- assert(page.has_link?("take me home"),"link not found in description")
- click_link 'take me home'
- assert page.has_text?('Active pipelines')
- end
-
- test 'Find a project and edit description to textile description with link to object' do
- visit page_with_token 'active', '/'
- find("#projects-menu").click
- find(".dropdown-menu a", text: "A Project").click
- within('.container-fluid', text: api_fixture('groups')['aproject']['name']) do
- find('span', text: api_fixture('groups')['aproject']['name']).click
- within('.arv-description-as-subtitle') do
- find('.fa-pencil').click
- find('.editable-input textarea').set('*Textile description for A project* - "go to sub-project":' + api_fixture('groups')['asubproject']['uuid'] + "'")
- find('.editable-submit').click
- end
- wait_for_ajax
- end
- visit current_path
- assert(find?('.container-fluid', text: 'Textile description for A project'),
- "Description update did not survive page refresh")
- assert(!find?('.container-fluid', text: '*Textile description for A project*'),
- "Textile description is displayed with uninterpreted formatting characters")
- assert(page.has_link?("go to sub-project"), "link not found in description")
- click_link 'go to sub-project'
- assert(page.has_text?(api_fixture('groups')['asubproject']['name']), 'sub-project name not found after clicking link')
- end
-
- test 'Add a new name, then edit it, without creating a duplicate' do
- project_uuid = api_fixture('groups')['aproject']['uuid']
- specimen_uuid = api_fixture('traits')['owned_by_aproject_with_no_name']['uuid']
- visit page_with_token 'active', '/projects/' + project_uuid
- click_link 'Other objects'
- within '.selection-action-container' do
- # Wait for the tab to load:
- assert_selector 'tr[data-kind="arvados#trait"]'
- within first('tr', text: 'Trait') do
- find(".fa-pencil").click
- find('.editable-input input').set('Now I have a name.')
- find('.glyphicon-ok').click
- assert_selector '.editable', text: 'Now I have a name.'
- find(".fa-pencil").click
- find('.editable-input input').set('Now I have a new name.')
- find('.glyphicon-ok').click
- end
- wait_for_ajax
- assert_selector '.editable', text: 'Now I have a new name.'
- end
- visit current_path
- click_link 'Other objects'
- within '.selection-action-container' do
- find '.editable', text: 'Now I have a new name.'
- assert_no_selector '.editable', text: 'Now I have a name.'
- end
- end
-
test 'Create a project and move it into a different project' do
visit page_with_token 'active', '/projects'
find("#projects-menu").click
text: group_name("anonymous_group"))
end
- test "project viewer can't see project sharing tab" do
- show_object_using('project_viewer', 'groups', 'aproject', 'A Project')
- assert(page.has_no_link?("Sharing"),
- "read-only project user sees sharing tab")
- end
-
test "project owner can manage sharing for another user" do
add_user = api_fixture('users')['future_project_user']
new_name = ["first_name", "last_name"].map { |k| add_user[k] }.join(" ")
end
end
- [
- ["jobs", "/jobs"],
- ["pipelines", "/pipeline_instances"],
- ["collections", "/collections"]
- ].each do |target,path|
- test "Test dashboard button all #{target}" do
- visit page_with_token 'active', '/'
- click_link "All #{target}"
- assert_equal path, current_path
- end
- end
-
def scroll_setup(project_name,
total_nbr_items,
item_list_parameter,
end
end
- # Move button accessibility
- [
- ['admin', true],
- ['active', true], # project owner
- ['project_viewer', false],
- ].each do |user, can_move|
- test "#{user} can move subproject under another user's Home #{can_move}" do
- project = api_fixture('groups')['aproject']
- collection = api_fixture('collections')['collection_to_move_around_in_aproject']
-
- # verify the project move button
- visit page_with_token user, "/projects/#{project['uuid']}"
- if can_move
- assert page.has_link? 'Move project...'
- else
- assert page.has_no_link? 'Move project...'
- end
- end
- end
-
test "error while loading tab" do
original_arvados_v1_base = Rails.configuration.arvados_v1_base
page.find_field('public_key').set 'first test with an incorrect ssh key value'
click_button 'Submit'
- assert page.has_text?('Public key does not appear to be a valid ssh-rsa or dsa public key'), 'No text - Public key does not appear to be a valid'
+ assert_text 'Public key does not appear to be a valid ssh-rsa or dsa public key'
public_key_str = api_fixture('authorized_keys')['active']['public_key']
page.find_field('public_key').set public_key_str
page.find_field('name').set 'added_in_test'
click_button 'Submit'
- assert page.has_text?('Public key already exists in the database, use a different key.'), 'No text - Public key already exists'
+ assert_text 'Public key already exists in the database, use a different key.'
new_key = SSHKey.generate
page.find_field('public_key').set new_key.ssh_public_key
end
# key must be added. look for it in the refreshed page
- assert page.has_text?('added_in_test'), 'No text - added_in_test'
+ assert_text 'added_in_test'
end
[
click_on "Create"
end
assert_text ":active/workbenchtest.git"
+ assert_match /git@git.*:active\/workbenchtest.git/, page.text
+ assert_match /https:\/\/git.*\/active\/workbenchtest.git/, page.text
end
end
within '.modal-content' do
find 'label', text: 'Virtual Machine'
fill_in "email", :with => "foo@example.com"
- fill_in "repo_name", :with => "newtestrepo"
click_button "Submit"
wait_for_ajax
end
click_link 'Advanced'
click_link 'Metadata'
- assert page.has_text? 'Repository: foo/newtestrepo'
assert !(page.has_text? 'VirtualMachine:')
end
click_link 'Admin'
assert page.has_text? 'As an admin, you can setup'
- click_link 'Setup Active User'
+ click_link 'Setup shell account for Active User'
within '.modal-content' do
find 'label', text: 'Virtual Machine'
- fill_in "repo_name", :with => "activetestrepo"
click_button "Submit"
end
click_link 'Advanced'
click_link 'Metadata'
- assert page.has_text? 'Repository: active/activetestrepo'
vm_links = all("a", text: "VirtualMachine:")
assert_equal(1, vm_links.size)
assert_equal("VirtualMachine: testvm2.shell", vm_links.first.text)
# Click on Setup button again and this time also choose a VM
click_link 'Admin'
- click_link 'Setup Active User'
+ click_link 'Setup shell account for Active User'
within '.modal-content' do
- fill_in "repo_name", :with => "activetestrepo2"
select("testvm.shell", :from => 'vm_uuid')
fill_in "groups", :with => "test group one, test-group-two"
click_button "Submit"
click_link 'Advanced'
click_link 'Metadata'
- assert page.has_text? 'Repository: active/activetestrepo2'
assert page.has_text? 'VirtualMachine: testvm.shell'
assert page.has_text? '["test group one", "test-group-two"]'
end
click_link 'Advanced'
click_link 'Metadata'
- assert page.has_no_text? 'Repository: active/'
assert page.has_no_text? 'VirtualMachine: testvm.shell'
# setup user again and verify links present
click_link 'Admin'
- click_link 'Setup Active User'
+ click_link 'Setup shell account for Active User'
within '.modal-content' do
- fill_in "repo_name", :with => "activetestrepo"
select("testvm.shell", :from => 'vm_uuid')
click_button "Submit"
end
click_link 'Advanced'
click_link 'Metadata'
- assert page.has_text? 'Repository: active/activetestrepo'
assert page.has_text? 'VirtualMachine: testvm.shell'
end
-
- [
- 'admin',
- 'active',
- ].each do |username|
- test "login as #{username} and access show button" do
- need_javascript
-
- user = api_fixture('users', username)
-
- visit page_with_token(username, '/users')
-
- within('tr', text: user['uuid']) do
- assert_text user['email']
- if username == 'admin'
- assert_selector 'a', text: 'Home'
- else
- assert_no_selector 'a', text: 'Home'
- end
- assert_selector 'a', text: 'Show'
- find('a', text: 'Show').click
- end
- assert_selector 'a', text: 'Attributes'
- end
- end
-
- test "admin user can access another user page" do
- need_javascript
-
- visit page_with_token('admin', '/users')
-
- active_user = api_fixture('users', 'active')
- within('tr', text: active_user['uuid']) do
- assert_text active_user['email']
- assert_selector "a[href=\"/projects/#{active_user['uuid']}\"]", text: 'Home'
- assert_selector 'a', text: 'Show'
- find('a', text: 'Show').click
- end
- assert_selector 'a', text:'Attributes'
- end
end
if not args.dry_run:
stdoutfile = open(stdoutname, "wb")
+ if "task.env" in taskp:
+ env = copy.copy(os.environ)
+ for k,v in taskp["task.env"].items():
+ env[k] = subst.do_substitution(taskp, v)
+ else:
+ env = None
+
logger.info("{}{}{}".format(' | '.join([' '.join(c) for c in cmd]), (" < " + stdinname) if stdinname is not None else "", (" > " + stdoutname) if stdoutname is not None else ""))
if args.dry_run:
# this is an intermediate command in the pipeline, so its stdout should go to a pipe
next_stdout = subprocess.PIPE
- sp = subprocess.Popen(cmd[i], shell=False, stdin=next_stdin, stdout=next_stdout)
+ sp = subprocess.Popen(cmd[i], shell=False, stdin=next_stdin, stdout=next_stdout, env=env)
# Need to close the FDs on our side so that subcommands will get SIGPIPE if the
# consuming process ends prematurely.
- user/getting_started/workbench.html.textile.liquid
- user/tutorials/tutorial-pipeline-workbench.html.textile.liquid
- Access an Arvados virtual machine:
+ - user/getting_started/vm-login-with-webshell.html.textile.liquid
- user/getting_started/ssh-access-unix.html.textile.liquid
- user/getting_started/ssh-access-windows.html.textile.liquid
- user/getting_started/check-environment.html.textile.liquid
- user/tutorials/tutorial-keep-get.html.textile.liquid
- user/tutorials/tutorial-keep-mount.html.textile.liquid
- user/topics/keep.html.textile.liquid
+ - user/topics/arv-copy.html.textile.liquid
- Run a pipeline on the command line:
- user/topics/running-pipeline-command-line.html.textile.liquid
- user/topics/arv-run.html.textile.liquid
+ - Working with Arvados Repositories:
+ - user/tutorials/add-new-repository.html.textile.liquid
- Develop a new pipeline:
- user/tutorials/intro-crunch.html.textile.liquid
- user/tutorials/running-external-program.html.textile.liquid
installguide:
- Overview:
- install/index.html.textile.liquid
- - Docker:
+ - Docker-based installation:
- install/pre-built-docker.html.textile.liquid
- install/install-docker.html.textile.liquid
- Manual installation:
- install/install-manual-prerequisites.html.textile.liquid
+ - install/install-sso.html.textile.liquid
- install/install-api-server.html.textile.liquid
- - install/install-workbench-app.html.textile.liquid
- - install/install-shell-server.html.textile.liquid
- - install/create-standard-objects.html.textile.liquid
+ - install/install-arv-git-httpd.html.textile.liquid
- install/install-keepstore.html.textile.liquid
- install/install-keepproxy.html.textile.liquid
- - install/install-arv-git-httpd.html.textile.liquid
- install/install-crunch-dispatch.html.textile.liquid
- install/install-compute-node.html.textile.liquid
+ - install/install-shell-server.html.textile.liquid
+ - install/create-standard-objects.html.textile.liquid
+ - install/install-workbench-app.html.textile.liquid
- install/cheat_sheet.html.textile.liquid
- - Software prerequisites:
- - install/install-manual-prerequisites-ruby.html.textile.liquid
- - install/install-sso.html.textile.liquid
--- /dev/null
+{% include 'notebox_begin' %}
+As stated above, arv-copy is recursive by default and requires a working git repository in the destination cluster. If you have not yet created a repository, you can follow the "Adding a new repository":{{site.baseurl}}/user/tutorials/add-new-repository.html page. We will use the *tutorial* repository created on that page as the example.
+
+<br/>In addition, arv-copy requires git when copying to a git repository. Please make sure that git is installed and available.
+
+{% include 'notebox_end' %}
--- /dev/null
+<notextile>
+<pre><code>~$ <span class="userinput">sudo /usr/bin/apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7</span>
+</code></pre>
+</notextile>
--- /dev/null
+On a Debian-based system, install the following packages:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install libpq-dev postgresql</span>
+</code></pre>
+</notextile>
+
+On a Red Hat-based system, install the following packages:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install postgresql postgresql-devel</span>
+</code></pre>
+</notextile>
+
+{% include 'notebox_begin' %}
+
+If you intend to use specific versions of these packages from Software Collections, you may have to adapt some of the package names to match. For example:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install postgresql92 postgresql92-postgresql-devel</span></code></pre></notextile>
+
+{% include 'notebox_end' %}
--- /dev/null
+<notextile>
+<pre><code>~$ <span class="userinput">gpg --keyserver pool.sks-keyservers.net --recv-keys 1078ECD7</span>
+~$ <span class="userinput">gpg --armor --export 1078ECD7 >/tmp/curoverse.key</span>
+~$ <span class="userinput">sudo rpm --import /tmp/curoverse.key</span>
+</code></pre>
+</notextile>
--- /dev/null
+Currently, only Ruby 2.1 is supported.
+
+h4(#rvm). *Option 1: Install with RVM*
+
+<notextile>
+<pre><code><span class="userinput">sudo gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
+\curl -sSL https://get.rvm.io | sudo bash -s stable --ruby=2.1
+sudo adduser "$USER" rvm
+</span></code></pre></notextile>
+
+Either log out and log back in to activate RVM, or explicitly load it in all open shells like this:
+
+<notextile>
+<pre><code><span class="userinput">source /usr/local/rvm/scripts/rvm
+</span></code></pre></notextile>
+
+Once RVM is activated in your shell, install Bundler:
+
+<notextile>
+<pre><code>~$ <span class="userinput">gem install bundler</span>
+</code></pre></notextile>
+
+h4(#fromsource). *Option 2: Install from source*
+
+Install prerequisites for Debian 7 or 8:
+
+<notextile>
+<pre><code><span class="userinput">sudo apt-get install \
+ bison build-essential gettext libcurl3 libcurl3-gnutls \
+ libcurl4-openssl-dev libpcre3-dev libreadline-dev \
+ libssl-dev libxslt1.1 zlib1g-dev
+</span></code></pre></notextile>
+
+Install prerequisites for CentOS 6:
+
+<notextile>
+<pre><code><span class="userinput">sudo yum install \
+ libyaml-devel glibc-headers autoconf gcc-c++ glibc-devel \
+ patch readline-devel zlib-devel libffi-devel openssl-devel \
+ automake libtool bison sqlite-devel
+</span></code></pre></notextile>
+
+Install prerequisites for Ubuntu 12.04 or 14.04:
+
+<notextile>
+<pre><code><span class="userinput">sudo apt-get install \
+ gawk g++ gcc make libc6-dev libreadline6-dev zlib1g-dev libssl-dev \
+ libyaml-dev libsqlite3-dev sqlite3 autoconf libgdbm-dev \
+ libncurses5-dev automake libtool bison pkg-config libffi-dev
+</span></code></pre></notextile>
+
+Build and install Ruby:
+
+<notextile>
+<pre><code><span class="userinput">mkdir -p ~/src
+cd ~/src
+curl http://cache.ruby-lang.org/pub/ruby/2.1/ruby-2.1.6.tar.gz | tar xz
+cd ruby-2.1.6
+./configure --no-install-rdoc
+make
+sudo make install
+
+sudo gem install bundler</span>
+</code></pre></notextile>
--- /dev/null
+{% include 'notebox_begin' %}
+
+On older Red Hat-based systems, these packages require the "python27 Software Collection":https://www.softwarecollections.org/en/scls/rhscl/python27/.
+
+{% include 'notebox_end' %}
{% include 'notebox_begin' %}
-This tutorial assumes either that you are logged into an Arvados VM instance (instructions for "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login) or you have installed the Arvados "Command line SDK":{{site.baseurl}}/sdk/cli/install.html and "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation and have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html
+This tutorial assumes that you are logged into an Arvados VM instance (instructions for "Webshell":{{site.baseurl}}/user/getting_started/vm-login-with-webshell.html or "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login) or you have installed the Arvados "Command line SDK":{{site.baseurl}}/sdk/cli/install.html and "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation and have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html
{% include 'notebox_end' %}
title: Install the API server
...
-This installation guide assumes you are on a 64 bit Debian or Ubuntu system.
-
h2. Install prerequisites
-<notextile>
-<pre><code>~$ <span class="userinput">sudo apt-get install \
- bison build-essential gettext libcurl3 libcurl3-gnutls \
- libcurl4-openssl-dev libpcre3-dev libpq-dev libreadline-dev \
- libssl-dev libxslt1.1 postgresql git wget zlib1g-dev
-</span></code></pre></notextile>
+The Arvados package repository includes an API server package that can help automate much of the deployment.
+
+h3(#install_ruby_and_bundler). Install Ruby and Bundler
+
+{% include 'install_ruby_and_bundler' %}
+
+h3(#install_postgres). Install PostgreSQL
+
+{% include 'install_postgres' %}
-Also make sure you have "Ruby and bundler":install-manual-prerequisites-ruby.html installed.
+h3(#build_tools_apiserver). Build tools
-h2. Download the source tree
+On older distributions, you may need to use a backports repository to satisfy these requirements. For example, on older Red Hat-based systems, consider using the "postgresql92":https://www.softwarecollections.org/en/scls/rhscl/postgresql92/ and "nginx16":https://www.softwarecollections.org/en/scls/rhscl/nginx16/ Software Collections.
+
+On a Debian-based system, install the following packages:
<notextile>
-<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
-~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
-</code></pre></notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install bison build-essential libcurl4-openssl-dev git nginx arvados-api-server</span>
+</code></pre>
+</notextile>
-See also: "Downloading the source code":https://arvados.org/projects/arvados/wiki/Download on the Arvados wiki.
+On a Red Hat-based system, install the following packages:
-The API server is in @services/api@ in the source tree.
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install bison make automake gcc gcc-c++ libcurl-devel nginx git arvados-api-server</span>
+</code></pre>
+</notextile>
+
+h2. Set up the database
+
+Generate a new database password. Nobody ever needs to memorize it or type it, so we'll make a strong one:
+
+<notextile>
+<pre><code>~$ <span class="userinput">ruby -e 'puts rand(2**128).to_s(36)'</span>
+6gqa1vu492idd7yca9tfandj3
+</code></pre></notextile>
-h2. Install gem dependencies
+Create a new database user.
<notextile>
-<pre><code>~$ <span class="userinput">cd arvados/services/api</span>
-~/arvados/services/api$ <span class="userinput">bundle install</span>
+<pre><code>~$ <span class="userinput">sudo -u postgres createuser --encrypted -R -S --pwprompt arvados</span>
+[sudo] password for <b>you</b>: <span class="userinput">yourpassword</span>
+Enter password for new role: <span class="userinput">paste-password-you-generated</span>
+Enter it again: <span class="userinput">paste-password-again</span>
</code></pre></notextile>
-h2. Choose your environment
+{% include 'notebox_begin' %}
+
+This user setup assumes that your PostgreSQL is configured to accept password authentication. Red Hat systems use ident-based authentication by default. You may need to either adapt the user creation, or reconfigure PostgreSQL (in @pg_hba.conf@) to accept password authentication.
-The API server can be run in @development@ or in @production@ mode. Unless this installation is going to be used for development on the Arvados API server itself, you should run it in @production@ mode.
+{% include 'notebox_end' %}
-Copy the example environment file for your environment. For example, if you choose @production@:
+Create the database:
<notextile>
-<pre><code>~/arvados/services/api$ <span class="userinput">cp -i config/environments/production.rb.example config/environments/production.rb</span>
-</code></pre></notextile>
+<pre><code>~$ <span class="userinput">sudo -u postgres createdb arvados_production -T template0 -E UTF8 -O arvados</span>
+</code></pre>
+</notextile>
-h2. Configure the API server
+h2. Set up configuration files
-First, copy the example configuration file:
+The API server package uses configuration files that you write to @/etc/arvados/api@ and ensures they're consistently deployed. Create this directory and copy the example configuration files to it:
<notextile>
-<pre><code>~/arvados/services/api$ <span class="userinput">cp -i config/application.yml.example config/application.yml</span>
-</code></pre></notextile>
+<pre><code>~$ <span class="userinput">sudo mkdir -p /etc/arvados/api</span>
+~$ <span class="userinput">sudo chmod 700 /etc/arvados/api</span>
+~$ <span class="userinput">cd /var/www/arvados-api/current</span>
+/var/www/arvados-api/current$ <span class="userinput">sudo cp config/database.yml.sample /etc/arvados/api/database.yml</span>
+/var/www/arvados-api/current$ <span class="userinput">sudo cp config/application.yml.example /etc/arvados/api/application.yml</span>
+</code></pre>
+</notextile>
+
+h2. Configure the database connection
+
+Edit @/etc/arvados/api/database.yml@ and replace the @xxxxxxxx@ database password placeholders with the PostgreSQL password you generated above.
+
+h2. Configure the API server
-The API server reads the @config/application.yml@ file, as well as the @config/application.defaults.yml@ file. Values in @config/application.yml@ take precedence over the defaults that are defined in @config/application.defaults.yml@. The @config/application.yml.example@ file is not read by the API server and is provided for installation convenience, only.
+Edit @/etc/arvados/api/application.yml@ following the instructions below. The deployment script will consistently deploy this to the API server's configuration directory. The API server reads both @application.yml@ and its own @config/application.default.yml@ file. Values in @application.yml@ take precedence over the defaults that are defined in @config/application.default.yml@. The @config/application.yml.example@ file is not read by the API server and is provided for installation convenience only.
-Consult @config/application.default.yml@ for a full list of configuration options. Always put your local configuration in @config/application.yml@, never edit @config/application.default.yml@.
+Always put your local configuration in @application.yml@ instead of editing @application.default.yml@.
h3(#uuid_prefix). uuid_prefix
-Define your @uuid_prefix@ in @config/application.yml@ by setting the @uuid_prefix@ field in the section for your environment. This prefix is used for all database identifiers to identify the record as originating from this site. It must be exactly 5 alphanumeric characters (lowercase ASCII letters and digits).
+Define your @uuid_prefix@ in @application.yml@ by setting the @uuid_prefix@ field in the section for your environment. This prefix is used for all database identifiers to identify the record as originating from this site. It must be exactly 5 alphanumeric characters (lowercase ASCII letters and digits).
h3(#git_repositories_dir). git_repositories_dir
-This field defaults to @/var/lib/arvados/git@. You can override the value by defining it in @config/application.yml@.
+This field defaults to @/var/lib/arvados/git@. You can override the value by defining it in @application.yml@.
Make sure a clone of the arvados repository exists in @git_repositories_dir@.
<notextile>
-<pre><code>~/arvados/services/api$ <span class="userinput">sudo mkdir -p /var/lib/arvados/git</span>
-~/arvados/services/api$ <span class="userinput">sudo git clone --bare ../../.git /var/lib/arvados/git/arvados.git</span>
+<pre><code>~$ <span class="userinput">sudo mkdir -p /var/lib/arvados/git</span>
+~$ <span class="userinput">sudo git clone --bare git://git.curoverse.com/arvados.git /var/lib/arvados/git/arvados.git</span>
</code></pre></notextile>
h3. secret_token
Generate a new secret token for signing cookies:
<notextile>
-<pre><code>~/arvados/services/api$ <span class="userinput">ruby -e 'puts rand(2**400).to_s(36)'</span>
+<pre><code>~$ <span class="userinput">ruby -e 'puts rand(2**400).to_s(36)'</span>
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
</code></pre></notextile>
Fill in the url of your workbench application in @workbench_address@, for example
- https://workbench.@prefix_uuid@.your.domain
+ https://workbench.@uuid_prefix@.your.domain
-h3. other options
+h3(#omniauth). sso_app_id, sso_app_secret, sso_provider_url
-Consult @application.default.yml@ for a full list of configuration options. Always put your local configuration in @application.yml@ instead of editing @application.default.yml@.
+For @sso_app_id@ and @sso_app_secret@, provide the same @app_id@ and @app_secret@ used in the "Create arvados-server client for Single Sign On (SSO)":install-sso.html#client step.
-h2. Set up the database
-
-Generate a new database password. Nobody ever needs to memorize it or type it, so we'll make a strong one:
+For @sso_provider_url@, provide the base URL where your SSO server is installed: just the scheme and host, with no trailing slash.
<notextile>
-<pre><code>~/arvados/services/api$ <span class="userinput">ruby -e 'puts rand(2**128).to_s(36)'</span>
-6gqa1vu492idd7yca9tfandj3
-</code></pre></notextile>
-
-Create a new database user with permission to create its own databases.
-
-<notextile>
-<pre><code>~/arvados/services/api$ <span class="userinput">sudo -u postgres createuser --createdb --encrypted -R -S --pwprompt arvados</span>
-[sudo] password for <b>you</b>: <span class="userinput">yourpassword</span>
-Enter password for new role: <span class="userinput">paste-password-you-generated</span>
-Enter it again: <span class="userinput">paste-password-again</span>
-</code></pre></notextile>
-
-Configure API server to connect to your database by creating and updating @config/database.yml@. Replace the @xxxxxxxx@ database password placeholders with the new password you generated above.
-
-<notextile>
-<pre><code>~/arvados/services/api$ <span class="userinput">cp -i config/database.yml.sample config/database.yml</span>
-~/arvados/services/api$ <span class="userinput">edit config/database.yml</span>
-</code></pre></notextile>
+<pre><code> sso_app_id: arvados-server
+ sso_app_secret: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ sso_provider_url: https://sso.example.com
+</code></pre>
+</notextile>
-Create and initialize the database. If you are planning a production system, choose the @production@ rails environment, otherwise use @development@.
+h3. Other options
-<notextile>
-<pre><code>~/arvados/services/api$ <span class="userinput">RAILS_ENV=production bundle exec rake db:setup</span>
-</code></pre></notextile>
+Consult @/var/www/arvados-api/current/config/application.default.yml@ for a full list of configuration options. (But don't edit it. Edit @application.yml@ instead.)
-Alternatively, if the database user you intend to use for the API server is not allowed to create new databases, you can create the database first and then populate it with rake. Be sure to adjust the database name if you are using the @development@ environment. This sequence of commands is functionally equivalent to the rake db:setup command above.
+h2. Prepare the API server deployment
-<notextile>
-<pre><code>~/arvados/services/api$ <span class="userinput">su postgres createdb arvados_production -E UTF8 -O arvados</span>
-~/arvados/services/api$ <span class="userinput">RAILS_ENV=production bundle exec rake db:structure:load</span>
-~/arvados/services/api$ <span class="userinput">RAILS_ENV=production bundle exec rake db:seed</span>
-</code></pre></notextile>
+Now that all your configuration is in place, run @/usr/local/bin/arvados-api-server-upgrade.sh@. This will install and check your configuration, install necessary gems, and run any necessary database setup.
{% include 'notebox_begin' %}
You can safely ignore the following error message you may see when loading the database structure:
<pre><code>ERROR: must be owner of extension plpgsql</code></pre></notextile>
{% include 'notebox_end' %}
-h2(#omniauth). Set up omniauth
+This command aborts when it encounters an error. It's safe to rerun multiple times, so if there's a problem with your configuration, you can fix that and try again.
+
+h2. Set up Web servers
-First copy the omniauth configuration file:
+For best performance, we recommend you use Nginx as your Web server front-end, with a Passenger backend for the main API server and a Puma backend for API server Websockets. To do that:
<notextile>
-<pre><code>~/arvados/services/api$ <span class="userinput">cp -i config/initializers/omniauth.rb.example config/initializers/omniauth.rb
-</code></pre></notextile>
+<ol>
+<li>Install Nginx via your distribution or a backports repository.</li>
-Edit @config/initializers/omniauth.rb@ to configure the SSO server for authentication. @APP_ID@ and @APP_SECRET@ correspond to the @app_id@ and @app_secret@ set in "Create arvados-server client for Single Sign On (SSO)":install-sso.html#client and @CUSTOM_PROVIDER_URL@ is the address of your SSO server.
+<li><a href="https://www.phusionpassenger.com/documentation/Users%20guide%20Nginx.html">Install Phusion Passenger for Nginx</a>.</li>
-<notextile>
-<pre><code>APP_ID = 'arvados-server'
-APP_SECRET = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
-CUSTOM_PROVIDER_URL = 'https://sso.example.com/'
+<li><p>Puma is already included with the API server's gems. We recommend you run it under a process supervisor such as <a href="http://smarden.org/runit/">runit</a>. Here's a sample run script for that:</p>
+
+<pre><code>#!/bin/bash
+
+set -e
+exec 2>&1
+
+# Uncomment the line below if you're using RVM.
+#source /etc/profile.d/rvm.sh
+
+envdir="`pwd`/env"
+mkdir -p "$envdir"
+echo ws-only > "$envdir/ARVADOS_WEBSOCKETS"
+
+cd /var/www/arvados-api/current
+echo "Starting puma in `pwd`"
+
+# You may need to change arguments below to match your deployment, especially -u.
+exec chpst -m 1073741824 -u www-data:www-data -e "$envdir" \
+ bundle exec puma -t 0:512 -e production -b tcp://127.0.0.1:8100
</code></pre>
-</notextile>
+</li>
-h2. Start the API server
+<li><p>Edit the http section of your Nginx configuration to run the Passenger server, and act as a front-end for both it and Puma. You might add a block like the following, adding SSL and logging parameters to taste:</p>
-h3. Development environment
+<pre><code>server {
+ listen 127.0.0.1:8000;
+ server_name localhost-api;
-If you plan to run in development mode, you can now run the development server this way:
+ root /var/www/arvados-api/current/public;
+ index index.html index.htm index.php;
-<notextile>
-<pre><code>~/arvados/services/api$ <span class="userinput">bundle exec rails server --port=3030
-</code></pre></notextile>
+ passenger_enabled on;
+ # If you're using RVM, uncomment the line below.
+ #passenger_ruby /usr/local/rvm/wrappers/default/ruby;
+}
-h3. Production environment
+upstream api {
+ server 127.0.0.1:8000 fail_timeout=10s;
+}
-We recommend "Passenger":https://www.phusionpassenger.com/ to run the API server in production.
+upstream websockets {
+ # The address below must match the one specified in puma's -b option.
+ server 127.0.0.1:8100 fail_timeout=10s;
+}
-Point it to the services/api directory in the source tree.
+proxy_http_version 1.1;
-To enable streaming so users can monitor crunch jobs in real time, make sure to add the following to your Passenger configuration:
+server {
+ listen <span class="userinput">[your public IP address]</span>:443 ssl;
+ server_name <span class="userinput">uuid_prefix.your.domain</span>;
-<notextile>
-<pre><code><span class="userinput">PassengerBufferResponse off</span>
+ ssl on;
+
+ index index.html index.htm index.php;
+
+ location / {
+ proxy_pass http://api;
+ proxy_redirect off;
+
+ proxy_set_header X-Forwarded-Proto https;
+ proxy_set_header Host $http_host;
+ proxy_set_header X-External-Client $external_client;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ }
+}
+
+server {
+ listen <span class="userinput">[your public IP address]</span>:443 ssl;
+ server_name ws.<span class="userinput">uuid_prefix.your.domain</span>;
+
+ ssl on;
+
+ index index.html index.htm index.php;
+
+ location / {
+ proxy_pass http://websockets;
+ proxy_redirect off;
+
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "upgrade";
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ }
+}
</code></pre>
+</li>
+
+<li>Restart Nginx.</li>
+
+</ol>
</notextile>
title: Install Git server
...
-This installation guide assumes you are on a 64 bit Debian or Ubuntu system.
-
-The arv-git-httpd server provides HTTP access to hosted git repositories, using Arvados authentication tokens instead of passwords. It is intended to be installed on the system where your git repositories are stored, and accessed through a web proxy that provides SSL support.
+The arvados-git-httpd server provides HTTP access to hosted git repositories, using Arvados authentication tokens instead of passwords. It is intended to be installed on the system where your git repositories are stored, and accessed through a web proxy that provides SSL support.
By convention, we use the following hostname for the git service:
This hostname should resolve from anywhere on the internet.
-h2. Install arv-git-httpd
+h2. Install arvados-git-httpd
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install git arvados-git-httpd</span>
+</code></pre>
+</notextile>
-First add the Arvados apt repository, and then install the arv-git-httpd package.
+On Red Hat-based systems:
<notextile>
-<pre><code>~$ <span class="userinput">echo "deb http://apt.arvados.org/ wheezy main" | sudo tee /etc/apt/sources.list.d/apt.arvados.org.list</span>
-~$ <span class="userinput">sudo /usr/bin/apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7</span>
-~$ <span class="userinput">sudo /usr/bin/apt-get update</span>
-~$ <span class="userinput">sudo /usr/bin/apt-get install arv-git-httpd</span>
+<pre><code>~$ <span class="userinput">sudo yum install git arvados-git-httpd</span>
</code></pre>
</notextile>
-Verify that @arv-git-httpd@ and @git-http-backend@ are functional:
+Verify that @arvados-git-httpd@ and @git-http-backend@ are functional:
<notextile>
-<pre><code>~$ <span class="userinput">arv-git-httpd -h</span>
+<pre><code>~$ <span class="userinput">arvados-git-httpd -h</span>
Usage of arv-git-httpd:
-address="0.0.0.0:80": Address to listen on, "host:port".
-git-command="/usr/bin/git": Path to git executable. Each authenticated request will execute this program with a single argument, "http-backend".
</code></pre>
</notextile>
-We recommend running @arv-git-httpd@ under "runit":https://packages.debian.org/search?keywords=runit or something similar.
+We recommend running @arvados-git-httpd@ under "runit":http://smarden.org/runit/ or something similar.
Your @run@ script should look something like this:
<notextile>
<pre><code>export ARVADOS_API_HOST=<span class="userinput">uuid_prefix</span>.your.domain
-exec sudo -u git arv-git-httpd -address=:9001 -git-command="$(which git)" -repo-root=<span class="userinput">/var/lib/arvados/git</span> 2>&1
+exec sudo -u git arvados-git-httpd -address=:9001 -git-command="$(which git)" -repo-root=<span class="userinput">/var/lib/arvados/git</span> 2>&1
</code></pre>
</notextile>
h3. Set up a reverse proxy with SSL support
-The arv-git-httpd service will be accessible from anywhere on the internet, so we recommend using SSL for transport encryption.
+The arvados-git-httpd service will be accessible from anywhere on the internet, so we recommend using SSL for transport encryption.
-This is best achieved by putting a reverse proxy with SSL support in front of arv-git-httpd, running on port 443 and passing requests to arv-git-httpd on port 9001 (or whatever port you chose in your run script).
+This is best achieved by putting a reverse proxy with SSL support in front of arvados-git-httpd, running on port 443 and passing requests to arvados-git-httpd on port 9001 (or whatever port you chose in your run script).
-h3. Tell the API server about the arv-git-httpd service
+h3. Tell the API server about the arvados-git-httpd service
-In your API server's config/application.yml file, add the following entry:
+In your API server's @config/application.yml@ file, add the following entry:
<notextile>
-<pre><code>git_http_base: git.<span class="userinput">uuid_prefix</span>.your.domain
+<pre><code>git_http_base: git.<span class="userinput">uuid_prefix.your.domain</span>
</code></pre>
</notextile>
title: Install a compute node
...
-This installation guide assumes you are on a 64 bit Debian or Ubuntu system.
-
h2. Install dependencies
-First add the Arvados apt repository, and then install a number of packages.
+First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/install-manual-prerequisites.html#repos.
+
+On Debian-based systems:
<notextile>
-<pre><code>~$ <span class="userinput">echo "deb http://apt.arvados.org/ wheezy main" | sudo tee /etc/apt/sources.list.d/apt.arvados.org.list</span>
-~$ <span class="userinput">sudo /usr/bin/apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7</span>
-~$ <span class="userinput">sudo /usr/bin/apt-get update</span>
-~$ <span class="userinput">sudo /usr/bin/apt-get install python-pip python-pyvcf python-gflags python-google-api-python-client python-virtualenv libattr1-dev libfuse-dev python-dev python-llfuse fuse crunchstat python-arvados-fuse iptables ca-certificates lxc apt-transport-https docker.io</span>
+<pre><code>~$ <span class="userinput">sudo apt-get install perl python-virtualenv fuse python-arvados-python-client python-arvados-fuse crunchstat iptables ca-certificates</span>
</code></pre>
</notextile>
-h2. Install slurm and munge
+On Red Hat-based systems:
<notextile>
-<pre><code>~$ <span class="userinput">sudo /usr/bin/apt-get install slurm-llnl munge</span>
+<pre><code>~$ <span class="userinput">sudo yum install perl python27-python-virtualenv fuse python27-python-arvados-python-client python27-python-arvados-fuse crunchstat iptables ca-certificates</span>
</code></pre>
</notextile>
-h2. Copy configuration files from the dispatcher (api)
+{% include 'note_python27_sc' %}
-The @/etc/slurm-llnl/slurm.conf@ and @/etc/munge/munge.key@ files need to be identicaly across the dispatcher and all compute nodes. Copy the files you created in the "Install the Crunch dispatcher":{{site.baseurl}} step to this compute node.
+h2. Set up Docker
-h2. Crunch user account
+Compute nodes must have Docker installed to run jobs inside containers. This requires a relatively recent version of Linux (at least upstream version 3.10, or a distribution version with the appropriate patches backported). Follow the "Docker Engine installation documentation":https://docs.docker.com/ for your distribution.
-* @adduser crunch@
+For Debian-based systems, the Arvados package repository includes a backported @docker.io@ package with a known-good version you can install.
-The crunch user should have the same UID, GID, and home directory on all compute nodes and on the dispatcher (api server).
+h2. Set up SLURM
+
+Install SLURM following "the same process you used to install the Crunch dispatcher":{{ site.baseurl }}/install/install-crunch-dispatch.html#slurm.
+
+h2. Copy configuration files from the dispatcher (API server)
+
+The @/etc/slurm-llnl/slurm.conf@ and @/etc/munge/munge.key@ files need to be identical across the dispatcher and all compute nodes. Copy the files you created in the "Install the Crunch dispatcher":{{site.baseurl}} step to this compute node.
-h2. Configure fuse
+h2. Configure FUSE
Install this file as @/etc/fuse.conf@:
</pre>
</notextile>
+h2. Crunch user account
+
+Create a Crunch user account, and add it to the @fuse@ and @docker@ groups so it can use those tools:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo useradd --groups fuse,docker crunch</span>
+</code></pre>
+</notextile>
+
+The crunch user should have the same UID, GID, and home directory across all compute nodes and the dispatcher (API server).
+
h2. Tell the API server about this compute node
Load your API superuser token on the compute node:
...
-
-
The dispatcher normally runs on the same host/VM as the API server.
-h4. Perl SDK dependencies
+h2. Perl SDK dependencies
Install the Perl SDK on the controller.
* See "Perl SDK":{{site.baseurl}}/sdk/perl/index.html page for details.
-h4. Python SDK dependencies
+h2. Python SDK dependencies
Install the Python SDK and CLI tools on controller and all compute nodes.
* See "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html page for details.
-h4. Slurm
+h2(#slurm). Set up SLURM
+
+On the API server, install SLURM and munge, and generate a munge key.
-On the API server, install slurm and munge, and generate a munge key:
+On Debian-based systems:
<notextile>
<pre><code>~$ <span class="userinput">sudo /usr/bin/apt-get install slurm-llnl munge</span>
</code></pre>
</notextile>
-Now we need to give slurm a configuration file in @/etc/slurm-llnl/slurm.conf@. Here's an example:
+On Red Hat-based systems, "install SLURM and munge from source following their installation guide":https://computing.llnl.gov/linux/slurm/quickstart_admin.html.
+
+Now we need to give SLURM a configuration file in @/etc/slurm-llnl/slurm.conf@. Here's an example:
<notextile>
<pre>
</pre>
</notextile>
-Please make sure to update the value of the @ControlMachine@ parameter to the hostname of your dispatcher (api server).
+Please make sure to update the value of the @ControlMachine@ parameter to the hostname of your dispatcher (API server).
+
+h2. Enable SLURM job dispatch
-h4. Crunch user account
+In your API server's @application.yml@ configuration file, add the line @crunch_job_wrapper: :slurm_immediate@ under the appropriate section. (The second colon is not a typo. It denotes a Ruby symbol.)
-* @adduser crunch@
+h2. Crunch user account
-The crunch user should have the same UID, GID, and home directory on all compute nodes and on the dispatcher (api server).
+Run @sudo adduser crunch@. The crunch user should have the same UID, GID, and home directory on all compute nodes and on the dispatcher (API server).
-h4. Repositories
+h2. Git Repositories
Crunch scripts must be in Git repositories in the directory configured as @git_repositories_dir@/*.git (see the "API server installation":install-api-server.html#git_repositories_dir).
ArgumentError: Specified script_version does not resolve to a commit
</pre>
-h4. Running jobs
+h2. Running jobs
* @services/api/script/crunch-dispatch.rb@ must be running.
* @crunch-dispatch.rb@ needs @services/crunch/crunch-job@ in its @PATH@.
-* @crunch-job@ needs @sdk/perl/lib@ and @warehouse-apps/libwarehouse-perl/lib@ in its @PERLLIB@
-* @crunch-job@ needs @ARVADOS_API_HOST@ (and, if necessary in a development environment, @ARVADOS_API_HOST_INSECURE@)
+* @crunch-job@ needs the installation path of the Perl SDK in its @PERLLIB@.
+* @crunch-job@ needs the @ARVADOS_API_HOST@ (and, if necessary in a development environment, @ARVADOS_API_HOST_INSECURE@) environment variable set.
Example @/var/service/arvados_crunch_dispatch/run@ script:
# A GNU/Linux (virtual) machine
# A working Docker installation (see "Installing Docker":https://docs.docker.com/installation/)
# A working Go installation (see "Install the Go tools":https://golang.org/doc/install)
-# A working Ruby installation (see "Install Ruby and bundler":install-manual-prerequisites-ruby.html)
+# A working Ruby installation, with the Bundler gem installed
+
+h3. Install Ruby and Bundler
+
+{% include 'install_ruby_and_bundler' %}
h2. Download the source tree
title: Install Keepproxy server
...
-This installation guide assumes you are on a 64 bit Debian or Ubuntu system.
-
The Keepproxy server is a gateway into your Keep storage. Unlike the Keepstore servers, which are only accessible on the local LAN, Keepproxy is designed to provide secure access into Keep from anywhere on the internet.
By convention, we use the following hostname for the Keepproxy:
h2. Install Keepproxy
-First add the Arvados apt repository, and then install the Keepproxy package.
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install keepproxy</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems:
<notextile>
-<pre><code>~$ <span class="userinput">echo "deb http://apt.arvados.org/ wheezy main" | sudo tee /etc/apt/sources.list.d/apt.arvados.org.list</span>
-~$ <span class="userinput">sudo /usr/bin/apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7</span>
-~$ <span class="userinput">sudo /usr/bin/apt-get update</span>
-~$ <span class="userinput">sudo /usr/bin/apt-get install keepproxy</span>
+<pre><code>~$ <span class="userinput">sudo yum install keepproxy</span>
</code></pre>
</notextile>
</code></pre>
</notextile>
-It's recommended to run Keepproxy under "runit":https://packages.debian.org/search?keywords=runit or something similar.
+It's recommended to run Keepproxy under "runit":http://smarden.org/runit/ or something similar.
h3. Create an API token for the Keepproxy server
title: Install Keepstore servers
...
-This installation guide assumes you are on a 64 bit Debian or Ubuntu system.
-
We are going to install two Keepstore servers. By convention, we use the following hostname pattern:
<div class="offset1">
h2. Install Keepstore
-First add the Arvados apt repository, and then install the Keepstore package.
+On Debian-based systems:
<notextile>
-<pre><code>~$ <span class="userinput">echo "deb http://apt.arvados.org/ wheezy main" | sudo tee /etc/apt/sources.list.d/apt.arvados.org.list</span>
-~$ <span class="userinput">sudo /usr/bin/apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7</span>
-~$ <span class="userinput">sudo /usr/bin/apt-get update</span>
-~$ <span class="userinput">sudo /usr/bin/apt-get install keepstore</span>
+<pre><code>~$ <span class="userinput">sudo apt-get install keepstore</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install keepstore</span>
</code></pre>
</notextile>
2015/05/08 13:44:26 keepstore starting, pid 2765
2015/05/08 13:44:26 Using volume [UnixVolume /mnt/keep] (writable=true)
2015/05/08 13:44:26 listening at :25107
-
</code></pre>
</notextile>
-It's recommended to run Keepstore under "runit":https://packages.debian.org/search?keywords=runit or something similar.
+It's recommended to run Keepstore under "runit":http://smarden.org/runit/ or something similar.
Repeat this section for each Keepstore server you are setting up.
+++ /dev/null
----
-layout: default
-navsection: installguide
-title: Install Ruby and bundler
-...
-
-We recommend Ruby >= 2.1.
-
-h2(#rvm). Option 1: Install with rvm
-
-<notextile>
-<pre><code>~$ <span class="userinput">gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3</span>
-~$ <span class="userinput">\curl -sSL https://get.rvm.io | bash -s stable --ruby=2.1</span>
-~$ <span class="userinput">gem install bundler
-</span></code></pre></notextile>
-
-h2(#fromsource). Option 2: Install from source
-
-<notextile>
-<pre><code><span class="userinput">mkdir -p ~/src
-cd ~/src
-wget http://cache.ruby-lang.org/pub/ruby/2.1/ruby-2.1.5.tar.gz
-tar xzf ruby-2.1.5.tar.gz
-cd ruby-2.1.5
-./configure
-make
-sudo make install
-
-sudo gem install bundler</span>
-</code></pre></notextile>
The number of Keepstore, shell and compute nodes listed above is a minimum. In a real production installation, you will likely run many more of each of those types of nodes. In such a scenario, you would probably also want to dedicate a node to the Workbench server and Crunch dispatcher, respectively. For performance reasons, you may want to run the database server on a separate node as well.
+h2(#repos). Arvados package repositories
+
+On any host where you install Arvados software, you'll need to set up an Arvados package repository. They're available for several popular distributions.
+
+h3. CentOS
+
+Packages are available for CentOS 6. First, register the Curoverse signing key in RPM's database:
+
+{% include 'install_redhat_key' %}
+
+Then save this configuration block in @/etc/yum.repos.d/arvados.repo@:
+
+<notextile>
+<pre><code>[arvados]
+name=Arvados
+baseurl=http://rpm.arvados.org/CentOS/$releasever/os/$basearch/
+</code></pre>
+</notextile>
+
+h3. Debian and Ubuntu
+
+Packages are available for Debian 7 ("wheezy"), Ubuntu 12.04 ("precise"), and Ubuntu 14.04 ("trusty").
+
+First, register the Curoverse signing key in apt's database:
+
+{% include 'install_debian_key' %}
+
+Configure apt to retrieve packages from the Arvados package repository. This command depends on your OS vendor and version:
+
+table(table table-bordered table-condensed).
+|OS version|Command|
+|Debian 7 ("wheezy")|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ wheezy main" | sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
+|Ubuntu 12.04 ("precise")|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ precise main" | sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
+|Ubuntu 14.04 ("trusty")|<notextile><code><span class="userinput">echo "deb http://apt.arvados.org/ trusty main" | sudo tee /etc/apt/sources.list.d/arvados.list</span></code></notextile>|
+
+{% include 'notebox_begin' %}
+
+Arvados packages for Ubuntu may depend on third-party packages in Ubuntu's "universe" repository. If you're installing on Ubuntu, make sure you have the universe sources uncommented in @/etc/apt/sources.list@.
+
+{% include 'notebox_end' %}
+
+Retrieve the package list:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get update</span>
+</code></pre>
+</notextile>
+
h2. A unique identifier
-Each Arvados installation should have a globally unique identifier, which is a unique 5-character alphanumeric string. Here is a snippet of ruby that generates such a string based on the hostname of your computer:
+Each Arvados installation should have a globally unique identifier, which is a unique 5-character lowercase alphanumeric string. For testing purposes, here is one way to make a random 5-character string:
-<pre>
-Digest::MD5.hexdigest(`hostname`).to_i(16).to_s(36)[0..4]
-</pre>
+<notextile>
+<pre><code>~$ <span class="userinput">tr -dc 0-9a-z </dev/urandom | head -c5; echo</span>
+</code></pre>
+</notextile>
-You may also use a different method to pick the unique identifier. The unique identifier will be part of the hostname of the services in your Arvados cluster. The rest of this documentation will refer to it as your @uuid_prefix@.
+You may also use a different method to pick the unique identifier. The unique identifier will be part of the hostname of the services in your Arvados cluster. The rest of this documentation will refer to it as your @uuid_prefix@.
h2. SSL certificates
title: Install a shell server
...
-This installation guide assumes you are on a 64 bit Debian or Ubuntu system.
-
-There is nothing inherently special about an Arvados shell server. It is just a GNU/Linux machine with the Arvados SDKs installed. For optimal performance, the Arvados shell server should be on the same LAN as the Arvados cluster, but that is not required.
+There is nothing inherently special about an Arvados shell server. It is just a GNU/Linux machine with Arvados utilities and SDKs installed. For optimal performance, the Arvados shell server should be on the same LAN as the Arvados cluster, but that is not required.
h2. Install API tokens
Please follow the "API token guide":{{site.baseurl}}/user/reference/api-tokens.html to get API tokens for your user and install them on your shell server. We will use those tokens to test the SDKs as we install them.
-h2. Install the SDKs
+h2. Install the Ruby SDK and utilities
+
+If you're using RVM:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo rvm-exec gem install arvados-cli</span>
+</code></pre>
+</notextile>
+
+If you're not using RVM:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo gem install arvados-cli</span>
+</code></pre>
+</notextile>
+
+h2. Install the Python SDK and utilities
+
+On Debian-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install python-arvados-python-client python-arvados-fuse</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install python27-python-arvados-python-client python27-python-arvados-fuse</span>
+</code></pre>
+</notextile>
+
+{% include 'note_python27_sc' %}
+
+h2. Update Git Config
+
+Configure git to use the ARVADOS_API_TOKEN environment variable to authenticate to arvados-git-httpd.
+
+Execute the following commands to set up the needed configuration.
-Install the "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html and the "Command line SDK":{{site.baseurl}}/sdk/cli/install.html
+<notextile>
+<pre>
+<code>~$ <span class="userinput">git config 'credential.https://git.{{ site.arvados_api_host }}/.username' none</span></code>
+<code>~$ <span class="userinput">git config 'credential.https://git.{{ site.arvados_api_host }}/.helper' '!cred(){ cat >/dev/null; if [ "$1" = get ]; then echo password=$ARVADOS_API_TOKEN; fi; };cred'</span></code>
+</pre>
+</notextile>
h2(#dependencies). Install dependencies
-Make sure you have "Ruby and Bundler":install-manual-prerequisites-ruby.html installed.
+h3(#install_ruby_and_bundler). Install Ruby and Bundler
+
+{% include 'install_ruby_and_bundler' %}
+
+h3(#install_postgres). Install PostgreSQL
+
+{% include 'install_postgres' %}
h2(#install). Install SSO server
<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
~$ <span class="userinput">git clone https://github.com/curoverse/sso-devise-omniauth-provider.git</span>
~$ <span class="userinput">cd sso-devise-omniauth-provider</span>
-~/sso-devise-omniauth-provider$ <span class="userinput">bundle install</span>
+~/sso-devise-omniauth-provider$ <span class="userinput">bundle</span>
</code></pre></notextile>
h2. Configure the SSO server
h3(#authentication_methods). Authentication methods
-Three authentication methods are supported: google OAuth2, ldap, local accounts.
+Three authentication methods are supported: Google+, LDAP, and local accounts.
-h3(#google_oauth2). google_oauth2 authentication
+h3(#google). Google+ authentication
-Google OAuth2 authentication can be configured with these options.
+In order to use Google+ authentication, you must use the "Google Developers Console":https://console.developers.google.com to create a set of client credentials. In short:
-<pre>
+* Enable the Contacts and Google+ APIs.
+* Create an OAuth Client ID for a web application.
+** JavaScript origins: @https://sso.example.com/@
+** Redirect URIs: @https://sso.example.com/auth/google_oauth2/callback@
+
+Copy the "Client ID" and "Client secret" values from the Google Developers Console into the Google section of @config/application.yml@, like this:
+
+<notextile>
+<pre><code>
# Google API tokens required for OAuth2 login.
#
# See https://github.com/zquestz/omniauth-google-oauth2
#
# and https://developers.google.com/accounts/docs/OAuth2
- google_oauth2_client_id: false
- google_oauth2_client_secret: false
+ google_oauth2_client_id: "<span class="userinput">---YOUR---CLIENT---ID---HERE---</span>"
+ google_oauth2_client_secret: "<span class="userinput">---YOUR---CLIENT---SECRET---HERE---</span>"
# Set this to your OpenId 2.0 realm to enable migration from Google OpenId
# 2.0 to Google OAuth2 OpenId Connect (Google will provide OpenId 2.0 user
# identifiers via the openid.realm parameter in the OAuth2 flow until 2017).
- google_openid_realm: false
-</pre>
+ google_openid_realm: <span class="userinput">false</span></code></pre></notextile>
-h3(#ldap). ldap authentication
+h3(#ldap). LDAP authentication
LDAP authentication can be configured with these options. Make sure to preserve the indentation of the fields beyond @use_ldap@.
use_ldap: false
</pre>
-h3(#local_accounts). local account authentication
+h3(#local_accounts). Local account authentication
If neither Google OAuth2 nor LDAP are enabled, the SSO server automatically
falls back to local accounts. There are two configuration options for local
h2. Set up the database
-Generate a new database password. Nobody ever needs to memorize it or type it, so we'll make a strong one:
+Generate a new database password. Nobody ever needs to memorize it or type it, so make a strong one:
<notextile>
<pre><code>~/sso-devise-omniauth-provider$ <span class="userinput">ruby -e 'puts rand(2**128).to_s(36)'</span>
<notextile>
<pre><code>~/sso-devise-omniauth-provider$ <span class="userinput">su postgres createdb arvados_sso_production -E UTF8 -O arvados_sso</span>
-~/sso-devise-omniauth-provider$ <span class="userinput">RAILS_ENV=production bundle exec rake db:structure:load</span>
+~/sso-devise-omniauth-provider$ <span class="userinput">RAILS_ENV=production bundle exec rake db:schema:load</span>
~/sso-devise-omniauth-provider$ <span class="userinput">RAILS_ENV=production bundle exec rake db:seed</span>
</code></pre></notextile>
-h2(#client). Generate assets
+h2(#assets). Generate assets
If you are running in the production environment, you'll want to generate the assets:
title: Install Workbench
...
-This installation guide assumes you are on a 64 bit Debian or Ubuntu system.
-
h2. Install prerequisites
-<notextile>
-<pre><code>~$ <span class="userinput">sudo apt-get install \
- bison build-essential gettext libcurl3 libcurl3-gnutls \
- libcurl4-openssl-dev libpcre3-dev libpq-dev libreadline-dev \
- libssl-dev libxslt1.1 git wget zlib1g-dev graphviz
-</span></code></pre></notextile>
+The Arvados package repository includes a Workbench server package that can help automate much of the deployment.
-Also make sure you have "Ruby and bundler":install-manual-prerequisites-ruby.html installed.
+h3(#install_ruby_and_bundler). Install Ruby and Bundler
-Workbench doesn't need its own database, so it does not need to have PostgreSQL installed.
+{% include 'install_ruby_and_bundler' %}
-h2. Download the source tree
+h3(#build_tools_workbench). Build tools
-<notextile>
-<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
-~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
-</code></pre></notextile>
-
-See also: "Downloading the source code":https://arvados.org/projects/arvados/wiki/Download on the Arvados wiki.
+Workbench doesn't need its own database, so it does not need to have PostgreSQL installed.
-The Workbench application is in @apps/workbench@ in the source tree.
+On older distributions, you may need to use a backports repository to satisfy these requirements. For example, on older Red Hat-based systems, consider using the "nginx16":https://www.softwarecollections.org/en/scls/rhscl/nginx16/ Software Collection.
-h2. Install gem dependencies
+On a Debian-based system, install the following packages:
<notextile>
-<pre><code>~$ <span class="userinput">cd arvados/apps/workbench</span>
-~/arvados/apps/workbench$ <span class="userinput">bundle install</span>
+<pre><code>~$ <span class="userinput">sudo apt-get install bison build-essential graphviz git nginx python-arvados-python-client arvados-workbench</span>
</code></pre>
</notextile>
-Alternatively, if you don't have sudo/root privileges on the host, install the gems in your own directory instead of installing them system-wide:
+On a Red Hat-based system, install the following packages:
<notextile>
-<pre><code>~$ <span class="userinput">cd arvados/apps/workbench</span>
-~/arvados/apps/workbench$ <span class="userinput">bundle install --path=vendor/bundle</span>
-</code></pre></notextile>
-
-The @bundle install@ command might produce a warning about the themes_for_rails gem. This is OK:
-
-<notextile>
-<pre><code>themes_for_rails at /home/<b>you</b>/.rvm/gems/ruby-2.1.1/bundler/gems/themes_for_rails-1fd2d7897d75 did not have a valid gemspec.
-This prevents bundler from installing bins or native extensions, but that may not affect its functionality.
-The validation message from Rubygems was:
- duplicate dependency on rails (= 3.0.11, development), (>= 3.0.0) use:
- add_runtime_dependency 'rails', '= 3.0.11', '>= 3.0.0'
-Using themes_for_rails (0.5.1) from https://github.com/holtkampw/themes_for_rails (at 1fd2d78)
-</code></pre></notextile>
+<pre><code>~$ <span class="userinput">sudo yum install bison make automake gcc gcc-c++ graphviz git nginx python27-python-arvados-python-client arvados-workbench</span>
+</code></pre>
+</notextile>
-h2. Choose your environment
+{% include 'notebox_begin' %}
-The Workbench application can be run in @development@ or in @production@ mode. Unless this installation is going to be used for development on the Workbench applicatoin itself, you should run it in @production@ mode.
+If you intend to use specific versions of these packages from Software Collections, you may have to adapt some of the package names to match; e.g., @nginx16@.
-Copy the example environment file for your environment. For example, if you choose @production@:
+{% include 'notebox_end' %}
-<notextile>
-<pre><code>~/arvados/apps/workbench$ <span class="userinput">cp -i config/environments/production.rb.example config/environments/production.rb</span>
-</code></pre></notextile>
+{% include 'note_python27_sc' %}
-h2. Configure the Workbench application
+h2. Set up configuration files
-First, copy the example configuration file:
+The Workbench server package uses configuration files that you write to @/etc/arvados/workbench@ and ensures they're consistently deployed. Create this directory and copy the example configuration files to it:
<notextile>
-<pre><code>~/arvados/apps/workbench$ <span class="userinput">cp -i config/application.yml.example config/application.yml</span>
-</code></pre></notextile>
+<pre><code>~$ <span class="userinput">sudo mkdir -p /etc/arvados/workbench</span>
+~$ <span class="userinput">sudo chmod 700 /etc/arvados/workbench</span>
+~$ <span class="userinput">sudo cp /var/www/arvados-workbench/current/config/application.yml.example /etc/arvados/workbench/application.yml</span>
+</code></pre>
+</notextile>
+
+h2. Configure Workbench
-The Workbench application reads the @config/application.yml@ file, as well as the @config/application.defaults.yml@ file. Values in @config/application.yml@ take precedence over the defaults that are defined in @config/application.defaults.yml@. The @config/application.yml.example@ file is not read by the Workbench application and is provided for installation convenience, only.
+Edit @/etc/arvados/workbench/application.yml@ following the instructions below. The deployment script will consistently deploy this to Workbench's configuration directory. Workbench reads both @application.yml@ and its own @config/application.defaults.yml@ file. Values in @application.yml@ take precedence over the defaults that are defined in @config/application.defaults.yml@. The @config/application.yml.example@ file is not read by Workbench and is provided for installation convenience only.
-Consult @config/application.default.yml@ for a full list of configuration options. Always put your local configuration in @config/application.yml@, never edit @config/application.default.yml@.
+Consult @config/application.default.yml@ for a full list of configuration options. Always put your local configuration in @/etc/arvados/workbench/application.yml@—never edit @config/application.default.yml@.
h3. secret_token
This application needs a secret token. Generate a new secret:
<notextile>
-<pre><code>~/arvados/apps/workbench$ <span class="userinput">ruby -e 'puts rand(2**400).to_s(36)'</span>
+<pre><code>~$ <span class="userinput">ruby -e 'puts rand(2**400).to_s(36)'</span>
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
</code></pre>
</notextile>
If the SSL certificate you use for your API server isn't an official certificate signed by a CA, make sure @arvados_insecure_https@ is @true@.
-h3. other options
+h3. Other options
Consult @application.default.yml@ for a full list of configuration options. Always put your local configuration in @application.yml@ instead of editing @application.default.yml@.
-Copy @config/piwik.yml.example@ to @config/piwik.yml@ and edit to suit.
+h2. Configure Piwik
-h2. Start the Workbench application
+In @/var/www/arvados-workbench/current/config@, copy @piwik.yml.example@ to @piwik.yml@ and edit to suit.
-h3. Development environment
+h2. Prepare the Workbench deployment
-If you plan to run in development mode, you can now run the development server this way:
+Now that all your configuration is in place, run @/usr/local/bin/arvados-workbench-upgrade.sh@. This will install and check your configuration, and install necessary gems.
+{% include 'notebox_begin' %}
+You can safely ignore the following error message you may see when installing gems:
<notextile>
-<pre><code>~/arvados/apps/workbench$ <span class="userinput">bundle exec rails server --port=3031</span>
-</code></pre></notextile>
+<pre><code>themes_for_rails at /usr/local/rvm/gems/ruby-2.1.1/bundler/gems/themes_for_rails-1fd2d7897d75 did not have a valid gemspec.
+This prevents bundler from installing bins or native extensions, but that may not affect its functionality.
+The validation message from Rubygems was:
+ duplicate dependency on rails (= 3.0.11, development), (>= 3.0.0) use:
+ add_runtime_dependency 'rails', '= 3.0.11', '>= 3.0.0'
+Using themes_for_rails (0.5.1) from https://github.com/holtkampw/themes_for_rails (at 1fd2d78)
+</code></pre>
+</notextile>
+{% include 'notebox_end' %}
+
+This command aborts when it encounters an error. It's safe to rerun multiple times, so if there's a problem with your configuration, you can fix that and try again.
+
+h2. Set up Web server
+
+For best performance, we recommend you use Nginx as your Web server front-end, with a Passenger backend to serve Workbench. To do that:
+
+<notextile>
+<ol>
+<li>Install Nginx via your distribution or a backports repository.</li>
+
+<li><a href="https://www.phusionpassenger.com/documentation/Users%20guide%20Nginx.html">Install Phusion Passenger for Nginx</a>.</li>
-h3. Production environment
+<li><p>Edit the http section of your Nginx configuration to run the Passenger server, and act as a front-end for it. You might add a block like the following, adding SSL and logging parameters to taste:</p>
-We recommend "Passenger":https://www.phusionpassenger.com/ to run the API server in production.
+<pre><code>server {
+ listen 127.0.0.1:9000;
+ server_name localhost-workbench;
-Point it to the apps/workbench directory in the source tree.
+ root /var/www/arvados-workbench/current/public;
+ index index.html index.htm index.php;
+
+ passenger_enabled on;
+ # If you're using RVM, uncomment the line below.
+ #passenger_ruby /usr/local/rvm/wrappers/default/ruby;
+}
+
+upstream workbench {
+ server 127.0.0.1:9000 fail_timeout=10s;
+}
+
+proxy_http_version 1.1;
+
+server {
+ listen <span class="userinput">[your public IP address]</span>:443 ssl;
+ server_name workbench.<span class="userinput">uuid-prefix.your.domain</span>;
+
+ ssl on;
+
+ index index.html index.htm index.php;
+
+ location / {
+ proxy_pass http://workbench;
+ proxy_redirect off;
+
+ proxy_set_header X-Forwarded-Proto https;
+ proxy_set_header Host $http_host;
+ proxy_set_header X-External-Client $external_client;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ }
+}
+</code></pre>
+</li>
+
+<li>Restart Nginx.</li>
+
+</ol>
+</notextile>
h2. Trusted client setting
In the <strong>API server</strong> project root, start the rails console. Locate the ApiClient record for your Workbench installation (typically, while you're setting this up, the @last@ one in the database is the one you want), then set the @is_trusted@ flag for the appropriate client record:
-<notextile><pre><code>~/arvados/services/api$ <span class="userinput">RAILS_ENV=production bundle exec rails console</span>
+<notextile><pre><code>/var/www/arvados-api/current$ <span class="userinput">RAILS_ENV=production bundle exec rails console</span>
irb(main):001:0> <span class="userinput">wb = ApiClient.all.last; [wb.url_prefix, wb.created_at]</span>
=> ["https://workbench.example.com/", Sat, 19 Apr 2014 03:35:12 UTC +00:00]
irb(main):002:0> <span class="userinput">include CurrentApiClient</span>
Next, we're going to use the rails console on the <strong>API server</strong> to activate our own account and give yourself admin privileges:
<notextile>
-<pre><code>~/arvados/services/api$ <span class="userinput">RAILS_ENV=production bundle exec rails console</span>
+<pre><code>/var/www/arvados-api/current$ <span class="userinput">RAILS_ENV=production bundle exec rails console</span>
irb(main):001:0> <span class="userinput">Thread.current[:user] = User.all.select(&:identity_url).last</span>
irb(main):002:0> <span class="userinput">Thread.current[:user].is_admin = true</span>
irb(main):003:0> <span class="userinput">Thread.current[:user].update_attributes is_admin: true, is_active: true</span>
To use the @arv@ command, you can either install the @arvados-cli@ gem via RubyGems or build and install the package from source.
-h4. Prerequisites: Ruby >= 2.1.0 and curl libraries
+h3. Prerequisites: Ruby, Bundler, and curl libraries
-Make sure you have "Ruby and bundler":{{site.baseurl}}/install/install-manual-prerequisites-ruby.html installed.
+{% include 'install_ruby_and_bundler' %}
Install curl libraries with your system's package manager. For example, on Debian or Ubuntu:
</pre>
</notextile>
-h4. Option 1: install with RubyGems
+h3. Option 1: Install with RubyGems
<notextile>
<pre>
</pre>
</notextile>
-h4. Option 2: build and install from source
+h3. Option 2: Build and install from source
<notextile>
<pre>
exit
--local Run locally using arv-run-pipeline-instance
--docker-image DOCKER_IMAGE
- Docker image to use, default arvados/jobs
+ Docker image to use, otherwise use instance default.
--ignore-rcode Commands that return non-zero return codes should not
be considered failed.
--no-reuse Do not reuse past jobs.
h3. Installation
+h4. Option 1: Install from distribution packages
+
+First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/install-manual-prerequisites.html#repos.
+
+On Debian-based systems:
+
<notextile>
-<pre>
-$ <code class="userinput">sudo apt-get install libjson-perl libio-socket-ssl-perl libwww-perl libipc-system-simple-perl</code>
-$ <code class="userinput">git clone https://github.com/curoverse/arvados.git</code>
-$ <code class="userinput">cd arvados/sdk/perl</code>
-$ <code class="userinput">perl Makefile.PL</code>
-$ <code class="userinput">sudo make install</code>
-</pre>
+<pre><code>~$ <span class="userinput">sudo apt-get install libjson-perl libio-socket-ssl-perl libwww-perl libipc-system-simple-perl libarvados-perl</span>
+</code></pre>
+</notextile>
+
+On Red Hat-based systems:
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo yum install perl-ExtUtils-MakeMaker perl-JSON perl-IO-Socket-SSL perl-WWW-Curl libarvados-perl</span>
+</code></pre>
+</notextile>
+
+h4. Option 2: Install from source
+
+First, install dependencies from your distribution. Refer to the package lists above, but don't install @libarvados-perl@.
+
+Then run the following:
+
+<notextile>
+<pre><code>~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
+~$ <span class="userinput">cd arvados/sdk/perl</span>
+~$ <span class="userinput">perl Makefile.PL</span>
+~$ <span class="userinput">sudo make install</span>
+</code></pre>
</notextile>
-h4. Test installation
+h3. Test installation
If the SDK is installed, @perl -MArvados -e ''@ should produce no errors.
If your @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ environment variables are set up correctly (see "api-tokens":{{site.baseurl}}/user/reference/api-tokens.html for details), the following test script should work:
<notextile>
-<pre>$ <code class="userinput">perl <<'EOF'
+<pre>~$ <code class="userinput">perl <<'EOF'
use Arvados;
my $arv = Arvados->new('apiVersion' => 'v1');
my $me = $arv->{'users'}->{'current'}->execute;
If you are logged in to an Arvados VM, the Python SDK should be installed.
-To use the Python SDK elsewhere, you can either install the Python SDK via PyPI or build and install the package using the arvados source tree.
+To use the Python SDK elsewhere, you can install from a distribution package, PyPI, or source.
{% include 'notebox_begin' %}
-The Python SDK requires Python 2.7
+The Python SDK requires Python 2.7.
{% include 'notebox_end' %}
-h4. Option 1: install with PyPI
+h4. Option 1: Install from distribution packages
+
+First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/install-manual-prerequisites.html#repos.
+
+On Debian-based systems:
<notextile>
-<pre>
-$ <code class="userinput">sudo apt-get install python-pip python-dev libattr1-dev libfuse-dev pkg-config python-yaml</code>
-$ <code class="userinput">sudo pip install arvados-python-client</code>
-</pre>
+<pre><code>~$ <span class="userinput">sudo apt-get install python-arvados-python-client</span>
+</code></pre>
</notextile>
-_If your version of @pip@ is 1.4 or newer, the @pip install@ command might give an error: "Could not find a version that satisfies the requirement arvados-python-client". If this happens, fix it by adding a @--pre@ flag:_
+On Red Hat-based systems:
<notextile>
-<pre>
-$ <code class="userinput">sudo pip install --pre arvados-python-client</code>
-</pre>
+<pre><code>~$ <span class="userinput">sudo yum install python27-python-arvados-python-client</span>
+</code></pre>
</notextile>
-h4. Option 2: install from distribution packages (Debian/Ubuntu only)
+{% include 'note_python27_sc' %}
-First add @http://apt.arvados.org@ to your list of apt repositories:
+h4. Option 2: Install with pip
-<notextile>
-<pre>
-$ <code class="userinput">echo "deb http://apt.arvados.org/ wheezy main" | sudo tee /etc/apt/sources.list.d/apt.arvados.org.list</code>
-</pre>
-</notextile>
+Run @pip-2.7 install arvados-python-client@ in an appropriate installation environment, such as a virtualenv.
-Then install the package:
+If your version of @pip@ is 1.4 or newer, the @pip install@ command might give an error: "Could not find a version that satisfies the requirement arvados-python-client". If this happens, try @pip-2.7 install --pre arvados-python-client@.
-<notextile>
-<pre>
-$ <code class="userinput">sudo apt-get update</code>
-$ <code class="userinput">sudo apt-get install python-arvados-python-client</code>
-</pre>
-</notextile>
+h4. Option 3: Install from source
-h4. Option 3: build and install from source
+Install the @python-setuptools@ package from your distribution. Then run the following:
<notextile>
-<pre>
-~$ <code class="userinput">sudo apt-get install python-dev libattr1-dev libfuse-dev pkg-config</code>
-~$ <code class="userinput">git clone https://github.com/curoverse/arvados.git</code>
-~$ <code class="userinput">cd arvados/sdk/python</code>
-~/arvados/sdk/python$ <code class="userinput">sudo python setup.py install</code>
-</pre>
+<pre><code>~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
+~$ <span class="userinput">cd arvados/sdk/python</span>
+~$ <span class="userinput">python2.7 setup.py install</span>
+</code></pre>
</notextile>
+You may optionally run the final installation command in a virtualenv, or with the @--user@ option.
+
h4. Test installation
If the SDK is installed and your @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ environment variables are set up correctly (see "api-tokens":{{site.baseurl}}/user/reference/api-tokens.html for details), @import arvados@ should produce no errors:
<notextile>
-<pre>$ <code class="userinput">python</code>
-Python 2.7.4 (default, Sep 26 2013, 03:20:26)
+<pre>~$ <code class="userinput">python2.7</code>
+Python 2.7.4 (default, Sep 26 2013, 03:20:26)
[GCC 4.7.3] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> <code class="userinput">import arvados</code>
</notextile>
The SDK retrieves the list of API methods from the server at run time. Therefore, the set of available methods is determined by the server version rather than the SDK version.
-
title: "Checking your environment"
...
-First, log into an Arvados VM instance (instructions for "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login) or install the Arvados "Command line SDK":{{site.baseurl}}/sdk/cli/install.html and "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation.
+First, log into an Arvados VM instance (instructions for "Webshell":{{site.baseurl}}/user/getting_started/vm-login-with-webshell.html or "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login) or install the Arvados "Command line SDK":{{site.baseurl}}/sdk/cli/install.html and "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation.
Check that you are able to access the Arvados API server using @arv user current@. If it is able to access the API server, it will print out information about your account:
title: Accessing an Arvados VM with SSH - Unix Environments
...
-This document is for Unix environments (Linux, OS X, Cygwin). If you are using a Windows environment, please visit the "Accessing an Arvados VM with SSH - Windows Environments":ssh-access-windows.html page.
+This document is for accessing an Arvados VM using SSH keys in Unix environments (Linux, OS X, Cygwin). If you would like to access a VM through your browser, please visit the "Accessing an Arvados VM with Webshell":vm-login-with-webshell.html page. If you are using a Windows environment, please visit the "Accessing an Arvados VM with SSH - Windows Environments":ssh-access-windows.html page.
{% include 'ssh_intro' %}
title: Accessing an Arvados VM with SSH - Windows Environments
...
-This document is for Windows environments. If you are using a Unix environment (Linux, OS X, Cygwin), please visit the "Accessing an Arvados VM with SSH - Unix Environments":ssh-access-unix.html page.
+This document is for accessing an Arvados VM using SSH keys in Windows environments. If you would like to access a VM through your browser, please visit the "Accessing an Arvados VM with Webshell":vm-login-with-webshell.html page. If you are using a Unix environment (Linux, OS X, Cygwin), please visit the "Accessing an Arvados VM with SSH - Unix Environments":ssh-access-unix.html page.
{% include 'ssh_intro' %}
--- /dev/null
+---
+layout: default
+navsection: userguide
+title: Accessing an Arvados VM with Webshell
+...
+
+This document describes how to access an Arvados VM with Webshell from Workbench.
+
+h2(#webshell). Access VM using webshell
+
+Webshell gives you access to an Arvados virtual machine from your browser with no additional setup.
+
+In the Arvados Workbench, click on the dropdown menu icon <span class="fa fa-lg fa-user"></span> <span class="caret"></span> in the upper right corner of the top navigation menu to access the user settings menu, and click on the menu item *Manage account* to go to the account management page. In the *Manage account* page, you will see the *Virtual Machines* panel that lists the virtual machines you can access.
+
+Each row in the Virtual Machines panel lists the hostname of the VM, along with a <code>Log in as *you*</code> button under the column "Web shell beta". Clicking on this button will open up a webshell terminal for you in a new browser tab and log you in.
+
+!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/vm-access-with-webshell.png!
+
+You are now ready to work in your Arvados VM.
The Arvados API token is a secret key that enables the @arv@ command line client to access Arvados with the proper permissions.
-Access the Arvados Workbench using this link: "{{site.arvados_workbench_host}}/":{{site.arvados_workbench_host}}/ (Replace @{{ site.arvados_api_host }}@ with the hostname of your local Arvados instance if necessary.)
+Access the Arvados Workbench using this link: "{{site.arvados_workbench_host}}/":{{site.arvados_workbench_host}}/ (Replace the hostname portion with the hostname of your local Arvados instance if necessary.)
-Open a shell on the system where you want to use the Arvados client. This may be your local workstation, or an Arvados virtual machine accessed with SSH (instructions for "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login).
+Open a shell on the system where you want to use the Arvados client. This may be your local workstation, or an Arvados virtual machine accessed with "Webshell":{{site.baseurl}}/user/getting_started/vm-login-with-webshell.html or SSH (instructions for "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login).
Click on the dropdown menu icon <span class="fa fa-lg fa-user"></span> <span class="caret"></span> in the upper right corner of the top navigation menu to access your account menu, then click on the menu item *Manage account* to go to the account management page. On the *Manage account* page, you will see the *Current Token* panel, which lists your current token and instructions to set up your environment.
--- /dev/null
+---
+layout: default
+navsection: userguide
+title: "Using arv-copy"
+...
+
+
+This tutorial describes how to copy Arvados objects from one cluster to another by using @arv-copy@.
+
+{% include 'tutorial_expectations' %}
+
+h2. arv-copy
+
+@arv-copy@ allows users to copy collections, pipeline templates, and pipeline instances from one cluster to another. By default, @arv-copy@ will recursively go through a template or instance and copy all dependencies associated with the object.
+
+For example, let's copy from our <a href="https://cloud.curoverse.com/">beta cloud instance *qr1hi*</a> to *dst_cluster*. The names *qr1hi* and *dst_cluster* are interchangeable with any cluster name. You can find the cluster name from the prefix of the uuid of the object you want to copy. For example, in *qr1hi*-4zz18-tci4vn4fa95w0zx, the cluster name is qr1hi.
+
+In order for the clusters to be able to communicate with each other, you must create custom configuration files for both clusters. First, go to your *Manage account* page in Workbench and copy the @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ in both of your clusters. Then, create two configuration files, one for each cluster. The names of the files must have the format of *uuid_prefix.conf*. In our example, let's make two files, one for *qr1hi* and one for *dst_cluster*. From your *Manage account* page in *qr1hi* and *dst_cluster*, copy the @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@.
+
+!{display: block;margin-left: 25px;margin-right: auto;}{{ site.baseurl }}/images/api-token-host.png!
+
+Copy your @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ into the config files as shown below in the shell account from which you are executing the commands. For example, the default shell you may have access to is shell.qr1hi. You can add these files in ~/.config/arvados/ in the qr1hi shell terminal.
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd ~/.config/arvados</span>
+~$ <span class="userinput">echo "ARVADOS_API_HOST=qr1hi.arvadosapi.com" >> qr1hi.conf</span>
+~$ <span class="userinput">echo "ARVADOS_API_TOKEN=123456789abcdefghijkl" >> qr1hi.conf</span>
+~$ <span class="userinput">echo "ARVADOS_API_HOST=dst_cluster.arvadosapi.com" >> dst_cluster.conf</span>
+~$ <span class="userinput">echo "ARVADOS_API_TOKEN=987654321lkjihgfedcba" >> dst_cluster.conf</span>
+</code></pre>
+</notextile>
+
+Now you're ready to copy between *qr1hi* and *dst_cluster*!
+
+h3. How to copy a collection
+
+First, select the uuid of the collection you want to copy from the source cluster. The uuid can be found in the collection display page in the collection summary area (top left box), or from the URL bar (the part after @collections/...@)
+
+Now copy the collection from *qr1hi* to *dst_cluster*. We will use the uuid @qr1hi-4zz18-tci4vn4fa95w0zx@ as an example. You can find this collection in the <a href="https://cloud.curoverse.com/collections/qr1hi-4zz18-tci4vn4fa95w0zx">lobSTR v.3 project on cloud.curoverse.com</a>.
+<notextile>
+<pre><code>~$ <span class="userinput">arv-copy --src qr1hi --dst dst_cluster qr1hi-4zz18-tci4vn4fa95w0zx</span>
+qr1hi-4zz18-tci4vn4fa95w0zx: 6.1M / 6.1M 100.0%
+arvados.arv-copy[1234] INFO: Success: created copy with uuid dst_cluster-4zz18-8765943210cdbae
+</code></pre>
+</notextile>
+
+The output of arv-copy displays the uuid of the collection generated in the destination cluster. By default, the output is placed in your home project in the destination cluster. If you want to place your collection in a pre-created project, you can specify the project you want it to be in using the tag @--project-uuid@ followed by the project uuid.
+
+For example, this will copy the collection to project dst_cluster-j7d0g-a894213ukjhal12 in the destination cluster.
+
+<notextile> <pre><code>~$ <span class="userinput">arv-copy --src qr1hi --dst dst_cluster --project-uuid dst_cluster-j7d0g-a894213ukjhal12 qr1hi-4zz18-tci4vn4fa95w0zx</span>
+</code></pre>
+</notextile>
+
+h3. How to copy a pipeline template or pipeline instance
+
+{% include 'arv_copy_expectations' %}
+
+We will use the uuid @qr1hi-d1hrv-nao0ohw8y7dpf84@ as an example pipeline instance.
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-copy --src qr1hi --dst dst_cluster --dst-git-repo $USER/tutorial qr1hi-d1hrv-nao0ohw8y7dpf84</span>
+To git@git.dst_cluster.arvadosapi.com:$USER/tutorial.git
+ * [new branch] git_git_qr1hi_arvadosapi_com_arvados_git_ac21f0d45a76294aaca0c0c0fdf06eb72d03368d -> git_git_qr1hi_arvadosapi_com_arvados_git_ac21f0d45a76294aaca0c0c0fdf06eb72d03368d
+arvados.arv-copy[19694] INFO: Success: created copy with uuid dst_cluster-d1hrv-rym2h5ub9m8ofwj
+</code></pre>
+</notextile>
+
+New branches in the destination git repo will be created for each branch used in the pipeline template. For example, if your source branch was named ac21f0d45a76294aaca0c0c0fdf06eb72d03368d, your new branch will be named @git_git_qr1hi_arvadosapi_com_reponame_git_ac21f0d45a76294aaca0c0c0fdf06eb72d03368d@.
+
+By default, if you copy a pipeline instance recursively, you will find that the template as well as all the dependencies are in your home project.
+
+If you would like to copy the object without dependencies, you can use the @--no-recursive@ tag.
+
+For example, we can copy the same object using this tag.
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-copy --src qr1hi --dst dst_cluster --dst-git-repo $USER/tutorial --no-recursive qr1hi-d1hrv-nao0ohw8y7dpf84</span>
+</code></pre>
+</notextile>
h2. Additional options
-* @--docker-image IMG@ : By default, commands run inside a Docker container created from the latest "arvados/jobs" Docker image. Use this option to specify a different image to use. Note: the Docker image must be uploaded to Arvados using @arv keep docker@.
+* @--docker-image IMG@ : By default, commands run based in a container created from the @default_docker_image_for_jobs@ setting on the API server. Use this option to specify a different image to use. Note: the Docker image must be uploaded to Arvados using @arv keep docker@.
* @--dry-run@ : Print out the final Arvados pipeline generated by @arv-run@ without submitting it.
* @--local@ : By default, the pipeline will be submitted to your configured Arvados instance. Use this option to run the command locally using @arv-run-pipeline-instance --run-jobs-here@.
* @--ignore-rcode@ : Some commands use non-zero exit codes to indicate nonfatal conditions (e.g. @grep@ returns 1 when no match is found). Set this to indicate that commands that return non-zero return codes should not be considered failed.
@task.stdout@ specifies the desired file name in the output directory to save the content of standard output. When command describes a Unix pipeline, this captures the output of the last command.
+h3. task.env
+
+Set environment variables for the command. Accepts an object mapping environment variables to the desired values. Parameter substitution is performed on values, but not on the environment variable names themselves. Example usage:
+
+<pre>
+{
+ "command": ["/bin/sh", "-c", "echo $MY_ENV_VAR"],
+ "task.env": {
+ "MY_ENV_VAR": "Hello world!"
+ }
+}
+</pre>
+
h3. task.vwd
Background: because Keep collections are read-only, this does not play well with certain tools that expect to be able to write their outputs alongside their inputs (such as tools that generate indexes that are closely associated with the original file.) The run-command's solution to this is the "virtual working directory".
--- /dev/null
+---
+layout: default
+navsection: userguide
+title: Adding a new arvados repository
+...
+
+Arvados repositories are managed through the Git revision control system. You can use these repositories to store your crunch scripts and run them in the Arvados cluster.
+
+{% include 'tutorial_expectations' %}
+
+h2. Setting up Git
+
+Before you start using Git and Arvados repositories, you should do some basic configuration (you only need to do this the first time):
+
+<notextile>
+<pre><code>~$ <span class="userinput">git config --global user.name "Your Name"</span>
+~$ <span class="userinput">git config --global user.email $USER@example.com</span></code></pre>
+</notextile>
+
+h2. Add "tutorial" repository
+
+On the Arvados Workbench, click on the dropdown menu icon <span class="fa fa-lg fa-user"></span> <span class="caret"></span> in the upper right corner of the top navigation menu to access the user settings menu, and click on the menu item *Manage account* to go to the account management page.
+
+In the *Manage account* page, you will see the *Repositories* panel with the *Add new repository* button.
+
+!{display: block;margin-left: 25px;margin-right: auto;}{{ site.baseurl }}/images/repositories-panel.png!
+
+Click the *Add new Repository* button to open the popup to add a new Arvados repository. You will see a text box where you can enter the name of the repository. Enter *tutorial* in this text box and click on *Create*.
+
+{% include 'notebox_begin' %}
+The name you enter here must begin with a letter and can only contain alphanumeric characters.
+{% include 'notebox_end' %}
+
+!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/add-new-repository.png!
+
+This will create a new repository with the name @$USER/tutorial@. It can be accessed using the URL <notextile><code>https://git.{{ site.arvados_api_host }}/$USER/tutorial.git</code></notextile> or <notextile><code>git@git.{{ site.arvados_api_host }}:$USER/tutorial.git</code></notextile>
+
+Back in the *Repositories* panel in the *Manage account* page, you should see the @$USER/tutorial@ repository listed in the name column with these URLs.
+
+!{display: block;margin-left: 25px;margin-right: auto;}{{ site.baseurl }}/images/added-new-repository.png!
+
+You are now ready to use this *tutorial* repository to run your crunch scripts.
This tutorial uses @$USER@ to denote your username. Replace @$USER@ with your user name in all the following examples.
-Start by creating a directory called @$USER@ . Next, create a subdirectory called @crunch_scripts@ and change to that directory:
+Start by creating a directory called @tutorial@ in your home directory. Next, create a subdirectory called @crunch_scripts@ and change to that directory:
<notextile>
-<pre><code>~$ <span class="userinput">mkdir -p tutorial/crunch_scripts</span>
+<pre><code>~$ <span class="userinput">cd $HOME</span>
+~$ <span class="userinput">mkdir -p tutorial/crunch_scripts</span>
~$ <span class="userinput">cd tutorial/crunch_scripts</span></code></pre>
</notextile>
If multiple clients (separate instances of arv-mount or other arvados applications) modify the same file in the same collection within a short time interval, this may result in a conflict. In this case, the most recent commit wins, and the "loser" will be renamed to a conflict file in the form @name~YYYYMMDD-HHMMSS~conflict~@.
-Please note this feature is in beta testing. In particular, the conflict mechanism is itself currently subject to race condiditions with potential for data loss when a collection is being modified simultaneously by multiple clients. This issue will be resolved in future development.
+Please note this feature is in beta testing. In particular, the conflict mechanism is itself currently subject to race conditions with potential for data loss when a collection is being modified simultaneously by multiple clients. This issue will be resolved in future development.
This tutorial uses @$USER@ to denote your username. Replace @$USER@ with your user name in all the following examples.
-h2. Setting up Git
+Also, this tutorial uses the @tutorial@ arvados repository created in "Adding a new arvados repository":add-new-repository.html as the example repository.
+
+h2. Clone arvados repository
All Crunch scripts are managed through the Git revision control system. Before you start using Git, you should do some basic configuration (you only need to do this the first time):
On the Arvados Workbench, click on the dropdown menu icon <span class="fa fa-lg fa-user"></span> <span class="caret"></span> in the upper right corner of the top navigation menu to access the user settings menu, and click on the menu item *Manage account* to go to the account management page.
-On the *Manage account* page, you will see *Repositories* panel. In this panel, you should see a repository with your user name listed in the *name* column. Next to *name* is the column *URL*. Copy the *URL* value associated with your repository. This should look like <notextile><code>git@git.{{ site.arvados_api_host }}:$USER/$USER.git</code></notextile>.
+On the *Manage account* page, you will see *Repositories* panel. In this panel, you should see the @$USER/tutorial@ repository listed in the *name* column. Next to *name* is the column *URL*. Copy the *URL* value associated with your repository. This should look like <notextile><code>https://git.{{ site.arvados_api_host }}/$USER/tutorial.git</code></notextile>. Alternatively, you can use <notextile><code>git@git.{{ site.arvados_api_host }}:$USER/tutorial.git</code></notextile>
Next, on the Arvados virtual machine, clone your Git repository:
<notextile>
<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
-~$ <span class="userinput">git clone git@git.{{ site.arvados_api_host }}:$USER/$USER.git</span>
-Cloning into '$USER'...</code></pre>
+~$ <span class="userinput">git clone https://git.{{ site.arvados_api_host }}/$USER/tutorial.git</span>
+Cloning into 'tutorial'...</code></pre>
</notextile>
-This will create a Git repository in the directory called @$USER@ in your home directory. Say yes when prompted to continue with connection.
+This will create a Git repository in the directory called @tutorial@ in your home directory. Say yes when prompted to continue with connection.
Ignore any warning that you are cloning an empty repository.
+*Note:* If you are prompted for username and password when you try to git clone using this command, you may first need to update your git configuration. Execute the following commands to do so.
+
+<notextile>
+<pre>
+<code>~$ <span class="userinput">git config 'credential.https://git.{{ site.arvados_api_host }}/.username' none</span></code>
+<code>~$ <span class="userinput">git config 'credential.https://git.{{ site.arvados_api_host }}/.helper' '!cred(){ cat >/dev/null; if [ "$1" = get ]; then echo password=$ARVADOS_API_TOKEN; fi; };cred'</span></code>
+</pre>
+</notextile>
+
{% include 'notebox_begin' %}
For more information about using Git, try
h2. Creating a Crunch script
-Start by entering the @$USER@ directory created by @git clone@. Next create a subdirectory called @crunch_scripts@ and change to that directory:
+Start by entering the @tutorial@ directory created by @git clone@. Next create a subdirectory called @crunch_scripts@ and change to that directory:
<notextile>
<pre><code>~$ <span class="userinput">cd $USER</span>
Next, using @nano@ or your favorite Unix text editor, create a new file called @hash.py@ in the @crunch_scripts@ directory.
-notextile. <pre>~/$USER/crunch_scripts$ <code class="userinput">nano hash.py</code></pre>
+notextile. <pre>~/tutorial/crunch_scripts$ <code class="userinput">nano hash.py</code></pre>
Add the following code to compute the MD5 hash of each file in a collection (if you already completed "Writing a Crunch script":tutorial-firstscript.html you can just copy the @hash.py@ file you created previously.)
Make the file executable:
-notextile. <pre><code>~/$USER/crunch_scripts$ <span class="userinput">chmod +x hash.py</span></code></pre>
+notextile. <pre><code>~/tutorial/crunch_scripts$ <span class="userinput">chmod +x hash.py</span></code></pre>
Next, add the file to the staging area. This tells @git@ that the file should be included on the next commit.
-notextile. <pre><code>~/$USER/crunch_scripts$ <span class="userinput">git add hash.py</span></code></pre>
+notextile. <pre><code>~/tutorial/crunch_scripts$ <span class="userinput">git add hash.py</span></code></pre>
Next, commit your changes. All staged changes are recorded into the local git repository:
<notextile>
-<pre><code>~/$USER/crunch_scripts$ <span class="userinput">git commit -m"my first script"</span>
+<pre><code>~/tutorial/crunch_scripts$ <span class="userinput">git commit -m"my first script"</span>
[master (root-commit) 27fd88b] my first script
1 file changed, 45 insertions(+)
create mode 100755 crunch_scripts/hash.py</code></pre>
Finally, upload your changes to the Arvados server:
<notextile>
-<pre><code>~/$USER/crunch_scripts$ <span class="userinput">git push origin master</span>
+<pre><code>~/tutorial/crunch_scripts$ <span class="userinput">git push origin master</span>
Counting objects: 4, done.
Compressing objects: 100% (2/2), done.
Writing objects: 100% (4/4), 682 bytes, done.
Total 4 (delta 0), reused 0 (delta 0)
-To git@git.qr1hi.arvadosapi.com:$USER/$USER.git
+To git@git.qr1hi.arvadosapi.com:$USER/tutorial.git
* [new branch] master -> master</code></pre>
</notextile>
require 'arvados'
require 'tempfile'
require 'yaml'
+require 'fileutils'
# This script does the actual gitolite config management on disk.
#
gitolite_tmpdir = File.join(File.absolute_path(File.dirname(__FILE__)),
cp_config['gitolite_tmp'])
gitolite_admin = File.join(gitolite_tmpdir, 'gitolite-admin')
+gitolite_admin_keydir = File.join(gitolite_admin, 'keydir')
gitolite_keydir = File.join(gitolite_admin, 'keydir', 'arvados')
ENV['ARVADOS_API_HOST'] = cp_config['arvados_api_host']
def replace_file(path, contents)
unlink_now = true
dirname, basename = File.split(path)
+ FileUtils.mkpath(dirname)
new_file = Tempfile.new([basename, ".tmp"], dirname)
begin
new_file.write(contents)
permissions = arv.repository.get_all_permissions
ensure_directory(gitolite_keydir, 0700)
- user_ssh_keys = UserSSHKeys.new(permissions[:user_keys], gitolite_keydir)
- # Make sure the arvados_git_user key is installed
- user_ssh_keys.install('arvados_git_user.pub', gitolite_arvados_git_user_key)
+ admin_user_ssh_keys = UserSSHKeys.new(permissions[:user_keys], gitolite_admin_keydir)
+ # Make sure the arvados_git_user key is installed; put it in gitolite_admin_keydir
+ # because that is where gitolite will try to put it if we do not.
+ admin_user_ssh_keys.install('arvados_git_user.pub', gitolite_arvados_git_user_key)
+ user_ssh_keys = UserSSHKeys.new(permissions[:user_keys], gitolite_keydir)
permissions[:repositories].each do |repo_record|
repo = Repository.new(repo_record, user_ssh_keys)
repo.ensure_config(gitolite_admin)
DEBIAN_IMAGE := $(shell $(DOCKER) images -q arvados/debian |head -n1)
REALCLEAN_CONTAINERS := $(shell $(DOCKER) ps -a |grep -e arvados -e api_server -e keep_server -e keep_proxy_server -e doc_server -e workbench_server |cut -f 1 -d' ')
-REALCLEAN_IMAGES := $(shell $(DOCKER) images -q arvados/* |grep -v $(DEBIAN_IMAGE) 2>/dev/null)
-DEEPCLEAN_IMAGES := $(shell $(DOCKER) images -q arvados/*)
+# Generate a list of docker images tagged as arvados/*
+# but exclude those tagged as arvados/build
+ADI_TEMPFILE := $(shell mktemp)
+ARVADOS_DOCKER_IMAGES := $(shell $(DOCKER) images -q arvados/* |sort > $(ADI_TEMPFILE))
+ABDI_TEMPFILE := $(shell mktemp)
+ARVADOS_BUILD_DOCKER_IMAGES := $(shell $(DOCKER) images -q arvados/build |sort > $(ABDI_TEMPFILE))
+REALCLEAN_IMAGES := $(shell comm -3 $(ADI_TEMPFILE) $(ABDI_TEMPFILE) |grep -v $(DEBIAN_IMAGE) 2>/dev/null)
+DEEPCLEAN_IMAGES := $(shell comm -3 $(ADI_TEMPFILE) $(ABDI_TEMPFILE))
SKYDNS_CONTAINERS := $(shell $(DOCKER) ps -a |grep -e crosbymichael/skydns -e crosbymichael/skydock |cut -f 1 -d' ')
SKYDNS_IMAGES := $(shell $(DOCKER) images -q crosbymichael/skyd*)
$(DOCKER_BUILD) -t arvados/keepproxy keepproxy
date >keep-proxy-image
-jobs-image: base-image $(BUILD) $(JOBS_DEPS)
+jobs-image: debian-arvados-image $(BUILD) $(JOBS_DEPS)
$(DOCKER_BUILD) -t arvados/jobs jobs
date >jobs-image
debian-arvados-image:
@echo "Building debian-arvados-image"
- ./mkimage-debootstrap.sh arvados/debian wheezy ftp://ftp.us.debian.org/debian/
+ ./mkimage-debootstrap.sh arvados/debian wheezy http://ftp.us.debian.org/debian/
date >debian-arvados-image
skydns-image:
-FROM arvados/base
-MAINTAINER Brett Smith <brett@curoverse.com>
+# Based on Debian Wheezy
+FROM arvados/debian:wheezy
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+ENV DEBIAN_FRONTEND noninteractive
+
+ADD apt.arvados.org.list /etc/apt/sources.list.d/
+RUN apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7
+RUN apt-get update -q
+
+RUN apt-get install -qy git python-minimal python-virtualenv python-arvados-python-client
+
+RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3
# Install dependencies and set up system.
# The FUSE packages help ensure that we can install the Python SDK (arv-mount).
-RUN /usr/bin/apt-get install -q -y \
- python-dev python-llfuse python-pip python-virtualenv \
- libio-socket-ssl-perl libjson-perl liburi-perl libwww-perl dtrx \
- fuse libattr1-dev libfuse-dev && \
- /usr/sbin/adduser --disabled-password \
+RUN /usr/sbin/adduser --disabled-password \
--gecos 'Crunch execution user' crunch && \
- /usr/bin/install --directory --owner=crunch --group=crunch --mode=0700 /keep /tmp/crunch-src /tmp/crunch-job && \
- /bin/ln -s /usr/src/arvados /usr/local/src/arvados
-
-# Install Arvados packages.
-RUN (find /usr/src/arvados/sdk -name '*.gem' -print0 | \
- xargs -0rn 1 /usr/local/rvm/bin/rvm-exec default gem install) && \
- apt-get -qqy install python-arvados-fuse
+ /usr/bin/install --directory --owner=crunch --group=crunch --mode=0700 /keep /tmp/crunch-src /tmp/crunch-job
USER crunch
--- /dev/null
+# apt.arvados.org
+deb http://apt.arvados.org/ wheezy main
sudo tar --numeric-owner -c . | $docker import - $repo:$suite
# test the image
- $docker run -i -t $repo:$suite echo success
+ [[ "$(/usr/bin/tty || true)" != "not a tty" ]] && RUN_OPTS="-i -t"
+ $docker run $RUN_OPTS $repo:$suite echo success
if [ -z "$skipDetection" ]; then
case "$lsbDist" in
Debian)
if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then
# tag latest
- $docker tag $repo:$suite $repo:latest
+ $docker tag -f $repo:$suite $repo:latest
if [ -r etc/debian_version ]; then
# tag the specific debian release version (which is only reasonable to tag on debian stable)
ver=$(cat etc/debian_version)
- $docker tag $repo:$suite $repo:$ver
+ $docker tag -f $repo:$suite $repo:$ver
fi
fi
;;
Ubuntu)
if [ "$suite" = "$ubuntuLatestLTS" ]; then
# tag latest
- $docker tag $repo:$suite $repo:latest
+ $docker tag -f $repo:$suite $repo:latest
fi
if [ -r etc/lsb-release ]; then
lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")"
if [ "$lsbRelease" ]; then
# tag specific Ubuntu version number, if available (12.04, etc.)
- $docker tag $repo:$suite $repo:$lsbRelease
+ $docker tag -f $repo:$suite $repo:$lsbRelease
fi
fi
;;
my $job_api_token;
my $no_clear_tmp;
my $resume_stash;
-my $docker_bin = "/usr/bin/docker.io";
+my $docker_bin = "docker.io";
GetOptions('force-unlock' => \$force_unlock,
'git-dir=s' => \$git_dir,
'job=s' => \$jobspec,
}
else
{
- $Job = JSON::decode_json($jobspec);
- $local_job = 1;
+ $local_job = JSON::decode_json($jobspec);
}
# at least able to run basic commands: they aren't down or severely
# misconfigured.
my $cmd = ['true'];
-if ($Job->{docker_image_locator}) {
+if (($Job || $local_job)->{docker_image_locator}) {
$cmd = [$docker_bin, 'ps', '-q'];
}
Log(undef, "Sanity check is `@$cmd`");
{
if (!$resume_stash)
{
- map { croak ("No $_ specified") unless $Job->{$_} }
+ map { croak ("No $_ specified") unless $local_job->{$_} }
qw(script script_version script_parameters);
}
- $Job->{'is_locked_by_uuid'} = $User->{'uuid'};
- $Job->{'started_at'} = gmtime;
- $Job->{'state'} = 'Running';
+ $local_job->{'is_locked_by_uuid'} = $User->{'uuid'};
+ $local_job->{'started_at'} = gmtime;
+ $local_job->{'state'} = 'Running';
- $Job = api_call("jobs/create", job => $Job);
+ $Job = api_call("jobs/create", job => $local_job);
}
$job_id = $Job->{'uuid'};
# TODO: When #5036 is done and widely deployed, we can get rid of the
# regular expression and just unmount everything with type fuse.keep.
srun (["srun", "--nodelist=$nodelist", "-D", $ENV{'TMPDIR'}],
- ['bash', '-ec', 'mount -t fuse,fuse.keep | awk \'($3 ~ /\ykeep\y/){print $3}\' | xargs -r -n 1 fusermount -u -z; sleep 1; rm -rf $JOB_WORK $CRUNCH_INSTALL $CRUNCH_TMP/task $CRUNCH_TMP/src* $CRUNCH_TMP/*.cid']);
+ ['bash', '-ec', '-o', 'pipefail', 'mount -t fuse,fuse.keep | awk \'($3 ~ /\ykeep\y/){print $3}\' | xargs -r -n 1 fusermount -u -z; sleep 1; rm -rf $JOB_WORK $CRUNCH_INSTALL $CRUNCH_TMP/task $CRUNCH_TMP/src* $CRUNCH_TMP/*.cid']);
exit (1);
}
while (1)
freeze_if_want_freeze ($cleanpid);
select (undef, undef, undef, 0.1);
}
- Log (undef, "Cleanup command exited ".exit_status_s($?));
+ if ($?) {
+ Log(undef, "Clean work dirs: exit ".exit_status_s($?));
+ exit(EX_RETRY_UNLOCKED);
+ }
}
# If this job requires a Docker image, install that.
unless ($? == 0 && $sha1 =~ /^([0-9a-f]{40})$/) {
croak("`$gitcmd rev-list` exited "
.exit_status_s($?)
- .", '$treeish' not found. Giving up.");
+ .", '$treeish' not found, giving up");
}
$commit = $1;
Log(undef, "Version $treeish is commit $commit");
--- /dev/null
+#!/bin/sh
+echo >&2 Failing mount stub was called
+exit 1
--- /dev/null
+#!/bin/sh
+true
--- /dev/null
+#!/bin/sh
+exit 8
--- /dev/null
+#!/bin/sh
+exit 7
end
def test_small_collection
- skip "Waiting unitl #4534 is implemented"
-
uuid = Digest::MD5.hexdigest(foo_manifest) + '+' + foo_manifest.size.to_s
out, err = capture_subprocess_io do
assert_arv('--format', 'uuid', 'collection', 'create', '--collection', {
end
def test_file_to_dev_stdout
- skip "Waiting unitl #4534 is implemented"
-
test_file_to_stdout('/dev/stdout')
end
def test_file_to_stdout(specify_stdout_as='-')
- skip "Waiting unitl #4534 is implemented"
-
out, err = capture_subprocess_io do
assert_arv_get @@foo_manifest_locator + '/foo', specify_stdout_as
end
end
def test_file_to_file
- skip "Waiting unitl #4534 is implemented"
-
remove_tmp_foo
out, err = capture_subprocess_io do
assert_arv_get @@foo_manifest_locator + '/foo', 'tmp/foo'
end
def test_file_to_file_no_overwrite_file
- skip "Waiting unitl #4534 is implemented"
File.open './tmp/foo', 'wb' do |f|
f.write 'baz'
end
out, err = capture_subprocess_io do
assert_arv_get false, @@foo_manifest_locator + '/foo', 'tmp/foo'
end
- assert_match /Error:/, err
+ assert_match /Local file tmp\/foo already exists/, err
assert_equal '', out
assert_equal 'baz', IO.read('tmp/foo')
end
def test_file_to_file_no_overwrite_file_in_dir
- skip "Waiting unitl #4534 is implemented"
File.open './tmp/foo', 'wb' do |f|
f.write 'baz'
end
out, err = capture_subprocess_io do
assert_arv_get false, @@foo_manifest_locator + '/', 'tmp/'
end
- assert_match /Error:/, err
+ assert_match /Local file tmp\/foo already exists/, err
assert_equal '', out
assert_equal 'baz', IO.read('tmp/foo')
end
def test_file_to_file_force_overwrite
- skip "Waiting unitl #4534 is implemented"
-
File.open './tmp/foo', 'wb' do |f|
f.write 'baz'
end
end
def test_file_to_file_skip_existing
- skip "Waiting unitl #4534 is implemented"
-
File.open './tmp/foo', 'wb' do |f|
f.write 'baz'
end
end
def test_file_to_dir
- skip "Waiting unitl #4534 is implemented"
-
remove_tmp_foo
out, err = capture_subprocess_io do
assert_arv_get @@foo_manifest_locator + '/foo', 'tmp/'
end
def test_nonexistent_block
- skip "Waiting unitl #4534 is implemented"
-
out, err = capture_subprocess_io do
- assert_arv_get false, 'f1554a91e925d6213ce7c3103c5110c6'
+ assert_arv_get false, 'e796ab2294f3e48ec709ffa8d6daf58c'
end
assert_equal '', out
assert_match /Error:/, err
end
def test_nonexistent_manifest
- skip "Waiting unitl #4534 is implemented"
-
out, err = capture_subprocess_io do
- assert_arv_get false, 'f1554a91e925d6213ce7c3103c5110c6/', 'tmp/'
+ assert_arv_get false, 'acbd18db4cc2f85cedef654fccc4a4d8/', 'tmp/'
end
assert_equal '', out
assert_match /Error:/, err
end
def test_manifest_root_to_dir
- skip "Waiting unitl #4534 is implemented"
-
remove_tmp_foo
out, err = capture_subprocess_io do
assert_arv_get '-r', @@foo_manifest_locator + '/', 'tmp/'
end
def test_manifest_root_to_dir_noslash
- skip "Waiting unitl #4534 is implemented"
-
remove_tmp_foo
out, err = capture_subprocess_io do
assert_arv_get '-r', @@foo_manifest_locator + '/', 'tmp'
end
def test_display_md5sum
- skip "Waiting unitl #4534 is implemented"
-
remove_tmp_foo
out, err = capture_subprocess_io do
assert_arv_get '-r', '--md5sum', @@foo_manifest_locator + '/', 'tmp/'
end
def test_md5sum_nowrite
- skip "Waiting unitl #4534 is implemented"
-
remove_tmp_foo
out, err = capture_subprocess_io do
assert_arv_get '-n', '--md5sum', @@foo_manifest_locator + '/', 'tmp/'
end
def test_sha1_nowrite
- skip "Waiting unitl #4534 is implemented"
-
remove_tmp_foo
out, err = capture_subprocess_io do
assert_arv_get '-n', '-r', '--hash', 'sha1', @@foo_manifest_locator+'/', 'tmp/'
end
def test_block_to_file
- skip "Waiting unitl #4534 is implemented"
-
remove_tmp_foo
out, err = capture_subprocess_io do
assert_arv_get @@foo_manifest_locator, 'tmp/foo'
end
def test_create_directory_tree
- skip "Waiting unitl #4534 is implemented"
-
`rm -rf ./tmp/arv-get-test/`
Dir.mkdir './tmp/arv-get-test'
out, err = capture_subprocess_io do
end
def test_create_partial_directory_tree
- skip "Waiting unitl #4534 is implemented"
-
`rm -rf ./tmp/arv-get-test/`
Dir.mkdir './tmp/arv-get-test'
out, err = capture_subprocess_io do
end
def test_raw_stdin
- skip "Waiting unitl #4534 is implemented"
-
out, err = capture_subprocess_io do
r,w = IO.pipe
wpid = fork do
end
def test_raw_file
- skip "Waiting unitl #4534 is implemented"
-
out, err = capture_subprocess_io do
assert arv_put('--raw', './tmp/foo')
end
end
def test_raw_empty_file
- skip "Waiting unitl #4534 is implemented"
-
out, err = capture_subprocess_io do
assert arv_put('--raw', './tmp/empty_file')
end
end
def test_filename_arg_with_empty_file
- skip "Waiting unitl #4534 is implemented"
-
out, err = capture_subprocess_io do
assert arv_put('--filename', 'foo', './tmp/empty_file')
end
end
def test_as_stream
- skip "Waiting unitl #4534 is implemented"
-
out, err = capture_subprocess_io do
assert arv_put('--as-stream', './tmp/foo')
end
end
def test_progress
- skip "Waiting unitl #4534 is implemented"
-
out, err = capture_subprocess_io do
assert arv_put('--manifest', '--progress', './tmp/foo')
end
end
def test_batch_progress
- skip "Waiting unitl #4534 is implemented"
-
out, err = capture_subprocess_io do
assert arv_put('--manifest', '--batch-progress', './tmp/foo')
end
end
def test_read_from_implicit_stdin
- skip "Waiting unitl #4534 is implemented"
-
test_read_from_stdin(specify_stdin_as='--manifest')
end
def test_read_from_dev_stdin
- skip "Waiting unitl #4534 is implemented"
-
test_read_from_stdin(specify_stdin_as='/dev/stdin')
end
def test_read_from_stdin(specify_stdin_as='-')
- skip "Waiting unitl #4534 is implemented"
-
out, err = capture_subprocess_io do
r,w = IO.pipe
wpid = fork do
end
def test_read_from_implicit_stdin_implicit_manifest
- skip "Waiting unitl #4534 is implemented"
-
test_read_from_stdin_implicit_manifest(specify_stdin_as=nil,
expect_filename='stdin')
end
def test_read_from_dev_stdin_implicit_manifest
- skip "Waiting unitl #4534 is implemented"
-
test_read_from_stdin_implicit_manifest(specify_stdin_as='/dev/stdin')
end
def test_read_from_stdin_implicit_manifest(specify_stdin_as='-',
expect_filename=nil)
- skip "Waiting unitl #4534 is implemented"
-
expect_filename = expect_filename || specify_stdin_as.split('/').last
out, err = capture_subprocess_io do
r,w = IO.pipe
end
def test_run_pipeline_instance_get_help
- skip "Waiting unitl #4534 is implemented"
-
out, err = capture_subprocess_io do
system ('arv-run-pipeline-instance -h')
end
class TestArvTag < Minitest::Test
def test_no_args
- skip "Waiting unitl #4534 is implemented"
+ skip "Waiting until #4534 is implemented"
# arv-tag exits with failure if run with no args
out, err = capture_subprocess_io do
--- /dev/null
+require 'minitest/autorun'
+
+class TestCrunchJob < Minitest::Test
+ SPECIAL_EXIT = {
+ EX_RETRY_UNLOCKED: 93,
+ EX_TEMPFAIL: 75,
+ }
+
+ JOBSPEC = {
+ grep_local: {
+ script: 'grep',
+ script_version: 'master',
+ repository: File.absolute_path('../../../..', __FILE__),
+ script_parameters: {foo: 'bar'},
+ },
+ }
+
+ def setup
+ end
+
+ def crunchjob
+ File.absolute_path '../../bin/crunch-job', __FILE__
+ end
+
+ # Return environment suitable for running crunch-job.
+ def crunchenv opts={}
+ env = ENV.to_h
+ env['CRUNCH_REFRESH_TRIGGER'] =
+ File.absolute_path('../../../../tmp/crunch-refresh-trigger', __FILE__)
+ env
+ end
+
+ def jobspec label
+ JOBSPEC[label].dup
+ end
+
+ # Encode job record to json and run it with crunch-job.
+ #
+ # opts[:binstubs] is an array of X where ./binstub_X is added to
+ # PATH in order to mock system programs.
+ def tryjobrecord jobrecord, opts={}
+ env = crunchenv
+ (opts[:binstubs] || []).each do |binstub|
+ env['PATH'] = File.absolute_path('../binstub_'+binstub, __FILE__) + ':' + env['PATH']
+ end
+ system env, crunchjob, '--job', jobrecord.to_json
+ end
+
+ def test_bogus_json
+ out, err = capture_subprocess_io do
+ system crunchenv, crunchjob, '--job', '"}{"'
+ end
+ assert_equal false, $?.success?
+ # Must not conflict with our special exit statuses
+ assert_jobfail $?
+ assert_match /JSON/, err
+ end
+
+ def test_fail_sanity_check
+ out, err = capture_subprocess_io do
+ j = {}
+ tryjobrecord j, binstubs: ['sanity_check']
+ end
+ assert_equal 75, $?.exitstatus
+ assert_match /Sanity check failed: 7/, err
+ end
+
+ def test_fail_docker_sanity_check
+ out, err = capture_subprocess_io do
+ j = {}
+ j[:docker_image_locator] = '4d449b9d34f2e2222747ef79c53fa3ff+1234'
+ tryjobrecord j, binstubs: ['sanity_check']
+ end
+ assert_equal 75, $?.exitstatus
+ assert_match /Sanity check failed: 8/, err
+ end
+
+ def test_no_script_specified
+ out, err = capture_subprocess_io do
+ j = jobspec :grep_local
+ j.delete :script
+ tryjobrecord j
+ end
+ assert_match /No script specified/, err
+ assert_jobfail $?
+ end
+
+ def test_fail_clean_tmp
+ out, err = capture_subprocess_io do
+ j = jobspec :grep_local
+ tryjobrecord j, binstubs: ['clean_fail']
+ end
+ assert_match /Failing mount stub was called/, err
+ assert_match /Clean work dirs: exit 1\n$/, err
+ assert_equal SPECIAL_EXIT[:EX_RETRY_UNLOCKED], $?.exitstatus
+ end
+
+ def test_docker_image_missing
+ skip 'API bug: it refuses to create this job in Running state'
+ out, err = capture_subprocess_io do
+ j = jobspec :grep_local
+ j[:docker_image_locator] = '4d449b9d34f2e2222747ef79c53fa3ff+1234'
+ tryjobrecord j, binstubs: ['docker_noop']
+ end
+ assert_match /No Docker image hash found from locator/, err
+ assert_jobfail $?
+ end
+
+ def test_script_version_not_found_in_repository
+ bogus_version = 'f8b72707c1f5f740dbf1ed56eb429a36e0dee770'
+ out, err = capture_subprocess_io do
+ j = jobspec :grep_local
+ j[:script_version] = bogus_version
+ tryjobrecord j
+ end
+ assert_match /'#{bogus_version}' not found, giving up/, err
+ assert_jobfail $?
+ end
+
+ # Ensure procstatus is not interpreted as a temporary infrastructure
+ # problem. Would be assert_http_4xx if this were http.
+ def assert_jobfail procstatus
+ refute_includes SPECIAL_EXIT.values, procstatus.exitstatus
+ assert_equal false, procstatus.success?
+ end
+end
--- /dev/null
+../python/.gitignore
\ No newline at end of file
--- /dev/null
+Arvados Common Workflow Language (CWL) runner.
--- /dev/null
+#!/usr/bin/env python
+
+import argparse
+import arvados
+import arvados.events
+import arvados.commands.keepdocker
+import arvados.commands.run
+import cwltool.draft2tool
+import cwltool.workflow
+import cwltool.main
+import threading
+import cwltool.docker
+import fnmatch
+import logging
+import re
+import os
+from cwltool.process import get_feature
+
+logger = logging.getLogger('arvados.cwl-runner')
+logger.setLevel(logging.INFO)
+
+def arv_docker_get_image(api_client, dockerRequirement, pull_image):
+ if "dockerImageId" not in dockerRequirement and "dockerPull" in dockerRequirement:
+ dockerRequirement["dockerImageId"] = dockerRequirement["dockerPull"]
+
+ sp = dockerRequirement["dockerImageId"].split(":")
+ image_name = sp[0]
+ image_tag = sp[1] if len(sp) > 1 else None
+
+ images = arvados.commands.keepdocker.list_images_in_arv(api_client, 3,
+ image_name=image_name,
+ image_tag=image_tag)
+
+ if not images:
+ imageId = cwltool.docker.get_image(dockerRequirement, pull_image)
+ args = [image_name]
+ if image_tag:
+ args.append(image_tag)
+ arvados.commands.keepdocker.main(args)
+
+ return dockerRequirement["dockerImageId"]
+
+class CollectionFsAccess(cwltool.draft2tool.StdFsAccess):
+ def __init__(self, basedir):
+ self.collections = {}
+ self.basedir = basedir
+
+ def get_collection(self, path):
+ p = path.split("/")
+ if arvados.util.keep_locator_pattern.match(p[0]):
+ if p[0] not in self.collections:
+ self.collections[p[0]] = arvados.collection.CollectionReader(p[0])
+ return (self.collections[p[0]], "/".join(p[1:]))
+ else:
+ return (None, path)
+
+ def _match(self, collection, patternsegments, parent):
+ ret = []
+ for filename in collection:
+ if fnmatch.fnmatch(filename, patternsegments[0]):
+ cur = os.path.join(parent, filename)
+ if len(patternsegments) == 1:
+ ret.append(cur)
+ else:
+ ret.extend(self._match(collection[filename], patternsegments[1:], cur))
+ return ret
+
+ def glob(self, pattern):
+ collection, rest = self.get_collection(pattern)
+ patternsegments = rest.split("/")
+ return self._match(collection, patternsegments, collection.manifest_locator())
+
+ def open(self, fn, mode):
+ collection, rest = self.get_collection(fn)
+ if collection:
+ return collection.open(rest, mode)
+ else:
+ return open(self._abs(fn), mode)
+
+ def exists(self, fn):
+ collection, rest = self.get_collection(fn)
+ if collection:
+ return collection.exists(rest)
+ else:
+ return os.path.exists(self._abs(fn))
+
+class ArvadosJob(object):
+ def __init__(self, runner):
+ self.arvrunner = runner
+ self.running = False
+
+ def run(self, dry_run=False, pull_image=True, **kwargs):
+ script_parameters = {
+ "command": self.command_line
+ }
+ runtime_constraints = {}
+
+ if self.generatefiles:
+ vwd = arvados.collection.Collection()
+ for t in self.generatefiles:
+ if isinstance(self.generatefiles[t], dict):
+ src, rest = self.arvrunner.fs_access.get_collection(self.generatefiles[t]["path"][6:])
+ vwd.copy(rest, t, source_collection=src)
+ else:
+ with vwd.open(t, "w") as f:
+ f.write(self.generatefiles[t])
+ vwd.save_new()
+ script_parameters["task.vwd"] = vwd.portable_data_hash()
+
+ script_parameters["task.env"] = {"TMPDIR": "$(task.tmpdir)"}
+ if self.environment:
+ script_parameters["task.env"].update(self.environment)
+
+ if self.stdin:
+ script_parameters["task.stdin"] = self.pathmapper.mapper(self.stdin)[1]
+
+ if self.stdout:
+ script_parameters["task.stdout"] = self.stdout
+
+ (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
+ if docker_req and kwargs.get("use_container") is not False:
+ runtime_constraints["docker_image"] = arv_docker_get_image(self.arvrunner.api, docker_req, pull_image)
+ runtime_constraints["arvados_sdk_version"] = "master"
+
+ response = self.arvrunner.api.jobs().create(body={
+ "script": "run-command",
+ "repository": "arvados",
+ "script_version": "master",
+ "script_parameters": script_parameters,
+ "runtime_constraints": runtime_constraints
+ }, find_or_create=kwargs.get("enable_reuse", True)).execute()
+
+ self.arvrunner.jobs[response["uuid"]] = self
+
+ logger.info("Job %s is %s", response["uuid"], response["state"])
+
+ if response["state"] in ("Complete", "Failed", "Cancelled"):
+ self.done(response)
+
+ def done(self, record):
+ try:
+ if record["state"] == "Complete":
+ processStatus = "success"
+ else:
+ processStatus = "permanentFail"
+
+ try:
+ outputs = {}
+ outputs = self.collect_outputs(record["output"])
+ except Exception as e:
+ logger.warn(str(e))
+ processStatus = "permanentFail"
+
+ self.output_callback(outputs, processStatus)
+ finally:
+ del self.arvrunner.jobs[record["uuid"]]
+
+class ArvPathMapper(cwltool.pathmapper.PathMapper):
+ def __init__(self, arvrunner, referenced_files, basedir, **kwargs):
+ self._pathmap = {}
+ uploadfiles = []
+
+ pdh_path = re.compile(r'^[0-9a-f]{32}\+\d+/.+')
+
+ for src in referenced_files:
+ if isinstance(src, basestring) and pdh_path.match(src):
+ self._pathmap[src] = (src, "/keep/%s" % src)
+ else:
+ ab = src if os.path.isabs(src) else os.path.join(basedir, src)
+ st = arvados.commands.run.statfile("", ab)
+ if kwargs.get("conformance_test"):
+ self._pathmap[src] = (src, ab)
+ elif isinstance(st, arvados.commands.run.UploadFile):
+ uploadfiles.append((src, ab, st))
+ elif isinstance(st, arvados.commands.run.ArvFile):
+ self._pathmap[src] = (ab, st.fn)
+ else:
+ raise cwltool.workflow.WorkflowException("Input file path '%s' is invalid" % st)
+
+ if uploadfiles:
+ arvados.commands.run.uploadfiles([u[2] for u in uploadfiles], arvrunner.api, dry_run=kwargs.get("dry_run"), num_retries=3)
+
+ for src, ab, st in uploadfiles:
+ self._pathmap[src] = (ab, st.fn)
+
+
+
+class ArvadosCommandTool(cwltool.draft2tool.CommandLineTool):
+ def __init__(self, arvrunner, toolpath_object, **kwargs):
+ super(ArvadosCommandTool, self).__init__(toolpath_object, **kwargs)
+ self.arvrunner = arvrunner
+
+ def makeJobRunner(self):
+ return ArvadosJob(self.arvrunner)
+
+ def makePathMapper(self, reffiles, input_basedir, **kwargs):
+ return ArvPathMapper(self.arvrunner, reffiles, input_basedir, **kwargs)
+
+
+class ArvCwlRunner(object):
+ def __init__(self, api_client):
+ self.api = api_client
+ self.jobs = {}
+ self.lock = threading.Lock()
+ self.cond = threading.Condition(self.lock)
+ self.final_output = None
+
+ def arvMakeTool(self, toolpath_object, **kwargs):
+ if "class" in toolpath_object and toolpath_object["class"] == "CommandLineTool":
+ return ArvadosCommandTool(self, toolpath_object, **kwargs)
+ else:
+ return cwltool.workflow.defaultMakeTool(toolpath_object, **kwargs)
+
+ def output_callback(self, out, processStatus):
+ if processStatus == "success":
+ logger.info("Overall job status is %s", processStatus)
+ else:
+ logger.warn("Overall job status is %s", processStatus)
+ self.final_output = out
+
+ def on_message(self, event):
+ if "object_uuid" in event:
+ if event["object_uuid"] in self.jobs and event["event_type"] == "update":
+ if event["properties"]["new_attributes"]["state"] == "Running" and self.jobs[event["object_uuid"]].running is False:
+ logger.info("Job %s is Running", event["object_uuid"])
+ with self.lock:
+ self.jobs[event["object_uuid"]].running = True
+ elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled"):
+ logger.info("Job %s is %s", event["object_uuid"], event["properties"]["new_attributes"]["state"])
+ try:
+ self.cond.acquire()
+ self.jobs[event["object_uuid"]].done(event["properties"]["new_attributes"])
+ self.cond.notify()
+ finally:
+ self.cond.release()
+
+ def arvExecutor(self, tool, job_order, input_basedir, args, **kwargs):
+ events = arvados.events.subscribe(arvados.api('v1'), [["object_uuid", "is_a", "arvados#job"]], self.on_message)
+
+ self.fs_access = CollectionFsAccess(input_basedir)
+
+ kwargs["fs_access"] = self.fs_access
+ kwargs["enable_reuse"] = args.enable_reuse
+
+ if kwargs.get("conformance_test"):
+ return cwltool.main.single_job_executor(tool, job_order, input_basedir, args, **kwargs)
+ else:
+ jobiter = tool.job(job_order,
+ input_basedir,
+ self.output_callback,
+ **kwargs)
+
+ for runnable in jobiter:
+ if runnable:
+ with self.lock:
+ runnable.run(**kwargs)
+ else:
+ if self.jobs:
+ try:
+ self.cond.acquire()
+ self.cond.wait()
+ finally:
+ self.cond.release()
+ else:
+ logger.error("Workflow cannot make any more progress.")
+ break
+
+ while self.jobs:
+ try:
+ self.cond.acquire()
+ self.cond.wait()
+ finally:
+ self.cond.release()
+
+ events.close()
+
+ if self.final_output is None:
+ raise cwltool.workflow.WorkflowException("Workflow did not return a result.")
+
+ return self.final_output
+
+
+def main(args, stdout, stderr, api_client=None):
+ runner = ArvCwlRunner(api_client=arvados.api('v1'))
+ args.append("--leave-outputs")
+ parser = cwltool.main.arg_parser()
+ exgroup = parser.add_mutually_exclusive_group()
+ exgroup.add_argument("--enable-reuse", action="store_true",
+ default=False, dest="enable_reuse",
+ help="")
+ exgroup.add_argument("--disable-reuse", action="store_false",
+ default=False, dest="enable_reuse",
+ help="")
+
+ return cwltool.main.main(args, executor=runner.arvExecutor, makeTool=runner.arvMakeTool, parser=parser)
--- /dev/null
+#!/usr/bin/env python
+
+import sys
+
+from arvados_cwl import main
+
+sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))
--- /dev/null
+../python/gittaggers.py
\ No newline at end of file
--- /dev/null
+#!/usr/bin/env python
+
+import os
+import sys
+import setuptools.command.egg_info as egg_info_cmd
+
+from setuptools import setup, find_packages
+
+SETUP_DIR = os.path.dirname(__file__) or '.'
+README = os.path.join(SETUP_DIR, 'README.rst')
+
+try:
+ import gittaggers
+ tagger = gittaggers.EggInfoFromGit
+except ImportError:
+ tagger = egg_info_cmd.egg_info
+
+setup(name='arvados-cwl-runner',
+ version='1.0',
+ description='Arvados Common Workflow Language runner',
+ long_description=open(README).read(),
+ author='Arvados',
+ author_email='info@arvados.org',
+ url="https://arvados.org",
+ download_url="https://github.com/curoverse/arvados.git",
+ license='Apache 2.0',
+ packages=find_packages(),
+ scripts=[
+ 'bin/cwl-runner'
+ ],
+ install_requires=[
+ 'cwltool',
+ 'arvados-python-client'
+ ],
+ zip_safe=True,
+ cmdclass={'egg_info': tagger},
+ )
//
// parameter - name of parameter to be discovered
// return
-// valueMap - Dict key value pair of the discovered parameter
+// value - value of the discovered parameter
// err - error accessing the resource, or nil if no error
var API_DISCOVERY_RESOURCE string = "discovery/v1/apis/arvados/v1/rest"
-/* Stores a Block Locator Digest compactly. Can be used as a map key. */
-
+// Stores a Block Locator Digest compactly. Can be used as a map key.
package blockdigest
import (
"fmt"
"log"
+ "regexp"
"strconv"
+ "strings"
)
+var LocatorPattern = regexp.MustCompile(
+ "^[0-9a-fA-F]{32}\\+[0-9]+(\\+[A-Z][A-Za-z0-9@_-]+)*$")
+
// Stores a Block Locator Digest compactly, up to 128 bits.
// Can be used as a map key.
type BlockDigest struct {
- h uint64
- l uint64
+ H uint64
+ L uint64
+}
+
+type DigestWithSize struct {
+ Digest BlockDigest
+ Size uint32
+}
+
+type BlockLocator struct {
+ Digest BlockDigest
+ Size int
+ Hints []string
}
func (d BlockDigest) String() string {
- return fmt.Sprintf("%016x%016x", d.h, d.l)
+ return fmt.Sprintf("%016x%016x", d.H, d.L)
+}
+
+func (w DigestWithSize) String() string {
+ return fmt.Sprintf("%s+%d", w.Digest.String(), w.Size)
}
// Will create a new BlockDigest unless an error is encountered.
}
var d BlockDigest
- d.h, err = strconv.ParseUint(s[:16], 16, 64)
+ d.H, err = strconv.ParseUint(s[:16], 16, 64)
if err != nil {
return
}
- d.l, err = strconv.ParseUint(s[16:], 16, 64)
+ d.L, err = strconv.ParseUint(s[16:], 16, 64)
if err != nil {
return
}
}
return d
}
+
+func IsBlockLocator(s string) bool {
+ return LocatorPattern.MatchString(s)
+}
+
+func ParseBlockLocator(s string) (b BlockLocator, err error) {
+ if !LocatorPattern.MatchString(s) {
+ err = fmt.Errorf("String \"%s\" does not match BlockLocator pattern "+
+ "\"%s\".",
+ s,
+ LocatorPattern.String())
+ } else {
+ tokens := strings.Split(s, "+")
+ var blockSize int64
+ var blockDigest BlockDigest
+ // We expect both of the following to succeed since LocatorPattern
+ // restricts the strings appropriately.
+ blockDigest, err = FromString(tokens[0])
+ if err != nil {
+ return
+ }
+ blockSize, err = strconv.ParseInt(tokens[1], 10, 0)
+ if err != nil {
+ return
+ }
+ b.Digest = blockDigest
+ b.Size = int(blockSize)
+ b.Hints = tokens[2:]
+ }
+ return
+}
import (
"fmt"
+ "runtime"
"strings"
"testing"
)
+func getStackTrace() string {
+ buf := make([]byte, 1000)
+ bytes_written := runtime.Stack(buf, false)
+ return "Stack Trace:\n" + string(buf[:bytes_written])
+}
+
+func expectEqual(t *testing.T, actual interface{}, expected interface{}) {
+ if actual != expected {
+ t.Fatalf("Expected %v but received %v instead. %s",
+ expected,
+ actual,
+ getStackTrace())
+ }
+}
+
+func expectStringSlicesEqual(t *testing.T, actual []string, expected []string) {
+ if len(actual) != len(expected) {
+ t.Fatalf("Expected %v (length %d), but received %v (length %d) instead. %s", expected, len(expected), actual, len(actual), getStackTrace())
+ }
+ for i := range actual {
+ if actual[i] != expected[i] {
+ t.Fatalf("Expected %v but received %v instead (first disagreement at position %d). %s", expected, actual, i, getStackTrace())
+ }
+ }
+}
+
func expectValidDigestString(t *testing.T, s string) {
bd, err := FromString(s)
if err != nil {
}
expected := strings.ToLower(s)
-
+
if expected != bd.String() {
t.Fatalf("Expected %s to be returned by FromString(%s).String() but instead we received %s", expected, s, bd.String())
}
}
}
+func expectBlockLocator(t *testing.T, actual BlockLocator, expected BlockLocator) {
+ expectEqual(t, actual.Digest, expected.Digest)
+ expectEqual(t, actual.Size, expected.Size)
+ expectStringSlicesEqual(t, actual.Hints, expected.Hints)
+}
+
+func expectLocatorPatternMatch(t *testing.T, s string) {
+ if !LocatorPattern.MatchString(s) {
+ t.Fatalf("Expected \"%s\" to match locator pattern but it did not.",
+ s)
+ }
+}
+
+func expectLocatorPatternFail(t *testing.T, s string) {
+ if LocatorPattern.MatchString(s) {
+ t.Fatalf("Expected \"%s\" to fail locator pattern but it passed.",
+ s)
+ }
+}
+
func TestValidDigestStrings(t *testing.T) {
expectValidDigestString(t, "01234567890123456789abcdefabcdef")
expectValidDigestString(t, "01234567890123456789ABCDEFABCDEF")
input := "01234567890123456789abcdefabcdef"
prettyPrinted := fmt.Sprintf("%v", AssertFromString(input))
if prettyPrinted != input {
- t.Fatalf("Expected blockDigest produced from \"%s\" to be printed as " +
+ t.Fatalf("Expected blockDigest produced from \"%s\" to be printed as "+
"\"%s\", but instead it was printed as %s",
input, input, prettyPrinted)
}
func TestBlockDigestGetsPrettyPrintedByPrintfInNestedStructs(t *testing.T) {
input := "01234567890123456789abcdefabcdef"
value := 42
- nested := struct{
+ nested := struct {
// Fun trivia fact: If this field was called "digest" instead of
// "Digest", then it would not be exported and String() would
// never get called on it and our output would look very
// different.
Digest BlockDigest
- value int
+ value int
}{
AssertFromString(input),
value,
prettyPrinted := fmt.Sprintf("%+v", nested)
expected := fmt.Sprintf("{Digest:%s value:%d}", input, value)
if prettyPrinted != expected {
- t.Fatalf("Expected blockDigest produced from \"%s\" to be printed as " +
+ t.Fatalf("Expected blockDigest produced from \"%s\" to be printed as "+
"\"%s\", but instead it was printed as %s",
input, expected, prettyPrinted)
}
}
+
+func TestLocatorPatternBasic(t *testing.T) {
+ expectLocatorPatternMatch(t, "12345678901234567890123456789012+12345")
+ expectLocatorPatternMatch(t, "A2345678901234abcdefababdeffdfdf+12345")
+ expectLocatorPatternMatch(t, "12345678901234567890123456789012+12345+A1")
+ expectLocatorPatternMatch(t,
+ "12345678901234567890123456789012+12345+A1+B123wxyz@_-")
+ expectLocatorPatternMatch(t,
+ "12345678901234567890123456789012+12345+A1+B123wxyz@_-+C@")
+
+ expectLocatorPatternFail(t, "12345678901234567890123456789012")
+ expectLocatorPatternFail(t, "12345678901234567890123456789012+")
+ expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+")
+ expectLocatorPatternFail(t, "1234567890123456789012345678901+12345")
+ expectLocatorPatternFail(t, "123456789012345678901234567890123+12345")
+ expectLocatorPatternFail(t, "g2345678901234abcdefababdeffdfdf+12345")
+ expectLocatorPatternFail(t, "12345678901234567890123456789012+12345 ")
+ expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+1")
+ expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+1A")
+ expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+A")
+ expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+a1")
+ expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+A1+")
+ expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+A1+B")
+ expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+A+B2")
+}
+
+func TestParseBlockLocatorSimple(t *testing.T) {
+ b, err := ParseBlockLocator("365f83f5f808896ec834c8b595288735+2310+K@qr1hi+Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf")
+ if err != nil {
+ t.Fatalf("Unexpected error parsing block locator: %v", err)
+ }
+ expectBlockLocator(t, b, BlockLocator{Digest: AssertFromString("365f83f5f808896ec834c8b595288735"),
+ Size: 2310,
+ Hints: []string{"K@qr1hi",
+ "Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf"}})
+}
--- /dev/null
+// Code used for testing only.
+
+package blockdigest
+
+// Just used for testing when we need some distinct BlockDigests
+func MakeTestBlockDigest(i int) BlockDigest {
+ return BlockDigest{L: uint64(i)}
+}
+
+func MakeTestDigestSpecifySize(i int, s int) DigestWithSize {
+ return DigestWithSize{Digest: BlockDigest{L: uint64(i)}, Size: uint32(s)}
+}
+
+func MakeTestDigestWithSize(i int) DigestWithSize {
+ return MakeTestDigestSpecifySize(i, i)
+}
// entry map[string]interface{}) {
// // Modifiy properties and entry however you want
// // properties is a shortcut for entry["properties"].(map[string]interface{})
-// // properties can take any values you want to give it,
-// // entry will only take the fields listed at http://doc.arvados.org/api/schema/Log.html
+// // properties can take any (valid) values you want to give it,
+// // entry will only take the fields listed at
+// // http://doc.arvados.org/api/schema/Log.html
+// // Valid values for properties are anything that can be json
+// // encoded (i.e. will not error if you call json.Marshal() on it).
// })
package logger
+++ /dev/null
-// This binary tests the logger package.
-// It's not a standard unit test. Instead it writes to the actual log
-// and you have to clean up after it.
-
-package main
-
-import (
- "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
- "git.curoverse.com/arvados.git/sdk/go/logger"
- "log"
-)
-
-func main() {
- arv, err := arvadosclient.MakeArvadosClient()
- if err != nil {
- log.Fatalf("Error setting up arvados client %v", err)
- }
-
- l := logger.NewLogger(logger.LoggerParams{Client: arv,
- EventType: "experimental-logger-testing",
- // No minimum write interval
- })
-
- {
- properties, _ := l.Edit()
- properties["Ninja"] = "Misha"
- }
- l.Record()
-}
--- /dev/null
+// Helper methods for interacting with Logger.
+package logger
+
+// Retrieves the map[string]interface{} stored at parent[key] if it
+// exists, otherwise it makes it and stores it there.
+// This is useful for logger because you may not know if a map you
+// need has already been created.
+func GetOrCreateMap(
+ parent map[string]interface{},
+ key string) (child map[string]interface{}) {
+ read, exists := parent[key]
+ if exists {
+ child = read.(map[string]interface{})
+
+ } else {
+ child = make(map[string]interface{})
+ parent[key] = child
+ }
+ return
+}
package manifest
import (
- "fmt"
"git.curoverse.com/arvados.git/sdk/go/blockdigest"
"log"
- "regexp"
- "strconv"
"strings"
)
-var LocatorPattern = regexp.MustCompile(
- "^[0-9a-fA-F]{32}\\+[0-9]+(\\+[A-Z][A-Za-z0-9@_-]+)*$")
-
type Manifest struct {
Text string
}
-type BlockLocator struct {
- Digest blockdigest.BlockDigest
- Size int
- Hints []string
-}
-
// Represents a single line from a manifest.
type ManifestStream struct {
StreamName string
Files []string
}
-func ParseBlockLocator(s string) (b BlockLocator, err error) {
- if !LocatorPattern.MatchString(s) {
- err = fmt.Errorf("String \"%s\" does not match BlockLocator pattern "+
- "\"%s\".",
- s,
- LocatorPattern.String())
- } else {
- tokens := strings.Split(s, "+")
- var blockSize int64
- var blockDigest blockdigest.BlockDigest
- // We expect both of the following to succeed since LocatorPattern
- // restricts the strings appropriately.
- blockDigest, err = blockdigest.FromString(tokens[0])
- if err != nil {
- return
- }
- blockSize, err = strconv.ParseInt(tokens[1], 10, 0)
- if err != nil {
- return
- }
- b.Digest = blockDigest
- b.Size = int(blockSize)
- b.Hints = tokens[2:]
- }
- return
-}
-
func parseManifestStream(s string) (m ManifestStream) {
tokens := strings.Split(s, " ")
m.StreamName = tokens[0]
tokens = tokens[1:]
var i int
for i = range tokens {
- if !LocatorPattern.MatchString(tokens[i]) {
+ if !blockdigest.IsBlockLocator(tokens[i]) {
break
}
}
// Blocks may appear mulitple times within the same manifest if they
// are used by multiple files. In that case this Iterator will output
// the same block multiple times.
-func (m *Manifest) BlockIterWithDuplicates() <-chan BlockLocator {
- blockChannel := make(chan BlockLocator)
+func (m *Manifest) BlockIterWithDuplicates() <-chan blockdigest.BlockLocator {
+ blockChannel := make(chan blockdigest.BlockLocator)
go func(streamChannel <-chan ManifestStream) {
for m := range streamChannel {
for _, block := range m.Blocks {
- if b, err := ParseBlockLocator(block); err == nil {
+ if b, err := blockdigest.ParseBlockLocator(block); err == nil {
blockChannel <- b
} else {
log.Printf("ERROR: Failed to parse block: %v", err)
"testing"
)
-func getStackTrace() (string) {
+func getStackTrace() string {
buf := make([]byte, 1000)
bytes_written := runtime.Stack(buf, false)
return "Stack Trace:\n" + string(buf[:bytes_written])
}
func expectFromChannel(t *testing.T, c <-chan string, expected string) {
- actual, ok := <- c
+ actual, ok := <-c
if !ok {
t.Fatalf("Expected to receive %s but channel was closed. %s",
expected,
}
func expectChannelClosed(t *testing.T, c <-chan interface{}) {
- received, ok := <- c
+ received, ok := <-c
if ok {
t.Fatalf("Expected channel to be closed, but received %v instead. %s",
received,
expectStringSlicesEqual(t, actual.Files, expected.Files)
}
-func expectBlockLocator(t *testing.T, actual BlockLocator, expected BlockLocator) {
+func expectBlockLocator(t *testing.T, actual blockdigest.BlockLocator, expected blockdigest.BlockLocator) {
expectEqual(t, actual.Digest, expected.Digest)
expectEqual(t, actual.Size, expected.Size)
expectStringSlicesEqual(t, actual.Hints, expected.Hints)
}
-func expectLocatorPatternMatch(t *testing.T, s string) {
- if !LocatorPattern.MatchString(s) {
- t.Fatalf("Expected \"%s\" to match locator pattern but it did not.",
- s)
- }
-}
-
-func expectLocatorPatternFail(t *testing.T, s string) {
- if LocatorPattern.MatchString(s) {
- t.Fatalf("Expected \"%s\" to fail locator pattern but it passed.",
- s)
- }
-}
-
-func TestLocatorPatternBasic(t *testing.T) {
- expectLocatorPatternMatch(t, "12345678901234567890123456789012+12345")
- expectLocatorPatternMatch(t, "A2345678901234abcdefababdeffdfdf+12345")
- expectLocatorPatternMatch(t, "12345678901234567890123456789012+12345+A1")
- expectLocatorPatternMatch(t,
- "12345678901234567890123456789012+12345+A1+B123wxyz@_-")
- expectLocatorPatternMatch(t,
- "12345678901234567890123456789012+12345+A1+B123wxyz@_-+C@")
-
- expectLocatorPatternFail(t, "12345678901234567890123456789012")
- expectLocatorPatternFail(t, "12345678901234567890123456789012+")
- expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+")
- expectLocatorPatternFail(t, "1234567890123456789012345678901+12345")
- expectLocatorPatternFail(t, "123456789012345678901234567890123+12345")
- expectLocatorPatternFail(t, "g2345678901234abcdefababdeffdfdf+12345")
- expectLocatorPatternFail(t, "12345678901234567890123456789012+12345 ")
- expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+1")
- expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+1A")
- expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+A")
- expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+a1")
- expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+A1+")
- expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+A1+B")
- expectLocatorPatternFail(t, "12345678901234567890123456789012+12345+A+B2")
-}
-
func TestParseManifestStreamSimple(t *testing.T) {
m := parseManifestStream(". 365f83f5f808896ec834c8b595288735+2310+K@qr1hi+Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf 0:2310:qr1hi-8i9sb-ienvmpve1a0vpoi.log.txt")
expectManifestStream(t, m, ManifestStream{StreamName: ".",
Blocks: []string{"365f83f5f808896ec834c8b595288735+2310+K@qr1hi+Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf"},
- Files: []string{"0:2310:qr1hi-8i9sb-ienvmpve1a0vpoi.log.txt"}})
-}
-
-func TestParseBlockLocatorSimple(t *testing.T) {
- b, err := ParseBlockLocator("365f83f5f808896ec834c8b595288735+2310+K@qr1hi+Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf")
- if err != nil {
- t.Fatalf("Unexpected error parsing block locator: %v", err)
- }
- expectBlockLocator(t, b, BlockLocator{Digest: blockdigest.AssertFromString("365f83f5f808896ec834c8b595288735"),
- Size: 2310,
- Hints: []string{"K@qr1hi",
- "Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf"}})
+ Files: []string{"0:2310:qr1hi-8i9sb-ienvmpve1a0vpoi.log.txt"}})
}
func TestStreamIterShortManifestWithBlankStreams(t *testing.T) {
firstStream,
ManifestStream{StreamName: ".",
Blocks: []string{"b746e3d2104645f2f64cd3cc69dd895d+15693477+E2866e643690156651c03d876e638e674dcd79475@5441920c"},
- Files: []string{"0:15893477:chr10_band0_s0_e3000000.fj"}})
+ Files: []string{"0:15893477:chr10_band0_s0_e3000000.fj"}})
- received, ok := <- streamIter
+ received, ok := <-streamIter
if ok {
t.Fatalf("Expected streamIter to be closed, but received %v instead.",
received)
firstBlock := <-blockChannel
expectBlockLocator(t,
firstBlock,
- BlockLocator{Digest: blockdigest.AssertFromString("b746e3d2104645f2f64cd3cc69dd895d"),
- Size: 15693477,
+ blockdigest.BlockLocator{Digest: blockdigest.AssertFromString("b746e3d2104645f2f64cd3cc69dd895d"),
+ Size: 15693477,
Hints: []string{"E2866e643690156651c03d876e638e674dcd79475@5441920c"}})
blocksRead := 1
- var lastBlock BlockLocator
+ var lastBlock blockdigest.BlockLocator
for lastBlock = range blockChannel {
//log.Printf("Blocks Read: %d", blocksRead)
- blocksRead++
+ blocksRead++
}
expectEqual(t, blocksRead, 853)
expectBlockLocator(t,
lastBlock,
- BlockLocator{Digest: blockdigest.AssertFromString("f9ce82f59e5908d2d70e18df9679b469"),
- Size: 31367794,
+ blockdigest.BlockLocator{Digest: blockdigest.AssertFromString("f9ce82f59e5908d2d70e18df9679b469"),
+ Size: 31367794,
Hints: []string{"E53f903684239bcc114f7bf8ff9bd6089f33058db@5441920c"}})
}
--- /dev/null
+import syslog
+import sys
+sys.argv=['']
+import arvados
+import os
+
+def auth_log(msg):
+ """Send errors to default auth log"""
+ syslog.openlog(facility=syslog.LOG_AUTH)
+ #syslog.openlog()
+ syslog.syslog("libpam python Logged: " + msg)
+ syslog.closelog()
+
+
+def check_arvados_token(requested_username, token):
+ auth_log("%s %s" % (requested_username, token))
+
+ try:
+ f=file('/etc/default/arvados_pam')
+ config=dict([l.split('=') for l in f.readlines() if not l.startswith('#') or l.strip()==""])
+ arvados_api_host=config['ARVADOS_API_HOST'].strip()
+ hostname=config['HOSTNAME'].strip()
+ except Exception as e:
+ auth_log("problem getting default values %s" % e)
+ return False
+
+ try:
+ arv = arvados.api('v1',host=arvados_api_host, token=token, cache=None)
+ except Exception as e:
+ auth_log(str(e))
+ return False
+
+ try:
+ matches = arv.virtual_machines().list(filters=[['hostname','=',hostname]]).execute()['items']
+ except Exception as e:
+ auth_log(str(e))
+ return False
+
+
+ if len(matches) != 1:
+ auth_log("libpam_arvados could not determine vm uuid for '%s'" % hostname)
+ return False
+
+ this_vm_uuid = matches[0]['uuid']
+ auth_log("this_vm_uuid: %s" % this_vm_uuid)
+ client_user_uuid = arv.users().current().execute()['uuid']
+
+ filters = [
+ ['link_class','=','permission'],
+ ['name','=','can_login'],
+ ['head_uuid','=',this_vm_uuid],
+ ['tail_uuid','=',client_user_uuid]]
+
+ for l in arv.links().list(filters=filters).execute()['items']:
+ if requested_username == l['properties']['username']:
+ return True
+ return False
+
+
+def pam_sm_authenticate(pamh, flags, argv):
+ try:
+ user = pamh.get_user()
+ except pamh.exception, e:
+ return e.pam_result
+
+ if not user:
+ return pamh.PAM_USER_UNKNOWN
+
+ try:
+ resp = pamh.conversation(pamh.Message(pamh.PAM_PROMPT_ECHO_OFF, ''))
+ except pamh.exception, e:
+ return e.pam_result
+
+ try:
+ check = check_arvados_token(user, resp.resp)
+ except Exception as e:
+ auth_log(str(e))
+ return False
+
+ if not check:
+ auth_log("Auth failed Remote Host: %s (%s:%s)" % (pamh.rhost, user, resp.resp))
+ return pamh.PAM_AUTH_ERR
+
+ auth_log("Success! Remote Host: %s (%s:%s)" % (pamh.rhost, user, resp.resp))
+ return pamh.PAM_SUCCESS
+
+def pam_sm_setcred(pamh, flags, argv):
+ return pamh.PAM_SUCCESS
+
+def pam_sm_acct_mgmt(pamh, flags, argv):
+ return pamh.PAM_SUCCESS
+
+def pam_sm_open_session(pamh, flags, argv):
+ return pamh.PAM_SUCCESS
+
+def pam_sm_close_session(pamh, flags, argv):
+ return pamh.PAM_SUCCESS
+
+def pam_sm_chauthtok(pamh, flags, argv):
+ return pamh.PAM_SUCCESS
--- /dev/null
+# Default values for libpam arvados module
+#
+# ARVADOS_API_HOST should be the API host.
+# should be reachable, and will be called
+# from arvados_pam.py using Arvados Python SDK
+ARVADOS_API_HOST=zzzzz.arvadosapi.com
+
+# HOSTNAME is the hostname as is stored in the API object
+# something like "foo.shell" or "shell", but not "foo.shell.zzzzz.arvadosapi.com"!
+HOSTNAME=shell
--- /dev/null
+#
+# The PAM configuration file for the Shadow `login' service
+#
+
+# Enforce a minimal delay in case of failure (in microseconds).
+# (Replaces the `FAIL_DELAY' setting from login.defs)
+# Note that other modules may require another minimal delay. (for example,
+# to disable any delay, you should add the nodelay option to pam_unix)
+#auth optional pam_faildelay.so delay=3000000
+auth optional pam_faildelay.so delay=0
+
+# Outputs an issue file prior to each login prompt (Replaces the
+# ISSUE_FILE option from login.defs). Uncomment for use
+# auth required pam_issue.so issue=/etc/issue
+
+# Disallows root logins except on tty's listed in /etc/securetty
+# (Replaces the `CONSOLE' setting from login.defs)
+#
+# With the default control of this module:
+# [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die]
+# root will not be prompted for a password on insecure lines.
+# if an invalid username is entered, a password is prompted (but login
+# will eventually be rejected)
+#
+# You can change it to a "requisite" module if you think root may mis-type
+# her login and should not be prompted for a password in that case. But
+# this will leave the system as vulnerable to user enumeration attacks.
+#
+# You can change it to a "required" module if you think it permits to
+# guess valid user names of your system (invalid user names are considered
+# as possibly being root on insecure lines), but root passwords may be
+# communicated over insecure lines.
+auth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so
+
+# Disallows other than root logins when /etc/nologin exists
+# (Replaces the `NOLOGINS_FILE' option from login.defs)
+auth requisite pam_nologin.so
+
+# SELinux needs to be the first session rule. This ensures that any
+# lingering context has been cleared. Without this it is possible
+# that a module could execute code in the wrong domain.
+# When the module is present, "required" would be sufficient (When SELinux
+# is disabled, this returns success.)
+session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
+
+# This module parses environment configuration file(s)
+# and also allows you to use an extended config
+# file /etc/security/pam_env.conf.
+#
+# parsing /etc/environment needs "readenv=1"
+session required pam_env.so readenv=1
+# locale variables are also kept into /etc/default/locale in etch
+# reading this file *in addition to /etc/environment* does not hurt
+session required pam_env.so readenv=1 envfile=/etc/default/locale
+
+
+#
+# /etc/pam.d/common-auth - authentication settings common to all services
+#
+# This file is included from other service-specific PAM config files,
+# and should contain a list of the authentication modules that define
+# the central authentication scheme for use on the system
+# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the
+# traditional Unix authentication mechanisms.
+#
+# As of pam 1.0.1-6, this file is managed by pam-auth-update by default.
+# To take advantage of this, it is recommended that you configure any
+# local modules either before or after the default block, and use
+# pam-auth-update to manage selection of other modules. See
+# pam-auth-update(8) for details.
+
+# here are the per-package modules (the "Primary" block)
+auth [success=1 default=ignore] pam_python.so /usr/bin/arvados_pam.py
+# here's the fallback if no module succeeds
+auth requisite pam_deny.so
+# prime the stack with a positive return value if there isn't one already;
+# this avoids us returning an error just because nothing sets a success code
+# since the modules above will each just jump around
+auth required pam_permit.so
+# and here are more per-package modules (the "Additional" block)
+auth optional pam_ecryptfs.so unwrap
+# end of pam-auth-update config
+
+# This allows certain extra groups to be granted to a user
+# based on things like time of day, tty, service, and user.
+# Please edit /etc/security/group.conf to fit your needs
+# (Replaces the `CONSOLE_GROUPS' option in login.defs)
+auth optional pam_group.so
+
+# Uncomment and edit /etc/security/time.conf if you need to set
+# time restraints on logins.
+# (Replaces the `PORTTIME_CHECKS_ENAB' option from login.defs
+# as well as /etc/porttime)
+# account requisite pam_time.so
+
+# Uncomment and edit /etc/security/access.conf if you need to
+# set access limits.
+# (Replaces /etc/login.access file)
+# account required pam_access.so
+
+# Sets up user limits according to /etc/security/limits.conf
+# (Replaces the use of /etc/limits in old login)
+session required pam_limits.so
+
+# Prints the last login info upon successful login
+# (Replaces the `LASTLOG_ENAB' option from login.defs)
+session optional pam_lastlog.so
+
+# Prints the message of the day upon successful login.
+# (Replaces the `MOTD_FILE' option in login.defs)
+# This includes a dynamically generated part from /run/motd.dynamic
+# and a static (admin-editable) part from /etc/motd.
+session optional pam_motd.so motd=/run/motd.dynamic
+session optional pam_motd.so
+
+# Prints the status of the user's mailbox upon successful login
+# (Replaces the `MAIL_CHECK_ENAB' option from login.defs).
+#
+# This also defines the MAIL environment variable
+# However, userdel also needs MAIL_DIR and MAIL_FILE variables
+# in /etc/login.defs to make sure that removing a user
+# also removes the user's mail spool file.
+# See comments in /etc/login.defs
+session optional pam_mail.so standard
+
+# Standard Un*x account and session
+@include common-account
+@include common-session
+@include common-password
+
+# SELinux needs to intervene at login time to ensure that the process
+# starts in the proper default security context. Only sessions which are
+# intended to run in the user's context should be run after this.
+session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
+# When the module is present, "required" would be sufficient (When SELinux
+# is disabled, this returns success.)
WriteMakefile(
NAME => 'Arvados',
- VERSION_FROM => 'lib/Arvados.pm'
+ VERSION_FROM => 'lib/Arvados.pm',
+ PREREQ_PM => {
+ 'JSON' => 0,
+ 'LWP' => 0,
+ 'Net::SSL' => 0,
+ },
);
abort('need ARVADOS_API_HOST and ARVADOS_API_TOKEN for {}'.format(instance_name))
return client
+# Check if git is available
+def check_git_availability():
+ try:
+ arvados.util.run_command(['git', '--help'])
+ except:
+ abort('git command is not available. Please ensure git is installed.')
+
# copy_pipeline_instance(pi_uuid, src, dst, args)
#
# Copies a pipeline instance identified by pi_uuid from src to dst.
pi = src.pipeline_instances().get(uuid=pi_uuid).execute(num_retries=args.retries)
if args.recursive:
+ check_git_availability()
+
if not args.dst_git_repo:
abort('--dst-git-repo is required when copying a pipeline recursively.')
# Copy the pipeline template and save the copied template.
pt = src.pipeline_templates().get(uuid=pt_uuid).execute(num_retries=args.retries)
if args.recursive:
+ check_git_availability()
+
if not args.dst_git_repo:
abort('--dst-git-repo is required when copying a pipeline recursively.')
# Copy input collections, docker images and git repos.
args = arg_parser.parse_args(arguments)
if len(args.paths) == 0:
- args.paths += ['/dev/stdin']
+ args.paths = ['-']
+
+ args.paths = map(lambda x: "-" if x == "/dev/stdin" else x, args.paths)
if len(args.paths) != 1 or os.path.isdir(args.paths[0]):
if args.filename:
args.progress = True
if args.paths == ['-']:
- args.paths = ['/dev/stdin']
+ args.resume = False
if not args.filename:
- args.filename = '-'
+ args.filename = 'stdin'
return args
writer.report_progress()
writer.do_queued_work() # Do work resumed from cache.
for path in args.paths: # Copy file data to Keep.
- if os.path.isdir(path):
+ if path == '-':
+ writer.start_new_stream()
+ writer.start_new_file(args.filename)
+ r = sys.stdin.read(64*1024)
+ while r:
+ # Need to bypass _queued_file check in ResumableCollectionWriter.write() to get
+ # CollectionWriter.write().
+ super(arvados.collection.ResumableCollectionWriter, writer).write(r)
+ r = sys.stdin.read(64*1024)
+ elif os.path.isdir(path):
writer.write_directory_tree(
path, max_manifest_depth=args.max_manifest_depth)
else:
import arvados.commands._util as arv_cmd
logger = logging.getLogger('arvados.arv-run')
+logger.setLevel(logging.INFO)
arvrun_parser = argparse.ArgumentParser(parents=[arv_cmd.retry_opt])
arvrun_parser.add_argument('--dry-run', action="store_true", help="Print out the pipeline that would be submitted and exit")
arvrun_parser.add_argument('--local', action="store_true", help="Run locally using arv-run-pipeline-instance")
-arvrun_parser.add_argument('--docker-image', type=str, default="arvados/jobs", help="Docker image to use, default arvados/jobs")
+arvrun_parser.add_argument('--docker-image', type=str, help="Docker image to use, otherwise use instance default.")
arvrun_parser.add_argument('--ignore-rcode', action="store_true", help="Commands that return non-zero return codes should not be considered failed.")
arvrun_parser.add_argument('--no-reuse', action="store_true", help="Do not reuse past jobs.")
arvrun_parser.add_argument('--no-wait', action="store_true", help="Do not wait and display logs after submitting command, just exit.")
return prefix+fn
+def uploadfiles(files, api, dry_run=False, num_retries=0, project=None):
+    """Upload a set of local files to a single new collection.
+
+    Strips the longest common parent directory from the file names,
+    uploads the files (unless dry_run is True), and rewrites each
+    entry's .fn to a "$(file <pdh>/<path>)" reference for pipeline
+    parameter substitution.
+    """
+    # Find the smallest path prefix that includes all the files that need to be uploaded.
+    # This starts at the root and iteratively removes common parent directory prefixes
+    # until all file paths no longer have a common parent.
+    n = True
+    pathprefix = "/"
+    while n:
+        pathstep = None
+        for c in files:
+            if pathstep is None:
+                sp = c.fn.split('/')
+                if len(sp) < 2:
+                    # no parent directories left
+                    n = False
+                    break
+                # path step takes next directory
+                pathstep = sp[0] + "/"
+            else:
+                # check if pathstep is common prefix for all files
+                if not c.fn.startswith(pathstep):
+                    n = False
+                    break
+        if n:
+            # pathstep is common parent directory for all files, so remove the prefix
+            # from each path
+            pathprefix += pathstep
+            for c in files:
+                c.fn = c.fn[len(pathstep):]
+
+    orgdir = os.getcwd()
+    os.chdir(pathprefix)
+    try:
+        logger.info("Upload local files: \"%s\"", '" "'.join([c.fn for c in files]))
+
+        if dry_run:
+            logger.info("$(input) is %s", pathprefix.rstrip('/'))
+            pdh = "$(input)"
+        else:
+            files = sorted(files, key=lambda x: x.fn)
+            collection = arvados.CollectionWriter(api, num_retries=num_retries)
+            stream = None
+            for f in files:
+                sp = os.path.split(f.fn)
+                if sp[0] != stream:
+                    stream = sp[0]
+                    collection.start_new_stream(stream)
+                collection.write_file(f.fn, sp[1])
+            # Honor num_retries for the API call as well as the Keep writes.
+            item = api.collections().create(body={"owner_uuid": project, "manifest_text": collection.manifest_text()}).execute(num_retries=num_retries)
+            pdh = item["portable_data_hash"]
+            logger.info("Uploaded to %s", item["uuid"])
+
+        for c in files:
+            c.fn = "$(file %s/%s)" % (pdh, c.fn)
+    finally:
+        # Always restore the caller's working directory, even if the
+        # upload fails partway through.
+        os.chdir(orgdir)
+
+
def main(arguments=None):
args = arvrun_parser.parse_args(arguments)
command[i] = statfile(m.group(1), m.group(2))
break
- n = True
- pathprefix = "/"
files = [c for command in slots[1:] for c in command if isinstance(c, UploadFile)]
- if len(files) > 0:
- # Find the smallest path prefix that includes all the files that need to be uploaded.
- # This starts at the root and iteratively removes common parent directory prefixes
- # until all file pathes no longer have a common parent.
- while n:
- pathstep = None
- for c in files:
- if pathstep is None:
- sp = c.fn.split('/')
- if len(sp) < 2:
- # no parent directories left
- n = False
- break
- # path step takes next directory
- pathstep = sp[0] + "/"
- else:
- # check if pathstep is common prefix for all files
- if not c.fn.startswith(pathstep):
- n = False
- break
- if n:
- # pathstep is common parent directory for all files, so remove the prefix
- # from each path
- pathprefix += pathstep
- for c in files:
- c.fn = c.fn[len(pathstep):]
-
- orgdir = os.getcwd()
- os.chdir(pathprefix)
-
- print("Upload local files: \"%s\"" % '" "'.join([c.fn for c in files]))
-
- if args.dry_run:
- print("$(input) is %s" % pathprefix.rstrip('/'))
- pdh = "$(input)"
- else:
- files = sorted(files, key=lambda x: x.fn)
- collection = arvados.CollectionWriter(api, num_retries=args.retries)
- stream = None
- for f in files:
- sp = os.path.split(f.fn)
- if sp[0] != stream:
- stream = sp[0]
- collection.start_new_stream(stream)
- collection.write_file(f.fn, sp[1])
- item = api.collections().create(body={"owner_uuid": project, "manifest_text": collection.manifest_text()}).execute()
- pdh = item["portable_data_hash"]
- print "Uploaded to %s" % item["uuid"]
-
- for c in files:
- c.fn = "$(file %s/%s)" % (pdh, c.fn)
-
- os.chdir(orgdir)
+ if files:
+ uploadfiles(files, api, dry_run=args.dry_run, num_retries=args.num_retries, project=project)
for i in xrange(1, len(slots)):
slots[i] = [("%s%s" % (c.prefix, c.fn)) if isinstance(c, ArvFile) else c for c in slots[i]]
"repository": args.repository,
"script_parameters": {
},
- "runtime_constraints": {
- "docker_image": args.docker_image
- }
+ "runtime_constraints": {}
}
+ if args.docker_image:
+ component["runtime_constraints"]["docker_image"] = args.docker_image
+
task_foreach = []
group_parser = argparse.ArgumentParser()
group_parser.add_argument('-b', '--batch-size', type=int)
else:
pipeline["owner_uuid"] = project
pi = api.pipeline_instances().create(body=pipeline, ensure_unique_name=True).execute()
- print "Running pipeline %s" % pi["uuid"]
+ logger.info("Running pipeline %s", pi["uuid"])
if args.local:
subprocess.call(["arv-run-pipeline-instance", "--instance", pi["uuid"], "--run-jobs-here"] + (["--no-reuse"] if args.no_reuse else []))
ws.main(["--pipeline", pi["uuid"]])
pi = api.pipeline_instances().get(uuid=pi["uuid"]).execute()
- print "Pipeline is %s" % pi["state"]
+ logger.info("Pipeline is %s", pi["state"])
if "output_uuid" in pi["components"]["command"]:
- print "Output is %s" % pi["components"]["command"]["output_uuid"]
+ logger.info("Output is %s", pi["components"]["command"]["output_uuid"])
else:
- print "No output"
+ logger.info("No output")
if __name__ == '__main__':
main()
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--uuid', type=str, default="", help="Filter events on object_uuid")
parser.add_argument('-f', '--filters', type=str, default="", help="Arvados query filter to apply to log events (JSON encoded)")
+ parser.add_argument('-s', '--start-time', type=str, default="", help="Arvados query filter to fetch log events created at or after this time. This will be server time in UTC. Allowed format: YYYY-MM-DD or YYYY-MM-DD hh:mm:ss")
group = parser.add_mutually_exclusive_group()
group.add_argument('--poll-interval', default=15, type=int, help="If websockets is not available, specify the polling interval, default is every 15 seconds")
if args.pipeline:
filters += [ ['object_uuid', '=', args.pipeline] ]
+ if args.start_time:
+ last_log_id = 1
+ filters += [ ['created_at', '>=', args.start_time] ]
+ else:
+ last_log_id = None
+
def on_message(ev):
global filters
global ws
print json.dumps(ev)
try:
- ws = subscribe(arvados.api('v1'), filters, on_message, poll_fallback=args.poll_interval)
+ ws = subscribe(arvados.api('v1'), filters, on_message, poll_fallback=args.poll_interval, last_log_id=last_log_id)
if ws:
if args.pipeline:
c = api.pipeline_instances().get(uuid=args.pipeline).execute()
_logger = logging.getLogger('arvados.events')
class EventClient(WebSocketClient):
- def __init__(self, url, filters, on_event):
+ def __init__(self, url, filters, on_event, last_log_id):
ssl_options = {'ca_certs': arvados.util.ca_certs_path()}
if config.flag_is_true('ARVADOS_API_HOST_INSECURE'):
ssl_options['cert_reqs'] = ssl.CERT_NONE
super(EventClient, self).__init__(url, ssl_options=ssl_options)
self.filters = filters
self.on_event = on_event
+ self.stop = threading.Event()
+ self.last_log_id = last_log_id
def opened(self):
- self.subscribe(self.filters)
+ self.subscribe(self.filters, self.last_log_id)
def received_message(self, m):
self.on_event(json.loads(str(m)))
- def close_connection(self):
- try:
- self.sock.shutdown(socket.SHUT_RDWR)
- self.sock.close()
- except:
- pass
+ def closed(self, code, reason=None):
+ self.stop.set()
+
+ def close(self, code=1000, reason=''):
+ """Close event client and wait for it to finish."""
+
+        # parent close() method sends an asynchronous "closed" event to the server
+ super(EventClient, self).close(code, reason)
+
+ # if server doesn't respond by finishing the close handshake, we'll be
+ # stuck in limbo forever. We don't need to wait for the server to
+ # respond to go ahead and actually close the socket.
+ self.close_connection()
+
+        # wait for the websocket thread to finish up (closed() is called
+        # by the websocket thread as part of terminate())
+ while not self.stop.is_set():
+ self.stop.wait(1)
def subscribe(self, filters, last_log_id=None):
m = {"method": "subscribe", "filters": filters}
self.send(json.dumps({"method": "unsubscribe", "filters": filters}))
class PollClient(threading.Thread):
- def __init__(self, api, filters, on_event, poll_time):
+ def __init__(self, api, filters, on_event, poll_time, last_log_id):
super(PollClient, self).__init__()
self.api = api
if filters:
self.poll_time = poll_time
self.daemon = True
self.stop = threading.Event()
+ self.last_log_id = last_log_id
def run(self):
self.id = 0
- for f in self.filters:
- items = self.api.logs().list(limit=1, order="id desc", filters=f).execute()['items']
- if items:
- if items[0]['id'] > self.id:
- self.id = items[0]['id']
+ if self.last_log_id != None:
+ self.id = self.last_log_id
+ else:
+ for f in self.filters:
+ items = self.api.logs().list(limit=1, order="id desc", filters=f).execute()['items']
+ if items:
+ if items[0]['id'] > self.id:
+ self.id = items[0]['id']
self.on_event({'status': 200})
while not self.stop.isSet():
max_id = self.id
+ moreitems = False
for f in self.filters:
- items = self.api.logs().list(order="id asc", filters=f+[["id", ">", str(self.id)]]).execute()['items']
- for i in items:
+ items = self.api.logs().list(order="id asc", filters=f+[["id", ">", str(self.id)]]).execute()
+ for i in items["items"]:
if i['id'] > max_id:
max_id = i['id']
self.on_event(i)
+ if items["items_available"] > len(items["items"]):
+ moreitems = True
self.id = max_id
- self.stop.wait(self.poll_time)
+ if not moreitems:
+ self.stop.wait(self.poll_time)
def run_forever(self):
# Have to poll here, otherwise KeyboardInterrupt will never get processed.
self.stop.wait(1)
def close(self):
+ """Close poll client and wait for it to finish."""
+
self.stop.set()
try:
self.join()
del self.filters[self.filters.index(filters)]
-def _subscribe_websocket(api, filters, on_event):
+def _subscribe_websocket(api, filters, on_event, last_log_id=None):
endpoint = api._rootDesc.get('websocketUrl', None)
if not endpoint:
raise errors.FeatureNotEnabledError(
"Server does not advertise a websocket endpoint")
- uri_with_token = "{}?api_token={}".format(endpoint, api.api_token)
- client = EventClient(uri_with_token, filters, on_event)
- ok = False
try:
- client.connect()
- ok = True
- return client
- finally:
- if not ok:
- client.close_connection()
-
-def subscribe(api, filters, on_event, poll_fallback=15):
+ uri_with_token = "{}?api_token={}".format(endpoint, api.api_token)
+ client = EventClient(uri_with_token, filters, on_event, last_log_id)
+ ok = False
+ try:
+ client.connect()
+ ok = True
+ return client
+ finally:
+ if not ok:
+ client.close_connection()
+ except:
+ _logger.warn("Failed to connect to websockets on %s" % endpoint)
+ raise
+
+
+def subscribe(api, filters, on_event, poll_fallback=15, last_log_id=None):
"""
:api:
a client object retrieved from arvados.api(). The caller should not use this client object for anything else after calling subscribe().
The callback when a message is received.
:poll_fallback:
If websockets are not available, fall back to polling every N seconds. If poll_fallback=False, this will return None if websockets are not available.
+    :last_log_id:
+      If given, also deliver events from existing log rows with id greater than this value, in addition to new events.
"""
if not poll_fallback:
- return _subscribe_websocket(api, filters, on_event)
+ return _subscribe_websocket(api, filters, on_event, last_log_id)
try:
- return _subscribe_websocket(api, filters, on_event)
+ return _subscribe_websocket(api, filters, on_event, last_log_id)
except Exception as e:
_logger.warn("Falling back to polling after websocket error: %s" % e)
- p = PollClient(api, filters, on_event, poll_fallback)
+ p = PollClient(api, filters, on_event, poll_fallback, last_log_id)
p.start()
return p
p = subprocess.Popen(execargs, **kwargs)
stdoutdata, stderrdata = p.communicate(None)
if p.returncode != 0:
- raise errors.CommandFailedError(
+ raise arvados.errors.CommandFailedError(
"run_command %s exit %d:\n%s" %
(execargs, p.returncode, stderrdata))
return stdoutdata, stderrdata
elif re.search('\.tar$', f.name()):
p = tar_extractor(path, '')
else:
- raise errors.AssertionError(
+ raise arvados.errors.AssertionError(
"tarball_extract cannot handle filename %s" % f.name())
while True:
buf = f.read(2**20)
p.wait()
if p.returncode != 0:
lockfile.close()
- raise errors.CommandFailedError(
+ raise arvados.errors.CommandFailedError(
"tar exited %d" % p.returncode)
os.symlink(tarball, os.path.join(path, '.locator'))
tld_extracts = filter(lambda f: f != '.locator', os.listdir(path))
for f in CollectionReader(zipball).all_files():
if not re.search('\.zip$', f.name()):
- raise errors.NotImplementedError(
+ raise arvados.errors.NotImplementedError(
"zipball_extract cannot handle filename %s" % f.name())
zip_filename = os.path.join(path, os.path.basename(f.name()))
zip_file = open(zip_filename, 'wb')
p.wait()
if p.returncode != 0:
lockfile.close()
- raise errors.CommandFailedError(
+ raise arvados.errors.CommandFailedError(
"unzip exited %d" % p.returncode)
os.unlink(zip_filename)
os.symlink(zipball, os.path.join(path, '.locator'))
outfile.write(buf)
outfile.close()
if len(files_got) < len(files):
- raise errors.AssertionError(
+ raise arvados.errors.AssertionError(
"Wanted files %s but only got %s from %s" %
(files, files_got,
[z.name() for z in CollectionReader(collection).all_files()]))
outfile.write(buf)
outfile.close()
if len(files_got) < len(files):
- raise errors.AssertionError(
+ raise arvados.errors.AssertionError(
"Wanted files %s but only got %s from %s" %
(files, files_got, [z.name() for z in stream.all_files()]))
lockfile.close()
"""
num_length_args = len(length_args)
if num_length_args > 2:
- raise errors.ArgumentError("is_hex accepts up to 3 arguments ({} given)"
- .format(1 + num_length_args))
+ raise arvados.errors.ArgumentError(
+ "is_hex accepts up to 3 arguments ({} given)".format(1 + num_length_args))
elif num_length_args == 2:
good_len = (length_args[0] <= len(s) <= length_args[1])
elif num_length_args == 1:
help="""
Collection locator, optionally with a file path or prefix.
""")
-parser.add_argument('destination', type=str, nargs='?', default='/dev/stdout',
+parser.add_argument('destination', type=str, nargs='?', default='-',
help="""
-Local file or directory where the data is to be written. Default:
-/dev/stdout.
+Local file or directory where the data is to be written. Default: stdout.
""")
group = parser.add_mutually_exclusive_group()
group.add_argument('--progress', action='store_true',
help="""
Overwrite existing files while writing. The default behavior is to
refuse to write *anything* if any of the output files already
-exist. As a special case, -f is not needed to write to /dev/stdout.
+exist. As a special case, -f is not needed to write to stdout.
""")
group.add_argument('--skip-existing', action='store_true',
help="""
logger.debug("Appended source file name to destination directory: %s",
args.destination)
-if args.destination == '-':
- args.destination = '/dev/stdout'
if args.destination == '/dev/stdout':
+ args.destination = "-"
+
+if args.destination == '-':
# Normally you have to use -f to write to a file (or device) that
# already exists, but "-" and "/dev/stdout" are common enough to
# merit a special exception.
# that isn't a tty.
if (not (args.batch_progress or args.no_progress)
and sys.stderr.isatty()
- and (args.destination != '/dev/stdout'
+ and (args.destination != '-'
or not sys.stdout.isatty())):
args.progress = True
if not args.f:
open_flags |= os.O_EXCL
try:
- out_fd = os.open(args.destination, open_flags)
- with os.fdopen(out_fd, 'wb') as out_file:
- out_file.write(reader.manifest_text())
+ if args.destination == "-":
+ sys.stdout.write(reader.manifest_text())
+ else:
+ out_fd = os.open(args.destination, open_flags)
+ with os.fdopen(out_fd, 'wb') as out_file:
+ out_file.write(reader.manifest_text())
except (IOError, OSError) as error:
abort("can't write to '{}': {}".format(args.destination, error))
except (arvados.errors.ApiError, arvados.errors.KeepReadError) as error:
if 0 != string.find(os.path.join(s.name(), f.name()),
'.' + get_prefix):
continue
- dest_path = os.path.join(
- args.destination,
- os.path.join(s.name(), f.name())[len(get_prefix)+1:])
- if (not (args.n or args.f or args.skip_existing) and
- os.path.exists(dest_path)):
- abort('Local file %s already exists.' % (dest_path,))
+ if args.destination == "-":
+ dest_path = "-"
+ else:
+ dest_path = os.path.join(
+ args.destination,
+ os.path.join(s.name(), f.name())[len(get_prefix)+1:])
+ if (not (args.n or args.f or args.skip_existing) and
+ os.path.exists(dest_path)):
+ abort('Local file %s already exists.' % (dest_path,))
else:
if os.path.join(s.name(), f.name()) != '.' + get_prefix:
continue
outfile = None
digestor = None
if not args.n:
- if args.skip_existing and os.path.exists(outfilename):
- logger.debug('Local file %s exists. Skipping.', outfilename)
- continue
- elif not args.f and (os.path.isfile(outfilename) or
- os.path.isdir(outfilename)):
- # Good thing we looked again: apparently this file wasn't
- # here yet when we checked earlier.
- abort('Local file %s already exists.' % (outfilename,))
- if args.r:
- arvados.util.mkdir_dash_p(os.path.dirname(outfilename))
- try:
- outfile = open(outfilename, 'wb')
- except Exception as error:
- abort('Open(%s) failed: %s' % (outfilename, error))
+ if outfilename == "-":
+ outfile = sys.stdout
+ else:
+ if args.skip_existing and os.path.exists(outfilename):
+ logger.debug('Local file %s exists. Skipping.', outfilename)
+ continue
+ elif not args.f and (os.path.isfile(outfilename) or
+ os.path.isdir(outfilename)):
+ # Good thing we looked again: apparently this file wasn't
+ # here yet when we checked earlier.
+ abort('Local file %s already exists.' % (outfilename,))
+ if args.r:
+ arvados.util.mkdir_dash_p(os.path.dirname(outfilename))
+ try:
+ outfile = open(outfilename, 'wb')
+ except Exception as error:
+ abort('Open(%s) failed: %s' % (outfilename, error))
if args.hash:
digestor = hashlib.new(args.hash)
try:
sys.stderr.write("%s %s/%s\n"
% (digestor.hexdigest(), s.name(), f.name()))
except KeyboardInterrupt:
- if outfile and outfilename != '/dev/stdout':
- os.unlink(outfilename)
+ if outfile and (outfile.fileno() > 2) and not outfile.closed:
+ os.unlink(outfile.name)
break
if args.progress:
--- /dev/null
+import arvados
+import arvados_testutil as tutil
+import hashlib
+
+class ManifestExamples(object):
+    """Mixin that builds large synthetic manifest texts for tests."""
+    def make_manifest(self,
+                      bytes_per_block=1,
+                      blocks_per_file=1,
+                      files_per_stream=1,
+                      streams=1):
+        # Every block is the same run of 'x' bytes, so one mocked Keep
+        # response (data_loc) covers every write.
+        datablip = 'x' * bytes_per_block
+        data_loc = '{}+{}'.format(hashlib.md5(datablip).hexdigest(),
+                                  bytes_per_block)
+        with tutil.mock_keep_responses(data_loc, 200):
+            coll = arvados.CollectionWriter()
+            for si in range(0, streams):
+                for fi in range(0, files_per_stream):
+                    with coll.open("stream{}/file{}.txt".format(si, fi)) as f:
+                        for bi in range(0, blocks_per_file):
+                            f.write(datablip)
+            return coll.manifest_text()
caught = e
pr.disable()
ps = pstats.Stats(pr, stream=outfile)
- ps.print_stats()
+ ps.sort_stats('time').print_stats()
if caught:
- raise caught
+ raise
return ret
return profiled_function
sock.close()
return port
+def _wait_until_port_listens(port, timeout=10):
+    """Wait for a process to start listening on the given port.
+
+    If nothing listens on the port within the specified timeout (given
+    in seconds), print a warning on stderr before returning.
+    """
+    try:
+        subprocess.check_output(['which', 'lsof'])
+    except subprocess.CalledProcessError:
+        # Without lsof we cannot poll the port; warn (on stderr, as
+        # documented) and fall back to a fixed delay.
+        print("WARNING: No `lsof` -- cannot wait for port to listen. "+
+              "Sleeping 0.5 and hoping for the best.",
+              file=sys.stderr)
+        time.sleep(0.5)
+        return
+    deadline = time.time() + timeout
+    while time.time() < deadline:
+        try:
+            subprocess.check_output(
+                ['lsof', '-t', '-i', 'tcp:'+str(port)])
+        except subprocess.CalledProcessError:
+            time.sleep(0.1)
+            continue
+        return
+    print(
+        "WARNING: Nothing is listening on port {} (waited {} seconds).".
+        format(port, timeout),
+        file=sys.stderr)
+
def run(leave_running_atexit=False):
"""Ensure an API server is running, and ARVADOS_API_* env vars have
admin credentials for it.
my_api_host = match.group(1)
os.environ['ARVADOS_API_HOST'] = my_api_host
- # Make sure the server has written its pid file before continuing
+ # Make sure the server has written its pid file and started
+ # listening on its TCP port
find_server_pid(pid_file)
+ _wait_until_port_listens(port)
reset()
os.chdir(restore_cwd)
with open("{}/keep{}.volume".format(TEST_TMPDIR, n), 'w') as f:
f.write(keep0)
+ _wait_until_port_listens(port)
+
return port
def run_keep(blob_signing_key=None, enforce_permissions=False):
}}).execute()
os.environ["ARVADOS_KEEP_PROXY"] = "http://localhost:{}".format(port)
_setport('keepproxy', port)
+ _wait_until_port_listens(port)
def stop_keep_proxy():
if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
with open(_pidfile('arv-git-httpd'), 'w') as f:
f.write(str(agh.pid))
_setport('arv-git-httpd', gitport)
+ _wait_until_port_listens(gitport)
def stop_arv_git_httpd():
if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:
def _apiconfig(key):
if _cached_config:
return _cached_config[key]
- def _load(f):
- return yaml.load(os.path.join(SERVICES_SRC_DIR, 'api', 'config', f))
+ def _load(f, required=True):
+ fullpath = os.path.join(SERVICES_SRC_DIR, 'api', 'config', f)
+ if not required and not os.path.exists(fullpath):
+ return {}
+ return yaml.load(fullpath)
cdefault = _load('application.default.yml')
- csite = _load('application.yml')
+ csite = _load('application.yml', required=False)
_cached_config = {}
for section in [cdefault.get('common',{}), cdefault.get('test',{}),
csite.get('common',{}), csite.get('test',{})]:
ArvadosBaseTestCase):
def _getKeepServerConfig():
for config_file, mandatory in [
- ['application.yml', True], ['application.default.yml', False]]:
+ ['application.yml', False], ['application.default.yml', True]]:
path = os.path.join(run_test_server.SERVICES_SRC_DIR,
"api", "config", config_file)
if not mandatory and not os.path.exists(path):
--- /dev/null
+#!/usr/bin/env python
+
+import unittest
+import arvados.errors as arv_error
+import arvados.commands.ws as arv_ws
+
+class ArvWsTestCase(unittest.TestCase):
+    """Tests for the arv-ws command line tool."""
+    def run_ws(self, args):
+        # Invoke the arv-ws entry point directly with the given argv.
+        return arv_ws.main(args)
+
+    def test_unsupported_arg(self):
+        # argparse raises SystemExit on an unrecognized option.
+        with self.assertRaises(SystemExit):
+            self.run_ws(['-x=unknown'])
--- /dev/null
+import arvados
+import sys
+
+import run_test_server
+import arvados_testutil as tutil
+import manifest_examples
+from performance.performance_profiler import profiled
+
+class CollectionBenchmark(run_test_server.TestCaseWithServers,
+                          tutil.ArvadosBaseTestCase,
+                          manifest_examples.ManifestExamples):
+    """Profile collection operations against a large synthetic manifest.
+
+    Each profile_* helper is wrapped with @profiled so its runtime
+    statistics are recorded by the performance profiler.
+    """
+    MAIN_SERVER = {}
+    # Zero-byte blocks keep the benchmark focused on manifest handling
+    # rather than data transfer (see test_medium_sized_manifest).
+    TEST_BLOCK_SIZE = 0
+
+    @classmethod
+    def list_recursive(cls, coll, parent_name=None):
+        # Yield the full path of every file in `coll`, walking
+        # subcollections recursively. Leaf items raise TypeError on
+        # iteration, which is how a file is detected here.
+        if parent_name is None:
+            current_name = coll.stream_name()
+        else:
+            current_name = '{}/{}'.format(parent_name, coll.name)
+        try:
+            for name in coll:
+                for item in cls.list_recursive(coll[name], current_name):
+                    yield item
+        except TypeError:
+            yield current_name
+
+    @classmethod
+    def setUpClass(cls):
+        super(CollectionBenchmark, cls).setUpClass()
+        run_test_server.authorize_with('active')
+        cls.api_client = arvados.api('v1')
+        cls.keep_client = arvados.KeepClient(api_client=cls.api_client,
+                                             local_store=cls.local_store)
+
+    @profiled
+    def profile_new_collection_from_manifest(self, manifest_text):
+        # Parse a manifest text into a Collection object.
+        return arvados.collection.Collection(manifest_text)
+
+    @profiled
+    def profile_new_collection_from_server(self, uuid):
+        # Fetch and parse a stored collection by locator.
+        return arvados.collection.Collection(uuid)
+
+    @profiled
+    def profile_new_collection_copying_bytes_from_collection(self, src):
+        # Copy file contents byte-by-byte through open()/read()/write().
+        dst = arvados.collection.Collection()
+        with tutil.mock_keep_responses('x'*self.TEST_BLOCK_SIZE, 200):
+            for name in self.list_recursive(src):
+                with src.open(name) as srcfile, dst.open(name, 'w') as dstfile:
+                    dstfile.write(srcfile.read())
+            dst.save_new()
+
+    @profiled
+    def profile_new_collection_copying_files_from_collection(self, src):
+        # Copy whole files via Collection.copy (no data re-read).
+        dst = arvados.collection.Collection()
+        with tutil.mock_keep_responses('x'*self.TEST_BLOCK_SIZE, 200):
+            for name in self.list_recursive(src):
+                dst.copy(name, name, src)
+            dst.save_new()
+
+    @profiled
+    def profile_collection_list_files(self, coll):
+        # Count every file reachable in the collection.
+        return sum(1 for name in self.list_recursive(coll))
+
+    def test_medium_sized_manifest(self):
+        """Exercise manifest-handling code.
+
+        Currently, this test puts undue emphasis on some code paths
+        that don't reflect typical use because the contrived example
+        manifest has some unusual characteristics:
+
+        * Block size is zero.
+
+        * Every block is identical, so block caching patterns are
+          unrealistic.
+
+        * Every file begins and ends at a block boundary.
+        """
+        specs = {
+            'streams': 100,
+            'files_per_stream': 100,
+            'blocks_per_file': 20,
+            'bytes_per_block': self.TEST_BLOCK_SIZE,
+        }
+        my_manifest = self.make_manifest(**specs)
+
+        coll = self.profile_new_collection_from_manifest(my_manifest)
+
+        coll.save_new()
+        self.profile_new_collection_from_server(coll.manifest_locator())
+
+        num_items = self.profile_collection_list_files(coll)
+        self.assertEqual(num_items, specs['streams'] * specs['files_per_stream'])
+
+        self.profile_new_collection_copying_bytes_from_collection(coll)
+
+        self.profile_new_collection_copying_files_from_collection(coll)
-import unittest
import os
-import arvados.util
+import subprocess
+import unittest
+
+import arvados
class MkdirDashPTest(unittest.TestCase):
def setUp(self):
with open('./tmp/bar', 'wb') as f:
f.write('bar')
self.assertRaises(OSError, arvados.util.mkdir_dash_p, './tmp/bar')
+
+
+class RunCommandTestCase(unittest.TestCase):
+    """Tests for arvados.util.run_command."""
+    def test_success(self):
+        # run_command returns a (stdout, stderr) pair on success.
+        stdout, stderr = arvados.util.run_command(['echo', 'test'],
+                                                  stderr=subprocess.PIPE)
+        self.assertEqual("test\n", stdout)
+        self.assertEqual("", stderr)
+
+    def test_failure(self):
+        # A nonzero exit status raises CommandFailedError.
+        with self.assertRaises(arvados.errors.CommandFailedError):
+            arvados.util.run_command(['false'])
import arvados.events
import mock
import threading
+from datetime import datetime, timedelta
+import time
class WebsocketTest(run_test_server.TestCaseWithServers):
MAIN_SERVER = {}
self.ws.close()
super(WebsocketTest, self).tearDown()
- def _test_subscribe(self, poll_fallback, expect_type):
+ def _test_subscribe(self, poll_fallback, expect_type, last_log_id=None, additional_filters=None, expected=1):
run_test_server.authorize_with('active')
- events = Queue.Queue(3)
+ events = Queue.Queue(100)
+
+ # Create ancestor before subscribing.
+ # When listening with start_time in the past, this should also be retrieved.
+ # However, when start_time is omitted in subscribe, this should not be fetched.
+ ancestor = arvados.api('v1').humans().create(body={}).execute()
+ time.sleep(5)
+
+ filters = [['object_uuid', 'is_a', 'arvados#human']]
+ if additional_filters:
+ filters = filters + additional_filters
+
self.ws = arvados.events.subscribe(
- arvados.api('v1'), [['object_uuid', 'is_a', 'arvados#human']],
- events.put, poll_fallback=poll_fallback)
+ arvados.api('v1'), filters,
+ events.put, poll_fallback=poll_fallback, last_log_id=last_log_id)
self.assertIsInstance(self.ws, expect_type)
- self.assertEqual(200, events.get(True, 10)['status'])
+ self.assertEqual(200, events.get(True, 5)['status'])
human = arvados.api('v1').humans().create(body={}).execute()
- self.assertEqual(human['uuid'], events.get(True, 10)['object_uuid'])
- self.assertTrue(events.empty(), "got more events than expected")
+
+ if last_log_id == None or expected == 0:
+ self.assertEqual(human['uuid'], events.get(True, 5)['object_uuid'])
+ self.assertTrue(events.empty(), "got more events than expected")
+ else:
+ log_events = []
+ for i in range(0, 20):
+ try:
+ event = events.get(True, 5)
+ self.assertTrue(event['object_uuid'] is not None)
+ log_events.append(event['object_uuid'])
+ except:
+ break;
+
+ self.assertTrue(len(log_events)>1)
+ self.assertTrue(human['uuid'] in log_events)
+ self.assertTrue(ancestor['uuid'] in log_events)
def test_subscribe_websocket(self):
self._test_subscribe(
event_client_constr.side_effect = Exception('All is well')
self._test_subscribe(
poll_fallback=1, expect_type=arvados.events.PollClient)
+
+ def test_subscribe_websocket_with_start_time_date_only(self):
+ lastHour = datetime.today() - timedelta(hours = 1)
+ self._test_subscribe(
+ poll_fallback=False, expect_type=arvados.events.EventClient, last_log_id=1,
+ additional_filters=[['created_at', '>=', lastHour.strftime('%Y-%m-%d')]])
+
+ @mock.patch('arvados.events.EventClient.__init__')
+ def test_poll_with_start_time_date_only(self, event_client_constr):
+ event_client_constr.side_effect = Exception('All is well')
+ lastHour = datetime.today() - timedelta(hours = 1)
+ self._test_subscribe(
+ poll_fallback=1, expect_type=arvados.events.PollClient, last_log_id=1,
+ additional_filters=[['created_at', '>=', lastHour.strftime('%Y-%m-%d')]])
+
+ def test_subscribe_websocket_with_start_time_last_hour(self):
+ lastHour = datetime.today() - timedelta(hours = 1)
+ self._test_subscribe(
+ poll_fallback=False, expect_type=arvados.events.EventClient, last_log_id=1,
+ additional_filters=[['created_at', '>=', lastHour.strftime('%Y-%m-%d %H:%M:%S')]])
+
+ @mock.patch('arvados.events.EventClient.__init__')
+ def test_subscribe_poll_with_start_time_last_hour(self, event_client_constr):
+ event_client_constr.side_effect = Exception('All is well')
+ lastHour = datetime.today() - timedelta(hours = 1)
+ self._test_subscribe(
+ poll_fallback=1, expect_type=arvados.events.PollClient, last_log_id=1,
+ additional_filters=[['created_at', '>=', lastHour.strftime('%Y-%m-%d %H:%M:%S')]])
+
+ def test_subscribe_websocket_with_start_time_next_hour(self):
+ nextHour = datetime.today() + timedelta(hours = 1)
+ with self.assertRaises(Queue.Empty):
+ self._test_subscribe(
+ poll_fallback=False, expect_type=arvados.events.EventClient, last_log_id=1,
+ additional_filters=[['created_at', '>=', nextHour.strftime('%Y-%m-%d %H:%M:%S')]], expected=0)
+
+ @mock.patch('arvados.events.EventClient.__init__')
+ def test_subscribe_poll_with_start_time_next_hour(self, event_client_constr):
+ event_client_constr.side_effect = Exception('All is well')
+ nextHour = datetime.today() + timedelta(hours = 1)
+ with self.assertRaises(Queue.Empty):
+ self._test_subscribe(
+ poll_fallback=1, expect_type=arvados.events.PollClient, last_log_id=1,
+ additional_filters=[['created_at', '>=', nextHour.strftime('%Y-%m-%d %H:%M:%S')]], expected=0)
+
+ def test_subscribe_websocket_with_start_time_tomorrow(self):
+ tomorrow = datetime.today() + timedelta(hours = 24)
+ with self.assertRaises(Queue.Empty):
+ self._test_subscribe(
+ poll_fallback=False, expect_type=arvados.events.EventClient, last_log_id=1,
+ additional_filters=[['created_at', '>=', tomorrow.strftime('%Y-%m-%d')]], expected=0)
+
+ @mock.patch('arvados.events.EventClient.__init__')
+ def test_subscribe_poll_with_start_time_tomorrow(self, event_client_constr):
+ event_client_constr.side_effect = Exception('All is well')
+ tomorrow = datetime.today() + timedelta(hours = 24)
+ with self.assertRaises(Queue.Empty):
+ self._test_subscribe(
+ poll_fallback=1, expect_type=arvados.events.PollClient, last_log_id=1,
+ additional_filters=[['created_at', '>=', tomorrow.strftime('%Y-%m-%d')]], expected=0)
end
end
end
+
+namespace :db do
+  namespace :structure do
+    # Normalize db/structure.sql in place after it is dumped, removing
+    # lines that churn between postgres/rails versions so the checked-in
+    # schema dump stays stable.
+    task :dump do
+      require 'tempfile'
+      origfnm = File.expand_path('../db/structure.sql', __FILE__)
+      # NOTE(review): tmpfnm is a Tempfile object, not a String path;
+      # File.new/rename/unlink below accept it via its underlying file
+      # path -- confirm this is intentional (vs. Tempfile#path).
+      tmpfnm = Tempfile.new 'structure.sql', File.expand_path('..', origfnm)
+      begin
+        tmpfile = File.new tmpfnm, 'w'
+        origfile = File.new origfnm
+        origfile.each_line do |line|
+          if /^SET lock_timeout = 0;/ =~ line
+            # Avoid edit wars between versions that do/don't write this line.
+            next
+          elsif /^COMMENT ON EXTENSION/ =~ line
+            # Avoid warning message when loading:
+            # "structure.sql:22: ERROR: must be owner of extension plpgsql"
+            tmpfile.write "-- "
+          end
+          tmpfile.write line
+        end
+        origfile.close
+        tmpfile.close
+        File.rename tmpfnm, origfnm
+        # Signal the ensure block that the rename succeeded.
+        tmpfnm = false
+      ensure
+        File.unlink tmpfnm if tmpfnm
+      end
+    end
+  end
+end
skip_before_filter :find_object_by_uuid, :only => :get_all_permissions
skip_before_filter :render_404_if_no_object, :only => :get_all_permissions
before_filter :admin_required, :only => :get_all_permissions
+
def get_all_permissions
- @users = {}
- User.includes(:authorized_keys).all.each do |u|
- @users[u.uuid] = u
+ # users is a map of {user_uuid => User object}
+ users = {}
+ # user_aks is a map of {user_uuid => array of public keys}
+ user_aks = {}
+ # admins is an array of user_uuids
+ admins = []
+ User.eager_load(:authorized_keys).find_each do |u|
+ next unless u.is_active or u.uuid == anonymous_user_uuid
+ users[u.uuid] = u
+ user_aks[u.uuid] = u.authorized_keys.collect do |ak|
+ {
+ public_key: ak.public_key,
+ authorized_key_uuid: ak.uuid
+ }
+ end
+ admins << u.uuid if u.is_admin
end
- admins = @users.select { |k,v| v.is_admin }
- @user_aks = {}
@repo_info = {}
- @repos = Repository.includes(:permissions).all
- @repos.each do |repo|
- gitolite_permissions = ''
- perms = []
+ Repository.eager_load(:permissions).find_each do |repo|
+ @repo_info[repo.uuid] = {
+ uuid: repo.uuid,
+ name: repo.name,
+ push_url: repo.push_url,
+ fetch_url: repo.fetch_url,
+ user_permissions: {},
+ }
+ # evidence is an array of {name: 'can_xxx', user_uuid: 'x-y-z'},
+ # one entry for each piece of evidence we find in the permission
+ # database that establishes that a user can access this
+ # repository. Multiple entries can be added for a given user,
+ # possibly with different access levels; these will be compacted
+ # below.
+ evidence = []
repo.permissions.each do |perm|
if ArvadosModel::resource_class_for_uuid(perm.tail_uuid) == Group
- @users.each do |user_uuid, user|
- user.group_permissions.each do |group_uuid, perm_mask|
- if perm_mask[:manage]
- perms << {name: 'can_manage', user_uuid: user_uuid}
- elsif perm_mask[:write]
- perms << {name: 'can_write', user_uuid: user_uuid}
- elsif perm_mask[:read]
- perms << {name: 'can_read', user_uuid: user_uuid}
- end
+ # A group has permission. Each user who has access to this
+ # group also has access to the repository. Access level is
+ # min(group-to-repo permission, user-to-group permission).
+ users.each do |user_uuid, user|
+ perm_mask = user.group_permissions[perm.tail_uuid]
+ if not perm_mask
+ next
+ elsif perm_mask[:manage] and perm.name == 'can_manage'
+ evidence << {name: 'can_manage', user_uuid: user_uuid}
+ elsif perm_mask[:write] and ['can_manage', 'can_write'].index perm.name
+ evidence << {name: 'can_write', user_uuid: user_uuid}
+ elsif perm_mask[:read]
+ evidence << {name: 'can_read', user_uuid: user_uuid}
end
end
- else
- perms << {name: perm.name, user_uuid: perm.tail_uuid}
+ elsif users[perm.tail_uuid]
+ # A user has permission; the user exists; and either the
+ # user is active, or it's the special case of the anonymous
+ # user which is never "active" but is allowed to read
+ # content from public repositories.
+ evidence << {name: perm.name, user_uuid: perm.tail_uuid}
end
end
- # Owner of the repository, and all admins, can RW
- ([repo.owner_uuid] + admins.keys).each do |user_uuid|
- perms << {name: 'can_write', user_uuid: user_uuid}
+ # Owner of the repository, and all admins, can do everything.
+ ([repo.owner_uuid] | admins).each do |user_uuid|
+ # Except: no permissions for inactive users, even if they own
+ # repositories.
+ next unless users[user_uuid]
+ evidence << {name: 'can_manage', user_uuid: user_uuid}
end
- perms.each do |perm|
+ # Distill all the evidence about permissions on this repository
+ # into one hash per user, of the form {'can_xxx' => true, ...}.
+ # The hash is nil for a user who has no permissions at all on
+ # this particular repository.
+ evidence.each do |perm|
user_uuid = perm[:user_uuid]
- @user_aks[user_uuid] = @users[user_uuid].andand.authorized_keys.andand.
- collect do |ak|
- {
- public_key: ak.public_key,
- authorized_key_uuid: ak.uuid
- }
- end || []
- if @user_aks[user_uuid].any?
- @repo_info[repo.uuid] ||= {
- uuid: repo.uuid,
- name: repo.name,
- push_url: repo.push_url,
- fetch_url: repo.fetch_url,
- user_permissions: {}
- }
- ri = (@repo_info[repo.uuid][:user_permissions][user_uuid] ||= {})
- ri[perm[:name]] = true
- end
+ user_perms = (@repo_info[repo.uuid][:user_permissions][user_uuid] ||= {})
+ user_perms[perm[:name]] = true
end
end
- @repo_info.values.each do |repo_users|
- repo_users[:user_permissions].each do |user_uuid,perms|
- if perms['can_manage']
- perms[:gitolite_permissions] = 'RW'
- perms['can_write'] = true
- perms['can_read'] = true
- elsif perms['can_write']
- perms[:gitolite_permissions] = 'RW'
- perms['can_read'] = true
- elsif perms['can_read']
- perms[:gitolite_permissions] = 'R'
+ # Revisit each {'can_xxx' => true, ...} hash for some final
+ # cleanup to make life easier for the requestor.
+ #
+ # Add a 'gitolite_permissions' key alongside the 'can_xxx' keys,
+ # for the convenience of the gitolite config file generator.
+ #
+ # Add all lesser permissions when a greater permission is
+ # present. If the requestor only wants to know who can write, it
+ # only has to test for 'can_write' in the response.
+ @repo_info.values.each do |repo|
+ repo[:user_permissions].each do |user_uuid, user_perms|
+ if user_perms['can_manage']
+ user_perms['gitolite_permissions'] = 'RW'
+ user_perms['can_write'] = true
+ user_perms['can_read'] = true
+ elsif user_perms['can_write']
+ user_perms['gitolite_permissions'] = 'RW'
+ user_perms['can_read'] = true
+ elsif user_perms['can_read']
+ user_perms['gitolite_permissions'] = 'R'
end
end
end
+ # The response looks like
+ # {"kind":"...",
+ # "repositories":[r1,r2,r3,...],
+ # "user_keys":usermap}
+ # where each of r1,r2,r3 looks like
+ # {"uuid":"repo-uuid-1",
+ # "name":"username/reponame",
+ # "push_url":"...",
+ # "user_permissions":{"user-uuid-a":{"can_read":true,"gitolite_permissions":"R"}}}
+ # and usermap looks like
+ # {"user-uuid-a":[{"public_key":"ssh-rsa g...","authorized_key_uuid":"ak-uuid-g"},...],
+ # "user-uuid-b":[{"public_key":"ssh-rsa h...","authorized_key_uuid":"ak-uuid-h"},...],...}
send_json(kind: 'arvados#RepositoryPermissionSnapshot',
repositories: @repo_info.values,
- user_keys: @user_aks)
+ user_keys: user_aks)
end
end
end
def get_all_logins
- @users = {}
- User.includes(:authorized_keys).all.each do |u|
- @users[u.uuid] = u
- end
@response = []
- @vms = VirtualMachine.includes(:login_permissions)
+ @vms = VirtualMachine.eager_load :login_permissions
if @object
- @vms = @vms.where('uuid=?', @object.uuid)
+ @vms = @vms.where uuid: @object.uuid
else
@vms = @vms.all
end
+ @users = {}
+ User.eager_load(:authorized_keys).
+ where('users.uuid in (?)',
+ @vms.map { |vm| vm.login_permissions.map &:tail_uuid }.flatten.uniq).
+ each do |u|
+ @users[u.uuid] = u
+ end
@vms.each do |vm|
vm.login_permissions.each do |perm|
user_uuid = perm.tail_uuid
- @users[user_uuid].andand.authorized_keys.andand.each do |ak|
- unless perm.properties['username'].blank?
- @response << {
- username: perm.properties['username'],
- hostname: vm.hostname,
- groups: (perm.properties["groups"].to_a rescue []),
- public_key: ak.public_key,
- user_uuid: user_uuid,
- virtual_machine_uuid: vm.uuid,
- authorized_key_uuid: ak.uuid
- }
- end
+ next if not @users[user_uuid]
+ next if perm.properties['username'].blank?
+ aks = @users[user_uuid].authorized_keys
+ if aks.empty?
+ # We'll emit one entry, with no public key.
+ aks = [nil]
+ end
+ aks.each do |ak|
+ @response << {
+ username: perm.properties['username'],
+ hostname: vm.hostname,
+ groups: (perm.properties['groups'].to_a rescue []),
+ public_key: ak ? ak.public_key : nil,
+ user_uuid: user_uuid,
+ virtual_machine_uuid: vm.uuid,
+ authorized_key_uuid: ak ? ak.uuid : nil,
+ }
end
end
end
unless identity_url_ok
# Whoa. This should never happen.
logger.error "UserSessionsController.create: omniauth object missing/invalid"
- logger.error "omniauth.pretty_inspect():\n\n#{omniauth.pretty_inspect()}"
+ logger.error "omniauth: "+omniauth.pretty_inspect
return redirect_to login_failure_url
end
flash[:notice] = 'You have logged off'
return_to = params[:return_to] || root_url
- redirect_to "#{CUSTOM_PROVIDER_URL}/users/sign_out?redirect_uri=#{CGI.escape return_to}"
+ redirect_to "#{Rails.configuration.sso_provider_url}/users/sign_out?redirect_uri=#{CGI.escape return_to}"
end
# login - Just bounce to /auth/joshid. The only purpose of this function is
def account_is_setup(user)
@user = user
- mail(to: user.email, subject: 'Welcome to Curoverse')
+ mail(to: user.email, subject: 'Welcome to Curoverse - shell account enabled')
end
end
after_destroy :log_destroy
after_find :convert_serialized_symbols_to_strings
before_validation :normalize_collection_uuids
+ before_validation :set_default_owner
validate :ensure_serialized_attribute_type
validate :ensure_valid_uuids
true
end
- def ensure_owner_uuid_is_permitted
- raise PermissionDeniedError if !current_user
-
- if new_record? and respond_to? :owner_uuid=
+ def set_default_owner
+ if new_record? and current_user and respond_to? :owner_uuid=
self.owner_uuid ||= current_user.uuid
end
+ end
+
+ def ensure_owner_uuid_is_permitted
+ raise PermissionDeniedError if !current_user
if self.owner_uuid.nil?
errors.add :owner_uuid, "cannot be nil"
def public_key_must_be_unique
if self.public_key
- #key = /^ssh-(rsa|dss) [A-Za-z0-9+\/=\+]+\b/.match(self.public_key)
valid_key = SSHKey.valid_ssh_public_key? self.public_key
if not valid_key
errors.add(:public_key, "does not appear to be a valid ssh-rsa or dsa public key")
else
# Valid if no other rows have this public key
- if self.class.where('public_key like ?', "%#{self.public_key}%").any?
+ if self.class.where('uuid != ? and public_key like ?',
+ uuid || '', "%#{self.public_key}%").any?
errors.add(:public_key, "already exists in the database, use a different key.")
return false
end
<div id="intropage">
- <img class="curoverse-logo" src="<%= asset_path('logo.png') %>" style="display:block; margin:2em auto"/>
+ <img class="curoverse-logo" src="<%= asset_path('logo.png') rescue '/logo.png' %>" style="display:block; margin:2em auto"/>
<div style="width:30em; margin:2em auto 0 auto">
<h1>Error</h1>
Hi there,
<% end -%>
-Your Arvados account has been set up. You can log in with your Google account
-associated with the e-mail address <%= @user.email %><% if Rails.configuration.workbench_address %> at:
+Your Arvados shell account has been set up. Please visit the manage account page <% if Rails.configuration.workbench_address %>at
- <%= Rails.configuration.workbench_address %><% else %>.<% end %>
+ <%= Rails.configuration.workbench_address %>/manage_account <% else %><% end %>
+
+for connection instructions.
Thanks,
The Arvados team.
active_support.deprecation: :stderr
active_record.mass_assignment_sanitizer: :strict
uuid_prefix: zzzzz
+ sso_app_id: arvados-server
+ sso_app_secret: <%= rand(2**512).to_s(36) %>
+ sso_provider_url: http://localhost:3002
secret_token: <%= rand(2**512).to_s(36) %>
blob_signing_key: zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc
user_profile_notification_address: arvados@example.com
# server is using a self-signed cert.
sso_insecure: false
+ # These settings are provided by your OAuth2 provider (e.g.,
+ # sso-provider).
+ sso_app_id: ~
+ sso_app_secret: ~
+ sso_provider_url: ~
+
# Default replication level for collections. This is used when a
# collection's replication_desired attribute is nil.
default_collection_replication: 2
blob_signing_key: ~
uuid_prefix: bogus
workbench_address: https://workbench.bogus.arvadosapi.com
+ sso_app_id: arvados-server
+ sso_app_secret: ~
+ sso_provider_url: https://login.bogus.arvadosapi.com
test:
# Tests should be able to run without further configuration, but if you do
development:
adapter: postgresql
+ template: template0
encoding: utf8
database: arvados_development
username: arvados
test:
adapter: postgresql
+ template: template0
encoding: utf8
database: arvados_test
username: arvados
production:
adapter: postgresql
+ template: template0
encoding: utf8
database: arvados_production
username: arvados
# configured by application.yml (i.e., here!) instead.
end
+if (File.exists?(File.expand_path '../omniauth.rb', __FILE__) and
+ not defined? WARNED_OMNIAUTH_CONFIG)
+ Rails.logger.warn <<-EOS
+DEPRECATED CONFIGURATION:
+ Please move your SSO provider config into config/application.yml
+ and delete config/initializers/omniauth.rb.
+EOS
+ # Real values will be copied from globals by omniauth_init.rb. For
+ # now, assign some strings so the generic *.yml config loader
+ # doesn't overwrite them or complain that they're missing.
+ Rails.configuration.sso_app_id = 'xxx'
+ Rails.configuration.sso_app_secret = 'xxx'
+ Rails.configuration.sso_provider_url = '//xxx'
+ WARNED_OMNIAUTH_CONFIG = true
+end
+
$application_config = {}
%w(application.default application).each do |cfgfile|
+++ /dev/null
-# Change this omniauth configuration to point to your registered provider
-# Since this is a registered application, add the app id and secret here
-APP_ID = 'arvados-server'
-APP_SECRET = rand(2**512).to_s(36) # CHANGE ME!
-
-# Update your custom Omniauth provider URL here
-CUSTOM_PROVIDER_URL = 'http://localhost:3002'
-
-Rails.application.config.middleware.use OmniAuth::Builder do
- provider :josh_id, APP_ID, APP_SECRET, CUSTOM_PROVIDER_URL
-end
-
-OmniAuth.config.on_failure = StaticController.action(:login_failure)
--- /dev/null
+# This file is called omniauth_init.rb instead of omniauth.rb because
+# older versions had site configuration in omniauth.rb.
+#
+# It must come after omniauth.rb in (lexical) load order.
+
+if defined? CUSTOM_PROVIDER_URL
+ Rails.logger.warn "Copying omniauth from globals in legacy config file."
+ Rails.configuration.sso_app_id = APP_ID
+ Rails.configuration.sso_app_secret = APP_SECRET
+ Rails.configuration.sso_provider_url = CUSTOM_PROVIDER_URL
+else
+ Rails.application.config.middleware.use OmniAuth::Builder do
+ provider(:josh_id,
+ Rails.configuration.sso_app_id,
+ Rails.configuration.sso_app_secret,
+ Rails.configuration.sso_provider_url)
+ end
+ OmniAuth.config.on_failure = StaticController.action(:login_failure)
+end
-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: -
--
-COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language';
+-- COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language';
SET search_path = public, pg_catalog;
# Execute query and actually send the matching log rows
count = 0
- limit = 20
+ limit = 10
logs.limit(limit).each do |l|
ws.send(l.as_api_response.to_json)
# Number of rows returned was capped by limit(), we need to schedule
# another query to get more logs (will start from last_log_id
# reported by current query)
- EventMachine::schedule do
+ EventMachine::next_tick do
push_events ws, nil
end
elsif !notify_id.nil? and (ws.last_log_id.nil? or notify_id > ws.last_log_id)
# No filters set up, so just record the sequence number
ws.last_log_id = notify_id
end
+ rescue ArgumentError => e
+ # There was some kind of user error.
+ Rails.logger.warn "Error publishing event: #{$!}"
+ ws.send ({status: 500, message: $!}.to_json)
+ ws.close
rescue => e
Rails.logger.warn "Error publishing event: #{$!}"
Rails.logger.warn "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
- ws.send ({status: 500, message: 'error'}.to_json)
+ ws.send ({status: 500, message: $!}.to_json)
ws.close
# These exceptions typically indicate serious server trouble:
# out of memory issues, database connection problems, etc. Go ahead and
head_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
properties: {}
+project_viewer_member_of_all_users_group:
+ uuid: zzzzz-o0j2j-cdnq6627g0h0r2x
+ owner_uuid: zzzzz-tpzed-000000000000000
+ created_at: 2015-07-28T21:34:41.361747000Z
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ modified_at: 2015-07-28T21:34:41.361747000Z
+ updated_at: 2015-07-28T21:34:41.361747000Z
+ tail_uuid: zzzzz-tpzed-projectviewer1a
+ link_class: permission
+ name: can_read
+ head_uuid: zzzzz-j7d0g-fffffffffffffff
+ properties: {}
+
project_viewer_can_read_project:
uuid: zzzzz-o0j2j-projviewerreadp
owner_uuid: zzzzz-tpzed-000000000000000
end
end
+ test "get_all_permissions takes into account is_active flag" do
+ r = nil
+ act_as_user users(:active) do
+ r = Repository.create! name: 'active/testrepo'
+ end
+ act_as_system_user do
+ u = users(:active)
+ u.is_active = false
+ u.save!
+ end
+ authorize_with :admin
+ get :get_all_permissions
+ assert_response :success
+ json_response['repositories'].each do |r|
+ r['user_permissions'].each do |user_uuid, perms|
+ refute_equal user_uuid, users(:active).uuid
+ end
+ end
+ end
+
test "get_all_permissions does not give any access to user without permission" do
viewer_uuid = users(:project_viewer).uuid
assert_equal(authorized_keys(:project_viewer).authorized_user_uuid,
end
end
+ test "get_all_permissions lists all repos regardless of permissions" do
+ act_as_system_user do
+ # Create repos that could potentially be left out of the
+ # permission list by accident.
+
+ # No authorized_key, no username (this can't even be done
+ # without skipping validations)
+ r = Repository.create name: 'root/testrepo'
+ assert r.save validate: false
+
+ r = Repository.create name: 'invalid username / repo name', owner_uuid: users(:inactive).uuid
+ assert r.save validate: false
+ end
+ authorize_with :admin
+ get :get_all_permissions
+ assert_response :success
+ assert_equal(Repository.count, json_response["repositories"].size)
+ end
+
+ test "get_all_permissions lists user permissions for users with no authorized keys" do
+ authorize_with :admin
+ AuthorizedKey.destroy_all
+ get :get_all_permissions
+ assert_response :success
+ assert_equal(Repository.count, json_response["repositories"].size)
+ repos_with_perms = []
+ json_response['repositories'].each do |repo|
+ if repo['user_permissions'].any?
+ repos_with_perms << repo['uuid']
+ end
+ end
+ assert_not_empty repos_with_perms, 'permissions are missing'
+ end
+
+ # Ensure get_all_permissions correctly describes what the normal
+ # permission system would do.
+ test "get_all_permissions obeys group permissions" do
+ act_as_user system_user do
+ r = Repository.create!(name: 'admin/groupcanwrite', owner_uuid: users(:admin).uuid)
+ g = Group.create!(group_class: 'group', name: 'repo-writers')
+ u1 = users(:active)
+ u2 = users(:spectator)
+ Link.create!(tail_uuid: g.uuid, head_uuid: r.uuid, link_class: 'permission', name: 'can_manage')
+ Link.create!(tail_uuid: u1.uuid, head_uuid: g.uuid, link_class: 'permission', name: 'can_write')
+ Link.create!(tail_uuid: u2.uuid, head_uuid: g.uuid, link_class: 'permission', name: 'can_read')
+
+ r = Repository.create!(name: 'admin/groupreadonly', owner_uuid: users(:admin).uuid)
+ g = Group.create!(group_class: 'group', name: 'repo-readers')
+ u1 = users(:active)
+ u2 = users(:spectator)
+ Link.create!(tail_uuid: g.uuid, head_uuid: r.uuid, link_class: 'permission', name: 'can_read')
+ Link.create!(tail_uuid: u1.uuid, head_uuid: g.uuid, link_class: 'permission', name: 'can_write')
+ Link.create!(tail_uuid: u2.uuid, head_uuid: g.uuid, link_class: 'permission', name: 'can_read')
+ end
+ authorize_with :admin
+ get :get_all_permissions
+ assert_response :success
+ json_response['repositories'].each do |repo|
+ repo['user_permissions'].each do |user_uuid, perms|
+ u = User.find_by_uuid(user_uuid)
+ if perms['can_read']
+ assert u.can? read: repo['uuid']
+ assert_match /R/, perms['gitolite_permissions']
+ else
+ refute_match /R/, perms['gitolite_permissions']
+ end
+ if perms['can_write']
+ assert u.can? write: repo['uuid']
+ assert_match /RW/, perms['gitolite_permissions']
+ else
+ refute_match /W/, perms['gitolite_permissions']
+ end
+ if perms['can_manage']
+ assert u.can? manage: repo['uuid']
+ assert_match /RW/, perms['gitolite_permissions']
+ end
+ end
+ end
+ end
+
test "default index includes fetch_url" do
authorize_with :active
get(:index)
assert_equal Rails.configuration.user_notifier_email_from, setup_email.from[0]
assert_equal 'foo@example.com', setup_email.to[0]
- assert_equal 'Welcome to Curoverse', setup_email.subject
- assert (setup_email.body.to_s.include? 'Your Arvados account has been set up'),
- 'Expected Your Arvados account has been set up in email body'
- assert (setup_email.body.to_s.include? 'foo@example.com'),
- 'Expected user email in email body'
+ assert_equal 'Welcome to Curoverse - shell account enabled', setup_email.subject
+ assert (setup_email.body.to_s.include? 'Your Arvados shell account has been set up'),
+ 'Expected Your Arvados shell account has been set up in email body'
assert (setup_email.body.to_s.include? Rails.configuration.workbench_address),
'Expected workbench url in email body'
end
assert_empty(json_response.
select { |login| login["user_uuid"] == spectator_uuid })
end
+
+ test "logins without ssh keys are listed" do
+ u, vm = nil
+ act_as_system_user do
+ u = create :active_user, first_name: 'Bob', last_name: 'Blogin'
+ vm = VirtualMachine.create! hostname: 'foo.shell'
+ Link.create!(tail_uuid: u.uuid,
+ head_uuid: vm.uuid,
+ link_class: 'permission',
+ name: 'can_login',
+ properties: {'username' => 'bobblogin'})
+ end
+ authorize_with :admin
+ get :logins, id: vm.uuid
+ assert_response :success
+ assert_equal 1, json_response['items'].length
+ assert_equal nil, json_response['items'][0]['public_key']
+ assert_equal nil, json_response['items'][0]['authorized_key_uuid']
+ assert_equal u.uuid, json_response['items'][0]['user_uuid']
+ assert_equal 'bobblogin', json_response['items'][0]['username']
+ end
end
assert_equal 200, status
end
- test "connect, subscribe, get event" do
+ def subscribe_test
state = 1
spec = nil
ev_uuid = nil
assert_equal spec.uuid, ev_uuid
end
+ test "connect, subscribe, get event" do
+ subscribe_test()
+ end
+
test "connect, subscribe, get two events" do
state = 1
spec = nil
end
+ test "connect, subscribe with invalid filter" do
+ state = 1
+ human = nil
+ human_ev_uuid = nil
+
+ authorize_with :admin
+
+ ws_helper :admin do |ws|
+ ws.on :open do |event|
+ # test that #6451 is fixed (invalid filter crashes websockets)
+ ws.send ({method: 'subscribe', filters: [['object_blarg', 'is_a', 'arvados#human']]}.to_json)
+ end
+
+ ws.on :message do |event|
+ d = Oj.load event.data
+ case state
+ when 1
+ assert_equal 200, d["status"]
+ Specimen.create
+ human = Human.create
+ state = 2
+ when 2
+ assert_equal 500, d["status"]
+ state = 3
+ ws.close
+ when 3
+ assert false, "Should not get any more events"
+ end
+ end
+
+ end
+
+ assert_equal 3, state
+
+ # Try connecting again, ensure that websockets server is still running and
+ # didn't crash per #6451
+ subscribe_test()
+
+ end
+
+
end
require 'test_helper'
class AuthorizedKeyTest < ActiveSupport::TestCase
- # test "the truth" do
- # assert true
- # end
+ TEST_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCf5aTI55uyWr44TckP/ELUAyPsdnf5fTZDcSDN4qiMZYAL7TYV2ixwnbPObLObM0GmHSSFLV1KqsuFICUPgkyKoHbAH6XPgmtfOLU60VkGf1v5uxQ/kXCECRCJmPb3K9dIXGEw+1DXPdOV/xG7rJNvo4a9WK9iqqZr8p+VGKM6C017b8BDLk0tuEEjZ5jXcT/ka/hTScxWkKgF6auPOVQ79OA5+0VaYm4uQLzVUdgwVUPWQQecRrtnc08XYM1htpcLDIAbWfUNK7uE6XR3/OhtrJGf05FGbtGguPgi33F9W3Q3yw6saOK5Y3TfLbskgFaEdLgzqK/QSBRk2zBF49Tj test@localhost"
+
+ test 'create and update key' do
+ u1 = users(:active)
+ act_as_user u1 do
+ ak = AuthorizedKey.new(name: "foo", public_key: TEST_KEY, authorized_user_uuid: u1.uuid)
+ assert ak.save, ak.errors.full_messages.to_s
+ ak.name = "bar"
+ assert ak.valid?, ak.errors.full_messages.to_s
+ assert ak.save, ak.errors.full_messages.to_s
+ end
+ end
+
+ test 'duplicate key not permitted' do
+ u1 = users(:active)
+ act_as_user u1 do
+ ak = AuthorizedKey.new(name: "foo", public_key: TEST_KEY, authorized_user_uuid: u1.uuid)
+ assert ak.save
+ end
+ u2 = users(:spectator)
+ act_as_user u2 do
+ ak2 = AuthorizedKey.new(name: "bar", public_key: TEST_KEY, authorized_user_uuid: u2.uuid)
+ refute ak2.valid?
+ refute ak2.save
+ assert_match /already exists/, ak2.errors.full_messages.to_s
+ end
+ end
+
+ test 'attach key to wrong user account' do
+ act_as_user users(:active) do
+ ak = AuthorizedKey.new(name: "foo", public_key: TEST_KEY)
+ ak.authorized_user_uuid = users(:spectator).uuid
+ refute ak.save
+ ak.uuid = nil
+ ak.authorized_user_uuid = users(:admin).uuid
+ refute ak.save
+ ak.uuid = nil
+ ak.authorized_user_uuid = users(:active).uuid
+ assert ak.save, ak.errors.full_messages.to_s
+ ak.authorized_user_uuid = users(:admin).uuid
+ refute ak.save
+ end
+ end
end
# Test the body of the sent email contains what we expect it to
assert_equal Rails.configuration.user_notifier_email_from, email.from.first
assert_equal user.email, email.to.first
- assert_equal 'Welcome to Curoverse', email.subject
- assert (email.body.to_s.include? 'Your Arvados account has been set up'),
- 'Expected Your Arvados account has been set up in email body'
- assert (email.body.to_s.include? user.email),
- 'Expected user email in email body'
+ assert_equal 'Welcome to Curoverse - shell account enabled', email.subject
+ assert (email.body.to_s.include? 'Your Arvados shell account has been set up'),
+ 'Expected Your Arvados shell account has been set up in email body'
assert (email.body.to_s.include? Rails.configuration.workbench_address),
'Expected workbench url in email body'
end
-/* Deals with parsing Collection responses from API Server. */
+// Deals with parsing Collection responses from API Server.
package collection
"git.curoverse.com/arvados.git/services/datamanager/loggerutil"
"log"
"os"
- "runtime"
"runtime/pprof"
"time"
)
var (
- heap_profile_filename string
+ heapProfileFilename string
// globals for debugging
totalManifestSize uint64
maxManifestSize uint64
}
type ReadCollections struct {
- ReadAllCollections bool
- UuidToCollection map[string]Collection
- OwnerToCollectionSize map[string]int
+ ReadAllCollections bool
+ UuidToCollection map[string]Collection
+ OwnerToCollectionSize map[string]int
+ BlockToDesiredReplication map[blockdigest.DigestWithSize]int
+ CollectionUuidToIndex map[string]int
+ CollectionIndexToUuid []string
+ BlockToCollectionIndices map[blockdigest.DigestWithSize][]int
}
type GetCollectionsParams struct {
}
func init() {
- flag.StringVar(&heap_profile_filename,
+ flag.StringVar(&heapProfileFilename,
"heap-profile",
"",
"File to write the heap profiles to. Leave blank to skip profiling.")
// Otherwise we would see cumulative numbers as explained here:
// https://groups.google.com/d/msg/golang-nuts/ZyHciRglQYc/2nh4Ndu2fZcJ
func WriteHeapProfile() {
- if heap_profile_filename != "" {
+ if heapProfileFilename != "" {
- heap_profile, err := os.Create(heap_profile_filename)
+ heap_profile, err := os.Create(heapProfileFilename)
if err != nil {
log.Fatal(err)
}
func GetCollectionsAndSummarize(params GetCollectionsParams) (results ReadCollections) {
results = GetCollections(params)
- ComputeSizeOfOwnedCollections(&results)
-
- if params.Logger != nil {
- params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
- collectionInfo := p["collection_info"].(map[string]interface{})
- // Since maps are shallow copied, we run a risk of concurrent
- // updates here. By copying results.OwnerToCollectionSize into
- // the log, we're assuming that it won't be updated.
- collectionInfo["owner_to_collection_size"] = results.OwnerToCollectionSize
- })
- }
+ results.Summarize(params.Logger)
log.Printf("Uuid to Size used: %v", results.OwnerToCollectionSize)
log.Printf("Read and processed %d collections",
fieldsWanted := []string{"manifest_text",
"owner_uuid",
"uuid",
- // TODO(misha): Start using the redundancy field.
"redundancy",
"modified_at"}
sdkParams["limit"] = params.BatchSize
}
+ var defaultReplicationLevel int
+ {
+ value, err := params.Client.Discovery("defaultCollectionReplication")
+ if err != nil {
+ loggerutil.FatalWithMessage(params.Logger,
+ fmt.Sprintf("Error querying default collection replication: %v", err))
+ }
+
+ defaultReplicationLevel = int(value.(float64))
+ if defaultReplicationLevel <= 0 {
+ loggerutil.FatalWithMessage(params.Logger,
+ fmt.Sprintf("Default collection replication returned by arvados SDK "+
+ "should be a positive integer but instead it was %d.",
+ defaultReplicationLevel))
+ }
+ }
+
initialNumberOfCollectionsAvailable, err :=
util.NumberItemsAvailable(params.Client, "collections")
if err != nil {
if params.Logger != nil {
params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
- collectionInfo := make(map[string]interface{})
+ collectionInfo := logger.GetOrCreateMap(p, "collection_info")
collectionInfo["num_collections_at_start"] = initialNumberOfCollectionsAvailable
collectionInfo["batch_size"] = params.BatchSize
- p["collection_info"] = collectionInfo
+ collectionInfo["default_replication_level"] = defaultReplicationLevel
})
}
sdkParams["filters"].([][]string)[0][2] =
ProcessCollections(params.Logger,
collections.Items,
+ defaultReplicationLevel,
results.UuidToCollection).Format(time.RFC3339)
// update counts
if params.Logger != nil {
params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
- collectionInfo := p["collection_info"].(map[string]interface{})
+ collectionInfo := logger.GetOrCreateMap(p, "collection_info")
collectionInfo["collections_read"] = totalCollections
collectionInfo["latest_modified_date_seen"] = sdkParams["filters"].([][]string)[0][2]
collectionInfo["total_manifest_size"] = totalManifestSize
}
}
- // Just in case this lowers the numbers reported in the heap profile.
- runtime.GC()
-
// Write the heap profile for examining memory usage
WriteHeapProfile()
func ProcessCollections(arvLogger *logger.Logger,
receivedCollections []SdkCollectionInfo,
+ defaultReplicationLevel int,
uuidToCollection map[string]Collection) (latestModificationDate time.Time) {
for _, sdkCollection := range receivedCollections {
collection := Collection{Uuid: StrCopy(sdkCollection.Uuid),
loggerutil.FatalWithMessage(arvLogger,
fmt.Sprintf(
"Arvados SDK collection returned with unexpected zero "+
- "modifcation date. This probably means that either we failed to "+
+ "modification date. This probably means that either we failed to "+
"parse the modification date or the API server has changed how "+
- "it returns modification dates: %v",
+ "it returns modification dates: %+v",
collection))
}
if sdkCollection.ModifiedAt.After(latestModificationDate) {
latestModificationDate = sdkCollection.ModifiedAt
}
+
+ if collection.ReplicationLevel == 0 {
+ collection.ReplicationLevel = defaultReplicationLevel
+ }
+
manifest := manifest.Manifest{sdkCollection.ManifestText}
manifestSize := uint64(len(sdkCollection.ManifestText))
return
}
-func ComputeSizeOfOwnedCollections(readCollections *ReadCollections) {
+func (readCollections *ReadCollections) Summarize(arvLogger *logger.Logger) {
readCollections.OwnerToCollectionSize = make(map[string]int)
+ readCollections.BlockToDesiredReplication = make(map[blockdigest.DigestWithSize]int)
+ numCollections := len(readCollections.UuidToCollection)
+ readCollections.CollectionUuidToIndex = make(map[string]int, numCollections)
+ readCollections.CollectionIndexToUuid = make([]string, 0, numCollections)
+ readCollections.BlockToCollectionIndices = make(map[blockdigest.DigestWithSize][]int)
+
for _, coll := range readCollections.UuidToCollection {
+ collectionIndex := len(readCollections.CollectionIndexToUuid)
+ readCollections.CollectionIndexToUuid =
+ append(readCollections.CollectionIndexToUuid, coll.Uuid)
+ readCollections.CollectionUuidToIndex[coll.Uuid] = collectionIndex
+
readCollections.OwnerToCollectionSize[coll.OwnerUuid] =
readCollections.OwnerToCollectionSize[coll.OwnerUuid] + coll.TotalSize
+
+ for block, size := range coll.BlockDigestToSize {
+ locator := blockdigest.DigestWithSize{Digest: block, Size: uint32(size)}
+ readCollections.BlockToCollectionIndices[locator] =
+ append(readCollections.BlockToCollectionIndices[locator],
+ collectionIndex)
+ storedReplication := readCollections.BlockToDesiredReplication[locator]
+ if coll.ReplicationLevel > storedReplication {
+ readCollections.BlockToDesiredReplication[locator] =
+ coll.ReplicationLevel
+ }
+ }
+ }
+
+ if arvLogger != nil {
+ arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
+ collectionInfo := logger.GetOrCreateMap(p, "collection_info")
+ // Since maps are shallow copied, we run a risk of concurrent
+ // updates here. By copying readCollections.OwnerToCollectionSize
+ // into the log, we're assuming that it won't be updated.
+ collectionInfo["owner_to_collection_size"] =
+ readCollections.OwnerToCollectionSize
+ collectionInfo["distinct_blocks_named"] =
+ len(readCollections.BlockToDesiredReplication)
+ })
}
return
--- /dev/null
+package collection
+
+import (
+ "git.curoverse.com/arvados.git/sdk/go/blockdigest"
+ . "gopkg.in/check.v1"
+ "testing"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+ TestingT(t)
+}
+
+type MySuite struct{}
+
+var _ = Suite(&MySuite{})
+
+// This captures the result we expect from
+// ReadCollections.Summarize(). Because CollectionUuidToIndex is
+// indeterminate, we replace BlockToCollectionIndices with
+// BlockToCollectionUuids.
+type ExpectedSummary struct {
+ OwnerToCollectionSize map[string]int
+ BlockToDesiredReplication map[blockdigest.DigestWithSize]int
+ BlockToCollectionUuids map[blockdigest.DigestWithSize][]string
+}
+
+// Checks that a summarized ReadCollections matches the expectation.
+// BlockToCollectionIndices is compared via the uuids the indices map
+// to (converted to sets), since index assignment order is
+// indeterminate.
+func CompareSummarizedReadCollections(c *C,
+ summarized ReadCollections,
+ expected ExpectedSummary) {
+
+ c.Assert(summarized.OwnerToCollectionSize, DeepEquals,
+ expected.OwnerToCollectionSize)
+
+ c.Assert(summarized.BlockToDesiredReplication, DeepEquals,
+ expected.BlockToDesiredReplication)
+
+ // Translate each collection index back to its uuid, collecting
+ // the uuids into a set so ordering differences don't matter.
+ summarizedBlockToCollectionUuids :=
+ make(map[blockdigest.DigestWithSize]map[string]struct{})
+ for digest, indices := range summarized.BlockToCollectionIndices {
+ uuidSet := make(map[string]struct{})
+ summarizedBlockToCollectionUuids[digest] = uuidSet
+ for _, index := range indices {
+ uuidSet[summarized.CollectionIndexToUuid[index]] = struct{}{}
+ }
+ }
+
+ // Convert the expected uuid slices into sets of the same shape.
+ expectedBlockToCollectionUuids :=
+ make(map[blockdigest.DigestWithSize]map[string]struct{})
+ for digest, uuidSlice := range expected.BlockToCollectionUuids {
+ uuidSet := make(map[string]struct{})
+ expectedBlockToCollectionUuids[digest] = uuidSet
+ for _, uuid := range uuidSlice {
+ uuidSet[uuid] = struct{}{}
+ }
+ }
+
+ c.Assert(summarizedBlockToCollectionUuids, DeepEquals,
+ expectedBlockToCollectionUuids)
+}
+
+func (s *MySuite) TestSummarizeSimple(checker *C) {
+ rc := MakeTestReadCollections([]TestCollectionSpec{TestCollectionSpec{
+ ReplicationLevel: 5,
+ Blocks: []int{1, 2},
+ }})
+
+ rc.Summarize(nil)
+
+ c := rc.UuidToCollection["col0"]
+
+ blockDigest1 := blockdigest.MakeTestDigestWithSize(1)
+ blockDigest2 := blockdigest.MakeTestDigestWithSize(2)
+
+ expected := ExpectedSummary{
+ OwnerToCollectionSize: map[string]int{c.OwnerUuid: c.TotalSize},
+ BlockToDesiredReplication: map[blockdigest.DigestWithSize]int{blockDigest1: 5, blockDigest2: 5},
+ BlockToCollectionUuids: map[blockdigest.DigestWithSize][]string{blockDigest1: []string{c.Uuid}, blockDigest2: []string{c.Uuid}},
+ }
+
+ CompareSummarizedReadCollections(checker, rc, expected)
+}
+
+func (s *MySuite) TestSummarizeOverlapping(checker *C) {
+ rc := MakeTestReadCollections([]TestCollectionSpec{
+ TestCollectionSpec{
+ ReplicationLevel: 5,
+ Blocks: []int{1, 2},
+ },
+ TestCollectionSpec{
+ ReplicationLevel: 8,
+ Blocks: []int{2, 3},
+ },
+ })
+
+ rc.Summarize(nil)
+
+ c0 := rc.UuidToCollection["col0"]
+ c1 := rc.UuidToCollection["col1"]
+
+ blockDigest1 := blockdigest.MakeTestDigestWithSize(1)
+ blockDigest2 := blockdigest.MakeTestDigestWithSize(2)
+ blockDigest3 := blockdigest.MakeTestDigestWithSize(3)
+
+ expected := ExpectedSummary{
+ OwnerToCollectionSize: map[string]int{
+ c0.OwnerUuid: c0.TotalSize,
+ c1.OwnerUuid: c1.TotalSize,
+ },
+ BlockToDesiredReplication: map[blockdigest.DigestWithSize]int{
+ blockDigest1: 5,
+ blockDigest2: 8,
+ blockDigest3: 8,
+ },
+ BlockToCollectionUuids: map[blockdigest.DigestWithSize][]string{
+ blockDigest1: []string{c0.Uuid},
+ blockDigest2: []string{c0.Uuid, c1.Uuid},
+ blockDigest3: []string{c1.Uuid},
+ },
+ }
+
+ CompareSummarizedReadCollections(checker, rc, expected)
+}
--- /dev/null
+// Code used for testing only.
+
+package collection
+
+import (
+ "fmt"
+ "git.curoverse.com/arvados.git/sdk/go/blockdigest"
+)
+
+type TestCollectionSpec struct {
+ // The desired replication level
+ ReplicationLevel int
+ // Blocks this contains, represented by ints. Ints repeated will
+ // still only represent one block
+ Blocks []int
+}
+
+// Creates a ReadCollections object for testing based on the given
+// specs. Only the ReadAllCollections and UuidToCollection fields are
+// populated. To populate other fields call rc.Summarize().
+func MakeTestReadCollections(specs []TestCollectionSpec) (rc ReadCollections) {
+	rc = ReadCollections{
+		ReadAllCollections: true,
+		UuidToCollection:   map[string]Collection{},
+	}
+
+	for i, spec := range specs {
+		c := Collection{
+			Uuid:              fmt.Sprintf("col%d", i),
+			OwnerUuid:         fmt.Sprintf("owner%d", i),
+			ReplicationLevel:  spec.ReplicationLevel,
+			BlockDigestToSize: map[blockdigest.BlockDigest]int{},
+		}
+		for _, j := range spec.Blocks {
+			c.BlockDigestToSize[blockdigest.MakeTestBlockDigest(j)] = j
+		}
+		// We compute the size in a separate loop because the value
+		// computed in the above loop would be invalid if spec.Blocks
+		// contained duplicates.
+		for _, size := range c.BlockDigestToSize {
+			c.TotalSize += size
+		}
+		// Store the collection only AFTER TotalSize has been computed.
+		// Collection is copied by value on map assignment, so storing c
+		// before the loop above (as this code originally did) leaves
+		// TotalSize == 0 in the stored entry, even though the shared
+		// BlockDigestToSize map is populated.
+		rc.UuidToCollection[c.Uuid] = c
+	}
+	return
+}
+
+// Returns a slice giving the collection index of each collection that
+// was passed in to MakeTestReadCollections. rc.Summarize() must be
+// called before this method, since Summarize() assigns an index to
+// each collection.
+func (rc ReadCollections) CollectionIndicesForTesting() (indices []int) {
+	// TODO(misha): Assert that rc.Summarize() has been called.
+	indices = make([]int, len(rc.CollectionIndexToUuid))
+	for i := range indices {
+		// MakeTestReadCollections names collections "col0", "col1", ...
+		indices[i] = rc.CollectionUuidToIndex[fmt.Sprintf("col%d", i)]
+	}
+	return
+}
import (
"flag"
+ "fmt"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+ "git.curoverse.com/arvados.git/sdk/go/keepclient"
"git.curoverse.com/arvados.git/sdk/go/logger"
"git.curoverse.com/arvados.git/sdk/go/util"
"git.curoverse.com/arvados.git/services/datamanager/collection"
"git.curoverse.com/arvados.git/services/datamanager/keep"
"git.curoverse.com/arvados.git/services/datamanager/loggerutil"
+ "git.curoverse.com/arvados.git/services/datamanager/summary"
"log"
"time"
)
var arvLogger *logger.Logger
if logEventTypePrefix != "" {
- arvLogger = logger.NewLogger(logger.LoggerParams{Client: arv,
+ arvLogger = logger.NewLogger(logger.LoggerParams{
+ Client: arv,
EventTypePrefix: logEventTypePrefix,
WriteInterval: time.Second * time.Duration(logFrequencySeconds)})
}
arvLogger.AddWriteHook(loggerutil.LogMemoryAlloc)
}
- collectionChannel := make(chan collection.ReadCollections)
+ var (
+ dataFetcher summary.DataFetcher
+ readCollections collection.ReadCollections
+ keepServerInfo keep.ReadServers
+ )
- go func() {
- collectionChannel <- collection.GetCollectionsAndSummarize(
- collection.GetCollectionsParams{
- Client: arv, Logger: arvLogger, BatchSize: 50})
- }()
+ if summary.ShouldReadData() {
+ dataFetcher = summary.ReadData
+ } else {
+ dataFetcher = BuildDataFetcher(arv)
+ }
+
+ dataFetcher(arvLogger, &readCollections, &keepServerInfo)
+
+ summary.MaybeWriteData(arvLogger, readCollections, keepServerInfo)
+
+ buckets := summary.BucketReplication(readCollections, keepServerInfo)
+ bucketCounts := buckets.Counts()
- keepServerInfo := keep.GetKeepServersAndSummarize(
- keep.GetKeepServersParams{Client: arv, Logger: arvLogger, Limit: 1000})
+ replicationSummary := buckets.SummarizeBuckets(readCollections)
+ replicationCounts := replicationSummary.ComputeCounts()
- readCollections := <-collectionChannel
+ log.Printf("Blocks In Collections: %d, "+
+ "\nBlocks In Keep: %d.",
+ len(readCollections.BlockToDesiredReplication),
+ len(keepServerInfo.BlockToServers))
+ log.Println(replicationCounts.PrettyPrint())
+
+ log.Printf("Blocks Histogram:")
+ for _, rlbss := range bucketCounts {
+ log.Printf("%+v: %10d",
+ rlbss.Levels,
+ rlbss.Count)
+ }
+
+ kc, err := keepclient.MakeKeepClient(&arv)
+ if err != nil {
+ loggerutil.FatalWithMessage(arvLogger,
+ fmt.Sprintf("Error setting up keep client %s", err.Error()))
+ }
- // TODO(misha): Use these together to verify replication.
- _ = readCollections
- _ = keepServerInfo
+ pullServers := summary.ComputePullServers(kc,
+ &keepServerInfo,
+ readCollections.BlockToDesiredReplication,
+ replicationSummary.UnderReplicatedBlocks)
+
+ pullLists := summary.BuildPullLists(pullServers)
+
+ summary.WritePullLists(arvLogger, pullLists)
// Log that we're finished. We force the recording, since go will
- // not wait for the timer before exiting.
+ // not wait for the write timer before exiting.
if arvLogger != nil {
arvLogger.FinalUpdate(func(p map[string]interface{}, e map[string]interface{}) {
+ summaryInfo := logger.GetOrCreateMap(p, "summary_info")
+ summaryInfo["block_replication_counts"] = bucketCounts
+ summaryInfo["replication_summary"] = replicationCounts
+ p["summary_info"] = summaryInfo
+
p["run_info"].(map[string]interface{})["finished_at"] = time.Now()
})
}
}
+
+// Returns a data fetcher that fetches data from remote servers.
+// The returned closure fills in *readCollections and *keepServerInfo.
+func BuildDataFetcher(arv arvadosclient.ArvadosClient) summary.DataFetcher {
+ return func(arvLogger *logger.Logger,
+ readCollections *collection.ReadCollections,
+ keepServerInfo *keep.ReadServers) {
+ collectionChannel := make(chan collection.ReadCollections)
+
+ // Fetch collections in a goroutine so it overlaps with the keep
+ // server fetch below; the unbuffered channel is the join point.
+ go func() {
+ collectionChannel <- collection.GetCollectionsAndSummarize(
+ collection.GetCollectionsParams{
+ Client: arv,
+ Logger: arvLogger,
+ BatchSize: 50})
+ }()
+
+ // Fetch keep server contents on the current goroutine.
+ *keepServerInfo = keep.GetKeepServersAndSummarize(
+ keep.GetKeepServersParams{
+ Client: arv,
+ Logger: arvLogger,
+ Limit: 1000})
+
+ // Block until the collection fetch completes.
+ *readCollections = <-collectionChannel
+ }
+}
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/blockdigest"
"git.curoverse.com/arvados.git/sdk/go/logger"
- "git.curoverse.com/arvados.git/sdk/go/manifest"
"git.curoverse.com/arvados.git/services/datamanager/loggerutil"
"io"
"io/ioutil"
// Info about a particular block returned by the server
type BlockInfo struct {
- Digest blockdigest.BlockDigest
- Size int
+ Digest blockdigest.DigestWithSize
Mtime int64 // TODO(misha): Replace this with a timestamp.
}
// Info about a specified block given by a server
type BlockServerInfo struct {
ServerIndex int
- Size int
Mtime int64 // TODO(misha): Replace this with a timestamp.
}
type ServerContents struct {
- BlockDigestToInfo map[blockdigest.BlockDigest]BlockInfo
+ BlockDigestToInfo map[blockdigest.DigestWithSize]BlockInfo
}
type ServerResponse struct {
KeepServerIndexToAddress []ServerAddress
KeepServerAddressToIndex map[ServerAddress]int
ServerToContents map[ServerAddress]ServerContents
- BlockToServers map[blockdigest.BlockDigest][]BlockServerInfo
+ BlockToServers map[blockdigest.DigestWithSize][]BlockServerInfo
BlockReplicationCounts map[int]int
}
// TODO(misha): Change this to include the UUID as well.
func (s ServerAddress) String() string {
+ return s.HostPort()
+}
+
+func (s ServerAddress) HostPort() string {
return fmt.Sprintf("%s:%d", s.Host, s.Port)
}
results = GetKeepServers(params)
log.Printf("Returned %d keep disks", len(results.ServerToContents))
- ComputeBlockReplicationCounts(&results)
+ results.Summarize(params.Logger)
log.Printf("Replication level distribution: %v",
results.BlockReplicationCounts)
if params.Logger != nil {
params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
- keepInfo := make(map[string]interface{})
-
+ keepInfo := logger.GetOrCreateMap(p, "keep_info")
keepInfo["num_keep_servers_available"] = sdkResponse.ItemsAvailable
keepInfo["num_keep_servers_received"] = len(sdkResponse.KeepServers)
keepInfo["keep_servers"] = sdkResponse.KeepServers
-
- p["keep_info"] = keepInfo
})
}
}
results.ServerToContents = make(map[ServerAddress]ServerContents)
- results.BlockToServers = make(map[blockdigest.BlockDigest][]BlockServerInfo)
+ results.BlockToServers = make(map[blockdigest.DigestWithSize][]BlockServerInfo)
// Read all the responses
for i := range sdkResponse.KeepServers {
results.BlockToServers[blockInfo.Digest] = append(
results.BlockToServers[blockInfo.Digest],
BlockServerInfo{ServerIndex: serverIndex,
- Size: blockInfo.Size,
Mtime: blockInfo.Mtime})
}
}
resp, err := client.Do(req)
if err != nil {
loggerutil.FatalWithMessage(arvLogger,
- fmt.Sprintf("Error fetching %s: %v", req.URL.String(), err))
+ fmt.Sprintf("Error fetching %s: %v. Response was %+v",
+ req.URL.String(),
+ err,
+ resp))
}
return ReadServerResponse(arvLogger, keepServer, resp)
if arvLogger != nil {
now := time.Now()
arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
- keepInfo := p["keep_info"].(map[string]interface{})
+ keepInfo := logger.GetOrCreateMap(p, "keep_info")
serverInfo := make(map[string]interface{})
serverInfo["status_request_sent_at"] = now
serverInfo["host"] = keepServer.Host
if arvLogger != nil {
now := time.Now()
arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
- keepInfo := p["keep_info"].(map[string]interface{})
+ keepInfo := logger.GetOrCreateMap(p, "keep_info")
serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})
serverInfo["status_response_processed_at"] = now
serverInfo["status"] = keepStatus
if arvLogger != nil {
now := time.Now()
arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
- keepInfo := p["keep_info"].(map[string]interface{})
+ keepInfo := logger.GetOrCreateMap(p, "keep_info")
serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})
serverInfo["index_request_sent_at"] = now
})
if arvLogger != nil {
now := time.Now()
arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
- keepInfo := p["keep_info"].(map[string]interface{})
+ keepInfo := logger.GetOrCreateMap(p, "keep_info")
serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})
serverInfo["index_response_received_at"] = now
})
response.Address = keepServer
response.Contents.BlockDigestToInfo =
- make(map[blockdigest.BlockDigest]BlockInfo)
+ make(map[blockdigest.DigestWithSize]BlockInfo)
reader := bufio.NewReader(resp.Body)
numLines, numDuplicates, numSizeDisagreements := 0, 0, 0
for {
if storedBlock, ok := response.Contents.BlockDigestToInfo[blockInfo.Digest]; ok {
// This server returned multiple lines containing the same block digest.
numDuplicates += 1
- if storedBlock.Size != blockInfo.Size {
- numSizeDisagreements += 1
- // TODO(misha): Consider failing here.
- message := fmt.Sprintf("Saw different sizes for the same block "+
- "on %s: %+v %+v",
- keepServer.String(),
- storedBlock,
- blockInfo)
- log.Println(message)
- if arvLogger != nil {
- arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
- keepInfo := p["keep_info"].(map[string]interface{})
- serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})
- var error_list []string
- read_error_list, has_list := serverInfo["error_list"]
- if has_list {
- error_list = read_error_list.([]string)
- } // If we didn't have the list, error_list is already an empty list
- serverInfo["error_list"] = append(error_list, message)
- })
- }
- }
- // Keep the block that is bigger, or the block that's newer in
- // the case of a size tie.
- if storedBlock.Size < blockInfo.Size ||
- (storedBlock.Size == blockInfo.Size &&
- storedBlock.Mtime < blockInfo.Mtime) {
+ // Keep the block that's newer.
+ if storedBlock.Mtime < blockInfo.Mtime {
response.Contents.BlockDigestToInfo[blockInfo.Digest] = blockInfo
}
} else {
if arvLogger != nil {
now := time.Now()
arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
- keepInfo := p["keep_info"].(map[string]interface{})
+ keepInfo := logger.GetOrCreateMap(p, "keep_info")
serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})
serverInfo["processing_finished_at"] = now
tokens)
}
- var locator manifest.BlockLocator
- if locator, err = manifest.ParseBlockLocator(tokens[0]); err != nil {
+ var locator blockdigest.BlockLocator
+ if locator, err = blockdigest.ParseBlockLocator(tokens[0]); err != nil {
+ err = fmt.Errorf("%v Received error while parsing line \"%s\"",
+ err, indexLine)
return
}
if len(locator.Hints) > 0 {
if err != nil {
return
}
- blockInfo.Digest = locator.Digest
- blockInfo.Size = locator.Size
+ blockInfo.Digest =
+ blockdigest.DigestWithSize{Digest: locator.Digest,
+ Size: uint32(locator.Size)}
return
}
-func ComputeBlockReplicationCounts(readServers *ReadServers) {
+func (readServers *ReadServers) Summarize(arvLogger *logger.Logger) {
readServers.BlockReplicationCounts = make(map[int]int)
for _, infos := range readServers.BlockToServers {
replication := len(infos)
readServers.BlockReplicationCounts[replication] += 1
}
+
+ if arvLogger != nil {
+ arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
+ keepInfo := logger.GetOrCreateMap(p, "keep_info")
+ keepInfo["distinct_blocks_stored"] = len(readServers.BlockToServers)
+ })
+ }
+
}
if arvLogger != nil {
now := time.Now()
arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
- runInfo := make(map[string]interface{})
+ runInfo := logger.GetOrCreateMap(p, "run_info")
runInfo["started_at"] = now
runInfo["args"] = os.Args
hostname, err := os.Hostname()
runInfo["hostname"] = hostname
}
runInfo["pid"] = os.Getpid()
- p["run_info"] = runInfo
})
}
}
// A LogMutator that records the current memory usage. This is most useful as a logger write hook.
-//
-// Assumes we already have a map named "run_info" in properties. LogRunInfo() can create such a map for you if you call it.
func LogMemoryAlloc(p map[string]interface{}, e map[string]interface{}) {
- runInfo := p["run_info"].(map[string]interface{})
+ runInfo := logger.GetOrCreateMap(p, "run_info")
var memStats runtime.MemStats
runtime.ReadMemStats(&memStats)
- runInfo["alloc_bytes_in_use"] = memStats.Alloc
+ runInfo["memory_bytes_in_use"] = memStats.Alloc
+ runInfo["memory_bytes_reserved"] = memStats.Sys
}
func FatalWithMessage(arvLogger *logger.Logger, message string) {
if arvLogger != nil {
arvLogger.FinalUpdate(func(p map[string]interface{}, e map[string]interface{}) {
p["FATAL"] = message
- p["run_info"].(map[string]interface{})["finished_at"] = time.Now()
+ runInfo := logger.GetOrCreateMap(p, "run_info")
+ runInfo["finished_at"] = time.Now()
})
}
--- /dev/null
+/* Ensures that we only have one copy of each unique string. This is
+   not designed for concurrent access. */
+package summary
+
+// This code should probably be moved somewhere more universal.
+
+type CanonicalString struct {
+ m map[string]string
+}
+
+// Get returns the canonical copy of s, storing s itself on first
+// sight so later lookups of an equal string share backing storage.
+func (cs *CanonicalString) Get(s string) (r string) {
+	// Lazily allocate the map on first use.
+	if cs.m == nil {
+		cs.m = make(map[string]string)
+	}
+	if canonical, found := cs.m[s]; found {
+		return canonical
+	}
+
+	// s may be a substring of a much larger string.
+	// If we store s, it will prevent that larger string from getting
+	// garbage collected.
+	// If this is something you worry about you should change this code
+	// to make an explicit copy of s using a byte array.
+	cs.m[s] = s
+	return s
+}
--- /dev/null
+// Handles writing data to and reading data from disk to speed up development.
+
+package summary
+
+import (
+ "encoding/gob"
+ "flag"
+ "fmt"
+ "git.curoverse.com/arvados.git/sdk/go/logger"
+ "git.curoverse.com/arvados.git/services/datamanager/collection"
+ "git.curoverse.com/arvados.git/services/datamanager/keep"
+ "git.curoverse.com/arvados.git/services/datamanager/loggerutil"
+ "log"
+ "os"
+)
+
+// Used to locally cache data read from servers to reduce execution
+// time when developing. Not for use in production.
+type serializedData struct {
+ ReadCollections collection.ReadCollections
+ KeepServerInfo keep.ReadServers
+}
+
+var (
+ writeDataTo string
+ readDataFrom string
+)
+
+type DataFetcher func(arvLogger *logger.Logger,
+ readCollections *collection.ReadCollections,
+ keepServerInfo *keep.ReadServers)
+
+func init() {
+ flag.StringVar(&writeDataTo,
+ "write-data-to",
+ "",
+ "Write summary of data received to this file. Used for development only.")
+ flag.StringVar(&readDataFrom,
+ "read-data-from",
+ "",
+ "Avoid network i/o and read summary data from this file instead. Used for development only.")
+}
+
+// Writes data we've read to a file.
+//
+// This is useful for development, so that we don't need to read all
+// our data from the network every time we tweak something.
+//
+// This should not be used outside of development, since you'll be
+// working with stale data.
+// Writes the data we've read to the file named by -write-data-to,
+// returning true iff a write was attempted.
+//
+// This is useful for development, so that we don't need to read all
+// our data from the network every time we tweak something.
+//
+// This should not be used outside of development, since you'll be
+// working with stale data.
+func MaybeWriteData(arvLogger *logger.Logger,
+	readCollections collection.ReadCollections,
+	keepServerInfo keep.ReadServers) bool {
+	// Nothing to do unless the -write-data-to flag was given.
+	if writeDataTo == "" {
+		return false
+	}
+
+	summaryFile, err := os.Create(writeDataTo)
+	if err != nil {
+		loggerutil.FatalWithMessage(arvLogger,
+			fmt.Sprintf("Failed to open %s: %v", writeDataTo, err))
+	}
+	defer summaryFile.Close()
+
+	encoder := gob.NewEncoder(summaryFile)
+	err = encoder.Encode(serializedData{
+		ReadCollections: readCollections,
+		KeepServerInfo:  keepServerInfo})
+	if err != nil {
+		loggerutil.FatalWithMessage(arvLogger,
+			fmt.Sprintf("Failed to write summary data: %v", err))
+	}
+	log.Printf("Wrote summary data to: %s", writeDataTo)
+	return true
+}
+
+func ShouldReadData() bool {
+ return readDataFrom != ""
+}
+
+// Reads data that we've written to a file.
+//
+// This is useful for development, so that we don't need to read all
+// our data from the network every time we tweak something.
+//
+// This should not be used outside of development, since you'll be
+// working with stale data.
+func ReadData(arvLogger *logger.Logger,
+ readCollections *collection.ReadCollections,
+ keepServerInfo *keep.ReadServers) {
+ if readDataFrom == "" {
+ loggerutil.FatalWithMessage(arvLogger,
+ "ReadData() called with empty filename.")
+ } else {
+ summaryFile, err := os.Open(readDataFrom)
+ if err != nil {
+ loggerutil.FatalWithMessage(arvLogger,
+ fmt.Sprintf("Failed to open %s: %v", readDataFrom, err))
+ }
+ defer summaryFile.Close()
+
+ dec := gob.NewDecoder(summaryFile)
+ data := serializedData{}
+ err = dec.Decode(&data)
+ if err != nil {
+ loggerutil.FatalWithMessage(arvLogger,
+ fmt.Sprintf("Failed to read summary data: %v", err))
+ }
+
+ // re-summarize data, so that we can update our summarizing
+ // functions without needing to do all our network i/o
+ data.ReadCollections.Summarize(arvLogger)
+ data.KeepServerInfo.Summarize(arvLogger)
+
+ *readCollections = data.ReadCollections
+ *keepServerInfo = data.KeepServerInfo
+ log.Printf("Read summary data from: %s", readDataFrom)
+ }
+}
--- /dev/null
+// Code for generating pull lists as described in https://arvados.org/projects/arvados/wiki/Keep_Design_Doc#Pull-List
+package summary
+
+import (
+ "encoding/json"
+ "fmt"
+ "git.curoverse.com/arvados.git/sdk/go/blockdigest"
+ "git.curoverse.com/arvados.git/sdk/go/keepclient"
+ "git.curoverse.com/arvados.git/sdk/go/logger"
+ "git.curoverse.com/arvados.git/services/datamanager/keep"
+ "git.curoverse.com/arvados.git/services/datamanager/loggerutil"
+ "log"
+ "os"
+ "strings"
+)
+
+type Locator blockdigest.DigestWithSize
+
+// MarshalJSON renders the locator as a JSON string, e.g.
+// "<digest>+<size>".
+func (l Locator) MarshalJSON() ([]byte, error) {
+	quoted := `"` + blockdigest.DigestWithSize(l).String() + `"`
+	return []byte(quoted), nil
+}
+
+// One entry in the Pull List
+type PullRequest struct {
+ Locator Locator `json:"locator"`
+ Servers []string `json:"servers"`
+}
+
+// The Pull List for a particular server
+type PullList []PullRequest
+
+// PullListByLocator implements sort.Interface for PullList based on
+// the Digest.
+type PullListByLocator PullList
+
+func (a PullListByLocator) Len() int { return len(a) }
+func (a PullListByLocator) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a PullListByLocator) Less(i, j int) bool {
+	// Order by digest (high word first, then low word), breaking
+	// full-digest ties on the size component.
+	di, dj := a[i].Locator.Digest, a[j].Locator.Digest
+	switch {
+	case di.H != dj.H:
+		return di.H < dj.H
+	case di.L != dj.L:
+		return di.L < dj.L
+	default:
+		return a[i].Locator.Size < a[j].Locator.Size
+	}
+}
+
+// For a given under-replicated block, this structure represents which
+// servers should pull the specified block and which servers they can
+// pull it from.
+type PullServers struct {
+ To []string // Servers that should pull the specified block
+ From []string // Servers that already contain the specified block
+}
+
+// Creates a map from block locator to PullServers with one entry for
+// each under-replicated block.
+//
+// This method ignores zero-replica blocks since there are no servers
+// to pull them from, so callers should feel free to omit them, but
+// this function will ignore them if they are provided.
+func ComputePullServers(kc *keepclient.KeepClient,
+ keepServerInfo *keep.ReadServers,
+ blockToDesiredReplication map[blockdigest.DigestWithSize]int,
+ underReplicated BlockSet) (m map[Locator]PullServers) {
+ m = map[Locator]PullServers{}
+ // We use CanonicalString to avoid filling memory with duplicate
+ // copies of the same string.
+ var cs CanonicalString
+
+ // Servers that are writeable
+ writableServers := map[string]struct{}{}
+ for _, url := range kc.WritableLocalRoots() {
+ writableServers[cs.Get(url)] = struct{}{}
+ }
+
+ for block, _ := range underReplicated {
+ serversStoringBlock := keepServerInfo.BlockToServers[block]
+ numCopies := len(serversStoringBlock)
+ numCopiesMissing := blockToDesiredReplication[block] - numCopies
+ if numCopiesMissing > 0 {
+ // We expect this to always be true, since the block was listed
+ // in underReplicated.
+
+ if numCopies > 0 {
+ // Not much we can do with blocks with no copies.
+
+ // A server's host-port string appears as a key in this map
+ // iff it contains the block.
+ serverHasBlock := map[string]struct{}{}
+ for _, info := range serversStoringBlock {
+ sa := keepServerInfo.KeepServerIndexToAddress[info.ServerIndex]
+ serverHasBlock[cs.Get(sa.HostPort())] = struct{}{}
+ }
+
+ // NOTE(review): roots are presumably ordered by rendezvous
+ // weight for this block — confirm against keepclient.RootSorter.
+ roots := keepclient.NewRootSorter(kc.LocalRoots(),
+ block.String()).GetSortedRoots()
+
+ l := Locator(block)
+ m[l] = CreatePullServers(cs, serverHasBlock, writableServers,
+ roots, numCopiesMissing)
+ }
+ }
+ }
+ return m
+}
+
+// Creates a pull list in which the To and From fields preserve the
+// ordering of sorted servers and the contents are all canonical
+// strings.
+func CreatePullServers(cs CanonicalString,
+ serverHasBlock map[string]struct{},
+ writableServers map[string]struct{},
+ sortedServers []string,
+ maxToFields int) (ps PullServers) {
+
+ ps = PullServers{
+ To: make([]string, 0, maxToFields),
+ From: make([]string, 0, len(serverHasBlock)),
+ }
+
+ for _, host := range sortedServers {
+ // Strip the protocol portion of the url.
+ // Use the canonical copy of the string to avoid memory waste.
+ server := cs.Get(RemoveProtocolPrefix(host))
+ _, hasBlock := serverHasBlock[server]
+ if hasBlock {
+ // The from field should include the protocol.
+ ps.From = append(ps.From, cs.Get(host))
+ } else if len(ps.To) < maxToFields {
+ _, writable := writableServers[host]
+ if writable {
+ ps.To = append(ps.To, server)
+ }
+ }
+ }
+
+ return
+}
+
+// Strips the protocol prefix from a url.
+func RemoveProtocolPrefix(url string) string {
+ return url[(strings.LastIndex(url, "/") + 1):]
+}
+
+// Produces a PullList for each keep server.
+func BuildPullLists(lps map[Locator]PullServers) (spl map[string]PullList) {
+	spl = map[string]PullList{}
+	// We don't worry about canonicalizing our strings here, because we
+	// assume lps was created by ComputePullServers() which already
+	// canonicalized the strings for us.
+	for locator, pullServers := range lps {
+		request := PullRequest{Locator: locator, Servers: pullServers.From}
+		for _, destination := range pullServers.To {
+			// append works on the zero-value (nil) slice, so no explicit
+			// existence check is needed for new destinations.
+			spl[destination] = append(spl[destination], request)
+		}
+	}
+	return
+}
+
+// Writes each pull list to a file.
+// The filename is based on the hostname.
+//
+// This is just a hack for prototyping, it is not expected to be used
+// in production.
+func WritePullLists(arvLogger *logger.Logger,
+	pullLists map[string]PullList) {
+	r := strings.NewReplacer(":", ".")
+	for host, list := range pullLists {
+		filename := fmt.Sprintf("pull_list.%s", r.Replace(host))
+		pullListFile, err := os.Create(filename)
+		if err != nil {
+			loggerutil.FatalWithMessage(arvLogger,
+				fmt.Sprintf("Failed to open %s: %v", filename, err))
+		}
+
+		enc := json.NewEncoder(pullListFile)
+		err = enc.Encode(list)
+		if err != nil {
+			pullListFile.Close()
+			loggerutil.FatalWithMessage(arvLogger,
+				fmt.Sprintf("Failed to write pull list to %s: %v", filename, err))
+		}
+		// Close per iteration rather than defer: a defer inside the
+		// loop would hold every file open until the function returns.
+		pullListFile.Close()
+		log.Printf("Wrote pull list to %s.", filename)
+	}
+}
--- /dev/null
+package summary
+
+import (
+ "encoding/json"
+ "git.curoverse.com/arvados.git/sdk/go/blockdigest"
+ . "gopkg.in/check.v1"
+ "sort"
+ "testing"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+ TestingT(t)
+}
+
+// Gocheck boilerplate: empty suite type, registered below.
+type MySuite struct{}
+
+var _ = Suite(&MySuite{})
+
+// Helper method to declare string sets more succinctly
+// Could be placed somewhere more general.
+func stringSet(slice ...string) (m map[string]struct{}) {
+ m = map[string]struct{}{}
+ for _, s := range slice {
+ m[s] = struct{}{}
+ }
+ return
+}
+
+// Pins the exact JSON encoding of a PullList, including the lower-case
+// "locator" and "servers" field names consumers of the pull list expect.
+func (s *MySuite) TestPullListPrintsJSONCorrectly(c *C) {
+ pl := PullList{PullRequest{
+ Locator: Locator(blockdigest.MakeTestDigestSpecifySize(0xBadBeef, 56789)),
+ Servers: []string{"keep0.qr1hi.arvadosapi.com:25107",
+ "keep1.qr1hi.arvadosapi.com:25108"}}}
+
+ b, err := json.Marshal(pl)
+ c.Assert(err, IsNil)
+ expectedOutput := `[{"locator":"0000000000000000000000000badbeef+56789",` +
+ `"servers":["keep0.qr1hi.arvadosapi.com:25107",` +
+ `"keep1.qr1hi.arvadosapi.com:25108"]}]`
+ c.Check(string(b), Equals, expectedOutput)
+}
+
+// Exercises CreatePullServers over several argument combinations.
+// NOTE(review): argument roles are inferred from the cases below
+// (a string canonicalizer, two server sets, a sorted server slice, and
+// an integer cap on the To list) — confirm against the
+// CreatePullServers definition, which is outside this view.
+func (s *MySuite) TestCreatePullServers(c *C) {
+ var cs CanonicalString
+ c.Check(
+ CreatePullServers(cs,
+ stringSet(),
+ stringSet(),
+ []string{},
+ 5),
+ DeepEquals,
+ PullServers{To: []string{}, From: []string{}})
+
+ c.Check(
+ CreatePullServers(cs,
+ stringSet("keep0:25107", "keep1:25108"),
+ stringSet(),
+ []string{},
+ 5),
+ DeepEquals,
+ PullServers{To: []string{}, From: []string{}})
+
+ c.Check(
+ CreatePullServers(cs,
+ stringSet("keep0:25107", "keep1:25108"),
+ stringSet("keep0:25107"),
+ []string{"keep0:25107"},
+ 5),
+ DeepEquals,
+ PullServers{To: []string{}, From: []string{"keep0:25107"}})
+
+ c.Check(
+ CreatePullServers(cs,
+ stringSet("keep0:25107", "keep1:25108"),
+ stringSet("keep3:25110", "keep2:25109", "keep1:25108", "keep0:25107"),
+ []string{"keep3:25110", "keep2:25109", "keep1:25108", "keep0:25107"},
+ 5),
+ DeepEquals,
+ PullServers{To: []string{"keep3:25110", "keep2:25109"},
+ From: []string{"keep1:25108", "keep0:25107"}})
+
+ c.Check(
+ CreatePullServers(cs,
+ stringSet("keep0:25107", "keep1:25108"),
+ stringSet("keep3:25110", "keep1:25108", "keep0:25107"),
+ []string{"keep3:25110", "keep2:25109", "keep1:25108", "keep0:25107"},
+ 5),
+ DeepEquals,
+ PullServers{To: []string{"keep3:25110"},
+ From: []string{"keep1:25108", "keep0:25107"}})
+
+ // A cap of 1 limits the To list to a single destination.
+ c.Check(
+ CreatePullServers(cs,
+ stringSet("keep0:25107", "keep1:25108"),
+ stringSet("keep3:25110", "keep2:25109", "keep1:25108", "keep0:25107"),
+ []string{"keep3:25110", "keep2:25109", "keep1:25108", "keep0:25107"},
+ 1),
+ DeepEquals,
+ PullServers{To: []string{"keep3:25110"},
+ From: []string{"keep1:25108", "keep0:25107"}})
+
+ // Protocol prefixes are stripped from To entries but preserved in From.
+ c.Check(
+ CreatePullServers(cs,
+ stringSet("keep0:25107", "keep1:25108"),
+ stringSet("https://keep3:25110", "http://keep2:25109",
+ "https://keep1:25108", "http://keep0:25107"),
+ []string{"https://keep3:25110", "http://keep2:25109",
+ "https://keep1:25108", "http://keep0:25107"},
+ 1),
+ DeepEquals,
+ PullServers{To: []string{"keep3:25110"},
+ From: []string{"https://keep1:25108", "http://keep0:25107"}})
+
+ // A cap of 0 yields no destinations at all.
+ c.Check(
+ CreatePullServers(cs,
+ stringSet("keep0:25107", "keep1:25108"),
+ stringSet("keep3:25110", "keep2:25109", "keep1:25108", "keep0:25107"),
+ []string{"keep3:25110", "keep2:25109", "keep1:25108", "keep0:25107"},
+ 0),
+ DeepEquals,
+ PullServers{To: []string{},
+ From: []string{"keep1:25108", "keep0:25107"}})
+}
+
+// Checks whether two pull list maps are equal. Since pull lists are
+// ordered arbitrarily, we need to sort them by digest before
+// comparing them for deep equality.
+type pullListMapEqualsChecker struct {
+ *CheckerInfo
+}
+
+// Check sorts both maps' pull lists IN PLACE (mutating the caller's
+// slices) and then delegates to gocheck's DeepEquals checker.
+func (c *pullListMapEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ obtained, ok := params[0].(map[string]PullList)
+ if !ok {
+ return false, "First parameter is not a PullList map"
+ }
+ expected, ok := params[1].(map[string]PullList)
+ if !ok {
+ return false, "Second parameter is not a PullList map"
+ }
+
+ for _, v := range obtained {
+ sort.Sort(PullListByLocator(v))
+ }
+ for _, v := range expected {
+ sort.Sort(PullListByLocator(v))
+ }
+
+ return DeepEquals.Check(params, names)
+}
+
+// PullListMapEquals is the gocheck Checker instance used by the tests below.
+var PullListMapEquals Checker = &pullListMapEqualsChecker{&CheckerInfo{
+ Name: "PullListMapEquals",
+ Params: []string{"obtained", "expected"},
+}}
+
+// Builds pull lists from progressively richer inputs: an empty map,
+// empty To/From lists, a single destination, multiple destinations,
+// and finally several locators fanned out across overlapping
+// destination sets.
+func (s *MySuite) TestBuildPullLists(c *C) {
+ c.Check(
+ BuildPullLists(map[Locator]PullServers{}),
+ PullListMapEquals,
+ map[string]PullList{})
+
+ locator1 := Locator{Digest: blockdigest.MakeTestBlockDigest(0xBadBeef)}
+ // No destinations means no pull lists, regardless of sources.
+ c.Check(
+ BuildPullLists(map[Locator]PullServers{
+ locator1: PullServers{To: []string{}, From: []string{}}}),
+ PullListMapEquals,
+ map[string]PullList{})
+
+ c.Check(
+ BuildPullLists(map[Locator]PullServers{
+ locator1: PullServers{To: []string{}, From: []string{"f1", "f2"}}}),
+ PullListMapEquals,
+ map[string]PullList{})
+
+ c.Check(
+ BuildPullLists(map[Locator]PullServers{
+ locator1: PullServers{To: []string{"t1"}, From: []string{"f1", "f2"}}}),
+ PullListMapEquals,
+ map[string]PullList{
+ "t1": PullList{PullRequest{locator1, []string{"f1", "f2"}}}})
+
+ c.Check(
+ BuildPullLists(map[Locator]PullServers{
+ locator1: PullServers{To: []string{"t1"}, From: []string{}}}),
+ PullListMapEquals,
+ map[string]PullList{"t1": PullList{
+ PullRequest{locator1, []string{}}}})
+
+ // Each destination gets its own copy of the request.
+ c.Check(
+ BuildPullLists(map[Locator]PullServers{
+ locator1: PullServers{
+ To: []string{"t1", "t2"},
+ From: []string{"f1", "f2"},
+ }}),
+ PullListMapEquals,
+ map[string]PullList{
+ "t1": PullList{PullRequest{locator1, []string{"f1", "f2"}}},
+ "t2": PullList{PullRequest{locator1, []string{"f1", "f2"}}},
+ })
+
+ locator2 := Locator{Digest: blockdigest.MakeTestBlockDigest(0xCabbed)}
+ c.Check(
+ BuildPullLists(map[Locator]PullServers{
+ locator1: PullServers{To: []string{"t1"}, From: []string{"f1", "f2"}},
+ locator2: PullServers{To: []string{"t2"}, From: []string{"f3", "f4"}}}),
+ PullListMapEquals,
+ map[string]PullList{
+ "t1": PullList{PullRequest{locator1, []string{"f1", "f2"}}},
+ "t2": PullList{PullRequest{locator2, []string{"f3", "f4"}}},
+ })
+
+ // Overlapping destination sets accumulate requests per destination.
+ c.Check(
+ BuildPullLists(map[Locator]PullServers{
+ locator1: PullServers{
+ To: []string{"t1"},
+ From: []string{"f1", "f2"}},
+ locator2: PullServers{
+ To: []string{"t2", "t1"},
+ From: []string{"f3", "f4"}},
+ }),
+ PullListMapEquals,
+ map[string]PullList{
+ "t1": PullList{
+ PullRequest{locator1, []string{"f1", "f2"}},
+ PullRequest{locator2, []string{"f3", "f4"}},
+ },
+ "t2": PullList{
+ PullRequest{locator2, []string{"f3", "f4"}},
+ },
+ })
+
+ locator3 := Locator{Digest: blockdigest.MakeTestBlockDigest(0xDeadBeef)}
+ locator4 := Locator{Digest: blockdigest.MakeTestBlockDigest(0xFedBeef)}
+ c.Check(
+ BuildPullLists(map[Locator]PullServers{
+ locator1: PullServers{
+ To: []string{"t1"},
+ From: []string{"f1", "f2"}},
+ locator2: PullServers{
+ To: []string{"t2", "t1"},
+ From: []string{"f3", "f4"}},
+ locator3: PullServers{
+ To: []string{"t3", "t2", "t1"},
+ From: []string{"f4", "f5"}},
+ locator4: PullServers{
+ To: []string{"t4", "t3", "t2", "t1"},
+ From: []string{"f1", "f5"}},
+ }),
+ PullListMapEquals,
+ map[string]PullList{
+ "t1": PullList{
+ PullRequest{locator1, []string{"f1", "f2"}},
+ PullRequest{locator2, []string{"f3", "f4"}},
+ PullRequest{locator3, []string{"f4", "f5"}},
+ PullRequest{locator4, []string{"f1", "f5"}},
+ },
+ "t2": PullList{
+ PullRequest{locator2, []string{"f3", "f4"}},
+ PullRequest{locator3, []string{"f4", "f5"}},
+ PullRequest{locator4, []string{"f1", "f5"}},
+ },
+ "t3": PullList{
+ PullRequest{locator3, []string{"f4", "f5"}},
+ PullRequest{locator4, []string{"f1", "f5"}},
+ },
+ "t4": PullList{
+ PullRequest{locator4, []string{"f1", "f5"}},
+ },
+ })
+}
+
+// NOTE(review): per the "bl/ah" case, RemoveProtocolPrefix keeps only
+// what follows the LAST "/" — a URL containing a path would be reduced
+// to its final path segment. Callers are expected to pass
+// scheme://host[:port] values only; confirm if that assumption holds.
+func (s *MySuite) TestRemoveProtocolPrefix(c *C) {
+ c.Check(RemoveProtocolPrefix("blah"), Equals, "blah")
+ c.Check(RemoveProtocolPrefix("bl/ah"), Equals, "ah")
+ c.Check(RemoveProtocolPrefix("http://blah.com"), Equals, "blah.com")
+ c.Check(RemoveProtocolPrefix("https://blah.com:8900"), Equals, "blah.com:8900")
+}
--- /dev/null
+// Summarizes Collection Data and Keep Server Contents.
+package summary
+
+// TODO(misha): Check size of blocks as well as their digest.
+
+import (
+ "fmt"
+ "git.curoverse.com/arvados.git/sdk/go/blockdigest"
+ "git.curoverse.com/arvados.git/services/datamanager/collection"
+ "git.curoverse.com/arvados.git/services/datamanager/keep"
+ "sort"
+)
+
+// A set of block digests, implemented as a map to empty structs.
+type BlockSet map[blockdigest.DigestWithSize]struct{}
+
+// Adds a single block to the set.
+func (bs BlockSet) Insert(digest blockdigest.DigestWithSize) {
+ bs[digest] = struct{}{}
+}
+
+// Adds a set of blocks to the set.
+func (bs BlockSet) Union(obs BlockSet) {
+ for k, v := range obs {
+ bs[k] = v
+ }
+}
+
+// We use the collection index to save space. To convert to and from
+// the uuid, use collection.ReadCollections' fields
+// CollectionIndexToUuid and CollectionUuidToIndex.
+type CollectionIndexSet map[int]struct{}
+
+// Adds a single collection to the set. The collection is specified by
+// its index.
+func (cis CollectionIndexSet) Insert(collectionIndex int) {
+ cis[collectionIndex] = struct{}{}
+}
+
+// Fills collectionIndexSet with the indices of every collection that
+// contains at least one block from bs, using the precomputed
+// BlockToCollectionIndices mapping in readCollections.
+func (bs BlockSet) ToCollectionIndexSet(
+ readCollections collection.ReadCollections,
+ collectionIndexSet *CollectionIndexSet) {
+ for block := range bs {
+ for _, collectionIndex := range readCollections.BlockToCollectionIndices[block] {
+ collectionIndexSet.Insert(collectionIndex)
+ }
+ }
+}
+
+// Keeps track of the requested and actual replication levels.
+// Currently this is only used for blocks but could easily be used for
+// collections as well.
+type ReplicationLevels struct {
+ // The requested replication level.
+ // For Blocks this is the maximum replication level among all the
+ // collections this block belongs to.
+ Requested int
+
+ // The actual number of keep servers this is on.
+ Actual int
+}
+
+// Maps from replication levels to their blocks.
+type ReplicationLevelBlockSetMap map[ReplicationLevels]BlockSet
+
+// An individual entry from ReplicationLevelBlockSetMap which only reports the number of blocks, not which blocks.
+type ReplicationLevelBlockCount struct {
+ Levels ReplicationLevels
+ Count int
+}
+
+// An ordered list of ReplicationLevelBlockCount useful for reporting.
+type ReplicationLevelBlockSetSlice []ReplicationLevelBlockCount
+
+// Partitions blocks and collections into replication-health buckets;
+// populated by SummarizeBuckets.
+type ReplicationSummary struct {
+ CollectionBlocksNotInKeep BlockSet
+ UnderReplicatedBlocks BlockSet
+ OverReplicatedBlocks BlockSet
+ CorrectlyReplicatedBlocks BlockSet
+ KeepBlocksNotInCollections BlockSet
+
+ CollectionsNotFullyInKeep CollectionIndexSet
+ UnderReplicatedCollections CollectionIndexSet
+ OverReplicatedCollections CollectionIndexSet
+ CorrectlyReplicatedCollections CollectionIndexSet
+}
+
+// This struct counts the elements in each set in ReplicationSummary.
+type ReplicationSummaryCounts struct {
+ CollectionBlocksNotInKeep int
+ UnderReplicatedBlocks int
+ OverReplicatedBlocks int
+ CorrectlyReplicatedBlocks int
+ KeepBlocksNotInCollections int
+ CollectionsNotFullyInKeep int
+ UnderReplicatedCollections int
+ OverReplicatedCollections int
+ CorrectlyReplicatedCollections int
+}
+
+// Gets the BlockSet for a given set of ReplicationLevels, creating it
+// if it doesn't already exist.
+func (rlbs ReplicationLevelBlockSetMap) GetOrCreate(
+ repLevels ReplicationLevels) (bs BlockSet) {
+ bs, exists := rlbs[repLevels]
+ if !exists {
+ bs = make(BlockSet)
+ rlbs[repLevels] = bs
+ }
+ return
+}
+
+// Adds a block to the set for a given replication level.
+func (rlbs ReplicationLevelBlockSetMap) Insert(
+ repLevels ReplicationLevels,
+ block blockdigest.DigestWithSize) {
+ rlbs.GetOrCreate(repLevels).Insert(block)
+}
+
+// Adds a set of blocks to the set for a given replication level.
+func (rlbs ReplicationLevelBlockSetMap) Union(
+ repLevels ReplicationLevels,
+ bs BlockSet) {
+ rlbs.GetOrCreate(repLevels).Union(bs)
+}
+
+// Outputs a sorted list of ReplicationLevelBlockCounts.
+// Order is ascending by (Requested, Actual), as defined by
+// ReplicationLevelBlockSetSlice's Less method below.
+func (rlbs ReplicationLevelBlockSetMap) Counts() (
+ sorted ReplicationLevelBlockSetSlice) {
+ sorted = make(ReplicationLevelBlockSetSlice, len(rlbs))
+ i := 0
+ for levels, set := range rlbs {
+ sorted[i] = ReplicationLevelBlockCount{Levels: levels, Count: len(set)}
+ i++
+ }
+ sort.Sort(sorted)
+ return
+}
+
+// Implemented to meet sort.Interface
+func (rlbss ReplicationLevelBlockSetSlice) Len() int {
+ return len(rlbss)
+}
+
+// Implemented to meet sort.Interface
+func (rlbss ReplicationLevelBlockSetSlice) Less(i, j int) bool {
+ return rlbss[i].Levels.Requested < rlbss[j].Levels.Requested ||
+ (rlbss[i].Levels.Requested == rlbss[j].Levels.Requested &&
+ rlbss[i].Levels.Actual < rlbss[j].Levels.Actual)
+}
+
+// Implemented to meet sort.Interface
+func (rlbss ReplicationLevelBlockSetSlice) Swap(i, j int) {
+ rlbss[i], rlbss[j] = rlbss[j], rlbss[i]
+}
+
+// Converts a ReplicationSummary of sets into the corresponding
+// ReplicationSummaryCounts of set sizes.
+func (rs ReplicationSummary) ComputeCounts() (rsc ReplicationSummaryCounts) {
+ // TODO(misha): Consider rewriting this method to iterate through
+ // the fields using reflection, instead of explicitly listing the
+ // fields as we do now.
+ rsc.CollectionBlocksNotInKeep = len(rs.CollectionBlocksNotInKeep)
+ rsc.UnderReplicatedBlocks = len(rs.UnderReplicatedBlocks)
+ rsc.OverReplicatedBlocks = len(rs.OverReplicatedBlocks)
+ rsc.CorrectlyReplicatedBlocks = len(rs.CorrectlyReplicatedBlocks)
+ rsc.KeepBlocksNotInCollections = len(rs.KeepBlocksNotInCollections)
+ rsc.CollectionsNotFullyInKeep = len(rs.CollectionsNotFullyInKeep)
+ rsc.UnderReplicatedCollections = len(rs.UnderReplicatedCollections)
+ rsc.OverReplicatedCollections = len(rs.OverReplicatedCollections)
+ rsc.CorrectlyReplicatedCollections = len(rs.CorrectlyReplicatedCollections)
+ return rsc
+}
+
+// Renders the counts as a human-readable multi-line report.
+func (rsc ReplicationSummaryCounts) PrettyPrint() string {
+ return fmt.Sprintf("Replication Block Counts:"+
+ "\n Missing From Keep: %d, "+
+ "\n Under Replicated: %d, "+
+ "\n Over Replicated: %d, "+
+ "\n Replicated Just Right: %d, "+
+ "\n Not In Any Collection: %d. "+
+ "\nReplication Collection Counts:"+
+ "\n Missing From Keep: %d, "+
+ "\n Under Replicated: %d, "+
+ "\n Over Replicated: %d, "+
+ "\n Replicated Just Right: %d.",
+ rsc.CollectionBlocksNotInKeep,
+ rsc.UnderReplicatedBlocks,
+ rsc.OverReplicatedBlocks,
+ rsc.CorrectlyReplicatedBlocks,
+ rsc.KeepBlocksNotInCollections,
+ rsc.CollectionsNotFullyInKeep,
+ rsc.UnderReplicatedCollections,
+ rsc.OverReplicatedCollections,
+ rsc.CorrectlyReplicatedCollections)
+}
+
+// Buckets every block by its (Requested, Actual) replication levels.
+// The first pass covers blocks referenced by collections; the second
+// pass picks up blocks present on keep servers but absent from every
+// collection (Requested == 0).
+func BucketReplication(readCollections collection.ReadCollections,
+ keepServerInfo keep.ReadServers) (rlbsm ReplicationLevelBlockSetMap) {
+ rlbsm = make(ReplicationLevelBlockSetMap)
+
+ for block, requestedReplication := range readCollections.BlockToDesiredReplication {
+ rlbsm.Insert(
+ ReplicationLevels{
+ Requested: requestedReplication,
+ Actual: len(keepServerInfo.BlockToServers[block])},
+ block)
+ }
+
+ for block, servers := range keepServerInfo.BlockToServers {
+ if 0 == readCollections.BlockToDesiredReplication[block] {
+ rlbsm.Insert(
+ ReplicationLevels{Requested: 0, Actual: len(servers)},
+ block)
+ }
+ }
+ return
+}
+
+// Classifies every bucket of rlbsm into the block sets of a
+// ReplicationSummary, then derives the collection-level sets from the
+// block-level ones.
+func (rlbsm ReplicationLevelBlockSetMap) SummarizeBuckets(
+ readCollections collection.ReadCollections) (
+ rs ReplicationSummary) {
+ rs.CollectionBlocksNotInKeep = make(BlockSet)
+ rs.UnderReplicatedBlocks = make(BlockSet)
+ rs.OverReplicatedBlocks = make(BlockSet)
+ rs.CorrectlyReplicatedBlocks = make(BlockSet)
+ rs.KeepBlocksNotInCollections = make(BlockSet)
+
+ rs.CollectionsNotFullyInKeep = make(CollectionIndexSet)
+ rs.UnderReplicatedCollections = make(CollectionIndexSet)
+ rs.OverReplicatedCollections = make(CollectionIndexSet)
+ rs.CorrectlyReplicatedCollections = make(CollectionIndexSet)
+
+ for levels, bs := range rlbsm {
+ if levels.Actual == 0 {
+ rs.CollectionBlocksNotInKeep.Union(bs)
+ } else if levels.Requested == 0 {
+ rs.KeepBlocksNotInCollections.Union(bs)
+ } else if levels.Actual < levels.Requested {
+ rs.UnderReplicatedBlocks.Union(bs)
+ } else if levels.Actual > levels.Requested {
+ rs.OverReplicatedBlocks.Union(bs)
+ } else {
+ rs.CorrectlyReplicatedBlocks.Union(bs)
+ }
+ }
+
+ rs.CollectionBlocksNotInKeep.ToCollectionIndexSet(readCollections,
+ &rs.CollectionsNotFullyInKeep)
+ // Since different collections can specify different replication
+ // levels, the fact that a block is under-replicated does not imply
+ // that all collections that it belongs to are under-replicated, but
+ // we'll ignore that for now.
+ // TODO(misha): Fix this and report the correct set of collections.
+ rs.UnderReplicatedBlocks.ToCollectionIndexSet(readCollections,
+ &rs.OverReplicatedCollections)
+
+ // The empty branches below are intentional: a collection that is
+ // missing, under-, or over-replicated is simply excluded from the
+ // "correctly replicated" set.
+ for i := range readCollections.CollectionIndexToUuid {
+ if _, notInKeep := rs.CollectionsNotFullyInKeep[i]; notInKeep {
+ } else if _, underReplicated := rs.UnderReplicatedCollections[i]; underReplicated {
+ } else if _, overReplicated := rs.OverReplicatedCollections[i]; overReplicated {
+ } else {
+ rs.CorrectlyReplicatedCollections.Insert(i)
+ }
+ }
+
+ return
+}
--- /dev/null
+package summary
+
+import (
+ "git.curoverse.com/arvados.git/sdk/go/blockdigest"
+ "git.curoverse.com/arvados.git/services/datamanager/collection"
+ "git.curoverse.com/arvados.git/services/datamanager/keep"
+ "reflect"
+ "sort"
+ "testing"
+)
+
+// Builds a BlockSet from test digests generated from the given ints.
+func BlockSetFromSlice(digests []int) (bs BlockSet) {
+ bs = make(BlockSet)
+ for _, digest := range digests {
+ bs.Insert(blockdigest.MakeTestDigestWithSize(digest))
+ }
+ return
+}
+
+// Builds a CollectionIndexSet from a slice of indices.
+func CollectionIndexSetFromSlice(indices []int) (cis CollectionIndexSet) {
+ cis = make(CollectionIndexSet)
+ for _, index := range indices {
+ cis.Insert(index)
+ }
+ return
+}
+
+// Returns the set's indices as a slice, sorted ascending.
+func (cis CollectionIndexSet) ToSlice() (ints []int) {
+ ints = make([]int, len(cis))
+ i := 0
+ for collectionIndex := range cis {
+ ints[i] = collectionIndex
+ i++
+ }
+ sort.Ints(ints)
+ return
+}
+
+// Helper method to meet interface expected by older tests.
+func SummarizeReplication(readCollections collection.ReadCollections,
+ keepServerInfo keep.ReadServers) (rs ReplicationSummary) {
+ return BucketReplication(readCollections, keepServerInfo).
+ SummarizeBuckets(readCollections)
+}
+
+// Takes a map from block digest to replication level and represents
+// it in a keep.ReadServers structure.
+func SpecifyReplication(digestToReplication map[int]int) (rs keep.ReadServers) {
+ rs.BlockToServers = make(map[blockdigest.DigestWithSize][]keep.BlockServerInfo)
+ for digest, replication := range digestToReplication {
+ rs.BlockToServers[blockdigest.MakeTestDigestWithSize(digest)] =
+ make([]keep.BlockServerInfo, replication)
+ }
+ return
+}
+
+// Verifies that
+// blocks.ToCollectionIndexSet(rc.BlockToCollectionIndices) returns
+// expectedCollections.
+func VerifyToCollectionIndexSet(
+ t *testing.T,
+ blocks []int,
+ blockToCollectionIndices map[int][]int,
+ expectedCollections []int) {
+
+ expected := CollectionIndexSetFromSlice(expectedCollections)
+
+ rc := collection.ReadCollections{
+ BlockToCollectionIndices: map[blockdigest.DigestWithSize][]int{},
+ }
+ for digest, indices := range blockToCollectionIndices {
+ rc.BlockToCollectionIndices[blockdigest.MakeTestDigestWithSize(digest)] = indices
+ }
+
+ returned := make(CollectionIndexSet)
+ BlockSetFromSlice(blocks).ToCollectionIndexSet(rc, &returned)
+
+ if !reflect.DeepEqual(returned, expected) {
+ t.Errorf("Expected %v.ToCollectionIndexSet(%v) to return \n %v \n but instead received \n %v",
+ blocks,
+ blockToCollectionIndices,
+ expectedCollections,
+ returned.ToSlice())
+ }
+}
+
+// Covers single and multiple blocks, overlapping collection sets, and
+// a block with no collection entries (which maps to the empty set).
+func TestToCollectionIndexSet(t *testing.T) {
+ VerifyToCollectionIndexSet(t, []int{6}, map[int][]int{6: []int{0}}, []int{0})
+ VerifyToCollectionIndexSet(t, []int{4}, map[int][]int{4: []int{1}}, []int{1})
+ VerifyToCollectionIndexSet(t, []int{4}, map[int][]int{4: []int{1, 9}}, []int{1, 9})
+ VerifyToCollectionIndexSet(t, []int{5, 6},
+ map[int][]int{5: []int{2, 3}, 6: []int{3, 4}},
+ []int{2, 3, 4})
+ VerifyToCollectionIndexSet(t, []int{5, 6},
+ map[int][]int{5: []int{8}, 6: []int{4}},
+ []int{4, 8})
+ VerifyToCollectionIndexSet(t, []int{6}, map[int][]int{5: []int{0}}, []int{})
+}
+
+// Every block at exactly its requested replication level.
+func TestSimpleSummary(t *testing.T) {
+ rc := collection.MakeTestReadCollections([]collection.TestCollectionSpec{
+ collection.TestCollectionSpec{ReplicationLevel: 1, Blocks: []int{1, 2}},
+ })
+ rc.Summarize(nil)
+ cIndex := rc.CollectionIndicesForTesting()
+
+ keepInfo := SpecifyReplication(map[int]int{1: 1, 2: 1})
+
+ expectedSummary := ReplicationSummary{
+ CollectionBlocksNotInKeep: BlockSet{},
+ UnderReplicatedBlocks: BlockSet{},
+ OverReplicatedBlocks: BlockSet{},
+ CorrectlyReplicatedBlocks: BlockSetFromSlice([]int{1, 2}),
+ KeepBlocksNotInCollections: BlockSet{},
+
+ CollectionsNotFullyInKeep: CollectionIndexSet{},
+ UnderReplicatedCollections: CollectionIndexSet{},
+ OverReplicatedCollections: CollectionIndexSet{},
+ CorrectlyReplicatedCollections: CollectionIndexSetFromSlice([]int{cIndex[0]}),
+ }
+
+ returnedSummary := SummarizeReplication(rc, keepInfo)
+
+ if !reflect.DeepEqual(returnedSummary, expectedSummary) {
+ t.Fatalf("Expected returnedSummary to look like %+v but instead it is %+v", expectedSummary, returnedSummary)
+ }
+}
+
+// One block entirely absent from keep marks the collection as not
+// fully in keep.
+func TestMissingBlock(t *testing.T) {
+ rc := collection.MakeTestReadCollections([]collection.TestCollectionSpec{
+ collection.TestCollectionSpec{ReplicationLevel: 1, Blocks: []int{1, 2}},
+ })
+ rc.Summarize(nil)
+ cIndex := rc.CollectionIndicesForTesting()
+
+ keepInfo := SpecifyReplication(map[int]int{1: 1})
+
+ expectedSummary := ReplicationSummary{
+ CollectionBlocksNotInKeep: BlockSetFromSlice([]int{2}),
+ UnderReplicatedBlocks: BlockSet{},
+ OverReplicatedBlocks: BlockSet{},
+ CorrectlyReplicatedBlocks: BlockSetFromSlice([]int{1}),
+ KeepBlocksNotInCollections: BlockSet{},
+
+ CollectionsNotFullyInKeep: CollectionIndexSetFromSlice([]int{cIndex[0]}),
+ UnderReplicatedCollections: CollectionIndexSet{},
+ OverReplicatedCollections: CollectionIndexSet{},
+ CorrectlyReplicatedCollections: CollectionIndexSet{},
+ }
+
+ returnedSummary := SummarizeReplication(rc, keepInfo)
+
+ if !reflect.DeepEqual(returnedSummary, expectedSummary) {
+ t.Fatalf("Expected returnedSummary to look like %+v but instead it is %+v",
+ expectedSummary,
+ returnedSummary)
+ }
+}
+
+// One block below and one above the requested level; the collection
+// lands in both the under- and over-replicated sets.
+func TestUnderAndOverReplicatedBlocks(t *testing.T) {
+ rc := collection.MakeTestReadCollections([]collection.TestCollectionSpec{
+ collection.TestCollectionSpec{ReplicationLevel: 2, Blocks: []int{1, 2}},
+ })
+ rc.Summarize(nil)
+ cIndex := rc.CollectionIndicesForTesting()
+
+ keepInfo := SpecifyReplication(map[int]int{1: 1, 2: 3})
+
+ expectedSummary := ReplicationSummary{
+ CollectionBlocksNotInKeep: BlockSet{},
+ UnderReplicatedBlocks: BlockSetFromSlice([]int{1}),
+ OverReplicatedBlocks: BlockSetFromSlice([]int{2}),
+ CorrectlyReplicatedBlocks: BlockSet{},
+ KeepBlocksNotInCollections: BlockSet{},
+
+ CollectionsNotFullyInKeep: CollectionIndexSet{},
+ UnderReplicatedCollections: CollectionIndexSetFromSlice([]int{cIndex[0]}),
+ OverReplicatedCollections: CollectionIndexSetFromSlice([]int{cIndex[0]}),
+ CorrectlyReplicatedCollections: CollectionIndexSet{},
+ }
+
+ returnedSummary := SummarizeReplication(rc, keepInfo)
+
+ if !reflect.DeepEqual(returnedSummary, expectedSummary) {
+ t.Fatalf("Expected returnedSummary to look like %+v but instead it is %+v",
+ expectedSummary,
+ returnedSummary)
+ }
+}
+
+// Three collections plus an orphaned block (7) that appears in keep
+// but belongs to no collection.
+func TestMixedReplication(t *testing.T) {
+ rc := collection.MakeTestReadCollections([]collection.TestCollectionSpec{
+ collection.TestCollectionSpec{ReplicationLevel: 1, Blocks: []int{1, 2}},
+ collection.TestCollectionSpec{ReplicationLevel: 1, Blocks: []int{3, 4}},
+ collection.TestCollectionSpec{ReplicationLevel: 2, Blocks: []int{5, 6}},
+ })
+ rc.Summarize(nil)
+ cIndex := rc.CollectionIndicesForTesting()
+
+ keepInfo := SpecifyReplication(map[int]int{1: 1, 2: 1, 3: 1, 5: 1, 6: 3, 7: 2})
+
+ expectedSummary := ReplicationSummary{
+ CollectionBlocksNotInKeep: BlockSetFromSlice([]int{4}),
+ UnderReplicatedBlocks: BlockSetFromSlice([]int{5}),
+ OverReplicatedBlocks: BlockSetFromSlice([]int{6}),
+ CorrectlyReplicatedBlocks: BlockSetFromSlice([]int{1, 2, 3}),
+ KeepBlocksNotInCollections: BlockSetFromSlice([]int{7}),
+
+ CollectionsNotFullyInKeep: CollectionIndexSetFromSlice([]int{cIndex[1]}),
+ UnderReplicatedCollections: CollectionIndexSetFromSlice([]int{cIndex[2]}),
+ OverReplicatedCollections: CollectionIndexSetFromSlice([]int{cIndex[2]}),
+ CorrectlyReplicatedCollections: CollectionIndexSetFromSlice([]int{cIndex[0]}),
+ }
+
+ returnedSummary := SummarizeReplication(rc, keepInfo)
+
+ if !reflect.DeepEqual(returnedSummary, expectedSummary) {
+ t.Fatalf("Expected returnedSummary to look like: \n%+v but instead it is: \n%+v. Index to UUID is %v. BlockToCollectionIndices is %v.", expectedSummary, returnedSummary, rc.CollectionIndexToUuid, rc.BlockToCollectionIndices)
+ }
+}
self.obj.dec_use()
def flush(self):
- return self.obj.flush()
+ if self.obj.writable():
+ return self.obj.flush()
class FileHandle(Handle):
self._total -= obj.cache_size
del self._entries[obj.cache_priority]
if obj.cache_uuid:
- del self._by_uuid[obj.cache_uuid]
+ self._by_uuid[obj.cache_uuid].remove(obj)
+ if not self._by_uuid[obj.cache_uuid]:
+ del self._by_uuid[obj.cache_uuid]
obj.cache_uuid = None
if clear:
_logger.debug("InodeCache cleared %i total now %i", obj.inode, self._total)
self._entries[obj.cache_priority] = obj
obj.cache_uuid = obj.uuid()
if obj.cache_uuid:
- self._by_uuid[obj.cache_uuid] = obj
+ if obj.cache_uuid not in self._by_uuid:
+ self._by_uuid[obj.cache_uuid] = [obj]
+ else:
+ if obj not in self._by_uuid[obj.cache_uuid]:
+ self._by_uuid[obj.cache_uuid].append(obj)
self._total += obj.objsize()
- _logger.debug("InodeCache touched %i (size %i) total now %i", obj.inode, obj.objsize(), self._total)
+ _logger.debug("InodeCache touched %i (size %i) (uuid %s) total now %i", obj.inode, obj.objsize(), obj.cache_uuid, self._total)
self.cap_cache()
else:
obj.cache_priority = None
def find(self, uuid):
return self._by_uuid.get(uuid)
+ def clear(self):
+ self._entries.clear()
+ self._by_uuid.clear()
+ self._total = 0
+
class Inodes(object):
"""Manage the set of inodes. This is the mapping from a numeric id
to a concrete File or Directory object"""
def invalidate_entry(self, inode, name):
llfuse.invalidate_entry(inode, name)
+ def clear(self):
+ self.inode_cache.clear()
+
+ for k,v in self._entries.items():
+ try:
+ v.finalize()
+ except Exception as e:
+ _logger.exception("Error during finalize of inode %i", k)
+
+ self._entries.clear()
+
def catch_exceptions(orig_func):
"""Catch uncaught exceptions and log them consistently."""
self.events.close()
self.events = None
- for k,v in self.inodes.items():
- try:
- v.finalize()
- except Exception as e:
- _logger.exception("Error during finalize of inode %i", k)
- self.inodes = None
+ self.inodes.clear()
def access(self, inode, mode, ctx):
return True
def on_event(self, ev):
if 'event_type' in ev:
with llfuse.lock:
- item = self.inodes.inode_cache.find(ev["object_uuid"])
- if item is not None:
- item.invalidate()
- if ev["object_kind"] == "arvados#collection":
- new_attr = ev.get("properties") and ev["properties"].get("new_attributes") and ev["properties"]["new_attributes"]
-
- # new_attributes.modified_at currently lacks subsecond precision (see #6347) so use event_at which
- # should always be the same.
- #record_version = (new_attr["modified_at"], new_attr["portable_data_hash"]) if new_attr else None
- record_version = (ev["event_at"], new_attr["portable_data_hash"]) if new_attr else None
-
- item.update(to_record_version=record_version)
- else:
- item.update()
+ items = self.inodes.inode_cache.find(ev["object_uuid"])
+ if items is not None:
+ for item in items:
+ item.invalidate()
+ if ev["object_kind"] == "arvados#collection":
+ new_attr = ev.get("properties") and ev["properties"].get("new_attributes") and ev["properties"]["new_attributes"]
+
+ # new_attributes.modified_at currently lacks subsecond precision (see #6347) so use event_at which
+ # should always be the same.
+ #record_version = (new_attr["modified_at"], new_attr["portable_data_hash"]) if new_attr else None
+ record_version = (ev["event_at"], new_attr["portable_data_hash"]) if new_attr else None
+
+ item.update(to_record_version=record_version)
+ else:
+ item.update()
oldowner = ev.get("properties") and ev["properties"].get("old_attributes") and ev["properties"]["old_attributes"].get("owner_uuid")
olditemparent = self.inodes.inode_cache.find(oldowner)
return True
finally:
self._updating_lock.release()
- except arvados.errors.NotFoundError:
- _logger.exception("arv-mount %s: error", self.collection_locator)
+ except arvados.errors.NotFoundError as e:
+ _logger.error("Error fetching collection '%s': %s", self.collection_locator, e)
except arvados.errors.ArgumentError as detail:
_logger.warning("arv-mount %s: error %s", self.collection_locator, detail)
if self.collection_record is not None and "manifest_text" in self.collection_record:
self.inode, self.inodes, self.api, self.num_retries, k))
if e.update():
- self._entries[k] = e
+ if k not in self._entries:
+ self._entries[k] = e
+ else:
+ self.inodes.del_entry(e)
return True
else:
+ self.inodes.del_entry(e)
return False
except Exception as e:
_logger.debug('arv-mount exception keep %s', e)
+ self.inodes.del_entry(e)
return False
def __getitem__(self, item):
],
install_requires=[
'arvados-python-client >= 0.1.20150625175218',
- 'llfuse',
+ 'llfuse>=0.40',
'python-daemon',
'ciso8601'
],
--- /dev/null
+import arvados
+import arvados.safeapi
+import arvados_fuse as fuse
+import llfuse
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+import threading
+import time
+import unittest
+import logging
+import multiprocessing
+import run_test_server
+
+logger = logging.getLogger('arvados.arv-mount')
+
+class MountTestBase(unittest.TestCase):
+ def setUp(self, api=None):
+ # The underlying C implementation of open() makes a fstat() syscall
+ # with the GIL still held. When the GETATTR message comes back to
+ # llfuse (which in these tests is in the same interpreter process) it
+ # can't acquire the GIL, so it can't service the fstat() call, so it
+ # deadlocks. The workaround is to run some of our test code in a
+        # separate process. Fortunately the multiprocessing module makes this
+ # relatively easy.
+ self.pool = multiprocessing.Pool(1)
+
+ self.keeptmp = tempfile.mkdtemp()
+ os.environ['KEEP_LOCAL_STORE'] = self.keeptmp
+ self.mounttmp = tempfile.mkdtemp()
+ run_test_server.run()
+ run_test_server.authorize_with("admin")
+ self.api = api if api else arvados.safeapi.ThreadSafeApiCache(arvados.config.settings())
+
+ def make_mount(self, root_class, **root_kwargs):
+ self.operations = fuse.Operations(os.getuid(), os.getgid(), enable_write=True)
+ self.operations.inodes.add_entry(root_class(
+ llfuse.ROOT_INODE, self.operations.inodes, self.api, 0, **root_kwargs))
+ llfuse.init(self.operations, self.mounttmp, [])
+ threading.Thread(None, llfuse.main).start()
+ # wait until the driver is finished initializing
+ self.operations.initlock.wait()
+ return self.operations.inodes[llfuse.ROOT_INODE]
+
+ def tearDown(self):
+ self.pool.terminate()
+ self.pool.join()
+ del self.pool
+
+ # llfuse.close is buggy, so use fusermount instead.
+ #llfuse.close(unmount=True)
+
+ count = 0
+ success = 1
+ while (count < 9 and success != 0):
+ success = subprocess.call(["fusermount", "-u", self.mounttmp])
+ time.sleep(0.1)
+ count += 1
+
+ self.operations.destroy()
+
+ os.rmdir(self.mounttmp)
+ shutil.rmtree(self.keeptmp)
+ run_test_server.reset()
+
+ def assertDirContents(self, subdir, expect_content):
+ path = self.mounttmp
+ if subdir:
+ path = os.path.join(path, subdir)
+ self.assertEqual(sorted(expect_content), sorted(llfuse.listdir(path)))
--- /dev/null
+import arvados
+import arvados_fuse as fuse
+import llfuse
+import logging
+import os
+import sys
+import unittest
+from .. import run_test_server
+from ..mount_test_base import MountTestBase
+
+logger = logging.getLogger('arvados.arv-mount')
+
+from performance_profiler import profiled
+
+def fuse_createCollectionWithMultipleBlocks(mounttmp, streams=1, files_per_stream=1, data='x'):
+ class Test(unittest.TestCase):
+ def runTest(self):
+ self.createCollectionWithMultipleBlocks()
+
+ @profiled
+ def createCollectionWithMultipleBlocks(self):
+ for i in range(0, streams):
+ os.mkdir(os.path.join(mounttmp, "./stream" + str(i)))
+
+ # Create files
+ for j in range(0, files_per_stream):
+ with open(os.path.join(mounttmp, "./stream" + str(i), "file" + str(j) +".txt"), "w") as f:
+ f.write(data)
+
+ Test().runTest()
+
+def fuse_readContentsFromCollectionWithMultipleBlocks(mounttmp, streams=1, files_per_stream=1, data='x'):
+ class Test(unittest.TestCase):
+ def runTest(self):
+ self.readContentsFromCollectionWithMultipleBlocks()
+
+ @profiled
+ def readContentsFromCollectionWithMultipleBlocks(self):
+ for i in range(0, streams):
+ d1 = llfuse.listdir(os.path.join(mounttmp, 'stream'+str(i)))
+ for j in range(0, files_per_stream):
+                    with open(os.path.join(mounttmp, 'stream'+str(i), 'file'+str(j)+'.txt')) as f:
+ self.assertEqual(data, f.read())
+
+ Test().runTest()
+
+def fuse_moveFileFromCollectionWithMultipleBlocks(mounttmp, stream, filename):
+ class Test(unittest.TestCase):
+ def runTest(self):
+ self.moveFileFromCollectionWithMultipleBlocks()
+
+ @profiled
+ def moveFileFromCollectionWithMultipleBlocks(self):
+ d1 = llfuse.listdir(os.path.join(mounttmp, stream))
+ self.assertIn(filename, d1)
+
+ os.rename(os.path.join(mounttmp, stream, filename), os.path.join(mounttmp, 'moved_from_'+stream+'_'+filename))
+
+ d1 = llfuse.listdir(os.path.join(mounttmp))
+ self.assertIn('moved_from_'+stream+'_'+filename, d1)
+
+ d1 = llfuse.listdir(os.path.join(mounttmp, stream))
+ self.assertNotIn(filename, d1)
+
+ Test().runTest()
+
+def fuse_deleteFileFromCollectionWithMultipleBlocks(mounttmp, stream, filename):
+ class Test(unittest.TestCase):
+ def runTest(self):
+ self.deleteFileFromCollectionWithMultipleBlocks()
+
+ @profiled
+ def deleteFileFromCollectionWithMultipleBlocks(self):
+ os.remove(os.path.join(mounttmp, stream, filename))
+
+ Test().runTest()
+
+# Create a collection with 2 streams, 3 files_per_stream, 2 blocks_per_file, 2**26 bytes_per_block
+class CreateCollectionWithMultipleBlocksAndMoveAndDeleteFile(MountTestBase):
+ def setUp(self):
+ super(CreateCollectionWithMultipleBlocksAndMoveAndDeleteFile, self).setUp()
+
+ def test_CreateCollectionWithManyBlocksAndMoveAndDeleteFile(self):
+ collection = arvados.collection.Collection(api_client=self.api)
+ collection.save_new()
+
+ m = self.make_mount(fuse.CollectionDirectory)
+ with llfuse.lock:
+ m.new_collection(collection.api_response(), collection)
+ self.assertTrue(m.writable())
+
+ streams = 2
+ files_per_stream = 3
+ blocks_per_file = 2
+ bytes_per_block = 2**26
+
+ data = 'x' * blocks_per_file * bytes_per_block
+
+ self.pool.apply(fuse_createCollectionWithMultipleBlocks, (self.mounttmp, streams, files_per_stream, data,))
+
+ collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+
+ for i in range(0, streams):
+ self.assertIn('./stream' + str(i), collection2["manifest_text"])
+
+ for i in range(0, files_per_stream):
+ self.assertIn('file' + str(i) + '.txt', collection2["manifest_text"])
+
+ # Read file contents
+ self.pool.apply(fuse_readContentsFromCollectionWithMultipleBlocks, (self.mounttmp, streams, files_per_stream, data,))
+
+ # Move file0.txt out of the streams into .
+ for i in range(0, streams):
+ self.pool.apply(fuse_moveFileFromCollectionWithMultipleBlocks, (self.mounttmp, 'stream'+str(i), 'file0.txt',))
+
+ collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+
+ manifest_streams = collection2['manifest_text'].split('\n')
+ self.assertEqual(4, len(manifest_streams))
+
+ for i in range(0, streams):
+ self.assertIn('file0.txt', manifest_streams[0])
+
+ for i in range(0, streams):
+ self.assertNotIn('file0.txt', manifest_streams[i+1])
+
+ for i in range(0, streams):
+ for j in range(1, files_per_stream):
+ self.assertIn('file' + str(j) + '.txt', manifest_streams[i+1])
+
+ # Delete 'file1.txt' from all the streams
+ for i in range(0, streams):
+ self.pool.apply(fuse_deleteFileFromCollectionWithMultipleBlocks, (self.mounttmp, 'stream'+str(i), 'file1.txt'))
+
+ collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+
+ manifest_streams = collection2['manifest_text'].split('\n')
+ self.assertEqual(4, len(manifest_streams))
+
+ for i in range(0, streams):
+ self.assertIn('file0.txt', manifest_streams[0])
+
+ self.assertNotIn('file1.txt', collection2['manifest_text'])
+
+ for i in range(0, streams):
+ for j in range(2, files_per_stream):
+ self.assertIn('file' + str(j) + '.txt', manifest_streams[i+1])
+
+
+def fuse_createCollectionWithManyFiles(mounttmp, streams=1, files_per_stream=1, data='x'):
+ class Test(unittest.TestCase):
+ def runTest(self):
+ self.createCollectionWithManyFiles()
+
+ @profiled
+ def createCollectionWithManyFiles(self):
+ for i in range(0, streams):
+ os.mkdir(os.path.join(mounttmp, "./stream" + str(i)))
+
+ # Create files
+ for j in range(0, files_per_stream):
+ with open(os.path.join(mounttmp, "./stream" + str(i), "file" + str(j) +".txt"), "w") as f:
+ f.write(data)
+
+ Test().runTest()
+
+def fuse_readContentsFromCollectionWithManyFiles(mounttmp, streams=1, files_per_stream=1, data='x'):
+ class Test(unittest.TestCase):
+ def runTest(self):
+ self.readContentsFromCollectionWithManyFiles()
+
+ @profiled
+ def readContentsFromCollectionWithManyFiles(self):
+ for i in range(0, streams):
+ d1 = llfuse.listdir(os.path.join(mounttmp, 'stream'+str(i)))
+ for j in range(0, files_per_stream):
+                    with open(os.path.join(mounttmp, 'stream'+str(i), 'file'+str(j)+'.txt')) as f:
+ self.assertEqual(data, f.read())
+
+ Test().runTest()
+
+def fuse_moveFileFromCollectionWithManyFiles(mounttmp, stream, filename):
+ class Test(unittest.TestCase):
+ def runTest(self):
+ self.moveFileFromCollectionWithManyFiles()
+
+ @profiled
+ def moveFileFromCollectionWithManyFiles(self):
+ d1 = llfuse.listdir(os.path.join(mounttmp, stream))
+ self.assertIn(filename, d1)
+
+ os.rename(os.path.join(mounttmp, stream, filename), os.path.join(mounttmp, 'moved_from_'+stream+'_'+filename))
+
+ d1 = llfuse.listdir(os.path.join(mounttmp))
+ self.assertIn('moved_from_'+stream+'_'+filename, d1)
+
+ d1 = llfuse.listdir(os.path.join(mounttmp, stream))
+ self.assertNotIn(filename, d1)
+
+ Test().runTest()
+
+def fuse_deleteFileFromCollectionWithManyFiles(mounttmp, stream, filename):
+ class Test(unittest.TestCase):
+ def runTest(self):
+ self.deleteFileFromCollectionWithManyFiles()
+
+ @profiled
+ def deleteFileFromCollectionWithManyFiles(self):
+ os.remove(os.path.join(mounttmp, stream, filename))
+
+ Test().runTest()
+
+# Create a collection with two streams, each with 200 files
+class CreateCollectionWithManyFilesAndMoveAndDeleteFile(MountTestBase):
+ def setUp(self):
+ super(CreateCollectionWithManyFilesAndMoveAndDeleteFile, self).setUp()
+
+ def test_CreateCollectionWithManyFilesAndMoveAndDeleteFile(self):
+ collection = arvados.collection.Collection(api_client=self.api)
+ collection.save_new()
+
+ m = self.make_mount(fuse.CollectionDirectory)
+ with llfuse.lock:
+ m.new_collection(collection.api_response(), collection)
+ self.assertTrue(m.writable())
+
+ streams = 2
+ files_per_stream = 200
+ data = 'x'
+
+ self.pool.apply(fuse_createCollectionWithManyFiles, (self.mounttmp, streams, files_per_stream, data,))
+
+ collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+
+ for i in range(0, streams):
+ self.assertIn('./stream' + str(i), collection2["manifest_text"])
+
+ for i in range(0, files_per_stream):
+ self.assertIn('file' + str(i) + '.txt', collection2["manifest_text"])
+
+ # Read file contents
+ self.pool.apply(fuse_readContentsFromCollectionWithManyFiles, (self.mounttmp, streams, files_per_stream, data,))
+
+ # Move file0.txt out of the streams into .
+ for i in range(0, streams):
+ self.pool.apply(fuse_moveFileFromCollectionWithManyFiles, (self.mounttmp, 'stream'+str(i), 'file0.txt',))
+
+ collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+
+ manifest_streams = collection2['manifest_text'].split('\n')
+ self.assertEqual(4, len(manifest_streams))
+
+ for i in range(0, streams):
+ self.assertIn('file0.txt', manifest_streams[0])
+
+ for i in range(0, streams):
+ self.assertNotIn('file0.txt', manifest_streams[i+1])
+
+ for i in range(0, streams):
+ for j in range(1, files_per_stream):
+ self.assertIn('file' + str(j) + '.txt', manifest_streams[i+1])
+
+ # Delete 'file1.txt' from all the streams
+ for i in range(0, streams):
+ self.pool.apply(fuse_deleteFileFromCollectionWithManyFiles, (self.mounttmp, 'stream'+str(i), 'file1.txt'))
+
+ collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
+
+ manifest_streams = collection2['manifest_text'].split('\n')
+ self.assertEqual(4, len(manifest_streams))
+
+ for i in range(0, streams):
+ self.assertIn('file0.txt', manifest_streams[0])
+
+ self.assertNotIn('file1.txt', collection2['manifest_text'])
+
+ for i in range(0, streams):
+ for j in range(2, files_per_stream):
+ self.assertIn('file' + str(j) + '.txt', manifest_streams[i+1])
+
+
+def magicDirTest_MoveFileFromCollection(mounttmp, collection1, collection2, stream, filename):
+ class Test(unittest.TestCase):
+ def runTest(self):
+ self.magicDirTest_moveFileFromCollection()
+
+ @profiled
+ def magicDirTest_moveFileFromCollection(self):
+ os.rename(os.path.join(mounttmp, collection1, filename), os.path.join(mounttmp, collection2, filename))
+
+ Test().runTest()
+
+def magicDirTest_RemoveFileFromCollection(mounttmp, collection1, stream, filename):
+ class Test(unittest.TestCase):
+ def runTest(self):
+ self.magicDirTest_removeFileFromCollection()
+
+ @profiled
+ def magicDirTest_removeFileFromCollection(self):
+ os.remove(os.path.join(mounttmp, collection1, filename))
+
+ Test().runTest()
+
+class UsingMagicDir_CreateCollectionWithManyFilesAndMoveAndDeleteFile(MountTestBase):
+ def setUp(self):
+ super(UsingMagicDir_CreateCollectionWithManyFilesAndMoveAndDeleteFile, self).setUp()
+
+ @profiled
+ def magicDirTest_createCollectionWithManyFiles(self, streams=0, files_per_stream=0, data='x'):
+ # Create collection
+ collection = arvados.collection.Collection(api_client=self.api)
+ for j in range(0, files_per_stream):
+ with collection.open("file"+str(j)+".txt", "w") as f:
+ f.write(data)
+ collection.save_new()
+ return collection
+
+ @profiled
+ def magicDirTest_readCollectionContents(self, collection, streams=1, files_per_stream=1, data='x'):
+ mount_ls = os.listdir(os.path.join(self.mounttmp, collection))
+
+ files = {}
+ for j in range(0, files_per_stream):
+ files[os.path.join(self.mounttmp, collection, 'file'+str(j)+'.txt')] = data
+
+ for k, v in files.items():
+ with open(os.path.join(self.mounttmp, collection, k)) as f:
+ self.assertEqual(v, f.read())
+
+ def test_UsingMagicDirCreateCollectionWithManyFilesAndMoveAndDeleteFile(self):
+ streams = 2
+ files_per_stream = 200
+ data = 'x'
+
+ collection1 = self.magicDirTest_createCollectionWithManyFiles()
+ # Create collection with multiple files
+ collection2 = self.magicDirTest_createCollectionWithManyFiles(streams, files_per_stream, data)
+
+ # Mount FuseMagicDir
+ self.make_mount(fuse.MagicDirectory)
+
+ self.magicDirTest_readCollectionContents(collection2.manifest_locator(), streams, files_per_stream, data)
+
+ # Move file0.txt out of the collection2 into collection1
+ self.pool.apply(magicDirTest_MoveFileFromCollection, (self.mounttmp, collection2.manifest_locator(),
+ collection1.manifest_locator(), 'stream0', 'file0.txt',))
+ updated_collection = self.api.collections().get(uuid=collection2.manifest_locator()).execute()
+ self.assertFalse('file0.txt' in updated_collection['manifest_text'])
+ self.assertTrue('file1.txt' in updated_collection['manifest_text'])
+
+ # Delete file1.txt from collection2
+ self.pool.apply(magicDirTest_RemoveFileFromCollection, (self.mounttmp, collection2.manifest_locator(), 'stream0', 'file1.txt',))
+ updated_collection = self.api.collections().get(uuid=collection2.manifest_locator()).execute()
+ self.assertFalse('file1.txt' in updated_collection['manifest_text'])
+ self.assertTrue('file2.txt' in updated_collection['manifest_text'])
+
+
+def magicDirTest_MoveAllFilesFromCollection(mounttmp, from_collection, to_collection, stream, files_per_stream):
+ class Test(unittest.TestCase):
+ def runTest(self):
+ self.magicDirTest_moveAllFilesFromCollection()
+
+ @profiled
+ def magicDirTest_moveAllFilesFromCollection(self):
+ for j in range(0, files_per_stream):
+ os.rename(os.path.join(mounttmp, from_collection, 'file'+str(j)+'.txt'), os.path.join(mounttmp, to_collection, 'file'+str(j)+'.txt'))
+
+ Test().runTest()
+
+class UsingMagicDir_CreateCollectionWithManyFilesAndMoveAllFilesIntoAnother(MountTestBase):
+ def setUp(self):
+ super(UsingMagicDir_CreateCollectionWithManyFilesAndMoveAllFilesIntoAnother, self).setUp()
+
+ @profiled
+ def magicDirTestMoveAllFiles_createCollectionWithManyFiles(self, streams=0, files_per_stream=0,
+ blocks_per_file=0, bytes_per_block=0, data='x'):
+ # Create collection
+ collection = arvados.collection.Collection(api_client=self.api)
+ for j in range(0, files_per_stream):
+ with collection.open("file"+str(j)+".txt", "w") as f:
+ f.write(data)
+ collection.save_new()
+ return collection
+
+ def test_UsingMagicDirCreateCollectionWithManyFilesAndMoveAllFilesIntoAnother(self):
+ streams = 2
+ files_per_stream = 200
+ data = 'x'
+
+ collection1 = self.magicDirTestMoveAllFiles_createCollectionWithManyFiles()
+ # Create collection with multiple files
+ collection2 = self.magicDirTestMoveAllFiles_createCollectionWithManyFiles(streams, files_per_stream, data)
+
+ # Mount FuseMagicDir
+ self.make_mount(fuse.MagicDirectory)
+
+ # Move all files from collection2 into collection1
+ self.pool.apply(magicDirTest_MoveAllFilesFromCollection, (self.mounttmp, collection2.manifest_locator(),
+ collection1.manifest_locator(), 'stream0', files_per_stream,))
+
+ updated_collection = self.api.collections().get(uuid=collection2.manifest_locator()).execute()
+ file_names = ["file%i.txt" % i for i in range(0, files_per_stream)]
+ for name in file_names:
+ self.assertFalse(name in updated_collection['manifest_text'])
+
+ updated_collection = self.api.collections().get(uuid=collection1.manifest_locator()).execute()
+ for name in file_names:
+ self.assertTrue(name in updated_collection['manifest_text'])
+
+
+# Move one file at a time from one collection into another
+class UsingMagicDir_CreateCollectionWithManyFilesAndMoveEachFileIntoAnother(MountTestBase):
+ def setUp(self):
+ super(UsingMagicDir_CreateCollectionWithManyFilesAndMoveEachFileIntoAnother, self).setUp()
+
+ @profiled
+ def magicDirTestMoveFiles_createCollectionWithManyFiles(self, streams=0, files_per_stream=0, data='x'):
+ # Create collection
+ collection = arvados.collection.Collection(api_client=self.api)
+ for j in range(0, files_per_stream):
+ with collection.open("file"+str(j)+".txt", "w") as f:
+ f.write(data)
+ collection.save_new()
+ return collection
+
+ def magicDirTestMoveFiles_oneEachIntoAnother(self, from_collection, to_collection, files_per_stream):
+ for j in range(0, files_per_stream):
+ self.pool.apply(magicDirTest_MoveFileFromCollection, (self.mounttmp, from_collection.manifest_locator(),
+ to_collection.manifest_locator(), 'stream0', 'file'+str(j)+'.txt',))
+
+ def test_UsingMagicDirCreateCollectionWithManyFilesAndMoveEachFileIntoAnother(self):
+ streams = 2
+ files_per_stream = 200
+ data = 'x'
+
+ collection1 = self.magicDirTestMoveFiles_createCollectionWithManyFiles()
+ # Create collection with multiple files
+ collection2 = self.magicDirTestMoveFiles_createCollectionWithManyFiles(streams, files_per_stream, data)
+
+ # Mount FuseMagicDir
+ self.make_mount(fuse.MagicDirectory)
+
+ # Move all files from collection2 into collection1
+ self.magicDirTestMoveFiles_oneEachIntoAnother(collection2, collection1, files_per_stream)
+
+ updated_collection = self.api.collections().get(uuid=collection2.manifest_locator()).execute()
+ file_names = ["file%i.txt" % i for i in range(0, files_per_stream)]
+ for name in file_names:
+ self.assertFalse(name in updated_collection['manifest_text'])
+
+ updated_collection = self.api.collections().get(uuid=collection1.manifest_locator()).execute()
+ for name in file_names:
+ self.assertTrue(name in updated_collection['manifest_text'])
+
+class FuseListLargeProjectContents(MountTestBase):
+ @profiled
+ def getProjectWithManyCollections(self):
+ project_contents = llfuse.listdir(self.mounttmp)
+ self.assertEqual(201, len(project_contents))
+ self.assertIn('Collection_1', project_contents)
+ return project_contents
+
+ @profiled
+ def listContentsInProjectWithManyCollections(self, project_contents):
+ project_contents = llfuse.listdir(self.mounttmp)
+ self.assertEqual(201, len(project_contents))
+ self.assertIn('Collection_1', project_contents)
+
+ for collection_name in project_contents:
+ collection_contents = llfuse.listdir(os.path.join(self.mounttmp, collection_name))
+ self.assertIn('baz', collection_contents)
+
+ def test_listLargeProjectContents(self):
+ self.make_mount(fuse.ProjectDirectory,
+ project_object=run_test_server.fixture('groups')['project_with_201_collections'])
+ project_contents = self.getProjectWithManyCollections()
+ self.listContentsInProjectWithManyCollections(project_contents)
import logging
import multiprocessing
import run_test_server
+import mock
-logger = logging.getLogger('arvados.arv-mount')
+from mount_test_base import MountTestBase
-class MountTestBase(unittest.TestCase):
- def setUp(self):
- # The underlying C implementation of open() makes a fstat() syscall
- # with the GIL still held. When the GETATTR message comes back to
- # llfuse (which in these tests is in the same interpreter process) it
- # can't acquire the GIL, so it can't service the fstat() call, so it
- # deadlocks. The workaround is to run some of our test code in a
- # separate process. Forturnately the multiprocessing module makes this
- # relatively easy.
- self.pool = multiprocessing.Pool(1)
-
- self.keeptmp = tempfile.mkdtemp()
- os.environ['KEEP_LOCAL_STORE'] = self.keeptmp
- self.mounttmp = tempfile.mkdtemp()
- run_test_server.run()
- run_test_server.authorize_with("admin")
- self.api = arvados.safeapi.ThreadSafeApiCache(arvados.config.settings())
-
- def make_mount(self, root_class, **root_kwargs):
- self.operations = fuse.Operations(os.getuid(), os.getgid(), enable_write=True)
- self.operations.inodes.add_entry(root_class(
- llfuse.ROOT_INODE, self.operations.inodes, self.api, 0, **root_kwargs))
- llfuse.init(self.operations, self.mounttmp, [])
- threading.Thread(None, llfuse.main).start()
- # wait until the driver is finished initializing
- self.operations.initlock.wait()
- return self.operations.inodes[llfuse.ROOT_INODE]
-
- def tearDown(self):
- self.pool.terminate()
- self.pool.join()
- del self.pool
-
- # llfuse.close is buggy, so use fusermount instead.
- #llfuse.close(unmount=True)
-
- count = 0
- success = 1
- while (count < 9 and success != 0):
- success = subprocess.call(["fusermount", "-u", self.mounttmp])
- time.sleep(0.1)
- count += 1
-
- self.operations.destroy()
-
- os.rmdir(self.mounttmp)
- shutil.rmtree(self.keeptmp)
- run_test_server.reset()
-
- def assertDirContents(self, subdir, expect_content):
- path = self.mounttmp
- if subdir:
- path = os.path.join(path, subdir)
- self.assertEqual(sorted(expect_content), sorted(llfuse.listdir(path)))
+logger = logging.getLogger('arvados.arv-mount')
class FuseMountTest(MountTestBase):
class FuseMagicTest(MountTestBase):
- def setUp(self):
- super(FuseMagicTest, self).setUp()
+ def setUp(self, api=None):
+ super(FuseMagicTest, self).setUp(api=api)
cw = arvados.CollectionWriter()
cw.write("data 1")
self.testcollection = cw.finish()
- self.api.collections().create(body={"manifest_text":cw.manifest_text()}).execute()
+ self.test_manifest = cw.manifest_text()
+ self.api.collections().create(body={"manifest_text":self.test_manifest}).execute()
def runTest(self):
self.make_mount(fuse.MagicDirectory)
self.pool.apply(fuseProjectMvTestHelper1, (self.mounttmp,))
+def fuseFsyncTestHelper(mounttmp, k):
+ class Test(unittest.TestCase):
+ def runTest(self):
+ fd = os.open(os.path.join(mounttmp, k), os.O_RDONLY)
+ os.fsync(fd)
+ os.close(fd)
+
+ Test().runTest()
+
+class FuseFsyncTest(FuseMagicTest):
+ def runTest(self):
+ self.make_mount(fuse.MagicDirectory)
+ self.pool.apply(fuseFsyncTestHelper, (self.mounttmp, self.testcollection))
+
+
+class MagicDirApiError(FuseMagicTest):
+ def setUp(self):
+ api = mock.MagicMock()
+ super(MagicDirApiError, self).setUp(api=api)
+ api.collections().get().execute.side_effect = iter([Exception('API fail'), {"manifest_text": self.test_manifest}])
+ api.keep.get.side_effect = Exception('Keep fail')
+
+ def runTest(self):
+ self.make_mount(fuse.MagicDirectory)
+
+ self.operations.inodes.inode_cache.cap = 1
+ self.operations.inodes.inode_cache.min_entries = 2
+
+ with self.assertRaises(OSError):
+ llfuse.listdir(os.path.join(self.mounttmp, self.testcollection))
+
+ llfuse.listdir(os.path.join(self.mounttmp, self.testcollection))
+
+
class FuseUnitTest(unittest.TestCase):
def test_sanitize_filename(self):
acceptable = [
import (
"log"
"sync"
+ "sync/atomic"
"time"
)
type bufferPool struct {
// limiter has a "true" placeholder for each in-use buffer.
limiter chan bool
+ // allocated is the number of bytes currently allocated to buffers.
+ allocated uint64
// Pool has unused buffers.
sync.Pool
}
func newBufferPool(count int, bufSize int) *bufferPool {
p := bufferPool{}
p.New = func() interface{} {
+ atomic.AddUint64(&p.allocated, uint64(bufSize))
return make([]byte, bufSize)
}
p.limiter = make(chan bool, count)
p.Pool.Put(buf)
<-p.limiter
}
+
+// Alloc returns the number of bytes allocated to buffers.
+func (p *bufferPool) Alloc() uint64 {
+ return atomic.LoadUint64(&p.allocated)
+}
+
+// Cap returns the maximum number of buffers allowed.
+func (p *bufferPool) Cap() int {
+ return cap(p.limiter)
+}
+
+// Len returns the number of buffers in use right now.
+func (p *bufferPool) Len() int {
+ return len(p.limiter)
+}
uri: "/" + TEST_HASH,
request_body: TEST_BLOCK,
})
+ never_delete = false
IssueRequest(
&RequestTester{
method: "DELETE",
var user_token = "NOT DATA MANAGER TOKEN"
data_manager_token = "DATA MANAGER TOKEN"
+ never_delete = false
+
unauth_req := &RequestTester{
method: "DELETE",
uri: "/" + TEST_HASH,
"net/http"
"os"
"regexp"
+ "runtime"
"strconv"
- "syscall"
+ "sync"
"time"
)
BytesUsed uint64 `json:"bytes_used"`
}
+type PoolStatus struct {
+ Alloc uint64 `json:"BytesAllocated"`
+ Cap int `json:"BuffersMax"`
+ Len int `json:"BuffersInUse"`
+}
+
type NodeStatus struct {
- Volumes []*VolumeStatus `json:"volumes"`
+ Volumes []*VolumeStatus `json:"volumes"`
+ BufferPool PoolStatus
+ Memory runtime.MemStats
}
+var st NodeStatus
+var stLock sync.Mutex
func StatusHandler(resp http.ResponseWriter, req *http.Request) {
- st := GetNodeStatus()
- if jstat, err := json.Marshal(st); err == nil {
+ stLock.Lock()
+ ReadNodeStatus(&st)
+ jstat, err := json.Marshal(&st)
+ stLock.Unlock()
+ if err == nil {
resp.Write(jstat)
} else {
log.Printf("json.Marshal: %s\n", err)
- log.Printf("NodeStatus = %v\n", st)
+ log.Printf("NodeStatus = %v\n", &st)
http.Error(resp, err.Error(), 500)
}
}
-// GetNodeStatus
-// Returns a NodeStatus struct describing this Keep
-// node's current status.
+// ReadNodeStatus populates the given NodeStatus struct with current
+// values.
//
-func GetNodeStatus() *NodeStatus {
- st := new(NodeStatus)
-
- st.Volumes = make([]*VolumeStatus, len(KeepVM.AllReadable()))
- for i, vol := range KeepVM.AllReadable() {
- st.Volumes[i] = vol.Status()
+func ReadNodeStatus(st *NodeStatus) {
+ vols := KeepVM.AllReadable()
+ if cap(st.Volumes) < len(vols) {
+ st.Volumes = make([]*VolumeStatus, len(vols))
}
- return st
-}
-
-// GetVolumeStatus
-// Returns a VolumeStatus describing the requested volume.
-//
-func GetVolumeStatus(volume string) *VolumeStatus {
- var fs syscall.Statfs_t
- var devnum uint64
-
- if fi, err := os.Stat(volume); err == nil {
- devnum = fi.Sys().(*syscall.Stat_t).Dev
- } else {
- log.Printf("GetVolumeStatus: os.Stat: %s\n", err)
- return nil
+ st.Volumes = st.Volumes[:0]
+ for _, vol := range vols {
+ if s := vol.Status(); s != nil {
+ st.Volumes = append(st.Volumes, s)
+ }
}
-
- err := syscall.Statfs(volume, &fs)
- if err != nil {
- log.Printf("GetVolumeStatus: statfs: %s\n", err)
- return nil
- }
- // These calculations match the way df calculates disk usage:
- // "free" space is measured by fs.Bavail, but "used" space
- // uses fs.Blocks - fs.Bfree.
- free := fs.Bavail * uint64(fs.Bsize)
- used := (fs.Blocks - fs.Bfree) * uint64(fs.Bsize)
- return &VolumeStatus{volume, devnum, free, used}
+ st.BufferPool.Alloc = bufs.Alloc()
+ st.BufferPool.Cap = bufs.Cap()
+ st.BufferPool.Len = bufs.Len()
+ runtime.ReadMemStats(&st.Memory)
}
// DeleteHandler processes DELETE requests.
// never_delete can be used to prevent the DELETE handler from
// actually deleting anything.
-var never_delete = false
+var never_delete = true
var maxBuffers = 128
var bufs *bufferPool
flag.BoolVar(
&never_delete,
"never-delete",
- false,
+ true,
"If set, nothing will be deleted. HTTP 405 will be returned "+
"for valid DELETE requests.")
flag.StringVar(
flag.Parse()
+ if never_delete != true {
+ log.Fatal("never_delete must be true, see #6221")
+ }
+
if maxBuffers < 0 {
log.Fatal("-max-buffers must be greater than zero.")
}
bufs = newBufferPool(maxBuffers, BLOCKSIZE)
if pidfile != "" {
- f, err := os.OpenFile(pidfile, os.O_RDWR | os.O_CREATE, 0777)
+ f, err := os.OpenFile(pidfile, os.O_RDWR|os.O_CREATE, 0777)
if err != nil {
log.Fatalf("open pidfile (%s): %s", pidfile, err)
}
- err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX | syscall.LOCK_NB)
+ err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
if err != nil {
log.Fatalf("flock pidfile (%s): %s", pidfile, err)
}
}
}
-// TestNodeStatus
-// Test that GetNodeStatus returns valid info about available volumes.
-//
-// TODO(twp): set up appropriate interfaces to permit more rigorous
-// testing.
-//
-func TestNodeStatus(t *testing.T) {
- defer teardown()
-
- // Set up test Keep volumes with some blocks.
- KeepVM = MakeTestVolumeManager(2)
- defer KeepVM.Close()
-
- vols := KeepVM.AllReadable()
- vols[0].Put(TEST_HASH, TEST_BLOCK)
- vols[1].Put(TEST_HASH_2, TEST_BLOCK_2)
-
- // Get node status and make a basic sanity check.
- st := GetNodeStatus()
- for i := range vols {
- volinfo := st.Volumes[i]
- mtp := volinfo.MountPoint
- if mtp != "/bogo" {
- t.Errorf("GetNodeStatus mount_point %s, expected /bogo", mtp)
- }
- if volinfo.DeviceNum == 0 {
- t.Errorf("uninitialized device_num in %v", volinfo)
- }
- if volinfo.BytesFree == 0 {
- t.Errorf("uninitialized bytes_free in %v", volinfo)
- }
- if volinfo.BytesUsed == 0 {
- t.Errorf("uninitialized bytes_used in %v", volinfo)
- }
- }
-}
-
// ========================================
// Helper functions for unit tests.
// ========================================
package main
import (
+ "errors"
"log"
"time"
)
if err != nil || trashRequest.BlockMtime != mtime.Unix() {
continue
}
- err = volume.Delete(trashRequest.Locator)
+
+ if never_delete {
+ err = errors.New("did not delete block because never_delete is true")
+ } else {
+ err = volume.Delete(trashRequest.Locator)
+ }
+
if err != nil {
log.Printf("%v Delete(%v): %v", volume, trashRequest.Locator, err)
- continue
+ } else {
+ log.Printf("%v Delete(%v) OK", volume, trashRequest.Locator)
}
- log.Printf("%v Delete(%v) OK", volume, trashRequest.Locator)
}
}
Expect no errors.
*/
func TestTrashWorkerIntegration_GetNonExistingLocator(t *testing.T) {
+ never_delete = false
testData := TrashWorkerTestData{
Locator1: "5d41402abc4b2a76b9719d911017c592",
Block1: []byte("hello"),
Expect the second locator in volume 2 to be unaffected.
*/
func TestTrashWorkerIntegration_LocatorInVolume1(t *testing.T) {
+ never_delete = false
testData := TrashWorkerTestData{
Locator1: TEST_HASH,
Block1: TEST_BLOCK,
Expect the first locator in volume 1 to be unaffected.
*/
func TestTrashWorkerIntegration_LocatorInVolume2(t *testing.T) {
+ never_delete = false
testData := TrashWorkerTestData{
Locator1: TEST_HASH,
Block1: TEST_BLOCK,
Expect locator to be deleted from both volumes.
*/
func TestTrashWorkerIntegration_LocatorInBothVolumes(t *testing.T) {
+ never_delete = false
testData := TrashWorkerTestData{
Locator1: TEST_HASH,
Block1: TEST_BLOCK,
Delete the second and expect the first to be still around.
*/
func TestTrashWorkerIntegration_MtimeMatchesForLocator1ButNotForLocator2(t *testing.T) {
+ never_delete = false
testData := TrashWorkerTestData{
Locator1: TEST_HASH,
Block1: TEST_BLOCK,
Expect the other unaffected.
*/
func TestTrashWorkerIntegration_TwoDifferentLocatorsInVolume1(t *testing.T) {
+ never_delete = false
testData := TrashWorkerTestData{
Locator1: TEST_HASH,
Block1: TEST_BLOCK,
will not be deleted becuase its Mtime is within the trash life time.
*/
func TestTrashWorkerIntegration_SameLocatorInTwoVolumesWithDefaultTrashLifeTime(t *testing.T) {
+ never_delete = false
testData := TrashWorkerTestData{
Locator1: TEST_HASH,
Block1: TEST_BLOCK,
performTrashWorkerTest(testData, t)
}
+/* Delete a block with matching mtime for locator in both volumes, but never_delete is true,
+ so block won't be deleted.
+*/
+func TestTrashWorkerIntegration_NeverDelete(t *testing.T) {
+ never_delete = true
+ testData := TrashWorkerTestData{
+ Locator1: TEST_HASH,
+ Block1: TEST_BLOCK,
+
+ Locator2: TEST_HASH,
+ Block2: TEST_BLOCK,
+
+ CreateData: true,
+
+ DeleteLocator: TEST_HASH,
+
+ ExpectLocator1: true,
+ ExpectLocator2: true,
+ }
+ performTrashWorkerTest(testData, t)
+}
+
/* Perform the test */
func performTrashWorkerTest(testData TrashWorkerTestData, t *testing.T) {
// Create Keep Volumes
}
// Status returns a VolumeStatus struct describing the volume's
-// current state.
+// current state, or nil if an error occurs.
//
func (v *UnixVolume) Status() *VolumeStatus {
var fs syscall.Statfs_t
t.Errorf("%s: should no longer be full", v)
}
}
+
+func TestNodeStatus(t *testing.T) {
+ v := TempUnixVolume(t, false, false)
+ defer _teardown(v)
+
+ // Get node status and make a basic sanity check.
+ volinfo := v.Status()
+ if volinfo.MountPoint != v.root {
+ t.Errorf("GetNodeStatus mount_point %s, expected %s", volinfo.MountPoint, v.root)
+ }
+ if volinfo.DeviceNum == 0 {
+ t.Errorf("uninitialized device_num in %v", volinfo)
+ }
+ if volinfo.BytesFree == 0 {
+ t.Errorf("uninitialized bytes_free in %v", volinfo)
+ }
+ if volinfo.BytesUsed == 0 {
+ t.Errorf("uninitialized bytes_used in %v", volinfo)
+ }
+}