services/keep/src/github.com
sdk/java/target
*.class
+apps/workbench/vendor/bundle
+services/api/vendor/bundle
+sdk/java/log
|_. Argument |_. Type |_. Description |_. Location |_. Example |
{background:#ccffcc}.|uuid|string|The UUID of the User in question.|path||
-h2. event_stream
-
-event_stream users
-
Arguments:
table(table table-bordered table-condensed).
"5ee633fe2569d2a42dd81b07490d5d13+82",
"c905c8d8443a9c44274d98b7c6cfaa32+94",
"d237a90bae3870b3b033aea1e99de4a9+10820"
- ],
- "log_stream_href":"https://qr1hi.arvadosapi.com/arvados/v1/jobs/qr1hi-8i9sb-n9k7qyp7bs5b9d4/log_tail_follow"
+ ]
}
-~$ <span class="userinput">arv job log_tail_follow --uuid qr1hi-8i9sb-n9k7qyp7bs5b9d4</span>
-Tue Dec 17 19:02:16 2013 salloc: Granted job allocation 1251
-Tue Dec 17 19:02:17 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 check slurm allocation
-Tue Dec 17 19:02:17 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 node compute13 - 8 slots
-Tue Dec 17 19:02:17 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 start
-Tue Dec 17 19:02:17 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 Install revision 76588bfc57f33ea1b36b82ca7187f465b73b4ca4
-Tue Dec 17 19:02:18 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 Clean-work-dir exited 0
-Tue Dec 17 19:02:19 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 Install exited 0
-Tue Dec 17 19:02:19 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 script GATK2-VariantFiltration
-Tue Dec 17 19:02:19 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 script_version 76588bfc57f33ea1b36b82ca7187f465b73b4ca4
-Tue Dec 17 19:02:19 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 script_parameters {"input":"5ee633fe2569d2a42dd81b07490d5d13+82","gatk_bundle":"d237a90bae3870b3b033aea1e99de4a9+10820","gatk_binary_tarball":"c905c8d8443a9c44274d98b7c6cfaa32+94"}
-Tue Dec 17 19:02:19 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 runtime_constraints {"max_tasks_per_node":0}
-Tue Dec 17 19:02:19 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 start level 0
-Tue Dec 17 19:02:19 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 status: 0 done, 0 running, 1 todo
-Tue Dec 17 19:02:19 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 0 job_task qr1hi-ot0gb-d3sjxerucfbvyev
-Tue Dec 17 19:02:19 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 0 child 4946 started on compute13.1
-Tue Dec 17 19:02:19 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 status: 0 done, 1 running, 0 todo
-Tue Dec 17 19:02:20 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 0 child 4946 on compute13.1 exit 0 signal 0 success=true
-Tue Dec 17 19:02:20 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 0 success in 1 seconds
-Tue Dec 17 19:02:20 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 0 output
-Tue Dec 17 19:02:20 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 wait for last 0 children to finish
-Tue Dec 17 19:02:20 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 status: 1 done, 0 running, 1 todo
-Tue Dec 17 19:02:20 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 start level 1
-Tue Dec 17 19:02:20 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 status: 1 done, 0 running, 1 todo
-Tue Dec 17 19:02:20 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 1 job_task qr1hi-ot0gb-w8ujbnulxjaamxf
-Tue Dec 17 19:02:20 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 1 child 4984 started on compute13.1
-Tue Dec 17 19:02:20 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 status: 1 done, 1 running, 0 todo
-Tue Dec 17 19:04:10 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 1 child 4984 on compute13.1 exit 0 signal 0 success=true
-Tue Dec 17 19:04:10 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 1 success in 110 seconds
-Tue Dec 17 19:04:10 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 1 output bedd6ff56b3ae9f90d873b1fcb72f9a3+91
-Tue Dec 17 19:04:10 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 wait for last 0 children to finish
-Tue Dec 17 19:04:10 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 status: 2 done, 0 running, 0 todo
-Tue Dec 17 19:04:10 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 release job allocation
-Tue Dec 17 19:04:10 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 Freeze not implemented
-Tue Dec 17 19:04:10 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 collate
-Tue Dec 17 19:04:10 2013 salloc: Job allocation 1251 has been revoked.
-Tue Dec 17 19:04:10 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 output bedd6ff56b3ae9f90d873b1fcb72f9a3+91
-Tue Dec 17 19:04:11 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 finish
-Tue Dec 17 19:04:12 2013 qr1hi-8i9sb-n9k7qyp7bs5b9d4 4867 log manifest is 1e77aaceee2df499e14dc5dde5c3d328+91
</code></pre>
</notextile>
"tasks_summary":{},
"dependencies":[
"c1bad4b39ca5a924e481008009d94e32+210"
- ],
- "log_stream_href":"https://qr1hi.arvadosapi.com/arvados/v1/jobs/qr1hi-8i9sb-1pm1t02dezhupss/log_tail_follow"
+ ]
}
</code></pre>
</notextile>
Go to the "Workbench dashboard":https://{{site.arvados_workbench_host}} and visit *Activity* %(rarr)→% *Recent jobs*. Your job should be near the top of the table. This table refreshes automatically. When the job has completed successfully, it will show <span class="label label-success">finished</span> in the *Status* column.
-On the command line, you can access log messages while the job runs using @arv job log_tail_follow@:
-
-notextile. <pre><code>~$ <span class="userinput">arv job log_tail_follow --uuid qr1hi-8i9sb-xxxxxxxxxxxxxxx</span></code></pre>
-
-This will print out the last several lines of the log for that job.
-
h2. Inspect the job output
On the "Workbench dashboard":https://{{site.arvados_workbench_host}}, look for the *Output* column of the *Recent jobs* table. Click on the link under *Output* for your job to go to the files page with the job output. The files page lists all the files that were output by the job. Click on the link under the *file* column to view a file, or click on the download icon <span class="glyphicon glyphicon-download-alt"></span> to download the output file.
},
"dependencies":[
"c1bad4b39ca5a924e481008009d94e32+210"
- ],
- "log_stream_href":null
+ ]
}
</code></pre>
</notextile>
# Make sure the secret is at least 30 characters and all random,
# no regular words or you'll be exposed to dictionary attacks.
Server::Application.config.secret_token = '@@API_SECRET@@'
+
+# The blob_signing_key is a string of alphanumeric characters used
+# to sign permission hints for Keep locators. It must be identical
+# to the permission key given to Keep.
+Server::Application.config.blob_signing_key = '@@KEEP_SIGNING_SECRET@@'
rsync -rlp --exclude=docker/ --exclude='**/log/*' --exclude='**/tmp/*' \
--chmod=Da+rx,Fa+rX ../ build/
find build/ -name \*.gem -delete
- cd build/sdk/python/ && ./build.sh
+ cd build/services/fuse/ && python setup.py build
+ cd build/sdk/python/ && python setup.py build
cd build/sdk/cli && gem build arvados-cli.gemspec
cd build/sdk/ruby && gem build arvados.gemspec
touch build/.buildstamp
# will be chosen randomly at build time. This is the
# recommended setting.
+# The signing key shared by Keep and the API server to verify
+# blob permission signatures.
+KEEP_SIGNING_SECRET:
+
# The value for the Rails config.secret_token setting.
API_SECRET:
# Install Arvados packages.
RUN find /usr/src/arvados/sdk -name '*.gem' -print0 | \
xargs -0rn 1 gem install && \
+ cd /usr/src/arvados/services/fuse && \
+ python setup.py install && \
cd /usr/src/arvados/sdk/python && \
python setup.py install
case api_method
when
- 'arvados.users.event_stream',
- 'arvados.jobs.log_stream',
'arvados.jobs.log_tail_follow'
# Special case for methods that respond with data streams rather
gem 'omniauth-oauth2', '1.1.1'
gem 'andand'
-gem 'redis'
gem 'test_after_commit', :group => :test
rake (10.2.2)
rdoc (3.12.2)
json (~> 1.4)
- redis (3.0.7)
ref (1.0.5)
rvm-capistrano (1.5.1)
capistrano (~> 2.15.4)
pg
pg_power
rails (~> 3.2.0)
- redis
rvm-capistrano
sass-rails (>= 3.2.0)
simplecov (~> 0.7.1)
@job.reload
end
end
- @redis = Redis.new(:timeout => 0)
- if @redis.exists @job.uuid
- # A log buffer exists. Start by showing the last few KB.
- @redis.
- getrange(@job.uuid, 0 - [@opts[:buffer_size], 1].max, -1).
- sub(/^[^\n]*\n?/, '').
- split("\n").
- each do |line|
- yield "#{line}\n"
- end
- end
- # TODO: avoid missing log entries between getrange() above and
- # subscribe() below.
- @redis.subscribe(@job.uuid) do |event|
- event.message do |channel, msg|
- if msg == "end"
- @redis.unsubscribe @job.uuid
- else
- yield "#{msg}\n"
- end
- end
- end
- end
- end
-
- def self._log_tail_follow_requires_parameters
- {
- buffer_size: {type: 'integer', required: false, default: 2**13}
- }
- end
- def log_tail_follow
- if !@object.andand.uuid
- return render_not_found
- end
- if client_accepts_plain_text_stream
- self.response.headers['Last-Modified'] = Time.now.ctime.to_s
- self.response_body = LogStreamer.new @object, {
- buffer_size: (params[:buffer_size].to_i rescue 2**13)
- }
- else
- render json: {
- href: url_for(uuid: @object.uuid),
- comment: ('To retrieve the log stream as plain text, ' +
- 'use a request header like "Accept: text/plain"')
- }
end
end
accept_attribute_as_json :prefs, Hash
skip_before_filter :find_object_by_uuid, only:
- [:activate, :event_stream, :current, :system, :setup]
+ [:activate, :current, :system, :setup]
skip_before_filter :render_404_if_no_object, only:
- [:activate, :event_stream, :current, :system, :setup]
+ [:activate, :current, :system, :setup]
before_filter :admin_required, only: [:setup, :unsetup]
def current
show
end
- class ChannelStreamer
- Q_UPDATE_INTERVAL = 12
- def initialize(opts={})
- @opts = opts
- end
- def each
- return unless @opts[:channel]
- @redis = Redis.new(:timeout => 0)
- @redis.subscribe(@opts[:channel]) do |event|
- event.message do |channel, msg|
- yield msg + "\n"
- end
- end
- end
- end
-
- def event_stream
- channel = current_user.andand.uuid
- if current_user.andand.is_admin
- channel = params[:uuid] || channel
- end
- if client_accepts_plain_text_stream
- self.response.headers['Last-Modified'] = Time.now.ctime.to_s
- self.response_body = ChannelStreamer.new(channel: channel)
- else
- render json: {
- href: url_for(uuid: channel),
- comment: ('To retrieve the event stream as plain text, ' +
- 'use a request header like "Accept: text/plain"')
- }
- end
- end
-
def activate
if current_user.andand.is_admin && params[:uuid]
@object = User.find params[:uuid]
t.add :runtime_constraints
t.add :tasks_summary
t.add :dependencies
- t.add :log_stream_href
- t.add :log_buffer
t.add :nondeterministic
t.add :repository
t.add :supplied_script_version
running: false)
end
- def log_stream_href
- unless self.finished_at
- "#{current_api_base}/#{self.class.to_s.pluralize.underscore}/#{self.uuid}/log_tail_follow"
- end
- end
-
def self.queue
self.where('started_at is ? and is_locked_by_uuid is ? and cancelled_at is ? and success is ?',
nil, nil, nil, nil).
end
end
end
-
- def log_buffer
- begin
- @@redis ||= Redis.new(:timeout => 0)
- if @@redis.exists uuid
- @@redis.getrange(uuid, 0 - 2**10, -1)
- end
- rescue Redis::CannotConnectError
- return '(not available)'
- end
- end
end
resources :job_tasks
resources :jobs do
get 'queue', on: :collection
- get 'log_tail_follow', on: :member
post 'cancel', on: :member
end
resources :keep_disks do
resources :users do
get 'current', on: :collection
get 'system', on: :collection
- get 'event_stream', on: :member
post 'activate', on: :member
post 'setup', on: :collection
post 'unsetup', on: :member
"bytes"
"crypto/md5"
"encoding/json"
- "errors"
"flag"
"fmt"
"github.com/gorilla/mux"
return e.ErrMsg
}
-// This error is returned by ReadAtMost if the available
-// data exceeds BLOCKSIZE bytes.
-var ReadErrorTooLong = errors.New("Too long")
-
// TODO(twp): continue moving as much code as possible out of main
// so it can be effectively tested. Esp. handling and postprocessing
// of command line flags (identifying Keep volumes and initializing
// Read the block data to be stored.
	// If the request exceeds BLOCKSIZE bytes, issue an HTTP 500 error.
//
- // Note: because req.Body is a buffered Reader, each Read() call will
- // collect only the data in the network buffer (typically 16384 bytes),
- // even if it is passed a much larger slice.
- //
- // Instead, call ReadAtMost to read data from the socket
- // repeatedly until either EOF or BLOCKSIZE bytes have been read.
- //
- if buf, err := ReadAtMost(req.Body, BLOCKSIZE); err == nil {
+ if req.ContentLength > BLOCKSIZE {
+ http.Error(resp, TooLongError.Error(), TooLongError.HTTPCode)
+ return
+ }
+
+ buf := make([]byte, req.ContentLength)
+ nread, err := io.ReadFull(req.Body, buf)
+ if err != nil {
+ http.Error(resp, err.Error(), 500)
+ } else if int64(nread) < req.ContentLength {
+ http.Error(resp, "request truncated", 500)
+ } else {
if err := PutBlock(buf, hash); err == nil {
// Success; add a size hint, sign the locator if
// possible, and return it to the client.
ke := err.(*KeepError)
http.Error(resp, ke.Error(), ke.HTTPCode)
}
- } else {
- log.Println("error reading request: ", err)
- errmsg := err.Error()
- if err == ReadErrorTooLong {
- // Use a more descriptive error message that includes
- // the maximum request size.
- errmsg = fmt.Sprintf("Max request size %d bytes", BLOCKSIZE)
- }
- http.Error(resp, errmsg, 500)
}
+ return
}
// IndexHandler
}
}
-// ReadAtMost
-// Reads bytes repeatedly from an io.Reader until either
-// encountering EOF, or the maxbytes byte limit has been reached.
-// Returns a byte slice of the bytes that were read.
-//
-// If the reader contains more than maxbytes, returns a nil slice
-// and an error.
-//
-func ReadAtMost(r io.Reader, maxbytes int) ([]byte, error) {
- // Attempt to read one more byte than maxbytes.
- lr := io.LimitReader(r, int64(maxbytes+1))
- buf, err := ioutil.ReadAll(lr)
- if len(buf) > maxbytes {
- return nil, ReadErrorTooLong
- }
- return buf, err
-}
-
// IsValidLocator
// Return true if the specified string is a valid Keep locator.
// When Keep is extended to support hash types other than MD5,
// corrupted data block.
//
func (v *UnixVolume) Read(loc string) ([]byte, error) {
- var f *os.File
- var err error
- var buf []byte
-
blockFilename := filepath.Join(v.root, loc[0:3], loc)
-
- f, err = os.Open(blockFilename)
+ buf, err := ioutil.ReadFile(blockFilename)
if err != nil {
- return nil, err
- }
-
- if buf, err = ioutil.ReadAll(f); err != nil {
log.Printf("%s: reading %s: %s\n", v, blockFilename, err)
- return buf, err
+ return nil, err
}
- // Success!
return buf, nil
}