# -*- mode: perl; perl-indent-level: 2; indent-tabs-mode: nil; -*-

crunch-job: Execute job steps, save snapshots as requested, collate output.

Obtain job details from Arvados, run tasks on compute nodes (typically
invoked by scheduler on controller):

 crunch-job --job x-y-z --git-dir /path/to/repo/.git

Obtain job details from command line, run tasks on local machine
(typically invoked by application or developer on VM):

 crunch-job --job '{"script_version":"/path/to/working/tree","script":"scriptname",...}'

 crunch-job --job '{"repository":"https://github.com/curoverse/arvados.git","script_version":"master","script":"scriptname",...}'

If the job is already locked, steal the lock and run it anyway.

Path to a .git directory (or a git URL) where the commit given in the
job's C<script_version> attribute is to be found. If this is I<not>
given, the job's C<repository> attribute will be used.

Arvados API authorization token to use during the course of the job.

Do not clear per-job/task temporary directories during initial job
setup. This can speed up development and debugging when running jobs

UUID of the job to run, or a JSON-encoded job resource without a
UUID. If the latter is given, a new job object will be created.

=head1 RUNNING JOBS LOCALLY

crunch-job's log messages appear on stderr along with the job tasks'
stderr streams. The log is saved in Keep at each checkpoint and when

If the job succeeds, the job's output locator is printed on stdout.

While the job is running, the following signals are accepted:

=item control-C, SIGINT, SIGQUIT

Save a checkpoint, terminate any job tasks that are running, and stop.

Save a checkpoint and continue.

Refresh node allocation (i.e., check whether any nodes have been added
or de-allocated) and attributes of the Job record that should affect
behavior (e.g., cancel job if cancelled_at becomes non-nil).
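
For example, assuming the crunch-job process id is known, a running job
can be told to re-check its node allocation and Job record (e.g. to
notice a cancellation) from another shell:

 kill -HUP <pid of crunch-job>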
use POSIX ':sys_wait_h';
use POSIX qw(strftime);
use Fcntl qw(F_GETFL F_SETFL O_NONBLOCK);
use Digest::MD5 qw(md5_hex);
use File::Path qw( make_path remove_tree );
use constant EX_TEMPFAIL => 75;

$ENV{"TMPDIR"} ||= "/tmp";
unless (defined $ENV{"CRUNCH_TMP"}) {
  $ENV{"CRUNCH_TMP"} = $ENV{"TMPDIR"} . "/crunch-job";
  if ($ENV{"USER"} ne "crunch" && $< != 0) {
    # use a tmp dir unique for my uid
    $ENV{"CRUNCH_TMP"} .= "-$<";

# Create the tmp directory if it does not exist
if ( ! -d $ENV{"CRUNCH_TMP"} ) {
  make_path $ENV{"CRUNCH_TMP"} or die "Failed to create temporary working directory: " . $ENV{"CRUNCH_TMP"};

$ENV{"JOB_WORK"} = $ENV{"CRUNCH_TMP"} . "/work";
$ENV{"CRUNCH_INSTALL"} = "$ENV{CRUNCH_TMP}/opt";
$ENV{"CRUNCH_WORK"} = $ENV{"JOB_WORK"}; # deprecated
mkdir ($ENV{"JOB_WORK"});

GetOptions('force-unlock' => \$force_unlock,
           'git-dir=s' => \$git_dir,
           'job=s' => \$jobspec,
           'job-api-token=s' => \$job_api_token,
           'no-clear-tmp' => \$no_clear_tmp,
           'resume-stash=s' => \$resume_stash,

if (defined $job_api_token) {
  $ENV{ARVADOS_API_TOKEN} = $job_api_token;

my $have_slurm = exists $ENV{SLURM_JOBID} && exists $ENV{SLURM_NODELIST};
  $main::ENV{CRUNCH_DEBUG} = 1;
  $main::ENV{CRUNCH_DEBUG} = 0;

my $arv = Arvados->new('apiVersion' => 'v1');

my $User = api_call("users/current");

if ($jobspec =~ /^[-a-z\d]+$/)
  # $jobspec is an Arvados UUID, not a JSON job specification
  $Job = api_call("jobs/get", uuid => $jobspec);
  if (!$force_unlock) {
    # Claim this job, and make sure nobody else does
    eval { api_call("jobs/lock", uuid => $Job->{uuid}); };
      Log(undef, "Error while locking job, exiting ".EX_TEMPFAIL);
  $Job = JSON::decode_json($jobspec);
  map { croak ("No $_ specified") unless $Job->{$_} }
    qw(script script_version script_parameters);
  $Job->{'is_locked_by_uuid'} = $User->{'uuid'};
  $Job->{'started_at'} = gmtime;
  $Job->{'state'} = 'Running';
  $Job = api_call("jobs/create", job => $Job);

$job_id = $Job->{'uuid'};

my $keep_logfile = $job_id . '.log.txt';
log_writer_start($keep_logfile);

$Job->{'runtime_constraints'} ||= {};
$Job->{'runtime_constraints'}->{'max_tasks_per_node'} ||= 0;
my $max_ncpus = $Job->{'runtime_constraints'}->{'max_tasks_per_node'};

my $gem_versions = `gem list --quiet arvados-cli 2>/dev/null`;
  $gem_versions =~ s/^arvados-cli \(/ with arvados-cli Gem version(s) /;
  chomp($gem_versions);
  chop($gem_versions);  # Closing parentheses
     "running from " . ((-e $0) ? realpath($0) : "stdin") . $gem_versions);

Log (undef, "check slurm allocation");
# Should use $ENV{SLURM_TASKS_PER_NODE} instead of sinfo? (eg. "4(x3),2,4(x2)")
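# A minimal sketch of how that compact format could be expanded (not used
# here; variable names below are illustrative only):
#
#   my @tasks_per_node;
#   foreach my $tok (split /,/, ($ENV{SLURM_TASKS_PER_NODE} || "")) {
#     my ($count, $repeat) = $tok =~ /^(\d+)(?:\(x(\d+)\))?$/;
#     push @tasks_per_node, ($count) x ($repeat || 1);
#   }
#   # "4(x3),2,4(x2)" expands to (4, 4, 4, 2, 4, 4).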
my $localcpus = 0 + `grep -cw ^processor /proc/cpuinfo` || 1;
push @sinfo, "$localcpus localhost";
if (exists $ENV{SLURM_NODELIST})
  push @sinfo, `sinfo -h --format='%c %N' --nodes=\Q$ENV{SLURM_NODELIST}\E`;
  my ($ncpus, $slurm_nodelist) = split;
  $ncpus = $max_ncpus if $max_ncpus && $ncpus > $max_ncpus;
  while ($slurm_nodelist =~ s/^([^\[,]+?(\[.*?\])?)(,|$)//)
    if ($nodelist =~ /\[((\d+)(-(\d+))?(,(\d+)(-(\d+))?)*)\]/)
      foreach (split (",", $ranges))
      push @nodelist, map {
        $n =~ s/\[[-,\d]+\]/$_/;
      push @nodelist, $nodelist;
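# For example, a SLURM nodelist like "compute[1-3,5],login1" comes out of
# the loop above as the individual names compute1, compute2, compute3,
# compute5 and login1.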
foreach my $nodename (@nodelist)
  Log (undef, "node $nodename - $ncpus slots");
  my $node = { name => $nodename,
  foreach my $cpu (1..$ncpus)
    push @slot, { node => $node,
push @node, @nodelist;

# Ensure that we get one jobstep running on each allocated node before
# we start overloading nodes with concurrent steps
@slot = sort { $a->{cpu} <=> $b->{cpu} } @slot;

$Job->update_attributes(
  'tasks_summary' => { 'failed' => 0,

Log (undef, "start");

$SIG{'INT'} = sub { $main::please_freeze = 1; };
$SIG{'QUIT'} = sub { $main::please_freeze = 1; };
$SIG{'TERM'} = \&croak;
$SIG{'TSTP'} = sub { $main::please_freeze = 1; };
$SIG{'ALRM'} = sub { $main::please_info = 1; };
$SIG{'CONT'} = sub { $main::please_continue = 1; };
$SIG{'HUP'} = sub { $main::please_refresh = 1; };

$main::please_freeze = 0;
$main::please_info = 0;
$main::please_continue = 0;
$main::please_refresh = 0;
my $jobsteps_must_output_keys = 0;      # becomes 1 when any task outputs a key

grep { $ENV{$1} = $2 if /^(NOCACHE.*?)=(.*)/ } split ("\n", $$Job{knobs});

$ENV{"CRUNCH_JOB_UUID"} = $job_id;
$ENV{"JOB_UUID"} = $job_id;

my @jobstep_todo = ();
my @jobstep_done = ();
my @jobstep_tomerge = ();
my $jobstep_tomerge_level = 0;
my $squeue_kill_checked;
my $latest_refresh = scalar time;

if (defined $Job->{thawedfromkey})
  thaw ($Job->{thawedfromkey});
  my $first_task = api_call("job_tasks/create", job_task => {
    'job_uuid' => $Job->{'uuid'},
  push @jobstep, { 'level' => 0,
                   'arvados_task' => $first_task,
  push @jobstep_todo, 0;

must_lock_now("$ENV{CRUNCH_TMP}/.lock", "a job is already running here.");

my $build_script = handle_readall(\*DATA);
my $nodelist = join(",", @node);
my $git_tar_count = 0;

if (!defined $no_clear_tmp) {
  # Clean out crunch_tmp/work, crunch_tmp/opt, crunch_tmp/src*
  Log (undef, "Clean work dirs");

  my $cleanpid = fork();
    srun (["srun", "--nodelist=$nodelist", "-D", $ENV{'TMPDIR'}],
          ['bash', '-c', 'if mount | grep -q $JOB_WORK/; then for i in $JOB_WORK/*keep $CRUNCH_TMP/task/*.keep; do /bin/fusermount -z -u $i; done; fi; sleep 1; rm -rf $JOB_WORK $CRUNCH_INSTALL $CRUNCH_TMP/task $CRUNCH_TMP/src*']);
    last if $cleanpid == waitpid (-1, WNOHANG);
    freeze_if_want_freeze ($cleanpid);
    select (undef, undef, undef, 0.1);
  Log (undef, "Cleanup command exited ".exit_status_s($?));

# If this job requires a Docker image, install that.
my $docker_bin = "/usr/bin/docker.io";
my ($docker_locator, $docker_stream, $docker_hash);
if ($docker_locator = $Job->{docker_image_locator}) {
  ($docker_stream, $docker_hash) = find_docker_image($docker_locator);
    croak("No Docker image hash found from locator $docker_locator");
  $docker_stream =~ s/^\.//;
  my $docker_install_script = qq{
if ! $docker_bin images -q --no-trunc | grep -qxF \Q$docker_hash\E; then
    arv-get \Q$docker_locator$docker_stream/$docker_hash.tar\E | $docker_bin load
  my $docker_pid = fork();
  if ($docker_pid == 0)
    srun (["srun", "--nodelist=" . join(',', @node)],
          ["/bin/sh", "-ec", $docker_install_script]);
    last if $docker_pid == waitpid (-1, WNOHANG);
    freeze_if_want_freeze ($docker_pid);
    select (undef, undef, undef, 0.1);
    croak("Installing Docker image from $docker_locator exited "

  if ($Job->{arvados_sdk_version}) {
    # The job also specifies an Arvados SDK version.  Add the SDKs to the
    # tar file for the build script to install.
    Log(undef, sprintf("Packing Arvados SDK version %s for installation",
                       $Job->{arvados_sdk_version}));
    add_git_archive("git", "--git-dir=$git_dir", "archive",
                    "--prefix=.arvados.sdk/",
                    $Job->{arvados_sdk_version}, "sdk");

if (!defined $git_dir && $Job->{'script_version'} =~ m{^/}) {
  # If script_version looks like an absolute path, *and* the --git-dir
  # argument was not given -- which implies we were not invoked by
  # crunch-dispatch -- we will use the given path as a working
  # directory instead of resolving script_version to a git commit (or
  # doing anything else with git).
  $ENV{"CRUNCH_SRC_COMMIT"} = $Job->{'script_version'};
  $ENV{"CRUNCH_SRC"} = $Job->{'script_version'};

  # Resolve the given script_version to a git commit sha1.  Also, if
  # the repository is remote, clone it into our local filesystem: this
  # ensures "git archive" will work, and is necessary to reliably
  # resolve a symbolic script_version like "master^".
  $ENV{"CRUNCH_SRC"} = "$ENV{CRUNCH_TMP}/src";

  Log (undef, "Looking for version ".$Job->{script_version}." from repository ".$Job->{repository});

  $ENV{"CRUNCH_SRC_COMMIT"} = $Job->{script_version};

  # If we're running under crunch-dispatch, it will have already
  # pulled the appropriate source tree into its own repository, and
  # given us that repo's path as $git_dir.
  # If we're running a "local" job, we might have to fetch content
  # from a remote repository.
  # (Currently crunch-dispatch gives a local path with --git-dir, but
  # we might as well accept URLs there too in case it changes its
  my $repo = $git_dir || $Job->{'repository'};

  # Repository can be remote or local. If remote, we'll need to fetch it
  # to a local dir before doing `git log` et al.

  if ($repo =~ m{://|^[^/]*:}) {
    # $repo is a git url we can clone, like git:// or https:// or
    # file:/// or [user@]host:repo.git. Note "user/name@host:foo" is
    # not recognized here because distinguishing that from a local
    # path is too fragile. If you really need something strange here,
    # use the ssh:// form.
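    # Illustrative forms this branch accepts (hostnames are examples only):
    #   git://git.example.com/arvados.git
    #   https://github.com/curoverse/arvados.git
    #   ssh://git@git.example.com/arvados.git
    #   git@git.example.com:arvados.git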
    $repo_location = 'remote';
  } elsif ($repo =~ m{^\.*/}) {
    # $repo is a local path to a git index. We'll also resolve ../foo
    # to ../foo/.git if the latter is a directory. To help
    # disambiguate local paths from named hosted repositories, this
    # form must be given as ./ or ../ if it's a relative path.
    if (-d "$repo/.git") {
      $repo = "$repo/.git";
    $repo_location = 'local';
    # $repo is none of the above. It must be the name of a hosted
    my $arv_repo_list = api_call("repositories/list",
                                 'filters' => [['name','=',$repo]]);
    my @repos_found = @{$arv_repo_list->{'items'}};
    my $n_found = $arv_repo_list->{'serverResponse'}->{'items_available'};
      Log(undef, "Repository '$repo' -> "
          . join(", ", map { $_->{'uuid'} } @repos_found));
      croak("Error: Found $n_found repositories with name '$repo'.");
    $repo = $repos_found[0]->{'fetch_url'};
    $repo_location = 'remote';
  Log(undef, "Using $repo_location repository '$repo'");
  $ENV{"CRUNCH_SRC_URL"} = $repo;

  # Resolve given script_version (we'll call that $treeish here) to a
  # commit sha1 ($commit).
  my $treeish = $Job->{'script_version'};
  if ($repo_location eq 'remote') {
    # We minimize excess object-fetching by re-using the same bare
    # repository in CRUNCH_TMP/.git for multiple crunch-jobs -- we
    # just keep adding remotes to it as needed.
    my $local_repo = $ENV{'CRUNCH_TMP'}."/.git";
    my $gitcmd = "git --git-dir=\Q$local_repo\E";

    # Set up our local repo for caching remote objects, making
    if (!-d $local_repo) {
      make_path($local_repo) or croak("Error: could not create $local_repo");
    # This works (exits 0 and doesn't delete fetched objects) even
    # if $local_repo is already initialized:
    `$gitcmd init --bare`;
      croak("Error: $gitcmd init --bare exited ".exit_status_s($?));

    # If $treeish looks like a hash (or abbrev hash) we look it up in
    # our local cache first, since that's cheaper. (We don't want to
    # do that with tags/branches though -- those change over time, so
    # they should always be resolved by the remote repo.)
    if ($treeish =~ /^[0-9a-f]{7,40}$/s) {
      # Hide stderr because it's normal for this to fail:
      my $sha1 = `$gitcmd rev-list -n1 ''\Q$treeish\E 2>/dev/null`;
      # Careful not to resolve a branch named abcdeff to commit 1234567:
          $sha1 =~ /^$treeish/ &&
          $sha1 =~ /^([0-9a-f]{40})$/s) {
        Log(undef, "Commit $commit already present in $local_repo");

    if (!defined $commit) {
      # If $treeish isn't just a hash or abbrev hash, or isn't here
      # yet, we need to fetch the remote to resolve it correctly.
      # First, remove all local heads. This prevents a name that does
      # not exist on the remote from resolving to (or colliding with)
      # a previously fetched branch or tag (possibly from a different
      remove_tree("$local_repo/refs/heads", {keep_root => 1});

      Log(undef, "Fetching objects from $repo to $local_repo");
      `$gitcmd fetch --no-progress --tags ''\Q$repo\E \Q+refs/heads/*:refs/heads/*\E`;
        croak("Error: `$gitcmd fetch` exited ".exit_status_s($?));

    # Now that the data is all here, we will use our local repo for
    # the rest of our git activities.

    my $gitcmd = "git --git-dir=\Q$repo\E";
    my $sha1 = `$gitcmd rev-list -n1 ''\Q$treeish\E`;
    unless ($? == 0 && $sha1 =~ /^([0-9a-f]{40})$/) {
      croak("`$gitcmd rev-list` exited "
            .", '$treeish' not found. Giving up.");
    Log(undef, "Version $treeish is commit $commit");

  if ($commit ne $Job->{'script_version'}) {
    # Record the real commit id in the database, frozentokey, logs,
    # etc. -- instead of an abbreviation or a branch name which can
    # become ambiguous or point to a different commit in the future.
    if (!$Job->update_attributes('script_version' => $commit)) {
      croak("Error: failed to update job's script_version attribute");

  $ENV{"CRUNCH_SRC_COMMIT"} = $commit;
  add_git_archive("$gitcmd archive ''\Q$commit\E");

my $git_archive = combined_git_archive();
if (!defined $git_archive) {
  Log(undef, "Skip install phase (no git archive)");
    Log(undef, "Warning: This probably means workers have no source tree!");
  Log(undef, "Run install script on all workers");

  my @srunargs = ("srun",
                  "--nodelist=$nodelist",
                  "-D", $ENV{'TMPDIR'}, "--job-name=$job_id");
  my @execargs = ("sh", "-c",
                  "mkdir -p $ENV{CRUNCH_INSTALL} && cd $ENV{CRUNCH_TMP} && perl -");

  my $installpid = fork();
  if ($installpid == 0)
    srun (\@srunargs, \@execargs, {}, $build_script . $git_archive);
    last if $installpid == waitpid (-1, WNOHANG);
    freeze_if_want_freeze ($installpid);
    select (undef, undef, undef, 0.1);
  my $install_exited = $?;
  Log (undef, "Install script exited ".exit_status_s($install_exited));
  foreach my $tar_filename (map { tar_filename_n($_); } (1..$git_tar_count)) {
    unlink($tar_filename);
  exit (1) if $install_exited != 0;

foreach (qw (script script_version script_parameters runtime_constraints))
       (ref($Job->{$_}) ? JSON::encode_json($Job->{$_}) : $Job->{$_}));
foreach (split (/\n/, $Job->{knobs}))
  Log (undef, "knob " . $_);

$main::success = undef;

my $thisround_succeeded = 0;
my $thisround_failed = 0;
my $thisround_failed_multiple = 0;

@jobstep_todo = sort { $jobstep[$a]->{level} <=> $jobstep[$b]->{level}
                       or $a <=> $b } @jobstep_todo;
my $level = $jobstep[$jobstep_todo[0]]->{level};
Log (undef, "start level $level");

my @freeslot = (0..$#slot);
my $progress_is_dirty = 1;
my $progress_stats_updated = 0;

update_progress_stats();

for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
  my $id = $jobstep_todo[$todo_ptr];
  my $Jobstep = $jobstep[$id];
  if ($Jobstep->{level} != $level)

  pipe $reader{$id}, "writer" or croak ($!);
  my $flags = fcntl ($reader{$id}, F_GETFL, 0) or croak ($!);
  fcntl ($reader{$id}, F_SETFL, $flags | O_NONBLOCK) or croak ($!);

  my $childslot = $freeslot[0];
  my $childnode = $slot[$childslot]->{node};
  my $childslotname = join (".",
                            $slot[$childslot]->{node}->{name},
                            $slot[$childslot]->{cpu});

  my $childpid = fork();
    $SIG{'INT'} = 'DEFAULT';
    $SIG{'QUIT'} = 'DEFAULT';
    $SIG{'TERM'} = 'DEFAULT';

    foreach (values (%reader))
    fcntl ("writer", F_SETFL, 0) or croak ($!); # no close-on-exec
    open(STDOUT,">&writer");
    open(STDERR,">&writer");

    delete $ENV{"GNUPGHOME"};
    $ENV{"TASK_UUID"} = $Jobstep->{'arvados_task'}->{'uuid'};
    $ENV{"TASK_QSEQUENCE"} = $id;
    $ENV{"TASK_SEQUENCE"} = $level;
    $ENV{"JOB_SCRIPT"} = $Job->{script};
    while (my ($param, $value) = each %{$Job->{script_parameters}}) {
      $param =~ tr/a-z/A-Z/;
      $ENV{"JOB_PARAMETER_$param"} = $value;
    $ENV{"TASK_SLOT_NODE"} = $slot[$childslot]->{node}->{name};
    $ENV{"TASK_SLOT_NUMBER"} = $slot[$childslot]->{cpu};
    $ENV{"TASK_WORK"} = $ENV{"CRUNCH_TMP"}."/task/$childslotname";
    $ENV{"HOME"} = $ENV{"TASK_WORK"};
    $ENV{"TASK_KEEPMOUNT"} = $ENV{"TASK_WORK"}.".keep";
    $ENV{"TASK_TMPDIR"} = $ENV{"TASK_WORK"}; # deprecated
    $ENV{"CRUNCH_NODE_SLOTS"} = $slot[$childslot]->{node}->{ncpus};
    $ENV{"PATH"} = $ENV{"CRUNCH_INSTALL"} . "/bin:" . $ENV{"PATH"};

      "--nodelist=".$childnode->{name},
      qw(-n1 -c1 -N1 -D), $ENV{'TMPDIR'},
      "--job-name=$job_id.$id.$$",
      "if [ -e $ENV{TASK_WORK} ]; then rm -rf $ENV{TASK_WORK}; fi; "
      ."mkdir -p $ENV{CRUNCH_TMP} $ENV{JOB_WORK} $ENV{TASK_WORK} $ENV{TASK_KEEPMOUNT} "
      ."&& cd $ENV{CRUNCH_TMP} ";
    $command .= "&& exec arv-mount --by-id --allow-other $ENV{TASK_KEEPMOUNT} --exec ";
      my $cidfile = "$ENV{CRUNCH_TMP}/$ENV{TASK_UUID}.cid";
      $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -cgroup-parent=docker -cgroup-cid=$cidfile -poll=10000 ";
      $command .= "$docker_bin run --rm=true --attach=stdout --attach=stderr --attach=stdin -i --user=crunch --cidfile=$cidfile --sig-proxy ";
      # Dynamically configure the container to use the host system as its
      # DNS server.  Get the host's global addresses from the ip command,
      # and turn them into docker --dns options using gawk.
          q{$(ip -o address show scope global |
              gawk 'match($4, /^([0-9\.:]+)\//, x){print "--dns", x[1]}') };
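      # For example, if the host's global addresses were 10.0.0.5 and
      # fd00::5, the shell substitution above would expand to something
      # like "--dns 10.0.0.5 --dns fd00::5" when the container starts.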
      # The source tree and $destdir directory (which we have
      # installed on the worker host) are available in the container,
      # under the same path.
      $command .= "--volume=\Q$ENV{CRUNCH_SRC}:$ENV{CRUNCH_SRC}:ro\E ";
      $command .= "--volume=\Q$ENV{CRUNCH_INSTALL}:$ENV{CRUNCH_INSTALL}:ro\E ";

      # Currently, we make arv-mount's mount point appear at /keep
      # inside the container (instead of using the same path as the
      # host like we do with CRUNCH_SRC and CRUNCH_INSTALL). However,
      # crunch scripts and utilities must not rely on this. They must
      # use $TASK_KEEPMOUNT.
      $command .= "--volume=\Q$ENV{TASK_KEEPMOUNT}:/keep:ro\E ";
      $ENV{TASK_KEEPMOUNT} = "/keep";

      # TASK_WORK is almost exactly like a docker data volume: it
      # starts out empty, is writable, and persists until no
      # containers use it any more. We don't use --volumes-from to
      # share it with other containers: it is only accessible to this
      # task, and it goes away when this task stops.
      # However, a docker data volume is writable only by root unless
      # the mount point already happens to exist in the container with
      # different permissions. Therefore, we [1] assume /tmp already
      # exists in the image and is writable by the crunch user; [2]
      # avoid putting TASK_WORK inside CRUNCH_TMP (which won't be
      # writable if they are created by docker while setting up the
      # other --volumes); and [3] create $TASK_WORK inside the
      # container using $build_script.
      $command .= "--volume=/tmp ";
      $ENV{"TASK_WORK"} = "/tmp/crunch-job-task-work/$childslotname";
      $ENV{"HOME"} = $ENV{"TASK_WORK"};
      $ENV{"TASK_TMPDIR"} = $ENV{"TASK_WORK"}; # deprecated

      # TODO: Share a single JOB_WORK volume across all task
      # containers on a given worker node, and delete it when the job
      # ends (and, in case that doesn't work, when the next job
      # For now, use the same approach as TASK_WORK above.
      $ENV{"JOB_WORK"} = "/tmp/crunch-job-work";

      while (my ($env_key, $env_val) = each %ENV)
        if ($env_key =~ /^(ARVADOS|CRUNCH|JOB|TASK)_/) {
          $command .= "--env=\Q$env_key=$env_val\E ";
      $command .= "--env=\QHOME=$ENV{HOME}\E ";
      $command .= "\Q$docker_hash\E ";
      $command .= "stdbuf --output=0 --error=0 ";
      $command .= "perl - $ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
      $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -poll=10000 ";
      $command .= "stdbuf --output=0 --error=0 ";
      $command .= "perl - $ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};

    my @execargs = ('bash', '-c', $command);
    srun (\@srunargs, \@execargs, undef, $build_script);
    # exec() failed, we assume nothing happened.
    die "srun() failed on build script\n";

  if (!defined $childpid)

  $proc{$childpid} = { jobstep => $id,
                       jobstepname => "$job_id.$id.$childpid",
  croak ("assert failed: \$slot[$childslot]->{'pid'} exists") if exists $slot[$childslot]->{pid};
  $slot[$childslot]->{pid} = $childpid;

  Log ($id, "job_task ".$Jobstep->{'arvados_task'}->{'uuid'});
  Log ($id, "child $childpid started on $childslotname");
  $Jobstep->{starttime} = time;
  $Jobstep->{node} = $childnode->{name};
  $Jobstep->{slotindex} = $childslot;
  delete $Jobstep->{stderr};
  delete $Jobstep->{finishtime};

  $Jobstep->{'arvados_task'}->{started_at} = strftime "%Y-%m-%dT%H:%M:%SZ", gmtime($Jobstep->{starttime});
  $Jobstep->{'arvados_task'}->save;

  splice @jobstep_todo, $todo_ptr, 1;

  $progress_is_dirty = 1;

      (@slot > @freeslot && $todo_ptr+1 > $#jobstep_todo))
    last THISROUND if $main::please_freeze;
    if ($main::please_info)
      $main::please_info = 0;
      create_output_collection();
      update_progress_stats();
      check_refresh_wanted();
      update_progress_stats();
      select (undef, undef, undef, 0.1);
    elsif (time - $progress_stats_updated >= 30)
      update_progress_stats();

    if (($thisround_failed_multiple >= 8 && $thisround_succeeded == 0) ||
        ($thisround_failed_multiple >= 16 && $thisround_failed_multiple > $thisround_succeeded))
      my $message = "Repeated failure rate too high ($thisround_failed_multiple/"
          .($thisround_failed+$thisround_succeeded)
          .") -- giving up on this round";
      Log (undef, $message);

    # move slots from freeslot to holdslot (or back to freeslot) if necessary
    for (my $i=$#freeslot; $i>=0; $i--) {
      if ($slot[$freeslot[$i]]->{node}->{hold_until} > scalar time) {
        push @holdslot, (splice @freeslot, $i, 1);
    for (my $i=$#holdslot; $i>=0; $i--) {
      if ($slot[$holdslot[$i]]->{node}->{hold_until} <= scalar time) {
        push @freeslot, (splice @holdslot, $i, 1);

    # give up if no nodes are succeeding
    if (!grep { $_->{node}->{losing_streak} == 0 &&
                $_->{node}->{hold_count} < 4 } @slot) {
      my $message = "Every node has failed -- giving up on this round";
      Log (undef, $message);

push @freeslot, splice @holdslot;
map { $slot[$freeslot[$_]]->{node}->{losing_streak} = 0 } (0..$#freeslot);

Log (undef, "wait for last ".(scalar keys %proc)." children to finish");
  if ($main::please_continue) {
    $main::please_continue = 0;
  $main::please_info = 0, freeze(), create_output_collection(), save_meta(1) if $main::please_info;
  check_refresh_wanted();
  update_progress_stats();
  select (undef, undef, undef, 0.1);
  killem (keys %proc) if $main::please_freeze;

update_progress_stats();
freeze_if_want_freeze();

if (!defined $main::success)
      $thisround_succeeded == 0 &&
      ($thisround_failed == 0 || $thisround_failed > 4))
    my $message = "stop because $thisround_failed tasks failed and none succeeded";
    Log (undef, $message);

goto ONELEVEL if !defined $main::success;

release_allocation();
my $collated_output = &create_output_collection();

if (!$collated_output) {
  Log (undef, "Failed to write output collection");
  Log(undef, "output hash " . $collated_output);
  $Job->update_attributes('output' => $collated_output);

Log (undef, "finish");

if ($collated_output && $main::success) {
  $final_state = 'Complete';
  $final_state = 'Failed';
$Job->update_attributes('state' => $final_state);

exit (($final_state eq 'Complete') ? 0 : 1);

sub update_progress_stats
  $progress_stats_updated = time;
  return if !$progress_is_dirty;
  my ($todo, $done, $running) = (scalar @jobstep_todo,
                                 scalar @jobstep_done,
                                 scalar @slot - scalar @freeslot - scalar @holdslot);
  $Job->{'tasks_summary'} ||= {};
  $Job->{'tasks_summary'}->{'todo'} = $todo;
  $Job->{'tasks_summary'}->{'done'} = $done;
  $Job->{'tasks_summary'}->{'running'} = $running;
  $Job->update_attributes('tasks_summary' => $Job->{'tasks_summary'});
  Log (undef, "status: $done done, $running running, $todo todo");
  $progress_is_dirty = 0;

  my $pid = waitpid (-1, WNOHANG);
  return 0 if $pid <= 0;

  my $whatslot = ($slot[$proc{$pid}->{slot}]->{node}->{name}
                  . $slot[$proc{$pid}->{slot}]->{cpu});
  my $jobstepid = $proc{$pid}->{jobstep};
  my $elapsed = time - $proc{$pid}->{time};
  my $Jobstep = $jobstep[$jobstepid];

  my $childstatus = $?;
  my $exitvalue = $childstatus >> 8;
  my $exitinfo = "exit ".exit_status_s($childstatus);
  $Jobstep->{'arvados_task'}->reload;
  my $task_success = $Jobstep->{'arvados_task'}->{success};

  Log ($jobstepid, "child $pid on $whatslot $exitinfo success=$task_success");

  if (!defined $task_success) {
    # task did not indicate one way or the other --> fail
    $Jobstep->{'arvados_task'}->{success} = 0;
    $Jobstep->{'arvados_task'}->save;

    $temporary_fail ||= $Jobstep->{node_fail};
    $temporary_fail ||= ($exitvalue == 111);

    ++$thisround_failed;
    ++$thisround_failed_multiple if $Jobstep->{'failures'} >= 1;

    # Check for signs of a failed or misconfigured node
    if (++$slot[$proc{$pid}->{slot}]->{node}->{losing_streak} >=
        2+$slot[$proc{$pid}->{slot}]->{node}->{ncpus}) {
      # Don't count this against jobstep failure thresholds if this
      # node is already suspected faulty and srun exited quickly
      if ($slot[$proc{$pid}->{slot}]->{node}->{hold_until} &&
        Log ($jobstepid, "blaming failure on suspect node " .
             $slot[$proc{$pid}->{slot}]->{node}->{name});
        $temporary_fail ||= 1;
      ban_node_by_slot($proc{$pid}->{slot});

    Log ($jobstepid, sprintf('failure (#%d, %s) after %d seconds',
                             ++$Jobstep->{'failures'},
                             $temporary_fail ? 'temporary ' : 'permanent',

    if (!$temporary_fail || $Jobstep->{'failures'} >= 3) {
      # Give up on this task, and the whole job
      $main::please_freeze = 1;
    # Put this task back on the todo queue
    push @jobstep_todo, $jobstepid;
    $Job->{'tasks_summary'}->{'failed'}++;

    ++$thisround_succeeded;
    $slot[$proc{$pid}->{slot}]->{node}->{losing_streak} = 0;
    $slot[$proc{$pid}->{slot}]->{node}->{hold_until} = 0;
    push @jobstep_done, $jobstepid;
    Log ($jobstepid, "success in $elapsed seconds");

  $Jobstep->{exitcode} = $childstatus;
  $Jobstep->{finishtime} = time;
  $Jobstep->{'arvados_task'}->{finished_at} = strftime "%Y-%m-%dT%H:%M:%SZ", gmtime($Jobstep->{finishtime});
  $Jobstep->{'arvados_task'}->save;
  process_stderr ($jobstepid, $task_success);
  Log ($jobstepid, "output " . $Jobstep->{'arvados_task'}->{output});

  close $reader{$jobstepid};
  delete $reader{$jobstepid};
  delete $slot[$proc{$pid}->{slot}]->{pid};
  push @freeslot, $proc{$pid}->{slot};

  if ($task_success) {
    my $newtask_list = [];
    my $newtask_results;
      $newtask_results = api_call(
          'created_by_job_task_uuid' => $Jobstep->{'arvados_task'}->{uuid}
        'order' => 'qsequence',
        'offset' => scalar(@$newtask_list),
      push(@$newtask_list, @{$newtask_results->{items}});
    } while (@{$newtask_results->{items}});
    foreach my $arvados_task (@$newtask_list) {
        'level' => $arvados_task->{'sequence'},
        'arvados_task' => $arvados_task
      push @jobstep, $jobstep;
      push @jobstep_todo, $#jobstep;

  $progress_is_dirty = 1;

sub check_refresh_wanted
  my @stat = stat $ENV{"CRUNCH_REFRESH_TRIGGER"};
  if (@stat && $stat[9] > $latest_refresh) {
    $latest_refresh = scalar time;
    my $Job2 = api_call("jobs/get", uuid => $jobspec);
    for my $attr ('cancelled_at',
                  'cancelled_by_user_uuid',
                  'cancelled_by_client_uuid',
      $Job->{$attr} = $Job2->{$attr};
    if ($Job->{'state'} ne "Running") {
      if ($Job->{'state'} eq "Cancelled") {
        Log (undef, "Job cancelled at " . $Job->{'cancelled_at'} . " by user " . $Job->{'cancelled_by_user_uuid'});
        Log (undef, "Job state unexpectedly changed to " . $Job->{'state'});
      $main::please_freeze = 1;

  # return if the kill list was checked <4 seconds ago
  if (defined $squeue_kill_checked && $squeue_kill_checked > time - 4)
  $squeue_kill_checked = time;

  # use killem() on procs whose killtime is reached
    if (exists $proc{$_}->{killtime}
        && $proc{$_}->{killtime} <= time)

  # return if the squeue was checked <60 seconds ago
  if (defined $squeue_checked && $squeue_checked > time - 60)
  $squeue_checked = time;

    # here is an opportunity to check for mysterious problems with local procs

  # get a list of steps still running
  my @squeue = `squeue -s -h -o '%i %j' && echo ok`;
  if ($squeue[-1] ne "ok")

  # which of my jobsteps are running, according to squeue?
    if (/^(\d+)\.(\d+) (\S+)/)
      if ($1 eq $ENV{SLURM_JOBID})

  # which of my active child procs (>60s old) were not mentioned by squeue?
  foreach (keys %proc)
    if ($proc{$_}->{time} < time - 60
        && !exists $ok{$proc{$_}->{jobstepname}}
        && !exists $proc{$_}->{killtime})
      # kill this proc if it hasn't exited in 30 seconds
      $proc{$_}->{killtime} = time + 30;

sub release_allocation
    Log (undef, "release job allocation");
    system "scancel $ENV{SLURM_JOBID}";

  foreach my $job (keys %reader)
    while (0 < sysread ($reader{$job}, $buf, 8192))
      print STDERR $buf if $ENV{CRUNCH_DEBUG};
      $jobstep[$job]->{stderr} .= $buf;
      preprocess_stderr ($job);
      if (length ($jobstep[$job]->{stderr}) > 16384)
        substr ($jobstep[$job]->{stderr}, 0, 8192) = "";

sub preprocess_stderr
  while ($jobstep[$job]->{stderr} =~ /^(.*?)\n/) {
    substr $jobstep[$job]->{stderr}, 0, 1+length($line), "";
    Log ($job, "stderr $line");
    if ($line =~ /srun: error: (SLURM job $ENV{SLURM_JOB_ID} has expired|Unable to confirm allocation for job $ENV{SLURM_JOB_ID})/) {
      $main::please_freeze = 1;
    elsif ($line =~ /srun: error: (Node failure on|Unable to create job step) /) {
      $jobstep[$job]->{node_fail} = 1;
      ban_node_by_slot($jobstep[$job]->{slotindex});

  my $task_success = shift;
  preprocess_stderr ($job);
    Log ($job, "stderr $_");
  } split ("\n", $jobstep[$job]->{stderr});

  my ($keep, $child_out, $output_block);
  my $cmd = "arv-get \Q$hash\E";
  open($keep, '-|', $cmd) or die "fetch_block: $cmd: $!";
    my $bytes = sysread($keep, $buf, 1024 * 1024);
    if (!defined $bytes) {
      die "reading from arv-get: $!";
    } elsif ($bytes == 0) {
      # sysread returns 0 at the end of the pipe.
      # some bytes were read into buf.
      $output_block .= $buf;
  return $output_block;
# create_output_collection generates a new collection containing the
# output of each successfully completed task, and returns the
# portable_data_hash for the new collection.
sub create_output_collection
  Log (undef, "collate");

  my ($child_out, $child_in);
  my $pid = open2($child_out, $child_in, 'python', '-c',
        'import arvados; ' .
        'print arvados.api()' .
        '.create(body={"manifest_text":sys.stdin.read()})' .
        '.execute()["portable_data_hash"]'

    next if (!exists $_->{'arvados_task'}->{'output'} ||
             !$_->{'arvados_task'}->{'success'});
    my $output = $_->{'arvados_task'}->{output};
    if ($output !~ /^[0-9a-f]{32}(\+\S+)*$/)
      print $child_in $output;
    elsif (defined (my $outblock = fetch_block ($output)))
      print $child_in $outblock;
      Log (undef, "XXX fetch_block($output) failed XXX");

  my $s = IO::Select->new($child_out);
  if ($s->can_read(120)) {
    sysread($child_out, $joboutput, 64 * 1024 * 1024);
    # TODO: Ensure exit status == 0.
    Log (undef, "timed out while creating output collection");
    # TODO: kill $pid instead of waiting, now that we've decided to
    # ignore further output.

    my $sig = 2;                # SIGINT first
    if (exists $proc{$_}->{"sent_$sig"} &&
        time - $proc{$_}->{"sent_$sig"} > 4)
      $sig = 15;                # SIGTERM if SIGINT doesn't work
    if (exists $proc{$_}->{"sent_$sig"} &&
        time - $proc{$_}->{"sent_$sig"} > 4)
      $sig = 9;                 # SIGKILL if SIGTERM doesn't work
    if (!exists $proc{$_}->{"sent_$sig"})
      Log ($proc{$_}->{jobstep}, "sending 2x signal $sig to pid $_");
      select (undef, undef, undef, 0.1);
      kill $sig, $_; # srun wants two SIGINT to really interrupt
      $proc{$_}->{"sent_$sig"} = time;
      $proc{$_}->{"killedafter"} = time - $proc{$_}->{"time"};
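      # Net effect of the escalation above (sketch): a proc first gets
      # SIGINT; if it is still listed more than 4 seconds after that was
      # sent, the next killem() pass sends SIGTERM, and 4 seconds after
      # that, SIGKILL.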
      vec($bits,fileno($_),1) = 1;

# Send log output to Keep via arv-put.
#
# $log_pipe_in and $log_pipe_out are the input and output filehandles to the arv-put pipe.
# $log_pipe_pid is the pid of the arv-put subprocess.
#
# The only functions that should access these variables directly are:
#
# log_writer_start($logfilename)
#   Starts an arv-put pipe, reading data on stdin and writing it to
#   a $logfilename file in an output collection.
#
# log_writer_send($txt)
#   Writes $txt to the output log collection.
#
# log_writer_finish()
#   Closes the arv-put pipe and returns the output that it produces.
#
# log_writer_is_active()
#   Returns a true value if there is currently a live arv-put
#   process, false otherwise.
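#
# A typical sequence, sketched from the call sites elsewhere in this file
# (the variable names here are illustrative):
#
#   log_writer_start("$job_id.log.txt");       # start piping to arv-put
#   log_writer_send($timestamped_message);     # done inside Log()
#   my $log_locator = log_writer_finish();     # locator printed by arv-put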
my ($log_pipe_in, $log_pipe_out, $log_pipe_pid);

sub log_writer_start($)
  my $logfilename = shift;
  $log_pipe_pid = open2($log_pipe_out, $log_pipe_in,
                        'arv-put', '--portable-data-hash',
                        '--filename', $logfilename,

sub log_writer_send($)
  print $log_pipe_in $txt;

sub log_writer_finish()
  return unless $log_pipe_pid;

  close($log_pipe_in);

  my $s = IO::Select->new($log_pipe_out);
  if ($s->can_read(120)) {
    sysread($log_pipe_out, $arv_put_output, 1024);
    chomp($arv_put_output);
    Log (undef, "timed out reading from 'arv-put'");

  waitpid($log_pipe_pid, 0);
  $log_pipe_pid = $log_pipe_in = $log_pipe_out = undef;
  Log (undef, "log_writer_finish: arv-put exited ".exit_status_s($?))

  return $arv_put_output;

sub log_writer_is_active() {
  return $log_pipe_pid;
sub Log                         # ($jobstep_id, $logmessage)
  if ($_[1] =~ /\n/) {
    for my $line (split (/\n/, $_[1])) {
  my $fh = select STDERR; $|=1; select $fh;
  my $message = sprintf ("%s %d %s %s", $job_id, $$, @_);
  $message =~ s{([^ -\176])}{"\\" . sprintf ("%03o", ord($1))}ge;
  if (log_writer_is_active() || -t STDERR) {
    my @gmtime = gmtime;
    $datetime = sprintf ("%04d-%02d-%02d_%02d:%02d:%02d",
                         $gmtime[5]+1900, $gmtime[4]+1, @gmtime[3,2,1,0]);
  print STDERR ((-t STDERR) ? ($datetime." ".$message) : $message);

  if (log_writer_is_active()) {
    log_writer_send($datetime . " " . $message);

  my ($package, $file, $line) = caller;
  my $message = "@_ at $file line $line\n";
  Log (undef, $message);

  freeze() if @jobstep_todo;
  create_output_collection() if @jobstep_todo;

  if ($Job->{'state'} eq 'Cancelled') {
    $Job->update_attributes('finished_at' => scalar gmtime);
    $Job->update_attributes('state' => 'Failed');

  my $justcheckpoint = shift;  # false if this will be the last meta saved
  return if $justcheckpoint;  # checkpointing is not relevant post-Warehouse.pm
  return unless log_writer_is_active();

  my $loglocator = log_writer_finish();
  Log (undef, "log manifest is $loglocator");
  $Job->{'log'} = $loglocator;
  $Job->update_attributes('log', $loglocator);

sub freeze_if_want_freeze
  if ($main::please_freeze)
    release_allocation();
      # kill some srun procs before freeze+stop
      map { $proc{$_} = {} } @_;
      killem (keys %proc);
      select (undef, undef, undef, 0.1);
      while (($died = waitpid (-1, WNOHANG)) > 0)
        delete $proc{$died};
    create_output_collection();

  Log (undef, "Freeze not implemented");

  croak ("Thaw not implemented");

  $s =~ s{\\(.)}{$1 eq "n" ? "\n" : $1}ge;

  my $srunargs = shift;
  my $execargs = shift;
  my $opts = shift || {};
  my $args = $have_slurm ? [@$srunargs, @$execargs] : $execargs;

  $Data::Dumper::Terse = 1;
  $Data::Dumper::Indent = 0;
  my $show_cmd = Dumper($args);
  $show_cmd =~ s/(TOKEN\\*=)[^\s\']+/${1}[...]/g;
  $show_cmd =~ s/\n/ /g;
  warn "starting: $show_cmd\n";

  if (defined $stdin) {
    my $child = open STDIN, "-|";
    defined $child or die "no fork: $!";
      print $stdin or die $!;
      close STDOUT or die $!;

  return system (@$args) if $opts->{fork};

  warn "ENV size is ".length(join(" ",%ENV));
  die "exec failed: $!: @$args";

sub ban_node_by_slot {
  # Don't start any new jobsteps on this node for 60 seconds
  $slot[$slotid]->{node}->{hold_until} = 60 + scalar time;
  $slot[$slotid]->{node}->{hold_count}++;
  Log (undef, "backing off node " . $slot[$slotid]->{node}->{name} . " for 60 seconds");

  my ($lockfile, $error_message) = @_;
  open L, ">", $lockfile or croak("$lockfile: $!");
  if (!flock L, LOCK_EX|LOCK_NB) {
    croak("Can't lock $lockfile: $error_message\n");

sub find_docker_image {
  # Given a Keep locator, check to see if it contains a Docker image.
  # If so, return its stream name and Docker hash.
  # If not, return undef for both values.
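  # Illustrative only: for a manifest such as
  #   . <block locator> 0:12345:<64 hex digits>.tar
  # this returns (".", "<64 hex digits>") -- a single stream containing a
  # single tar file named after the Docker image hash.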
  my $locator = shift;
  my ($streamname, $filename);
  my $image = api_call("collections/get", uuid => $locator);

  foreach my $line (split(/\n/, $image->{manifest_text})) {
    my @tokens = split(/\s+/, $line);
      $streamname = shift(@tokens);
      foreach my $filedata (grep(/^\d+:\d+:/, @tokens)) {
        if (defined($filename)) {
          return (undef, undef);  # More than one file in the Collection.
          $filename = (split(/:/, $filedata, 3))[2];

  if (defined($filename) and ($filename =~ /^([0-9A-Fa-f]{64})\.tar$/)) {
    return ($streamname, $1);
  return (undef, undef);

  # Calculate the number of times an operation should be retried,
  # assuming exponential backoff, and that we're willing to retry as
  # long as tasks have been running. Enforce a minimum of 3 retries.
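  # Example of the arithmetic below: if tasks have been running for about
  # an hour, $timediff is ~3600 seconds, so int(log(3600)/log(2)) allows 11
  # retries; for a job that has just started, the 3-retry floor applies.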
  my ($starttime, $endtime, $timediff, $retries);
    $starttime = $jobstep[0]->{starttime};
    $endtime = $jobstep[-1]->{finishtime};
  if (!defined($starttime)) {
  } elsif (!defined($endtime)) {
    $timediff = time - $starttime;
    $timediff = ($endtime - $starttime) - (time - $endtime);
  if ($timediff > 0) {
    $retries = int(log($timediff) / log(2));
    $retries = 1;  # Use the minimum.
  return ($retries > 3) ? $retries : 3;

  # Pass in two function references.
  # This method will be called with the remaining arguments.
  # If it dies, retry it with exponential backoff until it succeeds,
  # or until the current retry_count is exhausted. After each failure
  # that can be retried, the second function will be called with
  # the current try count (0-based), next try time, and error message.
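  # A hedged usage sketch (this is how api_call() below drives it; the
  # callback body here is illustrative):
  #
  #   retry_op(sub { $method->execute(@_); },
  #            sub { my ($try, $next_try_at, $err) = @_;
  #                  Log(undef, "attempt $try failed: $err"); },
  #            @remaining_args);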
  my $operation = shift;
  my $retry_callback = shift;
  my $retries = retry_count();
  foreach my $try_count (0..$retries) {
    my $next_try = time + (2 ** $try_count);
    my $result = eval { $operation->(@_); };
    } elsif ($try_count < $retries) {
      $retry_callback->($try_count, $next_try, $@);
      my $sleep_time = $next_try - time;
      sleep($sleep_time) if ($sleep_time > 0);
  # Ensure the error message ends in a newline, so Perl doesn't add
  # retry_op's line number to it.

  # Pass in a /-separated API method name, and arguments for it.
  # This function will call that method, retrying as needed until
  # the current retry_count is exhausted, with a log on the first failure.
  my $method_name = shift;
  my $log_api_retry = sub {
    my ($try_count, $next_try_at, $errmsg) = @_;
    $errmsg =~ s/\s*\bat \Q$0\E line \d+\.?\s*//;
    $errmsg =~ s/\s/ /g;
    $errmsg =~ s/\s+$//;
    if ($next_try_at < time) {
      $retry_msg = "Retrying.";
      my $next_try_fmt = strftime "%Y-%m-%dT%H:%M:%SZ", gmtime($next_try_at);
      $retry_msg = "Retrying at $next_try_fmt.";
    Log(undef, "API method $method_name failed: $errmsg. $retry_msg");
  foreach my $key (split(/\//, $method_name)) {
    $method = $method->{$key};
  return retry_op(sub { $method->execute(@_); }, $log_api_retry, @_);

  # Given a $?, return a human-readable exit code string like "0" or
  # "1" or "0 with signal 1" or "1 with signal 11".
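  # For example: exit_status_s(0) is "0"; exit_status_s(256) is "1";
  # exit_status_s(11) is "0 with signal 11".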
  my $exitcode = shift;
  my $s = $exitcode >> 8;
  if ($exitcode & 0x7f) {
    $s .= " with signal " . ($exitcode & 0x7f);
  if ($exitcode & 0x80) {
    $s .= " with core dump";

sub handle_readall {
  # Pass in a glob reference to a file handle.
  # Read all its contents and return them as a string.
  my $fh_glob_ref = shift;
  return <$fh_glob_ref>;

sub tar_filename_n {
  return sprintf("%s/git.%s.%d.tar", $ENV{CRUNCH_TMP}, $job_id, $n);
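  # e.g. with CRUNCH_TMP=/tmp/crunch-job, tar_filename_n(1) returns
  # "/tmp/crunch-job/git.<job uuid>.1.tar".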
sub add_git_archive {
  # Pass in a git archive command as a string or list, a la system().
  # This method will save its output to be included in the archive sent to the
  if (!open(GIT_ARCHIVE, ">", tar_filename_n($git_tar_count))) {
    croak("Failed to save git archive: $!");
  my $git_pid = open2(">&GIT_ARCHIVE", $git_input, @_);
  waitpid($git_pid, 0);
    croak("Failed to save git archive: git exited " . exit_status_s($?));

sub combined_git_archive {
  # Combine all saved tar archives into a single archive, then return its
  # contents in a string.  Return undef if no archives have been saved.
  if ($git_tar_count < 1) {
  my $base_tar_name = tar_filename_n(1);
  foreach my $tar_to_append (map { tar_filename_n($_); } (2..$git_tar_count)) {
    my $tar_exit = system("tar", "-Af", $base_tar_name, $tar_to_append);
    if ($tar_exit != 0) {
      croak("Error preparing build archive: tar -A exited " .
            exit_status_s($tar_exit));
  if (!open(GIT_TAR, "<", $base_tar_name)) {
    croak("Could not open build archive: $!");
  my $tar_contents = handle_readall(\*GIT_TAR);
  return $tar_contents;

# This is crunch-job's internal dispatch script.  crunch-job running on the API
# server invokes this script on individual compute nodes, or localhost if we're
# running a job locally.  It gets called in two modes:
#
# * No arguments: Installation mode.  Read a tar archive from the DATA
#   file handle; it includes the Crunch script's source code, and
#   maybe SDKs as well.  Those should be installed in the proper
#   locations.  This runs outside of any Docker container, so don't try to
#   introspect Crunch's runtime environment.
#
# * With arguments: Crunch script run mode.  This script should set up the
#   environment, then run the command specified in the arguments.  This runs
#   inside any Docker container.

use File::Path qw( make_path remove_tree );
use POSIX qw(getcwd);

# Map SDK subdirectories to the path environments they belong to.
my %SDK_ENVVARS = ("perl/lib" => "PERLLIB", "ruby/lib" => "RUBYLIB");

my $destdir = $ENV{"CRUNCH_SRC"};
my $commit = $ENV{"CRUNCH_SRC_COMMIT"};
my $repo = $ENV{"CRUNCH_SRC_URL"};
my $install_dir = $ENV{"CRUNCH_INSTALL"} || (getcwd() . "/opt");
my $job_work = $ENV{"JOB_WORK"};
my $task_work = $ENV{"TASK_WORK"};

for my $dir ($destdir, $job_work, $task_work) {
    -e $dir or die "Failed to create temporary directory ($dir): $!";
  remove_tree($task_work, {keep_root => 1});

open(STDOUT_ORIG, ">&", STDOUT);
open(STDERR_ORIG, ">&", STDERR);
open(STDOUT, ">>", "$destdir.log");
open(STDERR, ">&", STDOUT);

### Crunch script run mode
  # We want to do routine logging during task 0 only.  This gives the user
  # the information they need, but avoids repeating the information for every
  if ($ENV{TASK_SEQUENCE} eq "0") {
      printf STDERR_ORIG "[Crunch] $msg\n", @_;
  my $python_src = "$install_dir/python";
  my $venv_dir = "$job_work/.arvados.venv";
  my $venv_built = -e "$venv_dir/bin/activate";
  if ((!$venv_built) and (-d $python_src) and can_run("virtualenv")) {
    shell_or_die("virtualenv", "--quiet", "--system-site-packages",
                 "--python=python2.7", $venv_dir);
    shell_or_die("$venv_dir/bin/pip", "--quiet", "install", $python_src);
      $Log->("Built Python SDK virtualenv");

    $Log->("Running in Python SDK virtualenv");
    $pkgs = `(\Q$venv_dir/bin/pip\E freeze 2>/dev/null | grep arvados) || dpkg-query --show '*arvados*'`;
    my $orig_argv = join(" ", map { quotemeta($_); } @ARGV);
    @ARGV = ("/bin/sh", "-ec",
             ". \Q$venv_dir/bin/activate\E; exec $orig_argv");
  } elsif (-d $python_src) {
    $Log->("Warning: virtualenv not found inside Docker container default " .
           "\$PATH. Can't install Python SDK.");

    $pkgs = `(pip freeze 2>/dev/null | grep arvados) || dpkg-query --show '*arvados*'`;

    $Log->("Using Arvados SDK:");
    foreach my $line (split /\n/, $pkgs) {
    $Log->("Arvados SDK packages not found");
  while (my ($sdk_dir, $sdk_envkey) = each(%SDK_ENVVARS)) {
    my $sdk_path = "$install_dir/$sdk_dir";
      if ($ENV{$sdk_envkey}) {
        $ENV{$sdk_envkey} = "$sdk_path:" . $ENV{$sdk_envkey};
        $ENV{$sdk_envkey} = $sdk_path;
      $Log->("Arvados SDK added to %s", $sdk_envkey);

  open(STDOUT, ">&", STDOUT_ORIG);
  open(STDERR, ">&", STDERR_ORIG);
  die "Cannot exec `@ARGV`: $!";

### Installation mode
open L, ">", "$destdir.lock" or die "$destdir.lock: $!";
if (readlink ("$destdir.commit") eq $commit && -d $destdir) {
  # This version already installed -> nothing to do.

unlink "$destdir.commit";
open TARX, "|-", "tar", "-xC", $destdir;
  die "'tar -xC $destdir' exited $?: $!";

my $sdk_root = "$destdir/.arvados.sdk/sdk";
  foreach my $sdk_lang (("python",
                         map { (split /\//, $_, 2)[0]; } keys(%SDK_ENVVARS))) {
    if (-d "$sdk_root/$sdk_lang") {
      if (!rename("$sdk_root/$sdk_lang", "$install_dir/$sdk_lang")) {
        die "Failed to install $sdk_lang SDK: $!";

my $python_dir = "$install_dir/python";
if ((-d $python_dir) and can_run("python2.7") and
    (system("python2.7", "$python_dir/setup.py", "--quiet", "egg_info") != 0)) {
  # egg_info failed, probably when it asked git for a build tag.
  # Specify no build tag.
  open(my $pysdk_cfg, ">>", "$python_dir/setup.cfg");
  print $pysdk_cfg "\n[egg_info]\ntag_build =\n";

if (-e "$destdir/crunch_scripts/install") {
  shell_or_die ("$destdir/crunch_scripts/install", $install_dir);
} elsif (!-e "./install.sh" && -e "./tests/autotests.sh") {
  shell_or_die ("./tests/autotests.sh", $install_dir);
} elsif (-e "./install.sh") {
  shell_or_die ("./install.sh", $install_dir);

unlink "$destdir.commit.new";
symlink ($commit, "$destdir.commit.new") or die "$destdir.commit.new: $!";
rename ("$destdir.commit.new", "$destdir.commit") or die "$destdir.commit: $!";

  my $command_name = shift;
  open(my $which, "-|", "which", $command_name);
  while (<$which>) { }

  if ($ENV{"DEBUG"}) {
    print STDERR "@_\n";
  if (system (@_) != 0) {
    my $exitstatus = sprintf("exit %d signal %d", $? >> 8, $? & 0x7f);
    open STDERR, ">&STDERR_ORIG";
    system ("cat $destdir.log >&2");
    die "@_ failed ($err): $exitstatus";