X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/2340ebeaf34778645e6071e8b077df50e5ae3df5..db4458117af1d4eff14760dfd05e2e6e289fb9c1:/sdk/cli/bin/crunch-job

diff --git a/sdk/cli/bin/crunch-job b/sdk/cli/bin/crunch-job
index 48a6c9dea7..ca2e8c49a5 100755
--- a/sdk/cli/bin/crunch-job
+++ b/sdk/cli/bin/crunch-job
@@ -76,6 +76,7 @@ use strict;
 use POSIX ':sys_wait_h';
 use Fcntl qw(F_GETFL F_SETFL O_NONBLOCK);
 use Arvados;
+use Digest::MD5 qw(md5_hex);
 use Getopt::Long;
 use IPC::Open2;
 use IO::Select;
@@ -95,15 +96,6 @@ $ENV{"CRUNCH_INSTALL"} = "$ENV{CRUNCH_TMP}/opt";
 $ENV{"CRUNCH_WORK"} = $ENV{"JOB_WORK"}; # deprecated
 mkdir ($ENV{"JOB_WORK"});
 
-my $arv_cli;
-
-if (defined $ENV{"ARV_CLI"}) {
-  $arv_cli = $ENV{"ARV_CLI"};
-}
-else {
-  $arv_cli = 'arv';
-}
-
 my $force_unlock;
 my $git_dir;
 my $jobspec;
@@ -139,7 +131,7 @@ $SIG{'USR2'} = sub
 
 my $arv = Arvados->new('apiVersion' => 'v1');
 
-my $metastream;
+my $local_logfile;
 
 my $User = $arv->{'users'}->{'current'}->execute;
 
@@ -185,7 +177,7 @@ else
 $job_id = $Job->{'uuid'};
 
 my $keep_logfile = $job_id . '.log.txt';
-my $local_logfile = File::Temp->new();
+$local_logfile = File::Temp->new();
 
 $Job->{'runtime_constraints'} ||= {};
 $Job->{'runtime_constraints'}->{'max_tasks_per_node'} ||= 0;
@@ -498,7 +490,38 @@ if (!$have_slurm)
   must_lock_now("$ENV{CRUNCH_TMP}/.lock", "a job is already running here.");
 }
 
-
+# If this job requires a Docker image, install that.
+my $docker_bin = "/usr/bin/docker.io";
+my ($docker_locator, $docker_hash);
+if ($docker_locator = $Job->{docker_image_locator}) {
+  $docker_hash = find_docker_hash($docker_locator);
+  if (!$docker_hash)
+  {
+    croak("No Docker image hash found from locator $docker_locator");
+  }
+  my $docker_install_script = qq{
+if ! $docker_bin images -q --no-trunc | grep -qxF \Q$docker_hash\E; then
+    arv-get \Q$docker_locator/$docker_hash.tar\E | $docker_bin load
+fi
+};
+  my $docker_pid = fork();
+  if ($docker_pid == 0)
+  {
+    srun (["srun", "--nodelist=" . join(',', @node)],
+          ["/bin/sh", "-ec", $docker_install_script]);
+    exit ($?);
+  }
+  while (1)
+  {
+    last if $docker_pid == waitpid (-1, WNOHANG);
+    freeze_if_want_freeze ($docker_pid);
+    select (undef, undef, undef, 0.1);
+  }
+  if ($? != 0)
+  {
+    croak("Installing Docker image from $docker_locator returned exit code $?");
+  }
+}
 
 foreach (qw (script script_version script_parameters runtime_constraints))
 {
@@ -590,6 +613,7 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
     $ENV{"TASK_SLOT_NODE"} = $slot[$childslot]->{node}->{name};
     $ENV{"TASK_SLOT_NUMBER"} = $slot[$childslot]->{cpu};
     $ENV{"TASK_WORK"} = $ENV{"JOB_WORK"}."/$id.$$";
+    $ENV{"HOME"} = $ENV{"TASK_WORK"};
     $ENV{"TASK_KEEPMOUNT"} = $ENV{"TASK_WORK"}.".keep";
     $ENV{"TASK_TMPDIR"} = $ENV{"TASK_WORK"}; # deprecated
     $ENV{"CRUNCH_NODE_SLOTS"} = $slot[$childslot]->{node}->{ncpus};
@@ -603,11 +627,10 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
         qw(-n1 -c1 -N1 -D), $ENV{'TMPDIR'},
         "--job-name=$job_id.$id.$$",
         );
-    my @execargs = qw(sh);
     my $build_script_to_send = "";
     my $command =
         "if [ -e $ENV{TASK_WORK} ]; then rm -rf $ENV{TASK_WORK}; fi; "
-        ."mkdir -p $ENV{JOB_WORK} $ENV{CRUNCH_TMP} $ENV{TASK_WORK} $ENV{TASK_KEEPMOUNT} "
+        ."mkdir -p $ENV{JOB_WORK} $ENV{CRUNCH_TMP} $ENV{TASK_WORK} $ENV{TASK_KEEPMOUNT}"
        ."&& cd $ENV{CRUNCH_TMP} ";
     if ($build_script)
     {
@@ -615,8 +638,48 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
       $command .=
          "&& perl -";
     }
-    $command .=
-        "&& exec arv-mount $ENV{TASK_KEEPMOUNT} --exec $ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
+    $command .= "&& exec arv-mount --allow-other $ENV{TASK_KEEPMOUNT} --exec ";
+    if ($docker_hash)
+    {
+      $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -cgroup-parent=docker -cgroup-cid=$ENV{TASK_WORK}/docker.cid -poll=10000 ";
+      $command .= "$docker_bin run --rm=true --attach=stdout --attach=stderr --user=crunch --cidfile=$ENV{TASK_WORK}/docker.cid ";
+      # Dynamically configure the container to use the host system as its
+      # DNS server. Get the host's global addresses from the ip command,
+      # and turn them into docker --dns options using gawk.
+      $command .=
+          q{$(ip -o address show scope global |
+            gawk 'match($4, /^([0-9\.:]+)\//, x){print "--dns", x[1]}') };
+      $command .= "--volume=\Q$ENV{CRUNCH_SRC}:/tmp/crunch-src:ro\E ";
+      $command .= "--volume=\Q$ENV{TASK_KEEPMOUNT}:/keep:ro\E ";
+      $command .= "--env=\QHOME=/home/crunch\E ";
+      while (my ($env_key, $env_val) = each %ENV)
+      {
+        if ($env_key =~ /^(ARVADOS|JOB|TASK)_/) {
+          if ($env_key eq "TASK_WORK") {
+            $command .= "--env=\QTASK_WORK=/tmp/crunch-job\E ";
+          }
+          elsif ($env_key eq "TASK_KEEPMOUNT") {
+            $command .= "--env=\QTASK_KEEPMOUNT=/keep\E ";
+          }
+          elsif ($env_key eq "CRUNCH_SRC") {
+            $command .= "--env=\QCRUNCH_SRC=/tmp/crunch-src\E ";
+          }
+          else {
+            $command .= "--env=\Q$env_key=$env_val\E ";
+          }
+        }
+      }
+      $command .= "--env=\QCRUNCH_NODE_SLOTS=$ENV{CRUNCH_NODE_SLOTS}\E ";
+      $command .= "\Q$docker_hash\E ";
+      $command .= "stdbuf --output=0 --error=0 ";
+      $command .= "/tmp/crunch-src/crunch_scripts/" . $Job->{"script"};
+    } else {
+      # Non-docker run
+      $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -poll=10000 ";
+      $command .= "stdbuf --output=0 --error=0 ";
+      $command .= "$ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
+    }
+    my @execargs = ('bash', '-c', $command);
     srun (\@srunargs, \@execargs,
           undef, $build_script_to_send);
     exit (111);
@@ -758,21 +821,39 @@ goto ONELEVEL if !defined $main::success;
 
 release_allocation();
 freeze();
+my $collated_output = &collate_output();
+
 if ($job_has_uuid) {
-  $Job->update_attributes('output' => &collate_output(),
-                          'running' => 0,
-                          'success' => $Job->{'output'} && $main::success,
+  $Job->update_attributes('running' => 0,
+                          'success' => $collated_output && $main::success,
                           'finished_at' => scalar gmtime)
 }
 
-if ($Job->{'output'})
+if ($collated_output)
 {
   eval {
-    my $manifest_text = `arv keep get ''\Q$Job->{'output'}\E`;
-    $arv->{'collections'}->{'create'}->execute('collection' => {
-      'uuid' => $Job->{'output'},
-      'manifest_text' => $manifest_text,
+    open(my $orig_manifest, '-|', 'arv-get', $collated_output)
+        or die "failed to get collated manifest: $!";
+    # Read the original manifest, and strip permission hints from it,
+    # so we can put the result in a Collection.
+    my @stripped_manifest_lines = ();
+    my $orig_manifest_text = '';
+    while (my $manifest_line = <$orig_manifest>) {
+      $orig_manifest_text .= $manifest_line;
+      my @words = split(/ /, $manifest_line, -1);
+      foreach my $ii (0..$#words) {
+        if ($words[$ii] =~ /^[0-9a-f]{32}\+/) {
+          $words[$ii] =~ s/\+A[0-9a-f]{40}@[0-9a-f]{8}\b//;
+        }
+      }
+      push(@stripped_manifest_lines, join(" ", @words));
+    }
+    my $stripped_manifest_text = join("", @stripped_manifest_lines);
+    my $output = $arv->{'collections'}->{'create'}->execute('collection' => {
+      'uuid' => md5_hex($stripped_manifest_text),
+      'manifest_text' => $orig_manifest_text,
     });
+    $Job->update_attributes('output' => $output->{uuid});
     if ($Job->{'output_is_persistent'}) {
       $arv->{'links'}->{'create'}->execute('link' => {
         'tail_kind' => 'arvados#user',
@@ -905,13 +986,19 @@ sub reapchildren
   delete $proc{$pid};
 
   # Load new tasks
-  my $newtask_list = $arv->{'job_tasks'}->{'list'}->execute(
-    'where' => {
-      'created_by_job_task_uuid' => $Jobstep->{'arvados_task'}->{uuid}
-    },
-    'order' => 'qsequence'
-  );
-  foreach my $arvados_task (@{$newtask_list->{'items'}}) {
+  my $newtask_list = [];
+  my $newtask_results;
+  do {
+    $newtask_results = $arv->{'job_tasks'}->{'list'}->execute(
+      'where' => {
+        'created_by_job_task_uuid' => $Jobstep->{'arvados_task'}->{uuid}
+      },
+      'order' => 'qsequence',
+      'offset' => scalar(@$newtask_list),
+    );
+    push(@$newtask_list, @{$newtask_results->{items}});
+  } while (@{$newtask_results->{items}});
+  foreach my $arvados_task (@$newtask_list) {
     my $jobstep = {
       'level' => $arvados_task->{'sequence'},
       'failures' => 0,
@@ -1083,7 +1170,7 @@ sub fetch_block
   my $hash = shift;
   my ($keep, $child_out, $output_block);
 
-  my $cmd = "$arv_cli keep get \Q$hash\E";
+  my $cmd = "arv-get \Q$hash\E";
   open($keep, '-|', $cmd) or die "fetch_block: $cmd: $!";
   sysread($keep, $output_block, 64 * 1024 * 1024);
   close $keep;
@@ -1095,7 +1182,7 @@ sub collate_output
   Log (undef, "collate");
 
   my ($child_out, $child_in);
-  my $pid = open2($child_out, $child_in, $arv_cli, 'keep', 'put', '--raw');
+  my $pid = open2($child_out, $child_in, 'arv-put', '--raw');
   my $joboutput;
   for (@jobstep)
   {
@@ -1132,7 +1219,7 @@ sub collate_output
       sysread($child_out, $joboutput, 64 * 1024 * 1024);
       chomp($joboutput);
     } else {
-      Log (undef, "timed out reading from 'arv keep put'");
+      Log (undef, "timed out reading from 'arv-put'");
     }
   }
   waitpid($pid, 0);
@@ -1204,15 +1291,15 @@ sub Log # ($jobstep_id, $logmessage)
   $message =~ s{([^ -\176])}{"\\" . sprintf ("%03o", ord($1))}ge;
   $message .= "\n";
   my $datetime;
-  if ($metastream || -t STDERR) {
+  if ($local_logfile || -t STDERR) {
     my @gmtime = gmtime;
     $datetime = sprintf ("%04d-%02d-%02d_%02d:%02d:%02d",
                          $gmtime[5]+1900, $gmtime[4]+1, @gmtime[3,2,1,0]);
   }
   print STDERR ((-t STDERR) ? ($datetime." ".$message) : $message);
 
-  if ($metastream) {
-    print $metastream $datetime . " " . $message;
+  if ($local_logfile) {
+    print $local_logfile $datetime . " " . $message;
   }
 }
 
@@ -1225,7 +1312,7 @@ sub croak
   freeze() if @jobstep_todo;
   collate_output() if @jobstep_todo;
   cleanup();
-  save_meta() if $metastream;
+  save_meta() if $local_logfile;
   die;
 }
 
@@ -1245,10 +1332,11 @@ sub save_meta
   return if $justcheckpoint;  # checkpointing is not relevant post-Warehouse.pm
 
   $local_logfile->flush;
-  my $cmd = "$arv_cli keep put --filename ''\Q$keep_logfile\E "
+  my $cmd = "arv-put --filename ''\Q$keep_logfile\E "
      . quotemeta($local_logfile->filename);
   my $loglocator = `$cmd`;
   die "system $cmd failed: $?" if $?;
+  chomp($loglocator);
 
   $local_logfile = undef;   # the temp file is automatically deleted
   Log (undef, "log manifest is $loglocator");
@@ -1364,6 +1452,21 @@ sub must_lock_now
   }
 }
 
+sub find_docker_hash {
+  # Given a Keep locator, search for a matching link to find the Docker hash
+  # of the stored image.
+  my $locator = shift;
+  my $links_result = $arv->{links}->{list}->execute(
+    filters => [["head_uuid", "=", $locator],
+                ["link_class", "=", "docker_image_hash"]],
+    limit => 1);
+  my $docker_hash;
+  foreach my $link (@{$links_result->{items}}) {
+    $docker_hash = lc($link->{name});
+  }
+  return $docker_hash;
+}
+
 __DATA__
 #!/usr/bin/perl