X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/4a05680dd6991129385046ce646c32e1b595055a..ef35b37af2b60cce2b5d7668874ba4cabaf4482e:/sdk/cli/bin/crunch-job

diff --git a/sdk/cli/bin/crunch-job b/sdk/cli/bin/crunch-job
index c04f29edd4..5d362f4529 100755
--- a/sdk/cli/bin/crunch-job
+++ b/sdk/cli/bin/crunch-job
@@ -71,9 +71,8 @@ use POSIX ':sys_wait_h';
 use Fcntl qw(F_GETFL F_SETFL O_NONBLOCK);
 use Arvados;
 use Getopt::Long;
-use Warehouse;
-use Warehouse::Stream;
-use IPC::System::Simple qw(capturex);
+use IPC::Open2;
+use IO::Select;

 $ENV{"TMPDIR"} ||= "/tmp";
 unless (defined $ENV{"CRUNCH_TMP"}) {
@@ -84,6 +83,7 @@ unless (defined $ENV{"CRUNCH_TMP"}) {
   }
 }
 $ENV{"JOB_WORK"} = $ENV{"CRUNCH_TMP"} . "/work";
+$ENV{"CRUNCH_INSTALL"} = "$ENV{CRUNCH_TMP}/opt";
 $ENV{"CRUNCH_WORK"} = $ENV{"JOB_WORK"}; # deprecated
 mkdir ($ENV{"JOB_WORK"});

@@ -119,7 +119,7 @@ $SIG{'USR2'} = sub



-my $arv = Arvados->new;
+my $arv = Arvados->new('apiVersion' => 'v1');
 my $metastream;

 my $User = $arv->{'users'}->{'current'}->execute;
@@ -165,10 +165,10 @@ else
 }
 $job_id = $Job->{'uuid'};

-$metastream = Warehouse::Stream->new(whc => new Warehouse);
-$metastream->clear;
-$metastream->name('.');
-$metastream->write_start($job_id . '.log.txt');
+# $metastream = Warehouse::Stream->new(whc => new Warehouse);
+# $metastream->clear;
+# $metastream->name('.');
+# $metastream->write_start($job_id . '.log.txt');

 $Job->{'runtime_constraints'} ||= {};

@@ -331,6 +331,15 @@ my $skip_install = ($local_job && $Job->{script_version} =~ m{^/});
 if ($skip_install)
 {
   $ENV{"CRUNCH_SRC"} = $Job->{script_version};
+  for my $src_path ("$ENV{CRUNCH_SRC}/arvados/sdk/python") {
+    if (-d $src_path) {
+      system("virtualenv", "$ENV{CRUNCH_TMP}/opt") == 0
+          or croak ("virtualenv $ENV{CRUNCH_TMP}/opt failed: exit ".($?>>8));
+      system ("cd $src_path && ./build.sh && \$CRUNCH_TMP/opt/bin/python setup.py install")
+          == 0
+          or croak ("setup.py in $src_path failed: exit ".($?>>8));
+    }
+  }
 }
 else
 {
@@ -341,13 +350,13 @@ else
   Log (undef, "Install revision ".$Job->{script_version});
   my $nodelist = join(",", @node);

-  # Clean out crunch_tmp/work, crunch_tmp/opt, crunch_tmp/src
+  # Clean out crunch_tmp/work, crunch_tmp/opt, crunch_tmp/src*

   my $cleanpid = fork();
   if ($cleanpid == 0)
   {
     srun (["srun", "--nodelist=$nodelist", "-D", $ENV{'TMPDIR'}],
-          ['bash', '-c', 'if mount | grep -q $JOB_WORK/; then sudo /bin/umount $JOB_WORK/* 2>/dev/null; fi; sleep 1; rm -rf $JOB_WORK $CRUNCH_TMP/opt $CRUNCH_TMP/src']);
+          ['bash', '-c', 'if mount | grep -q $JOB_WORK/; then sudo /bin/umount $JOB_WORK/* 2>/dev/null; fi; sleep 1; rm -rf $JOB_WORK $CRUNCH_TMP/opt $CRUNCH_TMP/src*']);
     exit (1);
   }
   while (1)
@@ -367,7 +376,6 @@ else

   $ENV{"CRUNCH_SRC_COMMIT"} = $Job->{script_version};
   $ENV{"CRUNCH_SRC"} = "$ENV{CRUNCH_TMP}/src";
-  $ENV{"CRUNCH_INSTALL"} = "$ENV{CRUNCH_TMP}/opt";

   my $commit;
   my $git_archive;
@@ -546,8 +554,10 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
     $ENV{"TASK_SLOT_NODE"} = $slot[$childslot]->{node}->{name};
     $ENV{"TASK_SLOT_NUMBER"} = $slot[$childslot]->{cpu};
     $ENV{"TASK_WORK"} = $ENV{"JOB_WORK"}."/".$slot[$childslot]->{cpu};
+    $ENV{"TASK_KEEPMOUNT"} = $ENV{"TASK_WORK"}."/keep";
     $ENV{"TASK_TMPDIR"} = $ENV{"TASK_WORK"}; # deprecated
     $ENV{"CRUNCH_NODE_SLOTS"} = $slot[$childslot]->{node}->{ncpus};
+    $ENV{"PATH"} = $ENV{"CRUNCH_INSTALL"} . "/bin:" . $ENV{"PATH"};
     $ENV{"GZIP"} = "-n";


@@ -561,7 +571,7 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
     my $build_script_to_send = "";
     my $command =
        "if [ -e $ENV{TASK_WORK} ]; then rm -rf $ENV{TASK_WORK}; fi; "
-        ."mkdir -p $ENV{JOB_WORK} $ENV{CRUNCH_TMP} $ENV{TASK_WORK} "
+        ."mkdir -p $ENV{JOB_WORK} $ENV{CRUNCH_TMP} $ENV{TASK_WORK} $ENV{TASK_KEEPMOUNT} "
        ."&& cd $ENV{CRUNCH_TMP} ";
     if ($build_script)
     {
@@ -569,11 +579,8 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
       $command .= "&& perl -";
     }

-    $ENV{"PYTHONPATH"} =~ s{^}{:} if $ENV{"PYTHONPATH"};
-    $ENV{"PYTHONPATH"} =~ s{^}{$ENV{CRUNCH_SRC}/sdk/python}; # xxx hack
-    $ENV{"PYTHONPATH"} =~ s{$}{:/usr/local/arvados/src/sdk/python}; # xxx hack
     $command .=
-        "&& exec $ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
+        "&& exec arv-mount $ENV{TASK_KEEPMOUNT} --exec $ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
     my @execargs = ('bash', '-c', $command);
     srun (\@srunargs, \@execargs, undef, $build_script_to_send);
     exit (111);
@@ -725,7 +732,7 @@ if ($job_has_uuid) {
   if ($Job->{'output'})
   {
     eval {
-      my $manifest_text = capturex("whget", $Job->{'output'});
+      my $manifest_text = `arv keep get $Job->{'output'}`;
       $arv->{'collections'}->{'create'}->execute('collection' => {
         'uuid' => $Job->{'output'},
         'manifest_text' => $manifest_text,
@@ -1025,12 +1032,23 @@ sub process_stderr
   } split ("\n", $jobstep[$job]->{stderr});
 }

+sub fetch_block
+{
+  my $hash = shift;
+  my ($child_out, $child_in, $output_block);
+
+  my $pid = open2($child_out, $child_in, 'arv', 'keep', 'get', $hash);
+  sysread($child_out, $output_block, 64 * 1024 * 1024);
+  waitpid($pid, 0);
+  return $output_block;
+}

 sub collate_output
 {
-  my $whc = Warehouse->new;
   Log (undef, "collate");
-  $whc->write_start (1);
+
+  my ($child_out, $child_in);
+  my $pid = open2($child_out, $child_in, 'arv', 'keep', 'put', '--raw');
   my $joboutput;
   for (@jobstep)
   {
@@ -1041,26 +1059,31 @@ sub collate_output
     if ($output !~ /^[0-9a-f]{32}(\+\S+)*$/)
     {
       $output_in_keep ||= $output =~ / [0-9a-f]{32}\S*\+K/;
-      $whc->write_data ($output);
+      print $child_in $output;
     }
     elsif (@jobstep == 1)
     {
       $joboutput = $output;
-      $whc->write_finish;
+      last;
     }
-    elsif (defined (my $outblock = $whc->fetch_block ($output)))
+    elsif (defined (my $outblock = fetch_block ($output)))
     {
       $output_in_keep ||= $outblock =~ / [0-9a-f]{32}\S*\+K/;
-      $whc->write_data ($outblock);
+      print $child_in $outblock;
     }
     else
     {
-      my $errstr = $whc->errstr;
-      $whc->write_data ("XXX fetch_block($output) failed: $errstr XXX\n");
+      print $child_in "XXX fetch_block($output) failed XXX\n";
       $main::success = 0;
     }
   }
-  $joboutput = $whc->write_finish if !defined $joboutput;
+  if (!defined $joboutput) {
+    my $s = IO::Select->new($child_out);
+    sysread($child_out, $joboutput, 64 * 1024 * 1024) if $s->can_read(0);
+  }
+  $child_in->close;
+  waitpid($pid, 0);
+
   if ($joboutput)
   {
     Log (undef, "output $joboutput");
@@ -1135,8 +1158,8 @@ sub Log  # ($jobstep_id, $logmessage)

   }
   print STDERR ((-t STDERR) ? ($datetime." ".$message) : $message);
-  return if !$metastream;
-  $metastream->write_data ($datetime . " " . $message);
+  # return if !$metastream;
+  # $metastream->write_data ($datetime . " " . $message);
 }


@@ -1164,20 +1187,20 @@ sub cleanup

 sub save_meta
 {
-  my $justcheckpoint = shift; # false if this will be the last meta saved
-  my $m = $metastream;
-  $m = $m->copy if $justcheckpoint;
-  $m->write_finish;
-  my $whc = Warehouse->new;
-  my $loglocator = $whc->store_block ($m->as_string);
-  $arv->{'collections'}->{'create'}->execute('collection' => {
-    'uuid' => $loglocator,
-    'manifest_text' => $m->as_string,
-  });
-  undef $metastream if !$justcheckpoint; # otherwise Log() will try to use it
-  Log (undef, "log manifest is $loglocator");
-  $Job->{'log'} = $loglocator;
-  $Job->update_attributes('log', $loglocator) if $job_has_uuid;
+# my $justcheckpoint = shift; # false if this will be the last meta saved
+# my $m = $metastream;
+# $m = $m->copy if $justcheckpoint;
+# $m->write_finish;
+# my $whc = Warehouse->new;
+# my $loglocator = $whc->store_block ($m->as_string);
+# $arv->{'collections'}->{'create'}->execute('collection' => {
+#   'uuid' => $loglocator,
+#   'manifest_text' => $m->as_string,
+# });
+# undef $metastream if !$justcheckpoint; # otherwise Log() will try to use it
+# Log (undef, "log manifest is $loglocator");
+# $Job->{'log'} = $loglocator;
+# $Job->update_attributes('log', $loglocator) if $job_has_uuid;
 }


@@ -1221,64 +1244,64 @@ sub thaw
 {
   croak ("Thaw not implemented");

-  my $whc;
-  my $key = shift;
-  Log (undef, "thaw from $key");
-
-  @jobstep = ();
-  @jobstep_done = ();
-  @jobstep_todo = ();
-  @jobstep_tomerge = ();
-  $jobstep_tomerge_level = 0;
-  my $frozenjob = {};
-
-  my $stream = new Warehouse::Stream ( whc => $whc,
-                                       hash => [split (",", $key)] );
-  $stream->rewind;
-  while (my $dataref = $stream->read_until (undef, "\n\n"))
-  {
-    if ($$dataref =~ /^job /)
-    {
-      foreach (split ("\n", $$dataref))
-      {
-        my ($k, $v) = split ("=", $_, 2);
-        $frozenjob->{$k} = freezeunquote ($v);
-      }
-      next;
-    }
-
-    if ($$dataref =~ /^merge (\d+) (.*)/)
-    {
-      $jobstep_tomerge_level = $1;
-      @jobstep_tomerge
-          = map { freezeunquote ($_) } split ("\n", freezeunquote($2));
-      next;
-    }
-
-    my $Jobstep = { };
-    foreach (split ("\n", $$dataref))
-    {
-      my ($k, $v) = split ("=", $_, 2);
-      $Jobstep->{$k} = freezeunquote ($v) if $k;
-    }
-    $Jobstep->{'failures'} = 0;
-    push @jobstep, $Jobstep;
-
-    if ($Jobstep->{exitcode} eq "0")
-    {
-      push @jobstep_done, $#jobstep;
-    }
-    else
-    {
-      push @jobstep_todo, $#jobstep;
-    }
-  }
-
-  foreach (qw (script script_version script_parameters))
-  {
-    $Job->{$_} = $frozenjob->{$_};
-  }
-  $Job->save if $job_has_uuid;
+  # my $whc;
+  # my $key = shift;
+  # Log (undef, "thaw from $key");
+
+  # @jobstep = ();
+  # @jobstep_done = ();
+  # @jobstep_todo = ();
+  # @jobstep_tomerge = ();
+  # $jobstep_tomerge_level = 0;
+  # my $frozenjob = {};
+
+  # my $stream = new Warehouse::Stream ( whc => $whc,
+  #                                      hash => [split (",", $key)] );
+  # $stream->rewind;
+  # while (my $dataref = $stream->read_until (undef, "\n\n"))
+  # {
+  #   if ($$dataref =~ /^job /)
+  #   {
+  #     foreach (split ("\n", $$dataref))
+  #     {
+  #       my ($k, $v) = split ("=", $_, 2);
+  #       $frozenjob->{$k} = freezeunquote ($v);
+  #     }
+  #     next;
+  #   }
+
+  #   if ($$dataref =~ /^merge (\d+) (.*)/)
+  #   {
+  #     $jobstep_tomerge_level = $1;
+  #     @jobstep_tomerge
+  #         = map { freezeunquote ($_) } split ("\n", freezeunquote($2));
+  #     next;
+  #   }
+
+  #   my $Jobstep = { };
+  #   foreach (split ("\n", $$dataref))
+  #   {
+  #     my ($k, $v) = split ("=", $_, 2);
+  #     $Jobstep->{$k} = freezeunquote ($v) if $k;
+  #   }
+  #   $Jobstep->{'failures'} = 0;
+  #   push @jobstep, $Jobstep;
+
+  #   if ($Jobstep->{exitcode} eq "0")
+  #   {
+  #     push @jobstep_done, $#jobstep;
+  #   }
+  #   else
+  #   {
+  #     push @jobstep_todo, $#jobstep;
+  #   }
+  # }
+
+  # foreach (qw (script script_version script_parameters))
+  # {
+  #   $Job->{$_} = $frozenjob->{$_};
+  # }
+  # $Job->save if $job_has_uuid;
 }


@@ -1351,7 +1374,7 @@ my $repo = $ENV{"CRUNCH_SRC_URL"};

 open L, ">", "$destdir.lock" or die "$destdir.lock: $!";
 flock L, LOCK_EX;
-if (readlink ("$destdir.commit") eq $commit) {
+if (readlink ("$destdir.commit") eq $commit && -d $destdir) {
   exit 0;
 }

@@ -1360,16 +1383,27 @@ open STDOUT, ">", "$destdir.log";
 open STDERR, ">&STDOUT";

 mkdir $destdir;
-open TARX, "|-", "tar", "-C", $destdir, "-xf", "-";
-print TARX <DATA>;
-if(!close(TARX)) {
-  die "'tar -C $destdir -xf -' exited $?: $!";
+my @git_archive_data = <DATA>;
+if (@git_archive_data) {
+  open TARX, "|-", "tar", "-C", $destdir, "-xf", "-";
+  print TARX @git_archive_data;
+  if(!close(TARX)) {
+    die "'tar -C $destdir -xf -' exited $?: $!";
+  }
 }

 my $pwd;
 chomp ($pwd = `pwd`);
 my $install_dir = $ENV{"CRUNCH_INSTALL"} || "$pwd/opt";
 mkdir $install_dir;
+
+for my $src_path ("$destdir/arvados/sdk/python") {
+  if (-d $src_path) {
+    shell_or_die ("virtualenv", $install_dir);
+    shell_or_die ("cd $src_path && ./build.sh && $install_dir/bin/python setup.py install");
+  }
+}
+
 if (-e "$destdir/crunch_scripts/install") {
   shell_or_die ("$destdir/crunch_scripts/install", $install_dir);
 } elsif (!-e "./install.sh" && -e "./tests/autotests.sh") {
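
Note (not part of the diff above): the recurring change in these hunks is to drop the Warehouse Perl client and talk to Keep by piping to the `arv keep get` / `arv keep put --raw` CLI through IPC::Open2. The standalone sketch below isolates that pattern under the same simplifying assumption the patch itself makes (a single sysread of at most 64 MiB rather than a read-until-EOF loop); the sub names fetch_block_sketch and store_block_sketch are illustrative only and do not appear in crunch-job.

#!/usr/bin/env perl
# Sketch only: mirrors the open2-based Keep access pattern introduced above.
use strict;
use warnings;
use IPC::Open2;

# Fetch one Keep block by locator via `arv keep get <locator>`.
sub fetch_block_sketch {
    my ($locator) = @_;
    my ($child_out, $child_in, $block);
    my $pid = open2($child_out, $child_in, 'arv', 'keep', 'get', $locator);
    close $child_in;                               # nothing to send on stdin
    sysread($child_out, $block, 64 * 1024 * 1024); # single read, as in the patch
    waitpid($pid, 0);
    return $block;
}

# Store data via `arv keep put --raw`; the locator is read back from stdout.
sub store_block_sketch {
    my ($data) = @_;
    my ($child_out, $child_in, $locator);
    my $pid = open2($child_out, $child_in, 'arv', 'keep', 'put', '--raw');
    print $child_in $data;
    close $child_in;                               # EOF tells the child to finish
    sysread($child_out, $locator, 1024);
    waitpid($pid, 0);
    chomp $locator if defined $locator;
    return $locator;
}

In crunch-job itself the write side stays open for the whole collate_output loop and is only closed at the end, with IO::Select used to avoid blocking on the read when nothing was written.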