Arvados API authorization token to use during the course of the job.
+=item --no-clear-tmp
+
+Do not clear per-job/task temporary directories during initial job
+setup. This can speed up development and debugging when running jobs
+locally.
+
=back
=head1 RUNNING JOBS LOCALLY
use IPC::Open2;
use IO::Select;
use File::Temp;
+use Fcntl ':flock';
$ENV{"TMPDIR"} ||= "/tmp";
unless (defined $ENV{"CRUNCH_TMP"}) {
my $git_dir;
my $jobspec;
my $job_api_token;
+my $no_clear_tmp;
my $resume_stash;
GetOptions('force-unlock' => \$force_unlock,
'git-dir=s' => \$git_dir,
'job=s' => \$jobspec,
'job-api-token=s' => \$job_api_token,
+ 'no-clear-tmp' => \$no_clear_tmp,
'resume-stash=s' => \$resume_stash,
);
}
+if (!$have_slurm)
+{
+ must_lock_now("$ENV{CRUNCH_TMP}/.lock", "a job is already running here.");
+}
+
+
my $build_script;
my $skip_install = ($local_job && $Job->{script_version} =~ m{^/});
if ($skip_install)
{
+ if (!defined $no_clear_tmp) {
+ my $clear_tmp_cmd = 'rm -rf $JOB_WORK $CRUNCH_TMP/opt $CRUNCH_TMP/src*';
+ system($clear_tmp_cmd) == 0
+ or croak ("`$clear_tmp_cmd` failed: ".($?>>8));
+ }
$ENV{"CRUNCH_SRC"} = $Job->{script_version};
for my $src_path ("$ENV{CRUNCH_SRC}/arvados/sdk/python") {
if (-d $src_path) {
Log (undef, "Install revision ".$Job->{script_version});
my $nodelist = join(",", @node);
- # Clean out crunch_tmp/work, crunch_tmp/opt, crunch_tmp/src*
+ if (!defined $no_clear_tmp) {
+ # Clean out crunch_tmp/work, crunch_tmp/opt, crunch_tmp/src*
- my $cleanpid = fork();
- if ($cleanpid == 0)
- {
- srun (["srun", "--nodelist=$nodelist", "-D", $ENV{'TMPDIR'}],
- ['bash', '-c', 'if mount | grep -q $JOB_WORK/; then sudo /bin/umount $JOB_WORK/* 2>/dev/null; fi; sleep 1; rm -rf $JOB_WORK $CRUNCH_TMP/opt $CRUNCH_TMP/src*']);
- exit (1);
- }
- while (1)
- {
- last if $cleanpid == waitpid (-1, WNOHANG);
- freeze_if_want_freeze ($cleanpid);
- select (undef, undef, undef, 0.1);
+ my $cleanpid = fork();
+ if ($cleanpid == 0)
+ {
+ srun (["srun", "--nodelist=$nodelist", "-D", $ENV{'TMPDIR'}],
+ ['bash', '-c', 'if mount | grep -q $JOB_WORK/; then sudo /bin/umount $JOB_WORK/* 2>/dev/null; fi; sleep 1; rm -rf $JOB_WORK $CRUNCH_TMP/opt $CRUNCH_TMP/src*']);
+ exit (1);
+ }
+ while (1)
+ {
+ last if $cleanpid == waitpid (-1, WNOHANG);
+ freeze_if_want_freeze ($cleanpid);
+ select (undef, undef, undef, 0.1);
+ }
+ Log (undef, "Clean-work-dir exited $?");
}
- Log (undef, "Clean-work-dir exited $?");
# Install requested code version
Log (undef, "Install exited $?");
}
+if (!$have_slurm)
+{
+ # Grab our lock again (we might have deleted and re-created CRUNCH_TMP above)
+ must_lock_now("$ENV{CRUNCH_TMP}/.lock", "a job is already running here.");
+}
+
foreach (qw (script script_version script_parameters runtime_constraints))
}
$ENV{"TASK_SLOT_NODE"} = $slot[$childslot]->{node}->{name};
$ENV{"TASK_SLOT_NUMBER"} = $slot[$childslot]->{cpu};
- $ENV{"TASK_WORK"} = $ENV{"JOB_WORK"}."/".$slot[$childslot]->{cpu};
+ $ENV{"TASK_WORK"} = $ENV{"JOB_WORK"}."/$id.$$";
$ENV{"TASK_KEEPMOUNT"} = $ENV{"TASK_WORK"}."/keep";
$ENV{"TASK_TMPDIR"} = $ENV{"TASK_WORK"}; # deprecated
$ENV{"CRUNCH_NODE_SLOTS"} = $slot[$childslot]->{node}->{ncpus};
}
else
{
- print $child_in "XXX fetch_block($output) failed XXX\n";
+ Log (undef, "XXX fetch_block($output) failed XXX");
$main::success = 0;
}
}
+ $child_in->close;
+
if (!defined $joboutput) {
my $s = IO::Select->new($child_out);
- sysread($child_out, $joboutput, 64 * 1024 * 1024) if $s->can_read(5);
+ if ($s->can_read(120)) {
+ sysread($child_out, $joboutput, 64 * 1024 * 1024);
+ } else {
+ Log (undef, "timed out reading from 'arv keep put'");
+ }
}
- $child_in->close;
waitpid($pid, 0);
if ($joboutput)
sub thaw
{
  croak ("Thaw not implemented");
+  # NOTE(review): resuming ("thawing") a frozen job is unimplemented in
+  # this version. The old Warehouse::Stream-based restore code below is
+  # removed outright rather than kept as commented-out dead code.
-
- # my $whc;
- # my $key = shift;
- # Log (undef, "thaw from $key");
-
- # @jobstep = ();
- # @jobstep_done = ();
- # @jobstep_todo = ();
- # @jobstep_tomerge = ();
- # $jobstep_tomerge_level = 0;
- # my $frozenjob = {};
-
- # my $stream = new Warehouse::Stream ( whc => $whc,
- # hash => [split (",", $key)] );
- # $stream->rewind;
- # while (my $dataref = $stream->read_until (undef, "\n\n"))
- # {
- # if ($$dataref =~ /^job /)
- # {
- # foreach (split ("\n", $$dataref))
- # {
- # my ($k, $v) = split ("=", $_, 2);
- # $frozenjob->{$k} = freezeunquote ($v);
- # }
- # next;
- # }
-
- # if ($$dataref =~ /^merge (\d+) (.*)/)
- # {
- # $jobstep_tomerge_level = $1;
- # @jobstep_tomerge
- # = map { freezeunquote ($_) } split ("\n", freezeunquote($2));
- # next;
- # }
-
- # my $Jobstep = { };
- # foreach (split ("\n", $$dataref))
- # {
- # my ($k, $v) = split ("=", $_, 2);
- # $Jobstep->{$k} = freezeunquote ($v) if $k;
- # }
- # $Jobstep->{'failures'} = 0;
- # push @jobstep, $Jobstep;
-
- # if ($Jobstep->{exitcode} eq "0")
- # {
- # push @jobstep_done, $#jobstep;
- # }
- # else
- # {
- # push @jobstep_todo, $#jobstep;
- # }
- # }
-
- # foreach (qw (script script_version script_parameters))
- # {
- # $Job->{$_} = $frozenjob->{$_};
- # }
- # $Job->save if $job_has_uuid;
}
Log (undef, "backing off node " . $slot[$slotid]->{node}->{name} . " for 60 seconds");
}
+# must_lock_now: take an exclusive, non-blocking flock on $lockfile,
+# croaking with $error_message if another process already holds it.
+# The bareword filehandle L is deliberately global and never closed:
+# a lexical handle would be garbage-collected on sub exit, releasing
+# the lock, whereas L keeps the flock held for the life of this
+# process. Calling again reopens L, which drops and then re-acquires
+# the lock (used after CRUNCH_TMP may have been deleted/re-created).
+sub must_lock_now
+{
+  my ($lockfile, $error_message) = @_;
+  # Open for write (creates the lockfile if absent), then try to lock
+  # without blocking; failure means another job owns this work tree.
+  open L, ">", $lockfile or croak("$lockfile: $!");
+  if (!flock L, LOCK_EX|LOCK_NB) {
+    croak("Can't lock $lockfile: $error_message\n");
+  }
+}
+
__DATA__
#!/usr/bin/perl