10651: Add --submit-runner-ram to specify amount of RAM that should be
[arvados.git] / sdk / cwl / arvados_cwl / __init__.py
#!/usr/bin/env python

# Implement cwl-runner interface for submitting and running work on Arvados, using
# either the Crunch jobs API or Crunch containers API.

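# A minimal usage sketch (hypothetical arguments; main() is defined at the
# bottom of this file) for invoking this module from Python instead of via
# the arvados-cwl-runner command line:
#
#     import sys
#     from arvados_cwl import main
#     exit_code = main(["--api=containers", "workflow.cwl", "inputs.yml"],
#                      sys.stdout, sys.stderr)
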
import argparse
import logging
import os
import sys
import threading
import hashlib
import copy
import json
from functools import partial
import pkg_resources  # part of setuptools

from cwltool.errors import WorkflowException
import cwltool.main
import cwltool.workflow
import schema_salad

import arvados
import arvados.config

from .arvcontainer import ArvadosContainer, RunnerContainer
from .arvjob import ArvadosJob, RunnerJob, RunnerTemplate
from .runner import Runner, upload_instance
from .arvtool import ArvadosCommandTool
from .arvworkflow import ArvadosWorkflow, upload_workflow
from .fsaccess import CollectionFsAccess
from .perf import Perf
from .pathmapper import FinalOutputPathMapper
from ._version import __version__

from cwltool.pack import pack
from cwltool.process import shortname, UnsupportedRequirement, getListing
from cwltool.pathmapper import adjustFileObjs, adjustDirObjs
from cwltool.draft2tool import compute_checksums
from arvados.api import OrderedJsonModel

logger = logging.getLogger('arvados.cwl-runner')
metrics = logging.getLogger('arvados.cwl-runner.metrics')
logger.setLevel(logging.INFO)


class ArvCwlRunner(object):
    """Execute a CWL tool or workflow, submit work (using either the jobs
    or containers API), wait for it to complete, and report output.
    """

    def __init__(self, api_client, work_api=None, keep_client=None, output_name=None, output_tags=None):
        self.api = api_client
        self.processes = {}
        self.lock = threading.Lock()
        self.cond = threading.Condition(self.lock)
        self.final_output = None
        self.final_status = None
        self.uploaded = {}
        self.num_retries = 4
        self.uuid = None
        self.stop_polling = threading.Event()
        self.poll_api = None
        self.pipeline = None
        self.final_output_collection = None
        self.output_name = output_name
        self.output_tags = output_tags
        self.project_uuid = None

        if keep_client is not None:
            self.keep_client = keep_client
        else:
            self.keep_client = arvados.keep.KeepClient(api_client=self.api, num_retries=self.num_retries)

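        # Consult the API server's discovery document to see which work
        # submission APIs ("jobs" and/or "containers") it offers, and pick
        # one consistent with the caller's work_api preference.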
        self.work_api = None
        for api in ["jobs", "containers"]:
            try:
                methods = self.api._rootDesc.get('resources')[api]['methods']
                if ('httpMethod' in methods['create'] and
                    (work_api == api or work_api is None)):
                    self.work_api = api
                    break
            except KeyError:
                pass
        if not self.work_api:
            if work_api is None:
                raise Exception("No supported APIs")
            else:
                raise Exception("Unsupported API '%s'" % work_api)

    def arv_make_tool(self, toolpath_object, **kwargs):
        kwargs["work_api"] = self.work_api
        if toolpath_object.get("class") == "CommandLineTool":
            return ArvadosCommandTool(self, toolpath_object, **kwargs)
        elif toolpath_object.get("class") == "Workflow":
            return ArvadosWorkflow(self, toolpath_object, **kwargs)
        else:
            return cwltool.workflow.defaultMakeTool(toolpath_object, **kwargs)

    def output_callback(self, out, processStatus):
        if processStatus == "success":
            logger.info("Overall process status is %s", processStatus)
            if self.pipeline:
                self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
                                                     body={"state": "Complete"}).execute(num_retries=self.num_retries)
        else:
            logger.warn("Overall process status is %s", processStatus)
            if self.pipeline:
                self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
                                                     body={"state": "Failed"}).execute(num_retries=self.num_retries)
        self.final_status = processStatus
        self.final_output = out

    def on_message(self, event):
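        """Handle a state change on a submitted job or container.

        `event` has the same shape as the poll payloads constructed in
        poll_states(): {"object_uuid": ..., "event_type": "update",
        "properties": {"new_attributes": <job or container record>}}.
        """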
        if "object_uuid" in event:
            if event["object_uuid"] in self.processes and event["event_type"] == "update":
                if event["properties"]["new_attributes"]["state"] == "Running" and self.processes[event["object_uuid"]].running is False:
                    uuid = event["object_uuid"]
                    with self.lock:
                        j = self.processes[uuid]
                        logger.info("Job %s (%s) is Running", j.name, uuid)
                        j.running = True
                        j.update_pipeline_component(event["properties"]["new_attributes"])
                elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled"):
                    uuid = event["object_uuid"]
                    try:
                        self.cond.acquire()
                        j = self.processes[uuid]
                        # Singularize the API name: "jobs" -> "Job", "containers" -> "Container".
                        txt = self.work_api[0].upper() + self.work_api[1:-1]
                        logger.info("%s %s (%s) is %s", txt, j.name, uuid, event["properties"]["new_attributes"]["state"])
                        with Perf(metrics, "done %s" % j.name):
                            j.done(event["properties"]["new_attributes"])
                        self.cond.notify()
                    finally:
                        self.cond.release()

    def poll_states(self):
        """Poll status of jobs or containers listed in the processes dict.

        Runs in a separate thread.
        """

        try:
            while True:
                self.stop_polling.wait(15)
                if self.stop_polling.is_set():
                    break
                with self.lock:
                    keys = self.processes.keys()
                if not keys:
                    continue

                if self.work_api == "containers":
                    table = self.poll_api.containers()
                elif self.work_api == "jobs":
                    table = self.poll_api.jobs()

                try:
                    proc_states = table.list(filters=[["uuid", "in", keys]]).execute(num_retries=self.num_retries)
                except Exception as e:
                    logger.warn("Error checking states on API server: %s", e)
                    continue

                for p in proc_states["items"]:
                    self.on_message({
                        "object_uuid": p["uuid"],
                        "event_type": "update",
                        "properties": {
                            "new_attributes": p
                        }
                    })
        except:
            logger.error("Fatal error in state polling thread.", exc_info=(sys.exc_info()[1] if self.debug else False))
            self.cond.acquire()
            self.processes.clear()
            self.cond.notify()
            self.cond.release()
        finally:
            self.stop_polling.set()

    def get_uploaded(self):
        return self.uploaded.copy()

    def add_uploaded(self, src, pair):
        self.uploaded[src] = pair

    def check_writable(self, obj):
        if isinstance(obj, dict):
            if obj.get("writable"):
                raise UnsupportedRequirement("InitialWorkDir feature 'writable: true' not supported")
            for v in obj.itervalues():
                self.check_writable(v)
        if isinstance(obj, list):
            for v in obj:
                self.check_writable(v)

    def make_output_collection(self, name, tagsString, outputObj):
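        """Assemble the final workflow output into a single new collection.

        File literals ("_:" sources of type CreateFile) are written directly;
        every other output must already be in Keep and is copied from its
        source collection.  Also writes cwl.output.json into the collection,
        saves it, and applies any comma-separated tags from tagsString.
        Returns (outputObj, collection).
        """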
        outputObj = copy.deepcopy(outputObj)

        files = []
        def capture(fileobj):
            files.append(fileobj)

        adjustDirObjs(outputObj, capture)
        adjustFileObjs(outputObj, capture)

        generatemapper = FinalOutputPathMapper(files, "", "", separateDirs=False)

        final = arvados.collection.Collection(api_client=self.api,
                                              keep_client=self.keep_client,
                                              num_retries=self.num_retries)

        srccollections = {}
        for k, v in generatemapper.items():
            if k.startswith("_:"):
                if v.type == "Directory":
                    continue
                if v.type == "CreateFile":
                    with final.open(v.target, "wb") as f:
                        f.write(v.resolved.encode("utf-8"))
                    continue

            if not k.startswith("keep:"):
                raise Exception("Output source is not in keep or a literal")
            sp = k.split("/")
            srccollection = sp[0][5:]
            if srccollection not in srccollections:
                try:
                    srccollections[srccollection] = arvados.collection.CollectionReader(
                        srccollection,
                        api_client=self.api,
                        keep_client=self.keep_client,
                        num_retries=self.num_retries)
                except arvados.errors.ArgumentError as e:
                    logger.error("Creating CollectionReader for '%s' '%s': %s", k, v, e)
                    raise
            reader = srccollections[srccollection]
            try:
                srcpath = "/".join(sp[1:]) if len(sp) > 1 else "."
                final.copy(srcpath, v.target, source_collection=reader, overwrite=False)
            except IOError as e:
                logger.warn("While preparing output collection: %s", e)

        def rewrite(fileobj):
            fileobj["location"] = generatemapper.mapper(fileobj["location"]).target
            for k in ("basename", "listing", "contents"):
                if k in fileobj:
                    del fileobj[k]

        adjustDirObjs(outputObj, rewrite)
        adjustFileObjs(outputObj, rewrite)

        with final.open("cwl.output.json", "w") as f:
            json.dump(outputObj, f, sort_keys=True, indent=4, separators=(',', ': '))

        final.save_new(name=name, owner_uuid=self.project_uuid, ensure_unique_name=True)

        logger.info("Final output collection %s \"%s\" (%s)", final.portable_data_hash(),
                    final.api_response()["name"],
                    final.manifest_locator())

        final_uuid = final.manifest_locator()
        # Avoid creating a tag link with an empty name when no tags were given.
        tags = tagsString.split(',') if tagsString else []
        for tag in tags:
            self.api.links().create(body={
                "head_uuid": final_uuid, "link_class": "tag", "name": tag
                }).execute(num_retries=self.num_retries)

        def finalcollection(fileobj):
            fileobj["location"] = "keep:%s/%s" % (final.portable_data_hash(), fileobj["location"])

        adjustDirObjs(outputObj, finalcollection)
        adjustFileObjs(outputObj, finalcollection)

        return (outputObj, final)

    def set_crunch_output(self):
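        """Record the final output collection on this runner's own record.

        Only has an effect when arvados-cwl-runner is itself running inside
        Crunch: as a container (containers API) or a job task (jobs API).
        """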
        if self.work_api == "containers":
            try:
                current = self.api.containers().current().execute(num_retries=self.num_retries)
                self.api.containers().update(uuid=current['uuid'],
                                             body={
                                                 'output': self.final_output_collection.portable_data_hash(),
                                             }).execute(num_retries=self.num_retries)
            except Exception as e:
                logger.info("Setting container output: %s", e)
        elif self.work_api == "jobs" and "TASK_UUID" in os.environ:
            self.api.job_tasks().update(uuid=os.environ["TASK_UUID"],
                                        body={
                                            'output': self.final_output_collection.portable_data_hash(),
                                            'success': self.final_status == "success",
                                            'progress': 1.0
                                        }).execute(num_retries=self.num_retries)

    def arv_executor(self, tool, job_order, **kwargs):
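        """Run or submit the given tool or workflow.

        Depending on kwargs, this either saves a workflow or pipeline
        template, submits a runner job or container, or executes the
        workflow from this process while polling the API server for
        job/container state changes.
        """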
        self.debug = kwargs.get("debug")

        tool.visit(self.check_writable)

        self.project_uuid = kwargs.get("project_uuid")
        self.pipeline = None
        make_fs_access = kwargs.get("make_fs_access") or partial(CollectionFsAccess,
                                                                 api_client=self.api,
                                                                 keep_client=self.keep_client)
        self.fs_access = make_fs_access(kwargs["basedir"])

        existing_uuid = kwargs.get("update_workflow")
        if existing_uuid or kwargs.get("create_workflow"):
            if self.work_api == "jobs":
                tmpl = RunnerTemplate(self, tool, job_order,
                                      kwargs.get("enable_reuse"),
                                      uuid=existing_uuid,
                                      submit_runner_ram=kwargs.get("submit_runner_ram"))
                tmpl.save()
                # cwltool.main will write our return value to stdout.
                return tmpl.uuid
            else:
                return upload_workflow(self, tool, job_order,
                                       self.project_uuid,
                                       uuid=existing_uuid,
                                       submit_runner_ram=kwargs.get("submit_runner_ram"))

        self.ignore_docker_for_reuse = kwargs.get("ignore_docker_for_reuse")

        kwargs["make_fs_access"] = make_fs_access
        kwargs["enable_reuse"] = kwargs.get("enable_reuse")
        kwargs["use_container"] = True
        kwargs["tmpdir_prefix"] = "tmp"
        kwargs["on_error"] = "continue"
        kwargs["compute_checksum"] = kwargs.get("compute_checksum")

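        # Output and scratch directory conventions differ between the two
        # APIs: the containers API uses fixed paths inside the container,
        # while the jobs API uses crunch task substitution variables that
        # are expanded at runtime.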
        if self.work_api == "containers":
            kwargs["outdir"] = "/var/spool/cwl"
            kwargs["docker_outdir"] = "/var/spool/cwl"
            kwargs["tmpdir"] = "/tmp"
            kwargs["docker_tmpdir"] = "/tmp"
        elif self.work_api == "jobs":
            kwargs["outdir"] = "$(task.outdir)"
            kwargs["docker_outdir"] = "$(task.outdir)"
            kwargs["tmpdir"] = "$(task.tmpdir)"

        upload_instance(self, shortname(tool.tool["id"]), tool, job_order)

        runnerjob = None
        if kwargs.get("submit"):
            if self.work_api == "containers":
                if tool.tool["class"] == "CommandLineTool":
                    runnerjob = tool.job(job_order,
                                         self.output_callback,
                                         **kwargs).next()
                else:
                    runnerjob = RunnerContainer(self, tool, job_order, kwargs.get("enable_reuse"), self.output_name,
                                                self.output_tags, submit_runner_ram=kwargs.get("submit_runner_ram"))
            else:
                runnerjob = RunnerJob(self, tool, job_order, kwargs.get("enable_reuse"), self.output_name,
                                      self.output_tags, submit_runner_ram=kwargs.get("submit_runner_ram"))

        if not kwargs.get("submit") and "cwl_runner_job" not in kwargs and self.work_api != "containers":
            # Create pipeline for local run
            self.pipeline = self.api.pipeline_instances().create(
                body={
                    "owner_uuid": self.project_uuid,
                    "name": shortname(tool.tool["id"]),
                    "components": {},
                    "state": "RunningOnClient"}).execute(num_retries=self.num_retries)
            logger.info("Pipeline instance %s", self.pipeline["uuid"])

        if runnerjob and not kwargs.get("wait"):
            runnerjob.run(wait=kwargs.get("wait"))
            return runnerjob.uuid

        self.poll_api = arvados.api('v1')
        self.polling_thread = threading.Thread(target=self.poll_states)
        self.polling_thread.start()

        if runnerjob:
            jobiter = iter((runnerjob,))
        else:
            if "cwl_runner_job" in kwargs:
                self.uuid = kwargs.get("cwl_runner_job").get('uuid')
            jobiter = tool.job(job_order,
                               self.output_callback,
                               **kwargs)

        try:
            self.cond.acquire()
            # Will continue to hold the lock for the duration of this code
            # except when in cond.wait(), at which point on_message can update
            # job state and process output callbacks.

            loopperf = Perf(metrics, "jobiter")
            loopperf.__enter__()
            for runnable in jobiter:
                loopperf.__exit__()

                if self.stop_polling.is_set():
                    break

                if runnable:
                    with Perf(metrics, "run"):
                        runnable.run(**kwargs)
                else:
                    if self.processes:
                        self.cond.wait(1)
                    else:
                        logger.error("Workflow is deadlocked, no runnable jobs and not waiting on any pending jobs.")
                        break
                loopperf.__enter__()
            loopperf.__exit__()

            while self.processes:
                self.cond.wait(1)

        except UnsupportedRequirement:
            raise
        except:
            if sys.exc_info()[0] is KeyboardInterrupt:
                logger.error("Interrupted, marking pipeline as failed")
            else:
                logger.error("Execution failed: %s", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False))
            if self.pipeline:
                self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
                                                     body={"state": "Failed"}).execute(num_retries=self.num_retries)
            if runnerjob and runnerjob.uuid and self.work_api == "containers":
                self.api.container_requests().update(uuid=runnerjob.uuid,
                                                     body={"priority": "0"}).execute(num_retries=self.num_retries)
        finally:
            self.cond.release()
            self.stop_polling.set()
            self.polling_thread.join()

        if self.final_status == "UnsupportedRequirement":
            raise UnsupportedRequirement("Check log for details.")

        if self.final_output is None:
            raise WorkflowException("Workflow did not return a result.")

        if kwargs.get("submit") and isinstance(runnerjob, Runner):
            logger.info("Final output collection %s", runnerjob.final_output)
        else:
            if self.output_name is None:
                self.output_name = "Output of %s" % (shortname(tool.tool["id"]))
            if self.output_tags is None:
                self.output_tags = ""
            self.final_output, self.final_output_collection = self.make_output_collection(self.output_name, self.output_tags, self.final_output)
            self.set_crunch_output()

        if self.final_status != "success":
            raise WorkflowException("Workflow failed.")

        if kwargs.get("compute_checksum"):
            adjustDirObjs(self.final_output, partial(getListing, self.fs_access))
            adjustFileObjs(self.final_output, partial(compute_checksums, self.fs_access))

        return self.final_output


def versionstring():
    """Return a version string for key packages, for provenance and debugging."""

    arvcwlpkg = pkg_resources.require("arvados-cwl-runner")
    arvpkg = pkg_resources.require("arvados-python-client")
    cwlpkg = pkg_resources.require("cwltool")

    return "%s %s %s, %s %s, %s %s" % (sys.argv[0], __version__, arvcwlpkg[0].version,
                                       "arvados-python-client", arvpkg[0].version,
                                       "cwltool", cwlpkg[0].version)


def arg_parser():  # type: () -> argparse.ArgumentParser
    parser = argparse.ArgumentParser(description='Arvados executor for Common Workflow Language')

    parser.add_argument("--basedir", type=str,
                        help="Base directory used to resolve relative references in the input; defaults to the directory of the input object file, or the current directory if inputs are piped or provided on the command line.")
    parser.add_argument("--outdir", type=str, default=os.path.abspath('.'),
                        help="Output directory, default current directory")

    parser.add_argument("--eval-timeout",
                        help="Time to wait for a JavaScript expression to evaluate before giving an error, default 20s.",
                        type=float,
                        default=20)
    parser.add_argument("--version", action="store_true", help="Print version and exit")

    exgroup = parser.add_mutually_exclusive_group()
    exgroup.add_argument("--verbose", action="store_true", help="Default logging")
    exgroup.add_argument("--quiet", action="store_true", help="Only print warnings and errors.")
    exgroup.add_argument("--debug", action="store_true", help="Print even more logging")

    parser.add_argument("--metrics", action="store_true", help="Print timing metrics")

    parser.add_argument("--tool-help", action="store_true", help="Print command line help for tool")

    exgroup = parser.add_mutually_exclusive_group()
    exgroup.add_argument("--enable-reuse", action="store_true",
                         default=True, dest="enable_reuse",
                         help="Enable job or container reuse (default)")
    exgroup.add_argument("--disable-reuse", action="store_false",
                         default=True, dest="enable_reuse",
                         help="Disable job or container reuse")
    parser.add_argument("--project-uuid", type=str, metavar="UUID", help="Project that will own the workflow jobs; if not provided, they will go to the user's home project.")
    parser.add_argument("--output-name", type=str, help="Name to use for collection that stores the final output.", default=None)
    parser.add_argument("--output-tags", type=str, help="Tags for the final output collection separated by commas, e.g., '--output-tags tag0,tag1,tag2'.", default=None)
    parser.add_argument("--ignore-docker-for-reuse", action="store_true",
                        help="Ignore Docker image version when deciding whether to reuse past jobs.",
                        default=False)

    exgroup = parser.add_mutually_exclusive_group()
    exgroup.add_argument("--submit", action="store_true", help="Submit workflow to run on Arvados.",
                         default=True, dest="submit")
    exgroup.add_argument("--local", action="store_false", help="Run workflow on local host (submits jobs to Arvados).",
                         default=True, dest="submit")
    exgroup.add_argument("--create-template", action="store_true", help="(Deprecated) synonym for --create-workflow.",
                         dest="create_workflow")
    exgroup.add_argument("--create-workflow", action="store_true", help="Create an Arvados workflow (if using the 'containers' API) or pipeline template (if using the 'jobs' API). See --api.")
    exgroup.add_argument("--update-workflow", type=str, metavar="UUID", help="Update an existing Arvados workflow or pipeline template with the given UUID.")

    exgroup = parser.add_mutually_exclusive_group()
    exgroup.add_argument("--wait", action="store_true", help="After submitting workflow runner job, wait for completion.",
                         default=True, dest="wait")
    exgroup.add_argument("--no-wait", action="store_false", help="Submit workflow runner job and exit.",
                         default=True, dest="wait")

    parser.add_argument("--api", type=str,
                        default=None, dest="work_api",
                        help="Select work submission API, one of 'jobs' or 'containers'. Default is 'jobs' if that API is available, otherwise 'containers'.")

    parser.add_argument("--compute-checksum", action="store_true", default=False,
                        help="Compute checksum of contents while collecting outputs",
                        dest="compute_checksum")

    parser.add_argument("--submit-runner-ram", type=int,
                        help="RAM (in MiB) required for the workflow runner job.",
                        default=0)

    parser.add_argument("workflow", type=str, nargs="?", default=None, help="The workflow to execute")
    parser.add_argument("job_order", nargs=argparse.REMAINDER, help="The input object to the workflow.")

    return parser


def add_arv_hints():
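    # Load the Arvados CWL extension schema shipped with this package and
    # register its names with cwltool's v1.0 schema, so Arvados-specific
    # hints validate instead of being rejected as unknown.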
    cache = {}
    res = pkg_resources.resource_stream(__name__, 'arv-cwl-schema.yml')
    cache["http://arvados.org/cwl"] = res.read()
    res.close()
    document_loader, cwlnames, _, _ = cwltool.process.get_schema("v1.0")
    _, extnames, _, _ = schema_salad.schema.load_schema("http://arvados.org/cwl", cache=cache)
    for n in extnames.names:
        if not cwlnames.has_name("http://arvados.org/cwl#"+n, ""):
            cwlnames.add_name("http://arvados.org/cwl#"+n, "", extnames.get_name(n, ""))
        document_loader.idx["http://arvados.org/cwl#"+n] = {}


def main(args, stdout, stderr, api_client=None, keep_client=None):
    parser = arg_parser()

    job_order_object = None
    arvargs = parser.parse_args(args)

    if arvargs.update_workflow:
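        # The five-character infix at offset 5 of an Arvados UUID encodes
        # the object type: '7fd4e' is a workflow (containers API), 'p5p6p'
        # is a pipeline template (jobs API).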
        if arvargs.update_workflow.find('-7fd4e-') == 5:
            want_api = 'containers'
        elif arvargs.update_workflow.find('-p5p6p-') == 5:
            want_api = 'jobs'
        else:
            want_api = None
        if want_api and arvargs.work_api and want_api != arvargs.work_api:
            logger.error('--update-workflow arg {!r} uses {!r} API, but --api={!r} specified'.format(
                arvargs.update_workflow, want_api, arvargs.work_api))
            return 1
        arvargs.work_api = want_api

    if (arvargs.create_workflow or arvargs.update_workflow) and not arvargs.job_order:
        job_order_object = ({}, "")

    add_arv_hints()

    try:
        if api_client is None:
            api_client = arvados.api('v1', model=OrderedJsonModel())
        runner = ArvCwlRunner(api_client, work_api=arvargs.work_api, keep_client=keep_client,
                              output_name=arvargs.output_name, output_tags=arvargs.output_tags)
    except Exception as e:
        logger.error(e)
        return 1

    if arvargs.debug:
        logger.setLevel(logging.DEBUG)

    if arvargs.quiet:
        logger.setLevel(logging.WARN)
        logging.getLogger('arvados.arv-run').setLevel(logging.WARN)

    if arvargs.metrics:
        metrics.setLevel(logging.DEBUG)
        logging.getLogger("cwltool.metrics").setLevel(logging.DEBUG)

    arvargs.conformance_test = None
    arvargs.use_container = True
    arvargs.relax_path_checks = True

    return cwltool.main.main(args=arvargs,
                             stdout=stdout,
                             stderr=stderr,
                             executor=runner.arv_executor,
                             makeTool=runner.arv_make_tool,
                             versionfunc=versionstring,
                             job_order_object=job_order_object,
                             make_fs_access=partial(CollectionFsAccess, api_client=api_client))