You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
File "/opt/venv/lib/python3.10/site-packages/marie/models/unilm/trocr/trocr_models.py", line 169, in build_model
roberta = torch.hub.load('pytorch/fairseq:main', 'roberta.large')
Full stack trace:
⠏ Waiting extract_t... ━━━━━━━━━━━━━━━━━━━━╺━━━━━━━━━━━━━━━━━━━ 1/2 0:00:31ERROR extract_t/rep-2@45 <HTTPError 403: 'rate limit exceeded'> during 'WorkerRuntime' initialization
add "--quiet-error" to suppress the exception details
Traceback (most recent call last):
File "/opt/venv/lib/python3.10/site-packages/marie/serve/executors/run.py", line 141, in run
runtime = AsyncNewLoopRuntime(
File "/opt/venv/lib/python3.10/site-packages/marie/serve/runtimes/asyncio.py", line 82, in __init__
self._loop.run_until_complete(self.async_setup())
File "/usr/lib/python3.10/asyncio/base_events.py", line 646, in run_until_complete
return future.result()
File "/opt/venv/lib/python3.10/site-packages/marie/serve/runtimes/asyncio.py", line 276, in async_setup
self.server = self._get_server()
File "/opt/venv/lib/python3.10/site-packages/marie/serve/runtimes/asyncio.py", line 185, in _get_server
return GRPCServer(
File "/opt/venv/lib/python3.10/site-packages/marie/serve/runtimes/servers/grpc.py", line 31, in __init__
super().__init__(**kwargs)
File "/opt/venv/lib/python3.10/site-packages/marie/serve/runtimes/servers/__init__.py", line 56, in __init__
self._request_handler = req_handler or self._get_request_handler()
File "/opt/venv/lib/python3.10/site-packages/marie/serve/runtimes/servers/__init__.py", line 81, in _get_request_handler
return self.req_handler_cls(
File "/opt/venv/lib/python3.10/site-packages/marie/serve/runtimes/worker/request_handling.py", line 136, in __init__
self._load_executor(
File "/opt/venv/lib/python3.10/site-packages/marie/serve/runtimes/worker/request_handling.py", line 340, in _load_executor
self._executor: BaseExecutor = BaseExecutor.load_config(
File "/opt/venv/lib/python3.10/site-packages/marie/jaml/__init__.py", line 792, in load_config
obj = JAML.load(tag_yml, substitute=False, runtime_args=runtime_args)
File "/opt/venv/lib/python3.10/site-packages/marie/jaml/__init__.py", line 174, in load
r = yaml.load(stream, Loader=get_jina_loader_with_runtime(runtime_args))
File "/opt/venv/lib/python3.10/site-packages/yaml/__init__.py", line 81, in load
return loader.get_single_data()
File "/opt/venv/lib/python3.10/site-packages/yaml/constructor.py", line 51, in get_single_data
return self.construct_document(node)
File "/opt/venv/lib/python3.10/site-packages/yaml/constructor.py", line 55, in construct_document
data = self.construct_object(node)
File "/opt/venv/lib/python3.10/site-packages/yaml/constructor.py", line 100, in construct_object
data = constructor(self, node)
File "/opt/venv/lib/python3.10/site-packages/marie/jaml/__init__.py", line 582, in _from_yaml
return get_parser(cls, version=data.get('version', None)).parse(
File "/opt/venv/lib/python3.10/site-packages/marie/jaml/parsers/executor/legacy.py", line 46, in parse
obj = cls(
File "/opt/venv/lib/python3.10/site-packages/marie/serve/executors/decorators.py", line 58, in arg_wrapper
f = func(self, *args, **kwargs)
File "/opt/venv/lib/python3.10/site-packages/marie/serve/helper.py", line 74, in arg_wrapper
f = func(self, *args, **kwargs)
File "/opt/venv/lib/python3.10/site-packages/marie/executor/text/text_extraction_executor.py", line 35, in __init__
self.pipeline = ExtractPipeline(cuda=use_cuda)
File "/opt/venv/lib/python3.10/site-packages/marie/ocr/extract_pipeline.py", line 121, in __init__
self.ocr_engine = DefaultOcrEngine(cuda=use_cuda)
File "/opt/venv/lib/python3.10/site-packages/marie/ocr/default_ocr_engine.py", line 61, in __init__
self.icr_processor = TrOcrIcrProcessor(work_dir=work_dir_icr, cuda=has_cuda)
File "/opt/venv/lib/python3.10/site-packages/marie/document/trocr_icr_processor.py", line 228, in __init__
) = init(model_path, beam, device)
File "/opt/venv/lib/python3.10/site-packages/marie/document/trocr_icr_processor.py", line 57, in init
model, cfg, inference_task = fairseq.checkpoint_utils.load_model_ensemble_and_task(
File "/opt/venv/lib/python3.10/site-packages/fairseq/checkpoint_utils.py", line 484, in load_model_ensemble_and_task
model = task.build_model(cfg.model, from_checkpoint=True)
File "/opt/venv/lib/python3.10/site-packages/fairseq/tasks/fairseq_task.py", line 691, in build_model
model = models.build_model(args, self, from_checkpoint)
File "/opt/venv/lib/python3.10/site-packages/fairseq/models/__init__.py", line 106, in build_model
return model.build_model(cfg, task)
File "/opt/venv/lib/python3.10/site-packages/marie/models/unilm/trocr/trocr_models.py", line 169, in build_model
roberta = torch.hub.load('pytorch/fairseq:main', 'roberta.large')
File "/opt/venv/lib/python3.10/site-packages/torch/hub.py", line 562, in load
repo_or_dir = _get_cache_or_reload(repo_or_dir, force_reload, trust_repo, "load",
File "/opt/venv/lib/python3.10/site-packages/torch/hub.py", line 229, in _get_cache_or_reload
_validate_not_a_forked_repo(repo_owner, repo_name, ref)
File "/opt/venv/lib/python3.10/site-packages/torch/hub.py", line 188, in _validate_not_a_forked_repo
response = json.loads(_read_url(Request(url, headers=headers)))
File "/opt/venv/lib/python3.10/site-packages/torch/hub.py", line 171, in _read_url
with urlopen(url) as r:
File "/usr/lib/python3.10/urllib/request.py", line 216, in urlopen
return opener.open(url, data, timeout)
File "/usr/lib/python3.10/urllib/request.py", line 525, in open
response = meth(req, response)
File "/usr/lib/python3.10/urllib/request.py", line 634, in http_response
response = self.parent.error(
File "/usr/lib/python3.10/urllib/request.py", line 563, in error
return self._call_chain(*args)
File "/usr/lib/python3.10/urllib/request.py", line 496, in _call_chain
result = func(*args)
File "/usr/lib/python3.10/urllib/request.py", line 643, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 403: rate limit exceeded
INFO marie@47 Box processor [dit, cuda=True]
The text was updated successfully, but these errors were encountered:
When launching a cluster of GPU servers, we can exceed the number of requests allowed by torch.hub's GitHub API check and get the following error:
HTTPError 403: 'rate limit exceeded'
This is possibly related to issue #4156, "HTTP Error 403: rate limit exceeded when loading model".
The offending section:
Full stack trace:
The text was updated successfully, but these errors were encountered: