Describe the feature

Loading the TrOCR processor ends up calling `torch.hub.load('pytorch/fairseq:main', 'roberta.large')`, which makes fairseq download the GPT-2 BPE vocabulary from `dl.fbaipublicfiles.com` at startup. When that download cannot complete (here because TLS certificate verification fails), the whole executor fails to start:
    raise MaxRetryError(_pool, url, error or ResponseError(cause))
urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='dl.fbaipublicfiles.com', port=443): Max retries exceeded with url: /fairseq/gpt2_bpe/vocab.bpe (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1007)')))

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/opt/venv/lib/python3.10/site-packages/marie/serve/executors/run.py", line 144, in run
    runtime = AsyncNewLoopRuntime(
  File "/opt/venv/lib/python3.10/site-packages/marie/serve/runtimes/asyncio.py", line 92, in __init__
    self._loop.run_until_complete(self.async_setup())
  File "/usr/lib/python3.10/asyncio/base_events.py", line 649, in run_until_complete
    return future.result()
  File "/opt/venv/lib/python3.10/site-packages/marie/serve/runtimes/asyncio.py", line 309, in async_setup
    self.server = self._get_server()
  File "/opt/venv/lib/python3.10/site-packages/marie/serve/runtimes/asyncio.py", line 214, in _get_server
    return GRPCServer(
  File "/opt/venv/lib/python3.10/site-packages/marie/serve/runtimes/servers/grpc.py", line 34, in __init__
    super().__init__(**kwargs)
  File "/opt/venv/lib/python3.10/site-packages/marie/serve/runtimes/servers/__init__.py", line 70, in __init__
    ] = (req_handler or self._get_request_handler())
  File "/opt/venv/lib/python3.10/site-packages/marie/serve/runtimes/servers/__init__.py", line 95, in _get_request_handler
    return self.req_handler_cls(
  File "/opt/venv/lib/python3.10/site-packages/marie/serve/runtimes/worker/request_handling.py", line 140, in __init__
    self._load_executor(
  File "/opt/venv/lib/python3.10/site-packages/marie/serve/runtimes/worker/request_handling.py", line 379, in _load_executor
    self._executor: BaseExecutor = BaseExecutor.load_config(
  File "/opt/venv/lib/python3.10/site-packages/marie/jaml/__init__.py", line 792, in load_config
    obj = JAML.load(tag_yml, substitute=False, runtime_args=runtime_args)
  File "/opt/venv/lib/python3.10/site-packages/marie/jaml/__init__.py", line 174, in load
    r = yaml.load(stream, Loader=get_jina_loader_with_runtime(runtime_args))
  File "/opt/venv/lib/python3.10/site-packages/yaml/__init__.py", line 81, in load
    return loader.get_single_data()
  File "/opt/venv/lib/python3.10/site-packages/yaml/constructor.py", line 51, in get_single_data
    return self.construct_document(node)
  File "/opt/venv/lib/python3.10/site-packages/yaml/constructor.py", line 55, in construct_document
    data = self.construct_object(node)
  File "/opt/venv/lib/python3.10/site-packages/yaml/constructor.py", line 100, in construct_object
    data = constructor(self, node)
  File "/opt/venv/lib/python3.10/site-packages/marie/jaml/__init__.py", line 582, in _from_yaml
    return get_parser(cls, version=data.get('version', None)).parse(
  File "/opt/venv/lib/python3.10/site-packages/marie/jaml/parsers/executor/legacy.py", line 46, in parse
    obj = cls(
  File "/opt/venv/lib/python3.10/site-packages/marie/serve/executors/decorators.py", line 58, in arg_wrapper
    f = func(self, *args, **kwargs)
  File "/opt/venv/lib/python3.10/site-packages/marie/serve/helper.py", line 75, in arg_wrapper
    f = func(self, *args, **kwargs)
  File "/opt/venv/lib/python3.10/site-packages/marie/executor/text/text_extraction_executor.py", line 104, in __init__
    self.pipeline = ExtractPipeline(pipeline_config=pipeline, cuda=has_cuda)
  File "/opt/venv/lib/python3.10/site-packages/marie/pipe/extract_pipeline.py", line 98, in __init__
    self.ocr_engines = get_known_ocr_engines(device=device, engine=self.engine_name)
  File "/opt/venv/lib/python3.10/site-packages/marie/ocr/util.py", line 118, in get_known_ocr_engines
    trocr_processor = TrOcrProcessor(work_dir=ensure_exists("/tmp/icr"), cuda=use_cuda)
  File "/opt/venv/lib/python3.10/site-packages/marie/document/trocr_ocr_processor.py", line 237, in __init__
    ) = init(model_path, beam, device)
  File "/opt/venv/lib/python3.10/site-packages/marie/document/trocr_ocr_processor.py", line 59, in init
    model, cfg, inference_task = fairseq.checkpoint_utils.load_model_ensemble_and_task(
  File "/opt/venv/lib/python3.10/site-packages/fairseq/checkpoint_utils.py", line 502, in load_model_ensemble_and_task
    model = task.build_model(cfg.model, from_checkpoint=True)
  File "/opt/venv/lib/python3.10/site-packages/fairseq/tasks/fairseq_task.py", line 691, in build_model
    model = models.build_model(args, self, from_checkpoint)
  File "/opt/venv/lib/python3.10/site-packages/fairseq/models/__init__.py", line 106, in build_model
    return model.build_model(cfg, task)
  File "/opt/venv/lib/python3.10/site-packages/marie/models/unilm/trocr/trocr_models.py", line 169, in build_model
    roberta = torch.hub.load('pytorch/fairseq:main', 'roberta.large')
  File "/opt/venv/lib/python3.10/site-packages/torch/hub.py", line 568, in load
    model = _load_local(repo_or_dir, model, *args, **kwargs)
  File "/opt/venv/lib/python3.10/site-packages/torch/hub.py", line 597, in _load_local
    model = entry(*args, **kwargs)
  File "/opt/venv/lib/python3.10/site-packages/fairseq/models/roberta/model.py", line 380, in from_pretrained
    return RobertaHubInterface(x["args"], x["task"], x["models"][0])
  File "/opt/venv/lib/python3.10/site-packages/fairseq/models/roberta/hub_interface.py", line 26, in __init__
    self.bpe = encoders.build_bpe(cfg.bpe)
  File "/opt/venv/lib/python3.10/site-packages/fairseq/registry.py", line 65, in build_x
    return builder(cfg, *extra_args, **extra_kwargs)
  File "/opt/venv/lib/python3.10/site-packages/fairseq/data/encoders/gpt2_bpe.py", line 33, in __init__
    vocab_bpe = file_utils.cached_path(cfg.gpt2_vocab_bpe)
  File "/opt/venv/lib/python3.10/site-packages/fairseq/file_utils.py", line 174, in cached_path
    return get_from_cache(url_or_filename, cache_dir)
  File "/opt/venv/lib/python3.10/site-packages/fairseq/file_utils.py", line 299, in get_from_cache
    response = request_wrap_timeout(
  File "/opt/venv/lib/python3.10/site-packages/fairseq/file_utils.py", line 251, in request_wrap_timeout
    return func(timeout=timeout)
  File "/opt/venv/lib/python3.10/site-packages/requests/api.py", line 100, in head
    return request("head", url, **kwargs)
  File "/opt/venv/lib/python3.10/site-packages/requests/api.py", line 59, in request
    return session.request(method=method, url=url, **kwargs)
  File "/opt/venv/lib/python3.10/site-packages/requests/sessions.py", line 589, in request
    resp = self.send(prep, **send_kwargs)
  File "/opt/venv/lib/python3.10/site-packages/requests/sessions.py", line 703, in send
    r = adapter.send(request, **kwargs)
  File "/opt/venv/lib/python3.10/site-packages/requests/adapters.py", line 517, in send
    raise SSLError(e, request=request)
requests.exceptions.SSLError: HTTPSConnectionPool(host='dl.fbaipublicfiles.com', port=443): Max retries exceeded with url: /fairseq/gpt2_bpe/vocab.bpe (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1007)')))
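For context, the download is triggered by the `torch.hub.load('pytorch/fairseq:main', 'roberta.large')` call at `trocr_models.py` line 169 in the traceback; building the `gpt2` BPE for that model fetches `vocab.bpe` (and its companion `encoder.json`) from dl.fbaipublicfiles.com. A minimal reproduction, independent of marie:

```python
# Reproduces the runtime download outside of marie: on a machine with no
# network access (or a broken certificate chain) this raises the same
# requests.exceptions.SSLError / MaxRetryError as above.
import torch

roberta = torch.hub.load('pytorch/fairseq:main', 'roberta.large')
```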
Your proposal

Implement proper caching of the fairseq models and their auxiliary files. As the traceback shows, `fairseq.file_utils.get_from_cache` issues a HEAD request before it ever consults the local cache (apparently to derive the cache filename from the response ETag), so even a previously downloaded `vocab.bpe` does not help when the network or the certificate chain is unavailable. Model loading should resolve these files from a local cache first and only touch the network on a cache miss.
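A minimal sketch of what such caching could look like. Everything below is illustrative, not the existing marie or fairseq API: `ensure_gpt2_bpe_files` and the cache directory are hypothetical, and forwarding `gpt2_encoder_json` / `gpt2_vocab_bpe` as overrides relies on fairseq applying `from_pretrained` kwargs onto the BPE config, which may differ across fairseq versions.

```python
# Illustrative sketch only: ensure_gpt2_bpe_files() and the cache layout are
# hypothetical helpers, not part of marie or fairseq today.
import os
import urllib.request

GPT2_BPE_URLS = {
    # The two files fairseq's gpt2 BPE fetches from dl.fbaipublicfiles.com.
    "encoder.json": "https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json",
    "vocab.bpe": "https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe",
}


def ensure_gpt2_bpe_files(cache_dir: str) -> dict[str, str]:
    """Download the GPT-2 BPE files once; later calls are pure cache hits."""
    os.makedirs(cache_dir, exist_ok=True)
    paths = {}
    for name, url in GPT2_BPE_URLS.items():
        path = os.path.join(cache_dir, name)
        if not os.path.exists(path):  # only touch the network on a cache miss
            urllib.request.urlretrieve(url, path)
        paths[name] = path
    return paths


# In trocr_models.build_model, the local paths could then be forwarded so
# fairseq never issues the HEAD request that fails in the traceback above
# (assumes fairseq applies these kwargs as config overrides):
#   bpe_files = ensure_gpt2_bpe_files("/mnt/model_zoo/fairseq/gpt2_bpe")
#   roberta = torch.hub.load(
#       'pytorch/fairseq:main', 'roberta.large',
#       gpt2_encoder_json=bpe_files["encoder.json"],
#       gpt2_vocab_bpe=bpe_files["vocab.bpe"],
#   )
```

With local paths, `fairseq.file_utils.cached_path` returns the file directly instead of going through `get_from_cache`, so startup no longer depends on dl.fbaipublicfiles.com being reachable.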