Beatzor:
Hi everyone!
Victor91rus:
It could be the memory, or it could be the MC, or maybe a contact has simply gone bad and the hardware needs to be reseated.
Beatzor:
MC?
Victor91rus:
Beatzor, if there are warranty seals, you will have to take everything to the store in any case. They will most likely replace the RAM for you there.
Beatzor:
Thanks. I will wait for the quarantine to end and then take it in.
Victor91rus:
Beatzor, for the duration of the quarantine you can lower the memory frequency in the BIOS. That may get rid of the errors.
<TopUpdate>:
Beatzor wrote: "MC?" That is the memory controller, which sits inside the processor.
Beatzor:
Thanks. The topic can be deleted.
sahaprof:
Beatzor wrote: "Question: will this affect the warranty?" With a defect like that it will not be accepted under warranty. The only advice is to buy a new memory stick.
Beatzor:
I forgot to post an update here. The stick was successfully replaced under warranty. A chip in the PCB does not affect anything.
—
I am getting a "cannot allocate memory" error. I am using a Jupyter notebook on GCP with 4 vCPUs, 15 GB RAM, and 2x NVIDIA Tesla K80. I have to restart the kernel every time and cannot do more than two inferences without restarting it.
05/27/2021 15:18:51 - INFO - farm.utils - Using device: CUDA
05/27/2021 15:18:51 - INFO - farm.utils - Number of GPUs: 2
05/27/2021 15:18:51 - INFO - farm.utils - Distributed Training: False
05/27/2021 15:18:51 - INFO - farm.utils - Automatic Mixed Precision: None
05/27/2021 15:18:59 - WARNING - farm.modeling.prediction_head - Some unused parameters are passed to the QuestionAnsweringHead. Might not be a problem. Params: {"training": true, "num_labels": 2, "ph_output_type": "per_token_squad", "model_type": "span_classification", "label_tensor_name": "question_answering_label_ids", "label_list": ["start_token", "end_token"], "metric": "squad", "name": "QuestionAnsweringHead"}
05/27/2021 15:18:59 - WARNING - farm.utils - ML Logging is turned off. No parameters, metrics or artifacts will be logged to MLFlow.
05/27/2021 15:18:59 - INFO - farm.utils - Using device: CUDA
05/27/2021 15:18:59 - INFO - farm.utils - Number of GPUs: 2
05/27/2021 15:18:59 - INFO - farm.utils - Distributed Training: False
05/27/2021 15:18:59 - INFO - farm.utils - Automatic Mixed Precision: None
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-5-0dfe5ccb7c33> in <module>
4
5 torch.cuda.empty_cache()
----> 6 reader=FARMReader('./models/')
7 reader.predict("Can i Install outside", documents=[test])
~/.local/lib/python3.7/site-packages/haystack/reader/farm.py in __init__(self, model_name_or_path, model_version, context_window_size, batch_size, use_gpu, no_ans_boost, return_no_answer, top_k, top_k_per_candidate, top_k_per_sample, num_processes, max_seq_len, doc_stride, progress_bar)
101 doc_stride=doc_stride, num_processes=num_processes, revision=model_version,
102 disable_tqdm=not progress_bar,
--> 103 strict=False)
104 self.inferencer.model.prediction_heads[0].context_window_size = context_window_size
105 self.inferencer.model.prediction_heads[0].no_ans_boost = no_ans_boost
~/.local/lib/python3.7/site-packages/farm/infer.py in load(cls, model_name_or_path, revision, batch_size, gpu, task_type, return_class_probs, strict, max_seq_len, doc_stride, extraction_layer, extraction_strategy, s3e_stats, num_processes, disable_tqdm, tokenizer_class, use_fast, tokenizer_args, multithreading_rust, dummy_ph, benchmarking)
299 disable_tqdm=disable_tqdm,
300 benchmarking=benchmarking,
--> 301 dummy_ph=dummy_ph
302 )
303
~/.local/lib/python3.7/site-packages/farm/infer.py in __init__(self, *args, **kwargs)
671 class QAInferencer(Inferencer):
672 def __init__(self, *args, **kwargs):
--> 673 super().__init__(*args, **kwargs)
674 if self.task_type != "question_answering":
675 logger.warning("QAInferencer always has task_type='question_answering' even if another value is provided "
~/.local/lib/python3.7/site-packages/farm/infer.py in __init__(self, model, processor, task_type, batch_size, gpu, name, return_class_probs, extraction_strategy, extraction_layer, s3e_stats, num_processes, disable_tqdm, benchmarking, dummy_ph)
149 set_all_seeds(42)
150
--> 151 self._set_multiprocessing_pool(num_processes)
152
153 @classmethod
~/.local/lib/python3.7/site-packages/farm/infer.py in _set_multiprocessing_pool(self, num_processes)
326 else:
327 num_processes = mp.cpu_count()
--> 328 self.process_pool = mp.Pool(processes=num_processes)
329 logger.info(
330 f"Got ya {num_processes} parallel workers to do inference ..."
/opt/conda/lib/python3.7/multiprocessing/context.py in Pool(self, processes, initializer, initargs, maxtasksperchild)
117 from .pool import Pool
118 return Pool(processes, initializer, initargs, maxtasksperchild,
--> 119 context=self.get_context())
120
121 def RawValue(self, typecode_or_type, *args):
/opt/conda/lib/python3.7/multiprocessing/pool.py in __init__(self, processes, initializer, initargs, maxtasksperchild, context)
174 self._processes = processes
175 self._pool = []
--> 176 self._repopulate_pool()
177
178 self._worker_handler = threading.Thread(
/opt/conda/lib/python3.7/multiprocessing/pool.py in _repopulate_pool(self)
239 w.name = w.name.replace('Process', 'PoolWorker')
240 w.daemon = True
--> 241 w.start()
242 util.debug('added worker')
243
/opt/conda/lib/python3.7/multiprocessing/process.py in start(self)
110 'daemonic processes are not allowed to have children'
111 _cleanup()
--> 112 self._popen = self._Popen(self)
113 self._sentinel = self._popen.sentinel
114 # Avoid a refcycle if the target function holds an indirect
/opt/conda/lib/python3.7/multiprocessing/context.py in _Popen(process_obj)
275 def _Popen(process_obj):
276 from .popen_fork import Popen
--> 277 return Popen(process_obj)
278
279 class SpawnProcess(process.BaseProcess):
/opt/conda/lib/python3.7/multiprocessing/popen_fork.py in __init__(self, process_obj)
18 self.returncode = None
19 self.finalizer = None
---> 20 self._launch(process_obj)
21
22 def duplicate_for_child(self, fd):
/opt/conda/lib/python3.7/multiprocessing/popen_fork.py in _launch(self, process_obj)
68 code = 1
69 parent_r, child_w = os.pipe()
---> 70 self.pid = os.fork()
71 if self.pid == 0:
72 try:
OSError: [Errno 12] Cannot allocate memory
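The traceback ends in `_set_multiprocessing_pool`: every `FARMReader(...)` call builds a `multiprocessing.Pool` with one worker per CPU, and each worker is started via `os.fork()`, which fails with Errno 12 once the notebook process (already holding the model) has grown too large. Below is a minimal sketch of one workaround, assuming the `num_processes` parameter visible in the traceback signature disables the pool when set to 0, as the Haystack/FARM docstrings describe; the model path and the `test` document are taken from the original snippet.

```python
from haystack.reader.farm import FARMReader

# Sketch, not a verified fix: build the reader ONCE per kernel session.
# num_processes=0 asks FARM's Inferencer not to create a multiprocessing pool,
# so no os.fork() happens and the Errno 12 path in the traceback is avoided.
# (Check the FARMReader docstring of your installed version; the exact value
# that means "no pool" may differ between releases.)
reader = FARMReader(model_name_or_path="./models/", num_processes=0)

# Reuse the same reader object for every query instead of re-instantiating it;
# each new FARMReader reloads the model and forks a fresh worker pool, which
# is what exhausts the 15 GB of host RAM after a couple of cells.
result = reader.predict("Can i Install outside", documents=[test])
print(result)
```

If multiprocessing is still wanted, keeping one long-lived reader for the whole session (created in a single cell and reused everywhere) avoids paying the fork cost, and therefore the memory spike, on every inference.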