You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
"""
Traceback (most recent call last):
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\multiprocess\pool.py", line 125, in worker
result = (True, func(*args, **kwds))
^^^^^^^^^^^^^^^^^^^
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\datasets\utils\py_utils.py", line 680, in _write_generator_to_queue
for i, result in enumerate(func(**kwargs)):
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\datasets\arrow_dataset.py", line 3516, in _map_single
for i, batch in iter_outputs(shard_iterable):
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\datasets\arrow_dataset.py", line 3466, in iter_outputs
yield i, apply_function(example, i, offset=offset)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\datasets\arrow_dataset.py", line 3389, in apply_function
processed_inputs = function(*fn_args, *additional_args, **fn_kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\76425\Desktop\LLaMA-Factory\src\llamafactory\data\processor\supervised.py", line 99, in preprocess_dataset
input_ids, labels = self._encode_data_example(
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\76425\Desktop\LLaMA-Factory\src\llamafactory\data\processor\supervised.py", line 43, in _encode_data_example
messages = self.template.mm_plugin.process_messages(prompt + response, images, videos, audios, self.processor)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\76425\Desktop\LLaMA-Factory\src\llamafactory\data\mm_plugin.py", line 1584, in process_messages
self._validate_input(processor, images, videos, audios)
raise ValueError("Processor was not found, please check and update your processor config.")
ValueError: Processor was not found, please check and update your processor config.
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<frozen runpy>", line 198, in _run_module_as_main
File "<frozen runpy>", line 88, in _run_code
File "C:\Users\76425.conda\envs\torch\Scripts\llamafactory-cli.exe\__main__.py", line 7, in <module>
File "C:\Users\76425\Desktop\LLaMA-Factory\src\llamafactory\cli.py", line 115, in main
COMMAND_MAP[command]()
File "C:\Users\76425\Desktop\LLaMA-Factory\src\llamafactory\train\tuner.py", line 107, in run_exp
_training_function(config={"args": args, "callbacks": callbacks})
File "C:\Users\76425\Desktop\LLaMA-Factory\src\llamafactory\train\tuner.py", line 69, in _training_function
run_sft(model_args, data_args, training_args, finetuning_args, generating_args, callbacks)
File "C:\Users\76425\Desktop\LLaMA-Factory\src\llamafactory\train\sft\workflow.py", line 51, in run_sft
dataset_module = get_dataset(template, model_args, data_args, training_args, stage="sft", **tokenizer_module)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\76425\Desktop\LLaMA-Factory\src\llamafactory\data\loader.py", line 310, in get_dataset
dataset = _get_preprocessed_dataset(
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\76425\Desktop\LLaMA-Factory\src\llamafactory\data\loader.py", line 256, in _get_preprocessed_dataset
dataset = dataset.map(
^^^^^^^^^^^^
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\datasets\arrow_dataset.py", line 557, in wrapper
out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\datasets\arrow_dataset.py", line 3166, in map
for rank, done, content in iflatmap_unordered(
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\datasets\utils\py_utils.py", line 720, in iflatmap_unordered
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\datasets\utils\py_utils.py", line 720, in <listcomp>
[async_result.get(timeout=0.05) for async_result in async_results]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\multiprocess\pool.py", line 774, in get
raise self._value
ValueError: Processor was not found, please check and update your processor config.
Reminder
System Info
llamafactory version: 0.9.3.dev0
Python version: 3.11.11
PyTorch version: 2.5.1+cu124
Transformers version: 4.52.0.dev0
Reproduction
"""
Traceback (most recent call last):
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\multiprocess\pool.py", line 125, in worker
result = (True, func(*args, **kwds))
^^^^^^^^^^^^^^^^^^^
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\datasets\utils\py_utils.py", line 680, in _write_generator_to_queue
for i, result in enumerate(func(**kwargs)):
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\datasets\arrow_dataset.py", line 3516, in _map_single
for i, batch in iter_outputs(shard_iterable):
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\datasets\arrow_dataset.py", line 3466, in iter_outputs
yield i, apply_function(example, i, offset=offset)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\datasets\arrow_dataset.py", line 3389, in apply_function
processed_inputs = function(*fn_args, *additional_args, **fn_kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\76425\Desktop\LLaMA-Factory\src\llamafactory\data\processor\supervised.py", line 99, in preprocess_dataset
input_ids, labels = self._encode_data_example(
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\76425\Desktop\LLaMA-Factory\src\llamafactory\data\processor\supervised.py", line 43, in _encode_data_example
messages = self.template.mm_plugin.process_messages(prompt + response, images, videos, audios, self.processor)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\76425\Desktop\LLaMA-Factory\src\llamafactory\data\mm_plugin.py", line 1584, in process_messages
self._validate_input(processor, images, videos, audios)
raise ValueError("Processor was not found, please check and update your processor config.")
ValueError: Processor was not found, please check and update your processor config.
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<frozen runpy>", line 198, in _run_module_as_main
File "<frozen runpy>", line 88, in _run_code
File "C:\Users\76425.conda\envs\torch\Scripts\llamafactory-cli.exe\__main__.py", line 7, in <module>
File "C:\Users\76425\Desktop\LLaMA-Factory\src\llamafactory\cli.py", line 115, in main
COMMAND_MAP[command]()
File "C:\Users\76425\Desktop\LLaMA-Factory\src\llamafactory\train\tuner.py", line 107, in run_exp
_training_function(config={"args": args, "callbacks": callbacks})
File "C:\Users\76425\Desktop\LLaMA-Factory\src\llamafactory\train\tuner.py", line 69, in _training_function
run_sft(model_args, data_args, training_args, finetuning_args, generating_args, callbacks)
File "C:\Users\76425\Desktop\LLaMA-Factory\src\llamafactory\train\sft\workflow.py", line 51, in run_sft
dataset_module = get_dataset(template, model_args, data_args, training_args, stage="sft", **tokenizer_module)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\76425\Desktop\LLaMA-Factory\src\llamafactory\data\loader.py", line 310, in get_dataset
dataset = _get_preprocessed_dataset(
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\76425\Desktop\LLaMA-Factory\src\llamafactory\data\loader.py", line 256, in _get_preprocessed_dataset
dataset = dataset.map(
^^^^^^^^^^^^
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\datasets\arrow_dataset.py", line 557, in wrapper
out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\datasets\arrow_dataset.py", line 3166, in map
for rank, done, content in iflatmap_unordered(
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\datasets\utils\py_utils.py", line 720, in iflatmap_unordered
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\datasets\utils\py_utils.py", line 720, in <listcomp>
[async_result.get(timeout=0.05) for async_result in async_results]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\76425.conda\envs\torch\Lib\site-packages\multiprocess\pool.py", line 774, in get
raise self._value
ValueError: Processor was not found, please check and update your processor config.
Others
Following the transformers instructions, I installed the development version with `pip install git+https://github.com/huggingface/[email protected]`, but the error still occurs.
The text was updated successfully, but these errors were encountered: