The full dataset viewer is not available; only a preview of the rows is shown. Dataset generation failed with the error below.
Error code: DatasetGenerationError
Exception: ArrowNotImplementedError
Message: Cannot write struct type 'model_kwargs' with no child field to Parquet. Consider adding a dummy child field.

Traceback (most recent call last):
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 2013, in _prepare_split_single
    writer.write_table(table)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 583, in write_table
    self._build_writer(inferred_schema=pa_table.schema)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 404, in _build_writer
    self.pa_writer = self._WRITER_CLASS(self.stream, schema)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/pyarrow/parquet/core.py", line 1010, in __init__
    self.writer = _parquet.ParquetWriter(
  File "pyarrow/_parquet.pyx", line 2157, in pyarrow._parquet.ParquetWriter.__cinit__
  File "pyarrow/error.pxi", line 154, in pyarrow.lib.pyarrow_internal_check_status
  File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status
pyarrow.lib.ArrowNotImplementedError: Cannot write struct type 'model_kwargs' with no child field to Parquet. Consider adding a dummy child field.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 2029, in _prepare_split_single
    num_examples, num_bytes = writer.finalize()
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 602, in finalize
    self._build_writer(self.schema)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 404, in _build_writer
    self.pa_writer = self._WRITER_CLASS(self.stream, schema)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/pyarrow/parquet/core.py", line 1010, in __init__
    self.writer = _parquet.ParquetWriter(
  File "pyarrow/_parquet.pyx", line 2157, in pyarrow._parquet.ParquetWriter.__cinit__
  File "pyarrow/error.pxi", line 154, in pyarrow.lib.pyarrow_internal_check_status
  File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status
pyarrow.lib.ArrowNotImplementedError: Cannot write struct type 'model_kwargs' with no child field to Parquet. Consider adding a dummy child field.

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1396, in compute_config_parquet_and_info_response
    parquet_operations = convert_to_parquet(builder)
  File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1045, in convert_to_parquet
    builder.download_and_prepare(
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1029, in download_and_prepare
    self._download_and_prepare(
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1124, in _download_and_prepare
    self._prepare_split(split_generator, **prepare_split_kwargs)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1884, in _prepare_split
    for job_id, done, content in self._prepare_split_single(
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 2040, in _prepare_split_single
    raise DatasetGenerationError("An error occurred while generating the dataset") from e
datasets.exceptions.DatasetGenerationError: An error occurred while generating the dataset
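The failure comes from columns such as model_kwargs (and the other empty JSON objects in the rows below, e.g. torch_compile_config, quantization_config), which are inferred as Arrow structs with no child fields, a type Parquet cannot serialize. A minimal sketch of the issue and of the workaround the error message suggests; the in-memory buffers and the `_dummy` field name are illustrative, not part of the dataset:

```python
import io

import pyarrow as pa
import pyarrow.parquet as pq

# Empty dicts such as "model_kwargs": {} are inferred as a struct type with no
# child fields, which Parquet cannot represent.
empty_struct = pa.array([{}], type=pa.struct([]))
table = pa.table({"model_kwargs": empty_struct})

try:
    pq.write_table(table, io.BytesIO())
except pa.ArrowNotImplementedError as err:
    print(err)  # Cannot write struct type 'model_kwargs' with no child field to Parquet. ...

# Workaround suggested by the error: give the struct a dummy child field
# (the "_dummy" name and int8 type are arbitrary choices for illustration).
patched = pa.array([{"_dummy": None}], type=pa.struct([("_dummy", pa.int8())]))
pq.write_table(pa.table({"model_kwargs": patched}), io.BytesIO())
```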
config (dict) | report (dict) | name (string) | backend (dict) | scenario (dict) | launcher (dict) | environment (dict) | load (dict) | forward (dict)
---|---|---|---|---|---|---|---|---
{
"name": "2024-09-30-20-07-10/pytorch",
"backend": {
"name": "pytorch",
"version": "2.4.1",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "feature-extraction",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": false,
"device_map": null,
"torch_dtype": "float32",
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 10,
"duration": 10,
"warmup_runs": 10,
"input_shapes": {
"batch_size": 2,
"num_choices": 2,
"sequence_length": 16
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": false,
"forward_kwargs": {},
"generate_kwargs": {
"max_new_tokens": 32,
"min_new_tokens": 32
},
"call_kwargs": {}
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": null,
"numactl": true,
"numactl_kwargs": {
"cpunodebind": 0,
"membind": 0
},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7R13 Processor",
"cpu_count": 64,
"cpu_ram_mb": 529717.026816,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.205-195.807.amzn2.x86_64-x86_64-with-glibc2.36",
"processor": "",
"python_version": "3.10.15",
"optimum_benchmark_version": "0.5.0",
"optimum_benchmark_commit": null,
"transformers_version": "4.44.2",
"transformers_commit": null,
"accelerate_version": "0.34.2",
"accelerate_commit": null,
"diffusers_version": null,
"diffusers_commit": null,
"optimum_version": "1.22.0",
"optimum_commit": null,
"timm_version": null,
"timm_commit": null,
"peft_version": null,
"peft_commit": null
}
} | {
"load": {
"memory": {
"unit": "MB",
"max_ram": 681.046016,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
3.966448713093996
],
"count": 1,
"total": 3.966448713093996,
"mean": 3.966448713093996,
"p50": 3.966448713093996,
"p90": 3.966448713093996,
"p95": 3.966448713093996,
"p99": 3.966448713093996,
"stdev": 0,
"stdev_": 0
},
"throughput": null,
"energy": null,
"efficiency": null
},
"forward": {
"memory": {
"unit": "MB",
"max_ram": 1118.568448,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
0.031664811074733734,
0.09032496809959412,
0.08789736032485962,
0.08879967033863068,
0.03201989829540253,
0.08423884212970734,
0.09121345728635788,
0.08818551898002625,
0.10675350204110146,
0.19785945490002632,
0.0942402333021164,
0.04522080719470978,
0.08535459265112877,
0.08885210379958153,
0.0986480675637722,
0.09995739534497261,
0.09575177729129791,
0.09239067509770393,
0.03208519145846367,
0.09070052206516266,
0.10381597653031349,
0.09207630902528763,
0.10417983680963516,
0.0865817703306675,
0.09156954288482666,
0.040636464953422546,
0.08521194010972977,
0.08829500526189804,
0.08948914706707001,
0.1023632176220417,
0.03461951017379761,
0.0840756930410862,
0.0945257768034935,
0.08783899620175362,
0.09395387768745422,
0.034185975790023804,
0.09510071203112602,
0.0886068120598793,
0.08157702162861824,
0.09246017038822174,
0.031726278364658356,
0.08865620940923691,
0.08059243112802505,
0.032624177634716034,
0.09196900576353073,
0.0810699611902237,
0.032106757164001465,
0.08851849660277367,
0.08451725915074348,
0.08841004222631454,
0.03747054189443588,
0.0800025649368763,
0.08907618746161461,
0.031612418591976166,
0.0850784033536911,
0.08847757801413536,
0.03188742697238922,
0.08446324989199638,
0.08871014416217804,
0.08404295146465302,
0.031542208045721054,
0.0881211906671524,
0.08473438397049904,
0.032881759107112885,
0.08916052803397179,
0.08412286266684532,
0.09036565572023392,
0.03156974911689758,
0.08749698475003242,
0.09203888103365898,
0.10392692685127258,
0.09789970889687538,
0.08869035914540291,
0.03473401442170143,
0.09474534913897514,
0.09939325973391533,
0.09798290580511093,
0.0987880527973175,
0.0935584120452404,
0.0831904374063015,
0.10094492882490158,
0.031647149473428726,
0.08074774593114853,
0.08792132139205933,
0.03603678196668625,
0.09554637596011162,
0.08905418589711189,
0.0875939279794693,
0.09969789162278175,
0.09206515178084373,
0.10020159929990768,
0.03647914156317711,
0.15708202123641968,
0.039025597274303436,
0.0864202082157135,
0.08877819776535034,
0.0871780626475811,
0.0955420508980751,
0.033497247844934464,
0.08007903769612312,
0.08830239623785019,
0.03901897743344307,
0.09567869827151299,
0.09157699719071388,
0.09119665995240211,
0.09682190045714378,
0.033938050270080566,
0.08098247274756432,
0.08844276517629623,
0.09172916784882545,
0.08812952414155006,
0.032098960131406784,
0.08067170158028603,
0.09230662509799004,
0.04153048247098923,
0.08251176029443741,
0.09716947749257088,
0.08605539053678513,
0.09076864272356033,
0.04387693107128143,
0.0814075917005539,
0.08873757347464561,
0.09102284908294678,
0.09644880890846252,
0.09592880681157112,
0.10283755138516426
],
"count": 126,
"total": 10.094007708132267,
"mean": 0.08011117228676402,
"p50": 0.08842640370130539,
"p90": 0.09909065626561642,
"p95": 0.10271896794438362,
"p99": 0.14449989143759012,
"stdev": 0.026657956278453555,
"stdev_": 33.276202953352595
},
"throughput": {
"unit": "samples/s",
"value": 24.965306871816182
},
"energy": null,
"efficiency": null
}
} | null | null | null | null | null | null | null |
null | null | 2024-09-30-20-07-10/pytorch | {
"name": "pytorch",
"version": "2.4.1",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "feature-extraction",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": false,
"device_map": null,
"torch_dtype": "float32",
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
} | {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 10,
"duration": 10,
"warmup_runs": 10,
"input_shapes": {
"batch_size": 2,
"num_choices": 2,
"sequence_length": 16
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": false,
"forward_kwargs": {},
"generate_kwargs": {
"max_new_tokens": 32,
"min_new_tokens": 32
},
"call_kwargs": {}
} | {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": null,
"numactl": true,
"numactl_kwargs": {
"cpunodebind": 0,
"membind": 0
},
"start_method": "spawn"
} | {
"cpu": " AMD EPYC 7R13 Processor",
"cpu_count": 64,
"cpu_ram_mb": 529717.026816,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.205-195.807.amzn2.x86_64-x86_64-with-glibc2.36",
"processor": "",
"python_version": "3.10.15",
"optimum_benchmark_version": "0.5.0",
"optimum_benchmark_commit": null,
"transformers_version": "4.44.2",
"transformers_commit": null,
"accelerate_version": "0.34.2",
"accelerate_commit": null,
"diffusers_version": null,
"diffusers_commit": null,
"optimum_version": "1.22.0",
"optimum_commit": null,
"timm_version": null,
"timm_commit": null,
"peft_version": null,
"peft_commit": null
} | null | null |
null | null | null | null | null | null | null | {
"memory": {
"unit": "MB",
"max_ram": 681.046016,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
3.966448713093996
],
"count": 1,
"total": 3.966448713093996,
"mean": 3.966448713093996,
"p50": 3.966448713093996,
"p90": 3.966448713093996,
"p95": 3.966448713093996,
"p99": 3.966448713093996,
"stdev": 0,
"stdev_": 0
},
"throughput": null,
"energy": null,
"efficiency": null
} | {
"memory": {
"unit": "MB",
"max_ram": 1118.568448,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
0.031664811074733734,
0.09032496809959412,
0.08789736032485962,
0.08879967033863068,
0.03201989829540253,
0.08423884212970734,
0.09121345728635788,
0.08818551898002625,
0.10675350204110146,
0.19785945490002632,
0.0942402333021164,
0.04522080719470978,
0.08535459265112877,
0.08885210379958153,
0.0986480675637722,
0.09995739534497261,
0.09575177729129791,
0.09239067509770393,
0.03208519145846367,
0.09070052206516266,
0.10381597653031349,
0.09207630902528763,
0.10417983680963516,
0.0865817703306675,
0.09156954288482666,
0.040636464953422546,
0.08521194010972977,
0.08829500526189804,
0.08948914706707001,
0.1023632176220417,
0.03461951017379761,
0.0840756930410862,
0.0945257768034935,
0.08783899620175362,
0.09395387768745422,
0.034185975790023804,
0.09510071203112602,
0.0886068120598793,
0.08157702162861824,
0.09246017038822174,
0.031726278364658356,
0.08865620940923691,
0.08059243112802505,
0.032624177634716034,
0.09196900576353073,
0.0810699611902237,
0.032106757164001465,
0.08851849660277367,
0.08451725915074348,
0.08841004222631454,
0.03747054189443588,
0.0800025649368763,
0.08907618746161461,
0.031612418591976166,
0.0850784033536911,
0.08847757801413536,
0.03188742697238922,
0.08446324989199638,
0.08871014416217804,
0.08404295146465302,
0.031542208045721054,
0.0881211906671524,
0.08473438397049904,
0.032881759107112885,
0.08916052803397179,
0.08412286266684532,
0.09036565572023392,
0.03156974911689758,
0.08749698475003242,
0.09203888103365898,
0.10392692685127258,
0.09789970889687538,
0.08869035914540291,
0.03473401442170143,
0.09474534913897514,
0.09939325973391533,
0.09798290580511093,
0.0987880527973175,
0.0935584120452404,
0.0831904374063015,
0.10094492882490158,
0.031647149473428726,
0.08074774593114853,
0.08792132139205933,
0.03603678196668625,
0.09554637596011162,
0.08905418589711189,
0.0875939279794693,
0.09969789162278175,
0.09206515178084373,
0.10020159929990768,
0.03647914156317711,
0.15708202123641968,
0.039025597274303436,
0.0864202082157135,
0.08877819776535034,
0.0871780626475811,
0.0955420508980751,
0.033497247844934464,
0.08007903769612312,
0.08830239623785019,
0.03901897743344307,
0.09567869827151299,
0.09157699719071388,
0.09119665995240211,
0.09682190045714378,
0.033938050270080566,
0.08098247274756432,
0.08844276517629623,
0.09172916784882545,
0.08812952414155006,
0.032098960131406784,
0.08067170158028603,
0.09230662509799004,
0.04153048247098923,
0.08251176029443741,
0.09716947749257088,
0.08605539053678513,
0.09076864272356033,
0.04387693107128143,
0.0814075917005539,
0.08873757347464561,
0.09102284908294678,
0.09644880890846252,
0.09592880681157112,
0.10283755138516426
],
"count": 126,
"total": 10.094007708132267,
"mean": 0.08011117228676402,
"p50": 0.08842640370130539,
"p90": 0.09909065626561642,
"p95": 0.10271896794438362,
"p99": 0.14449989143759012,
"stdev": 0.026657956278453555,
"stdev_": 33.276202953352595
},
"throughput": {
"unit": "samples/s",
"value": 24.965306871816182
},
"energy": null,
"efficiency": null
} |
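As a quick consistency check, the aggregate statistics in the forward report follow directly from the listed values; the sketch below assumes throughput is derived as samples processed per second, i.e. batch_size * count / total, which matches the reported value:

```python
# Consistency check for the "forward" section of the report above.
count = 126
total = 10.094007708132267       # sum of the listed per-call latencies, in seconds
batch_size = 2                   # scenario.input_shapes.batch_size

mean_latency = total / count             # ~0.080111 s, matches the reported "mean"
throughput = batch_size * count / total  # ~24.965307 samples/s, matches "throughput.value"
print(mean_latency, throughput)
```

The config column also maps onto optimum-benchmark's Python API. Below is a hedged sketch of how a run like this one could be reproduced, assuming the top-level classes documented in the optimum-benchmark README (Benchmark, BenchmarkConfig, PyTorchConfig, InferenceConfig, ProcessConfig); field values are taken from the backend, scenario, and launcher dicts above, and omitted fields are left at their defaults:

```python
from optimum_benchmark import (
    Benchmark,
    BenchmarkConfig,
    InferenceConfig,
    ProcessConfig,
    PyTorchConfig,
)

if __name__ == "__main__":
    benchmark_config = BenchmarkConfig(
        name="pytorch",
        backend=PyTorchConfig(
            model="openai-community/gpt2",
            device="cpu",
            torch_dtype="float32",
        ),
        scenario=InferenceConfig(
            iterations=10,
            duration=10,
            warmup_runs=10,
            memory=True,
            latency=True,
            input_shapes={"batch_size": 2, "sequence_length": 16},
        ),
        launcher=ProcessConfig(start_method="spawn"),
    )
    # launch() runs the benchmark in a separate process and returns a report
    # with the same load/forward structure shown in the rows above.
    benchmark_report = Benchmark.launch(benchmark_config)
    print(benchmark_report)
```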