{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import os\n", "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n", "from datasets import load_dataset, load_metric, Audio, concatenate_datasets\n" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Login successful\n", "Your token has been saved to /home/ubuntu/.huggingface/token\n", "\u001b[1m\u001b[31mAuthenticated through git-credential store but this isn't the helper defined on your machine.\n", "You might have to re-authenticate when pushing to the Hugging Face Hub. Run the following command in your terminal in case you want to set this credential helper as the default\n", "\n", "git config --global credential.helper store\u001b[0m\n" ] } ], "source": [ "from huggingface_hub import notebook_login\n", "\n", "notebook_login()\n", "repo_name = \"smangrul/xls-r-300m-mr\"\n" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Reusing dataset open_slr (/home/ubuntu/.cache/huggingface/datasets/open_slr/SLR64/0.0.0/e0fb9e36094eff565efe812d1aba158f6a46ce834cb9705c91d1e2d6ba78ed31)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Dataset({\n", " features: ['path', 'audio', 'sentence'],\n", " num_rows: 1569\n", "})\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Reusing dataset common_voice (/home/ubuntu/.cache/huggingface/datasets/mozilla-foundation___common_voice/mr/8.0.0/b8bc4d453193c06a43269b46cd87f075c70f152ac963b7f28f7a2760c45ec3e8)\n", "Reusing dataset common_voice (/home/ubuntu/.cache/huggingface/datasets/mozilla-foundation___common_voice/mr/8.0.0/b8bc4d453193c06a43269b46cd87f075c70f152ac963b7f28f7a2760c45ec3e8)\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Dataset({\n", " features: ['path', 'audio', 'sentence'],\n", " num_rows: 698\n", "})\n" ] } ], "source": [ "\n", "openslr = load_dataset(\"openslr\", \"SLR64\", split=\"train\")\n", "print(openslr)\n", "\n", "common_voice_train = load_dataset(\"mozilla-foundation/common_voice_8_0\", \"mr\", split=\"train+validation\", use_auth_token=True)\n", "common_voice_test = load_dataset(\"mozilla-foundation/common_voice_8_0\", \"mr\", split=\"test\", use_auth_token=True)\n", "common_voice_train = common_voice_train.remove_columns([\"accent\", \"age\", \"client_id\", \"down_votes\", \"gender\", \"locale\", \"segment\", \"up_votes\"])\n", "common_voice_test = common_voice_test.remove_columns([\"accent\", \"age\", \"client_id\", \"down_votes\", \"gender\", \"locale\", \"segment\", \"up_votes\"])\n", "print(common_voice_train)\n", "\n" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "Dataset({\n", " features: ['path', 'audio', 'sentence'],\n", " num_rows: 2267\n", "})" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "train_data = concatenate_datasets([common_voice_train, openslr])\n", "train_data" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "import re\n", "import unicodedata\n", "chars_to_remove_regex = '[,?.!\\-\\;\\:\"“%‘”�—’…–\\।\\!\\\"\\,\\-\\.\\?\\:\\|\\“\\”\\–\\;\\'\\’\\‘\\॔\\u200c\\u200d]'\n", "\n", "def remove_special_characters(batch):\n", " batch[\"sentence\"] = re.sub(chars_to_remove_regex, '', batch[\"sentence\"]).lower()\n", " batch[\"sentence\"] = 
unicodedata.normalize(\"NFKC\", batch[\"sentence\"])\n", " return batch" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Loading cached processed dataset at /home/ubuntu/.cache/huggingface/datasets/mozilla-foundation___common_voice/mr/8.0.0/b8bc4d453193c06a43269b46cd87f075c70f152ac963b7f28f7a2760c45ec3e8/cache-86933e1c6f2c17a9.arrow\n", "Loading cached processed dataset at /home/ubuntu/.cache/huggingface/datasets/mozilla-foundation___common_voice/mr/8.0.0/b8bc4d453193c06a43269b46cd87f075c70f152ac963b7f28f7a2760c45ec3e8/cache-0b71d94dfe9f8e07.arrow\n" ] } ], "source": [ "train_dataset = train_data.map(remove_special_characters)\n", "test_dataset = common_voice_test.map(remove_special_characters)" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "def extract_all_chars(batch):\n", " all_text = \" \".join(batch[\"sentence\"])\n", " vocab = list(set(all_text))\n", " return {\"vocab\": [vocab], \"all_text\": [all_text]}" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "54586502931b4e99ab8e4cb90cb9fbc0", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/1 [00:00, ?ba/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "8fd87aff4e5f483daf6c6e5a4a00e37b", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/1 [00:00, ?ba/s]" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "vocab_train = train_dataset.map(extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=train_dataset.column_names)\n", "vocab_test = test_dataset.map(extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=train_dataset.column_names)\n" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{' ': 0,\n", " 'ँ': 1,\n", " 'ं': 2,\n", " 'ः': 3,\n", " 'अ': 4,\n", " 'आ': 5,\n", " 'इ': 6,\n", " 'ई': 7,\n", " 'उ': 8,\n", " 'ऊ': 9,\n", " 'ऋ': 10,\n", " 'ए': 11,\n", " 'ऐ': 12,\n", " 'ऑ': 13,\n", " 'ओ': 14,\n", " 'औ': 15,\n", " 'क': 16,\n", " 'ख': 17,\n", " 'ग': 18,\n", " 'घ': 19,\n", " 'च': 20,\n", " 'छ': 21,\n", " 'ज': 22,\n", " 'झ': 23,\n", " 'ञ': 24,\n", " 'ट': 25,\n", " 'ठ': 26,\n", " 'ड': 27,\n", " 'ढ': 28,\n", " 'ण': 29,\n", " 'त': 30,\n", " 'थ': 31,\n", " 'द': 32,\n", " 'ध': 33,\n", " 'न': 34,\n", " 'प': 35,\n", " 'फ': 36,\n", " 'ब': 37,\n", " 'भ': 38,\n", " 'म': 39,\n", " 'य': 40,\n", " 'र': 41,\n", " 'ऱ': 42,\n", " 'ल': 43,\n", " 'ळ': 44,\n", " 'व': 45,\n", " 'श': 46,\n", " 'ष': 47,\n", " 'स': 48,\n", " 'ह': 49,\n", " '़': 50,\n", " 'ा': 51,\n", " 'ि': 52,\n", " 'ी': 53,\n", " 'ु': 54,\n", " 'ू': 55,\n", " 'ृ': 56,\n", " 'ॄ': 57,\n", " 'ॅ': 58,\n", " 'े': 59,\n", " 'ै': 60,\n", " 'ॉ': 61,\n", " 'ॊ': 62,\n", " 'ो': 63,\n", " 'ौ': 64,\n", " '्': 65,\n", " 'ॲ': 66}" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "vocab_list = list(set(vocab_train[\"vocab\"][0]) | set(vocab_test[\"vocab\"][0]))\n", "vocab_dict = {v: k for k, v in enumerate(sorted(vocab_list))}\n", "vocab_dict" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [], "source": [ "vocab_dict[\"|\"] = vocab_dict[\" \"]\n", "del vocab_dict[\" \"]" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "data": { 
"text/plain": [ "69" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "vocab_dict[\"[UNK]\"] = len(vocab_dict)\n", "vocab_dict[\"[PAD]\"] = len(vocab_dict)\n", "len(vocab_dict)" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [], "source": [ "import json\n", "with open('vocab.json', 'w') as vocab_file:\n", " json.dump(vocab_dict, vocab_file)" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "file ./config.json not found\n", "Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n", "To https://huggingface.co/smangrul/xls-r-300m-mr\n", " 41422b3..c87c689 main -> main\n", "\n" ] }, { "data": { "text/plain": [ "'https://huggingface.co/smangrul/xls-r-300m-mr/commit/c87c689895462fd42a184ae74fffebe69a4078e8'" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from transformers import Wav2Vec2CTCTokenizer\n", "\n", "tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(\"./\", unk_token=\"[UNK]\", pad_token=\"[PAD]\", word_delimiter_token=\"|\")\n", "tokenizer.push_to_hub(repo_name)" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "from transformers import Wav2Vec2FeatureExtractor\n", "\n", "feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True)" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "from transformers import Wav2Vec2Processor\n", "\n", "processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [], "source": [ "train_dataset = train_dataset.cast_column(\"audio\", Audio(sampling_rate=16_000))\n", "test_dataset = test_dataset.cast_column(\"audio\", Audio(sampling_rate=16_000))" ] }, { "cell_type": "code", "execution_count": 17, "metadata": {}, "outputs": [], "source": [ "def prepare_dataset(batch):\n", " audio = batch[\"audio\"]\n", "\n", " # batched output is \"un-batched\"\n", " batch[\"input_values\"] = processor(audio[\"array\"], sampling_rate=audio[\"sampling_rate\"]).input_values[0]\n", " batch[\"input_length\"] = len(batch[\"input_values\"])\n", " \n", " with processor.as_target_processor():\n", " batch[\"labels\"] = processor(batch[\"sentence\"]).input_ids\n", " return batch" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "a096ebabad914b1f964e3a88f7763913", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/2267 [00:00, ?ex/s]" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "a35a844a29f748cb9dc8c96c9576cfd6", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/306 [00:00, ?ex/s]" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "train_dataset = train_dataset.map(prepare_dataset, remove_columns=train_dataset.column_names)\n", "test_dataset = test_dataset.map(prepare_dataset, remove_columns=test_dataset.column_names)" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "from datasets import load_from_disk\n", "train_dataset = load_from_disk(\"./Data/train_dataset\")\n", 
"test_dataset = load_from_disk(\"./Data/test_dataset\")" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "Dataset({\n", " features: ['input_values', 'input_length', 'labels'],\n", " num_rows: 2267\n", "})" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "train_dataset" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "Dataset({\n", " features: ['input_values', 'input_length', 'labels'],\n", " num_rows: 306\n", "})" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "test_dataset" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [], "source": [ "import torch\n", "\n", "from dataclasses import dataclass, field\n", "from typing import Any, Dict, List, Optional, Union\n", "\n", "@dataclass\n", "class DataCollatorCTCWithPadding:\n", " \"\"\"\n", " Data collator that will dynamically pad the inputs received.\n", " Args:\n", " processor (:class:`~transformers.Wav2Vec2Processor`)\n", " The processor used for proccessing the data.\n", " padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):\n", " Select a strategy to pad the returned sequences (according to the model's padding side and padding index)\n", " among:\n", " * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single\n", " sequence if provided).\n", " * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the\n", " maximum acceptable input length for the model if that argument is not provided.\n", " * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of\n", " different lengths).\n", " \"\"\"\n", "\n", " processor: Wav2Vec2Processor\n", " padding: Union[bool, str] = True\n", " \n", " def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:\n", " # split inputs and labels since they have to be of different lenghts and need\n", " # different padding methods\n", " input_features = [{\"input_values\": feature[\"input_values\"]} for feature in features]\n", " label_features = [{\"input_ids\": feature[\"labels\"]} for feature in features]\n", "\n", " batch = self.processor.pad(\n", " input_features,\n", " padding=self.padding,\n", " return_tensors=\"pt\",\n", " )\n", " with self.processor.as_target_processor():\n", " labels_batch = self.processor.pad(\n", " label_features,\n", " padding=self.padding,\n", " return_tensors=\"pt\",\n", " )\n", "\n", " # replace padding with -100 to ignore loss correctly\n", " labels = labels_batch[\"input_ids\"].masked_fill(labels_batch.attention_mask.ne(1), -100)\n", "\n", " batch[\"labels\"] = labels\n", "\n", " return batch" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [], "source": [ "data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [], "source": [ "wer_metric = load_metric(\"wer\")" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "def compute_metrics(pred):\n", " pred_logits = pred.predictions\n", " pred_ids = np.argmax(pred_logits, axis=-1)\n", "\n", " pred.label_ids[pred.label_ids == -100] = 
processor.tokenizer.pad_token_id\n", "\n", " pred_str = processor.batch_decode(pred_ids)\n", " # we do not want to group tokens when computing the metrics\n", " label_str = processor.batch_decode(pred.label_ids, group_tokens=False)\n", "\n", " wer = wer_metric.compute(predictions=pred_str, references=label_str)\n", "\n", " return {\"wer\": wer}" ] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Some weights of the model checkpoint at facebook/wav2vec2-xls-r-300m were not used when initializing Wav2Vec2ForCTC: ['project_q.bias', 'project_q.weight', 'project_hid.weight', 'quantizer.weight_proj.bias', 'quantizer.codevectors', 'project_hid.bias', 'quantizer.weight_proj.weight']\n", "- This IS expected if you are initializing Wav2Vec2ForCTC from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", "- This IS NOT expected if you are initializing Wav2Vec2ForCTC from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", "Some weights of Wav2Vec2ForCTC were not initialized from the model checkpoint at facebook/wav2vec2-xls-r-300m and are newly initialized: ['lm_head.weight', 'lm_head.bias']\n", "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" ] } ], "source": [ "from transformers import Wav2Vec2ForCTC\n", "\n", "model = Wav2Vec2ForCTC.from_pretrained(\n", " \"facebook/wav2vec2-xls-r-300m\", \n", " attention_dropout=0.1,\n", " layerdrop=0.0,\n", " feat_proj_dropout=0.0,\n", " mask_time_prob=0.75,\n", " mask_time_length=10,\n", " mask_feature_prob=0.25,\n", " mask_feature_length=64,\n", " ctc_loss_reduction=\"mean\", \n", " pad_token_id=processor.tokenizer.pad_token_id,\n", " vocab_size=len(processor.tokenizer),\n", ")" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/home/ubuntu/transformers/src/transformers/models/wav2vec2/modeling_wav2vec2.py:1717: FutureWarning: The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.Please use the equivalent `freeze_feature_encoder` method instead.\n", " FutureWarning,\n" ] } ], "source": [ "model.freeze_feature_extractor()" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [], "source": [ "from transformers import TrainingArguments\n", "\n", "training_args = TrainingArguments(\n", " output_dir=repo_name,\n", " group_by_length=True,\n", " per_device_train_batch_size=16,\n", " gradient_accumulation_steps=2,\n", " evaluation_strategy=\"steps\",\n", " num_train_epochs=200,\n", " gradient_checkpointing=True,\n", " fp16=True,\n", " save_steps=400,\n", " eval_steps=400,\n", " logging_steps=100,\n", " learning_rate=1e-4,\n", " warmup_steps=1000,\n", " save_total_limit=1,\n", " push_to_hub=True,\n", ")" ] }, { "cell_type": "code", "execution_count": 17, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/ebs/learn/ASR/smangrul/xls-r-300m-mr is already a clone of https://huggingface.co/smangrul/xls-r-300m-mr. 
Make sure you pull the latest changes with `repo.git_pull()`.\n", "Using amp half precision backend\n" ] } ], "source": [ "from transformers import Trainer\n", "\n", "trainer = Trainer(\n", " model=model,\n", " data_collator=data_collator,\n", " args=training_args,\n", " compute_metrics=compute_metrics,\n", " train_dataset=train_dataset,\n", " eval_dataset=test_dataset,\n", " tokenizer=processor.feature_extractor,\n", ")\n" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "The following columns in the training set don't have a corresponding argument in `Wav2Vec2ForCTC.forward` and have been ignored: input_length. If input_length are not expected by `Wav2Vec2ForCTC.forward`, you can safely ignore this message.\n", "/home/ubuntu/transformers/src/transformers/optimization.py:309: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use thePyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n", " FutureWarning,\n", "***** Running training *****\n", " Num examples = 2267\n", " Num Epochs = 200\n", " Instantaneous batch size per device = 16\n", " Total train batch size (w. parallel, distributed & accumulation) = 32\n", " Gradient Accumulation steps = 2\n", " Total optimization steps = 14200\n" ] }, { "data": { "text/html": [ "\n", "
Step | \n", "Training Loss | \n", "Validation Loss | \n", "Wer | \n", "
---|---|---|---|
400 | \n", "3.794000 | \n", "3.532227 | \n", "1.000000 | \n", "
800 | \n", "3.362400 | \n", "3.359044 | \n", "1.000000 | \n", "
1200 | \n", "2.293900 | \n", "1.011279 | \n", "0.829924 | \n", "
1600 | \n", "1.233000 | \n", "0.502743 | \n", "0.593662 | \n", "
2000 | \n", "0.962600 | \n", "0.412519 | \n", "0.496992 | \n", "
2400 | \n", "0.831800 | \n", "0.402903 | \n", "0.493783 | \n", "
2800 | \n", "0.737000 | \n", "0.389773 | \n", "0.469314 | \n", "
3200 | \n", "0.677100 | \n", "0.373987 | \n", "0.436021 | \n", "
3600 | \n", "0.634400 | \n", "0.383823 | \n", "0.432010 | \n", "
4000 | \n", "0.586000 | \n", "0.375610 | \n", "0.419575 | \n", "
4400 | \n", "0.561000 | \n", "0.387891 | \n", "0.418371 | \n", "
4800 | \n", "0.518500 | \n", "0.386357 | \n", "0.417569 | \n", "
5200 | \n", "0.515300 | \n", "0.415069 | \n", "0.430004 | \n", "
5600 | \n", "0.478100 | \n", "0.399211 | \n", "0.408744 | \n", "
6000 | \n", "0.468100 | \n", "0.424542 | \n", "0.402327 | \n", "
6400 | \n", "0.439400 | \n", "0.430979 | \n", "0.410750 | \n", "
6800 | \n", "0.429600 | \n", "0.427700 | \n", "0.409146 | \n", "
7200 | \n", "0.400300 | \n", "0.451111 | \n", "0.419976 | \n", "
7600 | \n", "0.395100 | \n", "0.463446 | \n", "0.405134 | \n", "
8000 | \n", "0.381800 | \n", "0.454752 | \n", "0.407942 | \n", "
8400 | \n", "0.371500 | \n", "0.461547 | \n", "0.404733 | \n", "
8800 | \n", "0.362500 | \n", "0.461543 | \n", "0.411151 | \n", "
9200 | \n", "0.338200 | \n", "0.468299 | \n", "0.417168 | \n", "
9600 | \n", "0.338800 | \n", "0.480989 | \n", "0.412355 | \n", "
10000 | \n", "0.317600 | \n", "0.475700 | \n", "0.410750 | \n", "
10400 | \n", "0.315100 | \n", "0.478920 | \n", "0.403530 | \n", "
10800 | \n", "0.296200 | \n", "0.480600 | \n", "0.398315 | \n", "
11200 | \n", "0.299000 | \n", "0.477083 | \n", "0.393502 | \n", "
11600 | \n", "0.290000 | \n", "0.465646 | \n", "0.393903 | \n", "
12000 | \n", "0.290900 | \n", "0.490041 | \n", "0.405937 | \n", "
12400 | \n", "0.275600 | \n", "0.489354 | \n", "0.399519 | \n", "
12800 | \n", "0.272600 | \n", "0.494580 | \n", "0.395909 | \n", "
13200 | \n", "0.265900 | \n", "0.497918 | \n", "0.397112 | \n", "
13600 | \n", "0.266300 | \n", "0.498627 | \n", "0.397513 | \n", "
14000 | \n", "0.259600 | \n", "0.504610 | \n", "0.401524 | \n", "
"
],
"text/plain": [
"