Datasets:
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# This is a test-script that loads the dataset."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"# %pip install datasets\n",
"from datasets import load_dataset\n",
"import pandas as pd\n"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "b56274faa04d46c0a8ce3871242ffc6e",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Downloading readme: 0%| | 0.00/1.37k [00:00<?, ?B/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"ename": "FileNotFoundError",
"evalue": "Couldn't find a dataset script at /Users/amittaijoel/workspace/crawl.hs/data/metadata/siavava/ai-tech-articles/ai-tech-articles.py or any data file in the same directory. Couldn't find 'siavava/ai-tech-articles' on the Hugging Face Hub either: FileNotFoundError: No (supported) data files or dataset script found in siavava/ai-tech-articles. ",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m/Users/amittaijoel/workspace/crawl.hs/data/metadata/test.ipynb Cell 3\u001b[0m line \u001b[0;36m1\n\u001b[0;32m----> <a href='vscode-notebook-cell:/Users/amittaijoel/workspace/crawl.hs/data/metadata/test.ipynb#W2sZmlsZQ%3D%3D?line=0'>1</a>\u001b[0m dt \u001b[39m=\u001b[39m load_dataset(\u001b[39m\"\u001b[39;49m\u001b[39msiavava/ai-tech-articles\u001b[39;49m\u001b[39m\"\u001b[39;49m)\n",
"File \u001b[0;32m~/miniconda3/envs/data-mining/lib/python3.11/site-packages/datasets/load.py:2129\u001b[0m, in \u001b[0;36mload_dataset\u001b[0;34m(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, token, use_auth_token, task, streaming, num_proc, storage_options, **config_kwargs)\u001b[0m\n\u001b[1;32m 2124\u001b[0m verification_mode \u001b[39m=\u001b[39m VerificationMode(\n\u001b[1;32m 2125\u001b[0m (verification_mode \u001b[39mor\u001b[39;00m VerificationMode\u001b[39m.\u001b[39mBASIC_CHECKS) \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m save_infos \u001b[39melse\u001b[39;00m VerificationMode\u001b[39m.\u001b[39mALL_CHECKS\n\u001b[1;32m 2126\u001b[0m )\n\u001b[1;32m 2128\u001b[0m \u001b[39m# Create a dataset builder\u001b[39;00m\n\u001b[0;32m-> 2129\u001b[0m builder_instance \u001b[39m=\u001b[39m load_dataset_builder(\n\u001b[1;32m 2130\u001b[0m path\u001b[39m=\u001b[39;49mpath,\n\u001b[1;32m 2131\u001b[0m name\u001b[39m=\u001b[39;49mname,\n\u001b[1;32m 2132\u001b[0m data_dir\u001b[39m=\u001b[39;49mdata_dir,\n\u001b[1;32m 2133\u001b[0m data_files\u001b[39m=\u001b[39;49mdata_files,\n\u001b[1;32m 2134\u001b[0m cache_dir\u001b[39m=\u001b[39;49mcache_dir,\n\u001b[1;32m 2135\u001b[0m features\u001b[39m=\u001b[39;49mfeatures,\n\u001b[1;32m 2136\u001b[0m download_config\u001b[39m=\u001b[39;49mdownload_config,\n\u001b[1;32m 2137\u001b[0m download_mode\u001b[39m=\u001b[39;49mdownload_mode,\n\u001b[1;32m 2138\u001b[0m revision\u001b[39m=\u001b[39;49mrevision,\n\u001b[1;32m 2139\u001b[0m token\u001b[39m=\u001b[39;49mtoken,\n\u001b[1;32m 2140\u001b[0m storage_options\u001b[39m=\u001b[39;49mstorage_options,\n\u001b[1;32m 2141\u001b[0m \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mconfig_kwargs,\n\u001b[1;32m 2142\u001b[0m )\n\u001b[1;32m 2144\u001b[0m \u001b[39m# Return iterable dataset in case of streaming\u001b[39;00m\n\u001b[1;32m 2145\u001b[0m \u001b[39mif\u001b[39;00m streaming:\n",
"File \u001b[0;32m~/miniconda3/envs/data-mining/lib/python3.11/site-packages/datasets/load.py:1815\u001b[0m, in \u001b[0;36mload_dataset_builder\u001b[0;34m(path, name, data_dir, data_files, cache_dir, features, download_config, download_mode, revision, token, use_auth_token, storage_options, **config_kwargs)\u001b[0m\n\u001b[1;32m 1813\u001b[0m download_config \u001b[39m=\u001b[39m download_config\u001b[39m.\u001b[39mcopy() \u001b[39mif\u001b[39;00m download_config \u001b[39melse\u001b[39;00m DownloadConfig()\n\u001b[1;32m 1814\u001b[0m download_config\u001b[39m.\u001b[39mstorage_options\u001b[39m.\u001b[39mupdate(storage_options)\n\u001b[0;32m-> 1815\u001b[0m dataset_module \u001b[39m=\u001b[39m dataset_module_factory(\n\u001b[1;32m 1816\u001b[0m path,\n\u001b[1;32m 1817\u001b[0m revision\u001b[39m=\u001b[39;49mrevision,\n\u001b[1;32m 1818\u001b[0m download_config\u001b[39m=\u001b[39;49mdownload_config,\n\u001b[1;32m 1819\u001b[0m download_mode\u001b[39m=\u001b[39;49mdownload_mode,\n\u001b[1;32m 1820\u001b[0m data_dir\u001b[39m=\u001b[39;49mdata_dir,\n\u001b[1;32m 1821\u001b[0m data_files\u001b[39m=\u001b[39;49mdata_files,\n\u001b[1;32m 1822\u001b[0m )\n\u001b[1;32m 1823\u001b[0m \u001b[39m# Get dataset builder class from the processing script\u001b[39;00m\n\u001b[1;32m 1824\u001b[0m builder_kwargs \u001b[39m=\u001b[39m dataset_module\u001b[39m.\u001b[39mbuilder_kwargs\n",
"File \u001b[0;32m~/miniconda3/envs/data-mining/lib/python3.11/site-packages/datasets/load.py:1508\u001b[0m, in \u001b[0;36mdataset_module_factory\u001b[0;34m(path, revision, download_config, download_mode, dynamic_modules_path, data_dir, data_files, **download_kwargs)\u001b[0m\n\u001b[1;32m 1506\u001b[0m \u001b[39mraise\u001b[39;00m e1 \u001b[39mfrom\u001b[39;00m \u001b[39mNone\u001b[39;00m\n\u001b[1;32m 1507\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39misinstance\u001b[39m(e1, \u001b[39mFileNotFoundError\u001b[39;00m):\n\u001b[0;32m-> 1508\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mFileNotFoundError\u001b[39;00m(\n\u001b[1;32m 1509\u001b[0m \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mCouldn\u001b[39m\u001b[39m'\u001b[39m\u001b[39mt find a dataset script at \u001b[39m\u001b[39m{\u001b[39;00mrelative_to_absolute_path(combined_path)\u001b[39m}\u001b[39;00m\u001b[39m or any data file in the same directory. \u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 1510\u001b[0m \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mCouldn\u001b[39m\u001b[39m'\u001b[39m\u001b[39mt find \u001b[39m\u001b[39m'\u001b[39m\u001b[39m{\u001b[39;00mpath\u001b[39m}\u001b[39;00m\u001b[39m'\u001b[39m\u001b[39m on the Hugging Face Hub either: \u001b[39m\u001b[39m{\u001b[39;00m\u001b[39mtype\u001b[39m(e1)\u001b[39m.\u001b[39m\u001b[39m__name__\u001b[39m\u001b[39m}\u001b[39;00m\u001b[39m: \u001b[39m\u001b[39m{\u001b[39;00me1\u001b[39m}\u001b[39;00m\u001b[39m\"\u001b[39m\n\u001b[1;32m 1511\u001b[0m ) \u001b[39mfrom\u001b[39;00m \u001b[39mNone\u001b[39;00m\n\u001b[1;32m 1512\u001b[0m \u001b[39mraise\u001b[39;00m e1 \u001b[39mfrom\u001b[39;00m \u001b[39mNone\u001b[39;00m\n\u001b[1;32m 1513\u001b[0m \u001b[39melse\u001b[39;00m:\n",
"\u001b[0;31mFileNotFoundError\u001b[0m: Couldn't find a dataset script at /Users/amittaijoel/workspace/crawl.hs/data/metadata/siavava/ai-tech-articles/ai-tech-articles.py or any data file in the same directory. Couldn't find 'siavava/ai-tech-articles' on the Hugging Face Hub either: FileNotFoundError: No (supported) data files or dataset script found in siavava/ai-tech-articles. "
]
}
],
"source": [
"dt = load_dataset(\"siavava/ai-tech-articles\")\n"
]
},
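{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `FileNotFoundError` above means the `siavava/ai-tech-articles` repo on the Hugging Face Hub exposed no loading script and no supported data files when this ran. The next cell is a minimal fallback sketch, assuming a local copy of the crawled data exists as a CSV; the file path is a hypothetical placeholder."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Fallback sketch: load a local copy of the crawled data through the generic \"csv\" builder\n",
"# instead of the Hub repo. The file path below is a hypothetical placeholder.\n",
"from datasets import load_dataset\n",
"\n",
"local_files = {\"train\": \"../output/ai-tech-articles.csv\"}  # placeholder path, adjust as needed\n",
"dt = load_dataset(\"csv\", data_files=local_files)\n"
]
},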
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>id</th>\n",
" <th>year</th>\n",
" <th>title</th>\n",
" <th>url</th>\n",
" <th>text</th>\n",
" <th>__index_level_0__</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0</td>\n",
" <td>2023.0</td>\n",
" <td>\"MIT Technology Review\"</td>\n",
" <td>\"https://www.technologyreview.com\"</td>\n",
" <td>\"Featured Topics Newsletters Events Podcasts F...</td>\n",
" <td>0</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>1</td>\n",
" <td>2023.0</td>\n",
" <td>\"WIRED - The Latest in Technology, Science, Cu...</td>\n",
" <td>\"https://www.wired.com\"</td>\n",
" <td>\"Open Navigation Menu To revisit this article,...</td>\n",
" <td>1</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>2</td>\n",
" <td>2019.0</td>\n",
" <td>\"The Verge\"</td>\n",
" <td>\"https://www.theverge.com\"</td>\n",
" <td>\"The Verge homepage The Verge The Verge logo.\\...</td>\n",
" <td>2</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>3</td>\n",
" <td>2023.0</td>\n",
" <td>\"TechCrunch | Startup and Technology News\"</td>\n",
" <td>\"https://www.techcrunch.com\"</td>\n",
" <td>\"WeWork reportedly on the verge of filing for ...</td>\n",
" <td>3</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>4</td>\n",
" <td>2022.0</td>\n",
" <td>\"A new vision of artificial intelligence for t...</td>\n",
" <td>\"https://www.technologyreview.com/2022/04/22/1...</td>\n",
" <td>\"Featured Topics Newsletters Events Podcasts A...</td>\n",
" <td>4</td>\n",
" </tr>\n",
" <tr>\n",
" <th>5</th>\n",
" <td>5</td>\n",
" <td>2022.0</td>\n",
" <td>\"The scientist who co-created CRISPR isn’t rul...</td>\n",
" <td>\"https://www.technologyreview.com/2022/04/26/1...</td>\n",
" <td>\"Featured Topics Newsletters Events Podcasts F...</td>\n",
" <td>5</td>\n",
" </tr>\n",
" <tr>\n",
" <th>6</th>\n",
" <td>6</td>\n",
" <td>2022.0</td>\n",
" <td>\"These fast, cheap tests could help us coexist...</td>\n",
" <td>\"https://www.technologyreview.com/2022/04/27/1...</td>\n",
" <td>\"Featured Topics Newsletters Events Podcasts F...</td>\n",
" <td>6</td>\n",
" </tr>\n",
" <tr>\n",
" <th>7</th>\n",
" <td>7</td>\n",
" <td>2022.0</td>\n",
" <td>\"Tackling multiple tasks with a single visual ...</td>\n",
" <td>\"http://www.deepmind.com/blog/tackling-multipl...</td>\n",
" <td>\"DeepMind Search Search Close DeepMind About O...</td>\n",
" <td>7</td>\n",
" </tr>\n",
" <tr>\n",
" <th>8</th>\n",
" <td>8</td>\n",
" <td>2019.0</td>\n",
" <td>\"About - Google DeepMind\"</td>\n",
" <td>\"https://www.deepmind.com/about\"</td>\n",
" <td>\"DeepMind Search Search Close DeepMind About O...</td>\n",
" <td>8</td>\n",
" </tr>\n",
" <tr>\n",
" <th>9</th>\n",
" <td>9</td>\n",
" <td>2023.0</td>\n",
" <td>\"Blog - Google DeepMind\"</td>\n",
" <td>\"https://www.deepmind.com/blog-categories/appl...</td>\n",
" <td>\"DeepMind Search Search Close DeepMind About O...</td>\n",
" <td>9</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" id year title \\\n",
"0 0 2023.0 \"MIT Technology Review\" \n",
"1 1 2023.0 \"WIRED - The Latest in Technology, Science, Cu... \n",
"2 2 2019.0 \"The Verge\" \n",
"3 3 2023.0 \"TechCrunch | Startup and Technology News\" \n",
"4 4 2022.0 \"A new vision of artificial intelligence for t... \n",
"5 5 2022.0 \"The scientist who co-created CRISPR isn’t rul... \n",
"6 6 2022.0 \"These fast, cheap tests could help us coexist... \n",
"7 7 2022.0 \"Tackling multiple tasks with a single visual ... \n",
"8 8 2019.0 \"About - Google DeepMind\" \n",
"9 9 2023.0 \"Blog - Google DeepMind\" \n",
"\n",
" url \\\n",
"0 \"https://www.technologyreview.com\" \n",
"1 \"https://www.wired.com\" \n",
"2 \"https://www.theverge.com\" \n",
"3 \"https://www.techcrunch.com\" \n",
"4 \"https://www.technologyreview.com/2022/04/22/1... \n",
"5 \"https://www.technologyreview.com/2022/04/26/1... \n",
"6 \"https://www.technologyreview.com/2022/04/27/1... \n",
"7 \"http://www.deepmind.com/blog/tackling-multipl... \n",
"8 \"https://www.deepmind.com/about\" \n",
"9 \"https://www.deepmind.com/blog-categories/appl... \n",
"\n",
" text __index_level_0__ \n",
"0 \"Featured Topics Newsletters Events Podcasts F... 0 \n",
"1 \"Open Navigation Menu To revisit this article,... 1 \n",
"2 \"The Verge homepage The Verge The Verge logo.\\... 2 \n",
"3 \"WeWork reportedly on the verge of filing for ... 3 \n",
"4 \"Featured Topics Newsletters Events Podcasts A... 4 \n",
"5 \"Featured Topics Newsletters Events Podcasts F... 5 \n",
"6 \"Featured Topics Newsletters Events Podcasts F... 6 \n",
"7 \"DeepMind Search Search Close DeepMind About O... 7 \n",
"8 \"DeepMind Search Search Close DeepMind About O... 8 \n",
"9 \"DeepMind Search Search Close DeepMind About O... 9 "
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df = dt[\"train\"].to_pandas()\n",
"df.head(10)\n"
]
},
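{
"cell_type": "markdown",
"metadata": {},
"source": [
"Two quirks are visible in the preview above: the `title`, `url`, and `text` columns keep the literal quote characters from the export, and `year` is stored as a float. The next cell is a small cleanup sketch under those assumptions."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Cleanup sketch for the preview above (assumptions: the string columns keep literal\n",
"# wrapping quotes from the export, and year is stored as a float).\n",
"for col in [\"title\", \"url\", \"text\"]:\n",
"    df[col] = df[col].str.strip('\"')    # drop the literal wrapping quotes\n",
"df[\"year\"] = df[\"year\"].astype(\"Int64\")  # nullable integer, tolerates missing years\n",
"df.head()\n"
]
}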
],
"metadata": {
"kernelspec": {
"display_name": "data-mining",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}