---
language:
- de
- fr
- it
pipeline_tag: token-classification
license: cc-by-sa-4.0
---

# Tagset

- O
- B-CITATION
- I-CITATION
- B-LAW
- I-LAW

# Training

The model was trained with the following hyperparameters:

- batch size: 64
- learning rate: 0.00001
- number of training epochs: 50 (actually trained: 23)
- early stopping patience: 5

# Predict scores

| **Metric** | **Score** |
|---------------------------------|--------------------|
| de_predict/_CITATION_f1 | 0.9793131792857794 |
| de_predict/_CITATION_precision | 0.9852522282458881 |
| de_predict/_CITATION_recall | 0.9734453018610985 |
| de_predict/_LAW_f1 | 0.9207842961099632 |
| de_predict/_LAW_precision | 0.8598544432559407 |
| de_predict/_LAW_recall | 0.9910077594333921 |
| de_predict/_accuracy_normalized | 0.9880353522464387 |
| de_predict/_macro-f1 | 0.9504272924171073 |
| de_predict/_macro-precision | 0.9822265306472453 |
| de_predict/_macro-recall | 0.9232171398568052 |
| de_predict/_micro-f1 | 0.9405898834091524 |
| de_predict/_micro-precision | 0.9849051246865093 |
| de_predict/_micro-recall | 0.9000908134288556 |
| de_predict/_steps_per_second | 0.549 |
| de_predict/_weighted-f1 | 0.939658320951984 |
| de_predict/_weighted-precision | 0.9854977355183103 |
| de_predict/_weighted-recall | 0.9000908134288556 |
| fr_predict/_CITATION_f1 | 0.9554686901203342 |
| fr_predict/_CITATION_precision | 0.9684586699813549 |
| fr_predict/_CITATION_recall | 0.9428225684465286 |
| fr_predict/_LAW_f1 | 0.910095519316377 |
| fr_predict/_LAW_precision | 0.8366717393986756 |
| fr_predict/_LAW_recall | 0.9976459048553212 |
| fr_predict/_accuracy_normalized | 0.9830767480044869 |
| fr_predict/_macro-f1 | 0.9330080903677362 |
| fr_predict/_macro-precision | 0.9702342366509249 |
| fr_predict/_macro-recall | 0.9029739799827206 |
| fr_predict/_micro-f1 | 0.920617324580396 |
| fr_predict/_micro-precision | 0.9842228065627199 |
| fr_predict/_micro-recall | 0.8647338279317974 |
| fr_predict/_steps_per_second | 0.593 |
| fr_predict/_weighted-f1 | 0.9198669665372888 |
| fr_predict/_weighted-precision | 0.9861681830521788 |
| fr_predict/_weighted-recall | 0.8647338279317974 |
| it_predict/_CITATION_f1 | 0.9703896103896105 |
| it_predict/_CITATION_precision | 0.9769874476987448 |
| it_predict/_CITATION_recall | 0.9638802889576883 |
| it_predict/_LAW_f1 | 0.9099276791584483 |
| it_predict/_LAW_precision | 0.8422590068159689 |
| it_predict/_LAW_recall | 0.9894195024306548 |
| it_predict/_accuracy_normalized | 0.9892137683075134 |
| it_predict/_macro-f1 | 0.9413484848298093 |
| it_predict/_macro-precision | 0.9766498956941716 |
| it_predict/_macro-recall | 0.9119834901073706 |
| it_predict/_micro-f1 | 0.9311429570080392 |
| it_predict/_micro-precision | 0.9803127874885005 |
| it_predict/_micro-recall | 0.8866699950074888 |
| it_predict/_steps_per_second | 0.563 |
| it_predict/_weighted-f1 | 0.929971077318579 |
| it_predict/_weighted-precision | 0.9813271971464931 |
| it_predict/_weighted-recall | 0.8866699950074888 |
| predict/_CITATION_f1 | 0.973621340187501 |
| predict/_CITATION_precision | 0.981138340970977 |
| predict/_CITATION_recall | 0.9662186467837405 |
| predict/_LAW_f1 | 0.9168199439712499 |
| predict/_LAW_precision | 0.8514980289093298 |
| predict/_LAW_recall | 0.9929968125536349 |
| predict/_accuracy_normalized | 0.986841752305624 |
| predict/_macro-f1 | 0.9455976917351873 |
| predict/_macro-precision | 0.9796077296686877 |
| predict/_macro-recall | 0.9169959471957758 |
| predict/_micro-f1 | 0.934344809828224 |
| predict/_micro-precision | 0.9844524443053164 |
| predict/_micro-recall | 0.8890909776278342 |
| predict/_steps_per_second | 0.557 |
| predict/_weighted-f1 | 0.9333974918752409 |
| predict/_weighted-precision | 0.9854002360022739 |
| predict/_weighted-recall | 0.8890909776278342 |
| predict_samples | 28218 |
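
# Usage example

The snippet below is a minimal inference sketch, assuming the checkpoint in this repository is a standard Hugging Face token-classification model. The `MODEL_ID` placeholder and the sample sentence are illustrative and not part of this card.

```python
from transformers import pipeline

# Placeholder: replace with the actual repository id of this model.
MODEL_ID = "<this-model-repo-id>"

# "token-classification" matches the pipeline_tag above;
# aggregation_strategy="simple" merges B-/I- sub-tokens into
# contiguous spans labelled CITATION and LAW.
ner = pipeline("token-classification", model=MODEL_ID, aggregation_strategy="simple")

# Illustrative German legal sentence containing a law reference and a court citation.
text = (
    "Gemäss Art. 8 ZGB trägt diejenige Partei die Beweislast, "
    "die aus einer Tatsache Rechte ableitet (vgl. BGE 130 III 321 E. 3.1)."
)

for entity in ner(text):
    print(entity["entity_group"], "->", entity["word"], f"({entity['score']:.3f})")
```

With `aggregation_strategy="simple"` the pipeline returns one entry per detected span, so the output maps directly onto the tagset listed above.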
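
# Computing span-level scores

The per-entity, macro, micro, and weighted scores in the table are the kind of span-level metrics that `seqeval` reports for BIO-tagged sequences. The snippet below is only a sketch of that computation with toy data; the evaluation script actually used for the scores above is not included in this card.

```python
from seqeval.metrics import classification_report

# Toy gold/predicted BIO sequences using the tagset listed above (illustrative only).
y_true = [["O", "B-LAW", "I-LAW", "O", "B-CITATION", "I-CITATION", "I-CITATION"]]
y_pred = [["O", "B-LAW", "I-LAW", "O", "B-CITATION", "I-CITATION", "O"]]

# classification_report prints per-entity precision/recall/F1 plus micro, macro
# and weighted averages, i.e. the same kinds of scores as the table above.
print(classification_report(y_true, y_pred, digits=4))
```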