2023-03-06 10:59:16,343 ----------------------------------------------------------------------------------------------------
2023-03-06 10:59:16,345 Model: "EntityLinker(
  (embeddings): TransformerWordEmbeddings(
    (model): BertModel(
      (embeddings): BertEmbeddings(
        (word_embeddings): Embedding(30523, 768)
        (position_embeddings): Embedding(512, 768)
        (token_type_embeddings): Embedding(2, 768)
        (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
        (dropout): Dropout(p=0.1, inplace=False)
      )
      (encoder): BertEncoder(
        (layer): ModuleList(
          (0-11): 12 x BertLayer(
            (attention): BertAttention(
              (self): BertSelfAttention(
                (query): Linear(in_features=768, out_features=768, bias=True)
                (key): Linear(in_features=768, out_features=768, bias=True)
                (value): Linear(in_features=768, out_features=768, bias=True)
                (dropout): Dropout(p=0.1, inplace=False)
              )
              (output): BertSelfOutput(
                (dense): Linear(in_features=768, out_features=768, bias=True)
                (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
                (dropout): Dropout(p=0.1, inplace=False)
              )
            )
            (intermediate): BertIntermediate(
              (dense): Linear(in_features=768, out_features=3072, bias=True)
              (intermediate_act_fn): GELUActivation()
            )
            (output): BertOutput(
              (dense): Linear(in_features=3072, out_features=768, bias=True)
              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
              (dropout): Dropout(p=0.1, inplace=False)
            )
          )
        )
      )
      (pooler): BertPooler(
        (dense): Linear(in_features=768, out_features=768, bias=True)
        (activation): Tanh()
      )
    )
  )
  (decoder): PrototypicalDecoder(
    (distance): NegativeScaledDotProduct()
  )
  (dropout): Dropout(p=0.0, inplace=False)
  (locked_dropout): LockedDropout(p=0.0)
  (word_dropout): WordDropout(p=0.0)
  (loss_function): CrossEntropyLoss()
  (weights): None
  (weight_tensor) None
)"
2023-03-06 10:59:16,345 ----------------------------------------------------------------------------------------------------
2023-03-06 10:59:16,345 Corpus: "Corpus: 2025740 train + 225082 dev + 70408 test sentences"
2023-03-06 10:59:16,345 ----------------------------------------------------------------------------------------------------
2023-03-06 10:59:16,345 Parameters:
2023-03-06 10:59:16,345  - learning_rate: "0.000500,0.000005"
2023-03-06 10:59:16,345  - mini_batch_size: "64"
2023-03-06 10:59:16,345  - patience: "3"
2023-03-06 10:59:16,345  - anneal_factor: "0.5"
2023-03-06 10:59:16,345  - max_epochs: "50"
2023-03-06 10:59:16,345  - shuffle: "True"
2023-03-06 10:59:16,345  - train_with_dev: "True"
2023-03-06 10:59:16,345  - batch_growth_annealing: "False"
2023-03-06 10:59:16,345 ----------------------------------------------------------------------------------------------------
2023-03-06 10:59:16,345 Model training base path: "resources/taggers/zelda_compare_first-last/link+context+prototypes-dot_product-bert-base-uncased-lr-5e-06x100-mbz-64-min-4-candidates-False-chunk-16"
2023-03-06 10:59:16,345 ----------------------------------------------------------------------------------------------------
2023-03-06 10:59:16,346 Device: cuda:2
2023-03-06 10:59:16,346 ----------------------------------------------------------------------------------------------------
2023-03-06 10:59:16,346 Embeddings storage mode: none
2023-03-06 10:59:16,346 ----------------------------------------------------------------------------------------------------
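For reference, the setup logged above can be approximated with a short Flair training script. The sketch below is an assumption-laden reconstruction, not the original script: it targets Flair's API around v0.12, guesses the "nel" label type, and maps the "first-last" tag in the base path to EntityLinker's pooling_operation. The logged learning-rate pair 0.000500,0.000005 (and the "lr-5e-06x100" path segment) indicates two optimizer parameter groups, decoder at 5e-4 and transformer at 5e-6, which the single-LR sketch does not reproduce; likewise, the PrototypicalDecoder with NegativeScaledDotProduct distance would be plugged in via the model's decoder argument, whose exact construction is not recoverable from the log.

    # Hypothetical reconstruction of the logged run (Flair ~0.12 API assumed).
    from flair.datasets import ZELDA
    from flair.embeddings import TransformerWordEmbeddings
    from flair.models import EntityLinker
    from flair.trainers import ModelTrainer

    # ZELDA entity-linking corpus: 2,025,740 train + 225,082 dev + 70,408 test sentences.
    corpus = ZELDA()
    label_dictionary = corpus.make_label_dictionary(label_type="nel")

    # fine_tune=True: the 5e-06 transformer LR in the log implies BERT is fine-tuned.
    embeddings = TransformerWordEmbeddings("bert-base-uncased", fine_tune=True)

    linker = EntityLinker(
        embeddings=embeddings,
        label_dictionary=label_dictionary,
        pooling_operation="first_last",  # assumption: matches "first-last" in the base path
        label_type="nel",
    )

    trainer = ModelTrainer(linker, corpus)
    trainer.train(
        "resources/taggers/zelda_compare_first-last/link+context+prototypes-dot_product-bert-base-uncased-lr-5e-06x100-mbz-64-min-4-candidates-False-chunk-16",
        learning_rate=0.0005,  # decoder LR; the transformer ran at 0.0005 / 100 = 5e-06
        mini_batch_size=64,
        max_epochs=50,
        patience=3,
        anneal_factor=0.5,
        shuffle=True,
        train_with_dev=True,
        embeddings_storage_mode="none",
    )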
2023-03-06 11:31:18,472 epoch 1 - iter 3517/35170 - loss 7.27392333 - time (sec): 1922.13 - samples/sec: 158.05 - lr: 0.000500,0.000005
2023-03-06 12:01:33,298 epoch 1 - iter 7034/35170 - loss 7.20012919 - time (sec): 3736.95 - samples/sec: 149.75 - lr: 0.000500,0.000005
2023-03-06 12:32:29,408 epoch 1 - iter 10551/35170 - loss 7.18079014 - time (sec): 5593.06 - samples/sec: 145.09 - lr: 0.000500,0.000005
2023-03-06 13:03:31,685 epoch 1 - iter 14068/35170 - loss 7.06327152 - time (sec): 7455.34 - samples/sec: 143.66 - lr: 0.000500,0.000005
2023-03-06 13:33:46,154 epoch 1 - iter 17585/35170 - loss 6.97434192 - time (sec): 9269.81 - samples/sec: 143.47 - lr: 0.000500,0.000005
2023-03-06 14:04:41,303 epoch 1 - iter 21102/35170 - loss 6.83117753 - time (sec): 11124.96 - samples/sec: 144.40 - lr: 0.000500,0.000005
2023-03-06 14:35:34,810 epoch 1 - iter 24619/35170 - loss 6.75524001 - time (sec): 12978.46 - samples/sec: 145.04 - lr: 0.000500,0.000005
2023-03-06 15:02:12,277 epoch 1 - iter 28136/35170 - loss 6.63900872 - time (sec): 14575.93 - samples/sec: 144.00 - lr: 0.000500,0.000005
2023-03-06 15:31:55,208 epoch 1 - iter 31653/35170 - loss 6.49530659 - time (sec): 16358.86 - samples/sec: 143.63 - lr: 0.000500,0.000005
2023-03-06 16:03:00,086 epoch 1 - iter 35170/35170 - loss 6.30553566 - time (sec): 18223.74 - samples/sec: 143.27 - lr: 0.000500,0.000005
2023-03-06 16:03:00,087 saving model of current epoch
2023-03-06 16:03:07,205 ----------------------------------------------------------------------------------------------------
2023-03-06 16:03:07,205 EPOCH 1 done: loss 6.3055 - lr 0.000500,0.000005
2023-03-06 16:04:41,344 Evaluating as a multi-label problem: False
2023-03-06 16:04:41,707 TEST : loss 6.256093502044678 - f1-score (micro avg) 0.2969
2023-03-06 16:04:57,877 ----------------------------------------------------------------------------------------------------
2023-03-06 16:33:03,079 epoch 2 - iter 3517/35170 - loss 4.83233070 - time (sec): 1685.20 - samples/sec: 155.35 - lr: 0.000500,0.000005
2023-03-06 17:00:31,898 epoch 2 - iter 7034/35170 - loss 4.75164016 - time (sec): 3334.02 - samples/sec: 156.63 - lr: 0.000500,0.000005
2023-03-06 17:28:01,777 epoch 2 - iter 10551/35170 - loss 4.66590115 - time (sec): 4983.90 - samples/sec: 157.38 - lr: 0.000500,0.000005
2023-03-06 17:55:39,916 epoch 2 - iter 14068/35170 - loss 4.58558401 - time (sec): 6642.04 - samples/sec: 157.36 - lr: 0.000500,0.000005
2023-03-06 18:23:14,203 epoch 2 - iter 17585/35170 - loss 4.50167154 - time (sec): 8296.33 - samples/sec: 157.20 - lr: 0.000500,0.000005
2023-03-06 18:50:55,053 epoch 2 - iter 21102/35170 - loss 4.42428891 - time (sec): 9957.18 - samples/sec: 157.24 - lr: 0.000500,0.000005
2023-03-06 19:18:34,318 epoch 2 - iter 24619/35170 - loss 4.35643807 - time (sec): 11616.44 - samples/sec: 157.26 - lr: 0.000500,0.000005
2023-03-06 19:45:47,649 epoch 2 - iter 28136/35170 - loss 4.28721713 - time (sec): 13249.77 - samples/sec: 157.48 - lr: 0.000500,0.000005
2023-03-06 20:13:03,864 epoch 2 - iter 31653/35170 - loss 4.22330019 - time (sec): 14885.99 - samples/sec: 157.67 - lr: 0.000500,0.000005
2023-03-06 20:40:16,011 epoch 2 - iter 35170/35170 - loss 4.16157372 - time (sec): 16518.13 - samples/sec: 157.84 - lr: 0.000500,0.000005
2023-03-06 20:40:16,012 saving model of current epoch
2023-03-06 20:40:22,275 ----------------------------------------------------------------------------------------------------
2023-03-06 20:40:22,275 EPOCH 2 done: loss 4.1616 - lr 0.000500,0.000005
2023-03-06 20:41:53,374 Evaluating as a multi-label problem: False
2023-03-06 20:41:53,763 TEST : loss 4.25714111328125 - f1-score (micro avg) 0.4316
2023-03-06 20:42:09,029 ----------------------------------------------------------------------------------------------------
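The PrototypicalDecoder shown in the model card replaces a plain linear classification head with one learned prototype vector per candidate entity; with a NegativeScaledDotProduct distance, the "distance" to a prototype is a scaled dot product with the sign flipped, so the resulting logits are similarities that feed straight into the CrossEntropyLoss above. A toy PyTorch sketch of that scoring rule (an illustration of the idea, not Flair's implementation):

    import torch
    import torch.nn as nn

    class PrototypicalScorer(nn.Module):
        """Toy prototype scorer: one learnable vector per entity class."""

        def __init__(self, embedding_dim: int, num_entities: int):
            super().__init__()
            self.prototypes = nn.Parameter(torch.randn(num_entities, embedding_dim))
            self.scale = nn.Parameter(torch.tensor(1.0))  # the "scaled" part

        def forward(self, mention_embeddings: torch.Tensor) -> torch.Tensor:
            # Logits = scale * <mention, prototype>, i.e. the negation of a
            # negative scaled dot-product "distance".
            return self.scale * mention_embeddings @ self.prototypes.T

    # Hypothetical usage: 768-dim BERT mention embeddings, 500 candidate entities.
    scorer = PrototypicalScorer(embedding_dim=768, num_entities=500)
    logits = scorer(torch.randn(4, 768))  # shape: (batch, num_entities)
    loss = nn.CrossEntropyLoss()(logits, torch.randint(0, 500, (4,)))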
2023-03-06 21:12:54,979 epoch 3 - iter 3517/35170 - loss 3.30776397 - time (sec): 1845.95 - samples/sec: 140.94 - lr: 0.000500,0.000005
2023-03-06 21:43:45,369 epoch 3 - iter 7034/35170 - loss 3.26904130 - time (sec): 3696.34 - samples/sec: 140.89 - lr: 0.000500,0.000005
2023-03-06 22:14:34,301 epoch 3 - iter 10551/35170 - loss 3.23566595 - time (sec): 5545.27 - samples/sec: 140.82 - lr: 0.000500,0.000005
2023-03-06 22:45:29,669 epoch 3 - iter 14068/35170 - loss 3.20276952 - time (sec): 7400.64 - samples/sec: 140.73 - lr: 0.000500,0.000005
2023-03-06 23:16:28,637 epoch 3 - iter 17585/35170 - loss 3.17269843 - time (sec): 9259.61 - samples/sec: 140.85 - lr: 0.000500,0.000005
2023-03-06 23:47:32,048 epoch 3 - iter 21102/35170 - loss 3.14287143 - time (sec): 11123.02 - samples/sec: 140.79 - lr: 0.000500,0.000005
2023-03-07 00:18:24,388 epoch 3 - iter 24619/35170 - loss 3.11397495 - time (sec): 12975.36 - samples/sec: 140.66 - lr: 0.000500,0.000005
2023-03-07 00:49:22,405 epoch 3 - iter 28136/35170 - loss 3.08189552 - time (sec): 14833.38 - samples/sec: 140.66 - lr: 0.000500,0.000005
2023-03-07 01:20:21,750 epoch 3 - iter 31653/35170 - loss 3.05172403 - time (sec): 16692.72 - samples/sec: 140.59 - lr: 0.000500,0.000005
2023-03-07 01:51:01,483 epoch 3 - iter 35170/35170 - loss 3.02223837 - time (sec): 18532.45 - samples/sec: 140.68 - lr: 0.000500,0.000005
2023-03-07 01:51:01,484 saving model of current epoch
2023-03-07 01:51:07,261 ----------------------------------------------------------------------------------------------------
2023-03-07 01:51:07,262 EPOCH 3 done: loss 3.0222 - lr 0.000500,0.000005
2023-03-07 01:52:38,377 Evaluating as a multi-label problem: False
2023-03-07 01:52:38,788 TEST : loss 3.1518092155456543 - f1-score (micro avg) 0.5313
2023-03-07 01:52:53,858 ----------------------------------------------------------------------------------------------------
2023-03-07 02:23:47,906 epoch 4 - iter 3517/35170 - loss 2.50896422 - time (sec): 1854.05 - samples/sec: 141.79 - lr: 0.000500,0.000005
2023-03-07 02:54:42,836 epoch 4 - iter 7034/35170 - loss 2.50075229 - time (sec): 3708.98 - samples/sec: 141.53 - lr: 0.000500,0.000005
2023-03-07 03:25:35,896 epoch 4 - iter 10551/35170 - loss 2.48093741 - time (sec): 5562.04 - samples/sec: 141.33 - lr: 0.000500,0.000005
2023-03-07 03:56:33,223 epoch 4 - iter 14068/35170 - loss 2.46597423 - time (sec): 7419.36 - samples/sec: 141.02 - lr: 0.000500,0.000005
2023-03-07 04:27:12,247 epoch 4 - iter 17585/35170 - loss 2.45227888 - time (sec): 9258.39 - samples/sec: 141.20 - lr: 0.000500,0.000005
2023-03-07 04:58:04,324 epoch 4 - iter 21102/35170 - loss 2.43501157 - time (sec): 11110.47 - samples/sec: 141.12 - lr: 0.000500,0.000005
2023-03-07 05:28:52,424 epoch 4 - iter 24619/35170 - loss 2.42278415 - time (sec): 12958.57 - samples/sec: 141.07 - lr: 0.000500,0.000005
2023-03-07 05:59:30,044 epoch 4 - iter 28136/35170 - loss 2.40899956 - time (sec): 14796.19 - samples/sec: 141.08 - lr: 0.000500,0.000005
2023-03-07 06:30:28,063 epoch 4 - iter 31653/35170 - loss 2.39578329 - time (sec): 16654.20 - samples/sec: 141.08 - lr: 0.000500,0.000005
2023-03-07 07:01:05,692 epoch 4 - iter 35170/35170 - loss 2.38181312 - time (sec): 18491.83 - samples/sec: 140.99 - lr: 0.000500,0.000005
2023-03-07 07:01:05,693 saving model of current epoch
2023-03-07 07:01:13,170 ----------------------------------------------------------------------------------------------------
2023-03-07 07:01:13,170 EPOCH 4 done: loss 2.3818 - lr 0.000500,0.000005
2023-03-07 07:02:44,327 Evaluating as a multi-label problem: False
2023-03-07 07:02:44,743 TEST : loss 2.526414394378662 - f1-score (micro avg) 0.6224
2023-03-07 07:02:59,831 ----------------------------------------------------------------------------------------------------
2023-03-07 07:33:51,294 epoch 5 - iter 3517/35170 - loss 2.02625078 - time (sec): 1851.46 - samples/sec: 140.65 - lr: 0.000500,0.000005
2023-03-07 08:04:38,373 epoch 5 - iter 7034/35170 - loss 2.01957462 - time (sec): 3698.54 - samples/sec: 140.50 - lr: 0.000500,0.000005
2023-03-07 08:35:25,597 epoch 5 - iter 10551/35170 - loss 2.01875108 - time (sec): 5545.77 - samples/sec: 140.57 - lr: 0.000500,0.000005
2023-03-07 09:05:38,929 epoch 5 - iter 14068/35170 - loss 2.01799276 - time (sec): 7359.10 - samples/sec: 140.96 - lr: 0.000500,0.000005
2023-03-07 09:32:47,369 epoch 5 - iter 17585/35170 - loss 2.01404337 - time (sec): 8987.54 - samples/sec: 144.41 - lr: 0.000500,0.000005
2023-03-07 10:00:01,935 epoch 5 - iter 21102/35170 - loss 2.01014971 - time (sec): 10622.10 - samples/sec: 146.85 - lr: 0.000500,0.000005
2023-03-07 10:27:12,101 epoch 5 - iter 24619/35170 - loss 2.00314272 - time (sec): 12252.27 - samples/sec: 148.52 - lr: 0.000500,0.000005
2023-03-07 10:54:23,157 epoch 5 - iter 28136/35170 - loss 1.99833802 - time (sec): 13883.33 - samples/sec: 150.02 - lr: 0.000500,0.000005
2023-03-07 11:22:16,816 epoch 5 - iter 31653/35170 - loss 1.99177097 - time (sec): 15556.98 - samples/sec: 150.76 - lr: 0.000500,0.000005
2023-03-07 11:52:58,436 epoch 5 - iter 35170/35170 - loss 1.98683986 - time (sec): 17398.60 - samples/sec: 149.85 - lr: 0.000500,0.000005
2023-03-07 11:52:58,437 saving model of current epoch
2023-03-07 11:53:04,509 ----------------------------------------------------------------------------------------------------
2023-03-07 11:53:04,509 EPOCH 5 done: loss 1.9868 - lr 0.000500,0.000005
2023-03-07 11:54:54,417 Evaluating as a multi-label problem: False
2023-03-07 11:54:55,297 TEST : loss 2.2056853771209717 - f1-score (micro avg) 0.6763
2023-03-07 11:55:20,425 ----------------------------------------------------------------------------------------------------
2023-03-07 12:26:56,226 epoch 6 - iter 3517/35170 - loss 1.72378572 - time (sec): 1895.80 - samples/sec: 137.04 - lr: 0.000500,0.000005
2023-03-07 12:57:54,237 epoch 6 - iter 7034/35170 - loss 1.73493826 - time (sec): 3753.81 - samples/sec: 138.62 - lr: 0.000500,0.000005
2023-03-07 13:32:36,229 epoch 6 - iter 10551/35170 - loss 1.73114972 - time (sec): 5835.80 - samples/sec: 133.51 - lr: 0.000500,0.000005
2023-03-07 14:05:47,110 epoch 6 - iter 14068/35170 - loss 1.73440839 - time (sec): 7826.68 - samples/sec: 132.81 - lr: 0.000500,0.000005
2023-03-07 14:37:03,659 epoch 6 - iter 17585/35170 - loss 1.73249863 - time (sec): 9703.23 - samples/sec: 133.90 - lr: 0.000500,0.000005
2023-03-07 15:08:19,213 epoch 6 - iter 21102/35170 - loss 1.73240138 - time (sec): 11578.79 - samples/sec: 134.88 - lr: 0.000500,0.000005
2023-03-07 15:39:40,895 epoch 6 - iter 24619/35170 - loss 1.73091647 - time (sec): 13460.47 - samples/sec: 135.46 - lr: 0.000500,0.000005
2023-03-07 16:11:06,417 epoch 6 - iter 28136/35170 - loss 1.73084811 - time (sec): 15345.99 - samples/sec: 135.82 - lr: 0.000500,0.000005
2023-03-07 16:42:42,677 epoch 6 - iter 31653/35170 - loss 1.72952253 - time (sec): 17242.25 - samples/sec: 136.13 - lr: 0.000500,0.000005
2023-03-07 17:13:52,495 epoch 6 - iter 35170/35170 - loss 1.72821343 - time (sec): 19112.07 - samples/sec: 136.41 - lr: 0.000500,0.000005
2023-03-07 17:13:52,496 saving model of current epoch
2023-03-07 17:13:58,718 ----------------------------------------------------------------------------------------------------
2023-03-07 17:13:58,719 EPOCH 6 done: loss 1.7282 - lr 0.000500,0.000005
2023-03-07 17:15:32,009 Evaluating as a multi-label problem: False
2023-03-07 17:15:32,459 TEST : loss 2.0377469062805176 - f1-score (micro avg) 0.7015
2023-03-07 17:15:48,594 ----------------------------------------------------------------------------------------------------
2023-03-07 17:47:08,998 epoch 7 - iter 3517/35170 - loss 1.51800777 - time (sec): 1880.40 - samples/sec: 138.46 - lr: 0.000500,0.000005
2023-03-07 18:18:43,179 epoch 7 - iter 7034/35170 - loss 1.53094539 - time (sec): 3774.58 - samples/sec: 138.07 - lr: 0.000500,0.000005
2023-03-07 18:50:34,836 epoch 7 - iter 10551/35170 - loss 1.53711361 - time (sec): 5686.24 - samples/sec: 137.71 - lr: 0.000500,0.000005
2023-03-07 19:22:06,115 epoch 7 - iter 14068/35170 - loss 1.54176563 - time (sec): 7577.52 - samples/sec: 138.01 - lr: 0.000500,0.000005
2023-03-07 19:53:30,811 epoch 7 - iter 17585/35170 - loss 1.54063968 - time (sec): 9462.22 - samples/sec: 138.13 - lr: 0.000500,0.000005
2023-03-07 20:25:10,141 epoch 7 - iter 21102/35170 - loss 1.54266337 - time (sec): 11361.55 - samples/sec: 138.02 - lr: 0.000500,0.000005
2023-03-07 20:56:51,704 epoch 7 - iter 24619/35170 - loss 1.54553816 - time (sec): 13263.11 - samples/sec: 137.97 - lr: 0.000500,0.000005
2023-03-07 21:28:30,023 epoch 7 - iter 28136/35170 - loss 1.54721824 - time (sec): 15161.43 - samples/sec: 137.88 - lr: 0.000500,0.000005
2023-03-07 22:00:00,987 epoch 7 - iter 31653/35170 - loss 1.54854315 - time (sec): 17052.39 - samples/sec: 137.67 - lr: 0.000500,0.000005
2023-03-07 22:31:23,778 epoch 7 - iter 35170/35170 - loss 1.55029451 - time (sec): 18935.18 - samples/sec: 137.69 - lr: 0.000500,0.000005
2023-03-07 22:31:23,779 saving model of current epoch
2023-03-07 22:31:30,609 ----------------------------------------------------------------------------------------------------
2023-03-07 22:31:30,609 EPOCH 7 done: loss 1.5503 - lr 0.000500,0.000005
2023-03-07 22:33:03,579 Evaluating as a multi-label problem: False
2023-03-07 22:33:04,014 TEST : loss 1.9512856006622314 - f1-score (micro avg) 0.7206
2023-03-07 22:33:19,298 ----------------------------------------------------------------------------------------------------
2023-03-07 23:04:41,262 epoch 8 - iter 3517/35170 - loss 1.36082366 - time (sec): 1881.96 - samples/sec: 138.21 - lr: 0.000500,0.000005
2023-03-07 23:36:03,737 epoch 8 - iter 7034/35170 - loss 1.37509424 - time (sec): 3764.44 - samples/sec: 138.12 - lr: 0.000500,0.000005
2023-03-08 00:07:31,946 epoch 8 - iter 10551/35170 - loss 1.38454412 - time (sec): 5652.65 - samples/sec: 138.30 - lr: 0.000500,0.000005
2023-03-08 00:39:11,351 epoch 8 - iter 14068/35170 - loss 1.39526371 - time (sec): 7552.05 - samples/sec: 137.97 - lr: 0.000500,0.000005
2023-03-08 01:10:40,384 epoch 8 - iter 17585/35170 - loss 1.40103515 - time (sec): 9441.09 - samples/sec: 138.05 - lr: 0.000500,0.000005
2023-03-08 01:42:24,505 epoch 8 - iter 21102/35170 - loss 1.40660784 - time (sec): 11345.21 - samples/sec: 137.98 - lr: 0.000500,0.000005
2023-03-08 02:13:55,046 epoch 8 - iter 24619/35170 - loss 1.41084423 - time (sec): 13235.75 - samples/sec: 137.79 - lr: 0.000500,0.000005
2023-03-08 02:45:33,126 epoch 8 - iter 28136/35170 - loss 1.41587730 - time (sec): 15133.83 - samples/sec: 137.75 - lr: 0.000500,0.000005
2023-03-08 03:17:02,113 epoch 8 - iter 31653/35170 - loss 1.42091686 - time (sec): 17022.81 - samples/sec: 137.73 - lr: 0.000500,0.000005
2023-03-08 03:48:32,645 epoch 8 - iter 35170/35170 - loss 1.42502918 - time (sec): 18913.35 - samples/sec: 137.85 - lr: 0.000500,0.000005
2023-03-08 03:48:32,646 saving model of current epoch
2023-03-08 03:48:38,933 ----------------------------------------------------------------------------------------------------
2023-03-08 03:48:38,933 EPOCH 8 done: loss 1.4250 - lr 0.000500,0.000005
2023-03-08 03:50:11,300 Evaluating as a multi-label problem: False
2023-03-08 03:50:11,735 TEST : loss 1.917593002319336 - f1-score (micro avg) 0.7267
2023-03-08 03:50:26,829 ----------------------------------------------------------------------------------------------------
2023-03-08 04:20:48,474 epoch 9 - iter 3517/35170 - loss 1.27093325 - time (sec): 1821.64 - samples/sec: 144.48 - lr: 0.000500,0.000005
2023-03-08 04:52:25,420 epoch 9 - iter 7034/35170 - loss 1.27675406 - time (sec): 3718.59 - samples/sec: 141.31 - lr: 0.000500,0.000005
2023-03-08 05:24:02,369 epoch 9 - iter 10551/35170 - loss 1.28415352 - time (sec): 5615.54 - samples/sec: 139.78 - lr: 0.000500,0.000005
2023-03-08 05:55:23,388 epoch 9 - iter 14068/35170 - loss 1.29091568 - time (sec): 7496.56 - samples/sec: 139.38 - lr: 0.000500,0.000005
2023-03-08 06:26:56,599 epoch 9 - iter 17585/35170 - loss 1.30089922 - time (sec): 9389.77 - samples/sec: 139.07 - lr: 0.000500,0.000005
2023-03-08 06:58:33,994 epoch 9 - iter 21102/35170 - loss 1.31043160 - time (sec): 11287.16 - samples/sec: 139.00 - lr: 0.000500,0.000005
2023-03-08 07:29:55,321 epoch 9 - iter 24619/35170 - loss 1.31706407 - time (sec): 13168.49 - samples/sec: 138.90 - lr: 0.000500,0.000005
2023-03-08 08:01:21,978 epoch 9 - iter 28136/35170 - loss 1.32080213 - time (sec): 15055.15 - samples/sec: 138.73 - lr: 0.000500,0.000005
2023-03-08 08:32:47,127 epoch 9 - iter 31653/35170 - loss 1.32586179 - time (sec): 16940.30 - samples/sec: 138.63 - lr: 0.000500,0.000005
2023-03-08 09:04:04,726 epoch 9 - iter 35170/35170 - loss 1.32996374 - time (sec): 18817.90 - samples/sec: 138.55 - lr: 0.000500,0.000005
2023-03-08 09:04:04,728 saving model of current epoch
2023-03-08 09:04:11,957 ----------------------------------------------------------------------------------------------------
2023-03-08 09:04:11,957 EPOCH 9 done: loss 1.3300 - lr 0.000500,0.000005
2023-03-08 09:05:44,263 Evaluating as a multi-label problem: False
2023-03-08 09:05:44,696 TEST : loss 1.9297962188720703 - f1-score (micro avg) 0.7289
2023-03-08 09:05:59,834 ----------------------------------------------------------------------------------------------------
2023-03-08 09:37:36,393 epoch 10 - iter 3517/35170 - loss 1.19444035 - time (sec): 1896.56 - samples/sec: 137.30 - lr: 0.000500,0.000005
2023-03-08 10:09:10,015 epoch 10 - iter 7034/35170 - loss 1.20096060 - time (sec): 3790.18 - samples/sec: 137.77 - lr: 0.000500,0.000005
2023-03-08 10:40:30,303 epoch 10 - iter 10551/35170 - loss 1.20963674 - time (sec): 5670.47 - samples/sec: 137.78 - lr: 0.000500,0.000005
2023-03-08 11:12:00,465 epoch 10 - iter 14068/35170 - loss 1.21522832 - time (sec): 7560.63 - samples/sec: 137.94 - lr: 0.000500,0.000005
2023-03-08 11:43:27,682 epoch 10 - iter 17585/35170 - loss 1.22146733 - time (sec): 9447.85 - samples/sec: 137.75 - lr: 0.000500,0.000005
2023-03-08 12:14:56,108 epoch 10 - iter 21102/35170 - loss 1.22730667 - time (sec): 11336.27 - samples/sec: 137.80 - lr: 0.000500,0.000005
2023-03-08 12:46:28,542 epoch 10 - iter 24619/35170 - loss 1.23534618 - time (sec): 13228.71 - samples/sec: 137.78 - lr: 0.000500,0.000005
2023-03-08 13:18:02,631 epoch 10 - iter 28136/35170 - loss 1.24494125 - time (sec): 15122.80 - samples/sec: 137.81 - lr: 0.000500,0.000005
2023-03-08 13:49:27,258 epoch 10 - iter 31653/35170 - loss 1.25214782 - time (sec): 17007.42 - samples/sec: 137.80 - lr: 0.000500,0.000005
2023-03-08 14:21:00,287 epoch 10 - iter 35170/35170 - loss 1.25887281 - time (sec): 18900.45 - samples/sec: 137.94 - lr: 0.000500,0.000005
2023-03-08 14:21:00,288 saving model of current epoch
2023-03-08 14:21:06,948 ----------------------------------------------------------------------------------------------------
2023-03-08 14:21:06,948 EPOCH 10 done: loss 1.2589 - lr 0.000500,0.000005
2023-03-08 14:22:43,923 Evaluating as a multi-label problem: False
2023-03-08 14:22:44,365 TEST : loss 1.8825819492340088 - f1-score (micro avg) 0.742
2023-03-08 14:22:55,052 ----------------------------------------------------------------------------------------------------
2023-03-08 14:54:12,413 epoch 11 - iter 3517/35170 - loss 1.13191158 - time (sec): 1877.36 - samples/sec: 138.87 - lr: 0.000500,0.000005
2023-03-08 15:25:44,025 epoch 11 - iter 7034/35170 - loss 1.14205765 - time (sec): 3768.97 - samples/sec: 138.44 - lr: 0.000500,0.000005
2023-03-08 15:57:19,678 epoch 11 - iter 10551/35170 - loss 1.14973266 - time (sec): 5664.63 - samples/sec: 138.15 - lr: 0.000500,0.000005
2023-03-08 16:28:47,711 epoch 11 - iter 14068/35170 - loss 1.16107622 - time (sec): 7552.66 - samples/sec: 138.05 - lr: 0.000500,0.000005
2023-03-08 17:00:23,212 epoch 11 - iter 17585/35170 - loss 1.16724740 - time (sec): 9448.16 - samples/sec: 137.82 - lr: 0.000500,0.000005
2023-03-08 17:31:49,558 epoch 11 - iter 21102/35170 - loss 1.17422775 - time (sec): 11334.51 - samples/sec: 137.96 - lr: 0.000500,0.000005
2023-03-08 18:03:15,330 epoch 11 - iter 24619/35170 - loss 1.18139287 - time (sec): 13220.28 - samples/sec: 138.00 - lr: 0.000500,0.000005
2023-03-08 18:34:44,763 epoch 11 - iter 28136/35170 - loss 1.18825476 - time (sec): 15109.71 - samples/sec: 138.10 - lr: 0.000500,0.000005
2023-03-08 19:06:14,821 epoch 11 - iter 31653/35170 - loss 1.19478132 - time (sec): 16999.77 - samples/sec: 138.08 - lr: 0.000500,0.000005
2023-03-08 19:37:40,725 epoch 11 - iter 35170/35170 - loss 1.20059780 - time (sec): 18885.67 - samples/sec: 138.05 - lr: 0.000500,0.000005
2023-03-08 19:37:40,727 saving model of current epoch
2023-03-08 19:37:47,736 ----------------------------------------------------------------------------------------------------
2023-03-08 19:37:47,737 EPOCH 11 done: loss 1.2006 - lr 0.000500,0.000005
2023-03-08 19:39:20,088 Evaluating as a multi-label problem: False
2023-03-08 19:39:20,546 TEST : loss 1.9015861749649048 - f1-score (micro avg) 0.7431
2023-03-08 19:39:31,383 ----------------------------------------------------------------------------------------------------
2023-03-08 20:11:13,719 epoch 12 - iter 3517/35170 - loss 1.07784352 - time (sec): 1902.34 - samples/sec: 137.74 - lr: 0.000500,0.000005
2023-03-08 20:42:38,559 epoch 12 - iter 7034/35170 - loss 1.09041492 - time (sec): 3787.18 - samples/sec: 137.64 - lr: 0.000500,0.000005
2023-03-08 21:14:11,023 epoch 12 - iter 10551/35170 - loss 1.10239660 - time (sec): 5679.64 - samples/sec: 137.49 - lr: 0.000500,0.000005
2023-03-08 21:45:33,853 epoch 12 - iter 14068/35170 - loss 1.10685448 - time (sec): 7562.47 - samples/sec: 137.76 - lr: 0.000500,0.000005
2023-03-08 22:17:13,769 epoch 12 - iter 17585/35170 - loss 1.11294834 - time (sec): 9462.39 - samples/sec: 137.72 - lr: 0.000500,0.000005
2023-03-08 22:48:51,845 epoch 12 - iter 21102/35170 - loss 1.12238810 - time (sec): 11360.46 - samples/sec: 137.79 - lr: 0.000500,0.000005
2023-03-08 23:20:20,175 epoch 12 - iter 24619/35170 - loss 1.13070642 - time (sec): 13248.79 - samples/sec: 137.82 - lr: 0.000500,0.000005
2023-03-08 23:51:47,527 epoch 12 - iter 28136/35170 - loss 1.13910913 - time (sec): 15136.14 - samples/sec: 137.83 - lr: 0.000500,0.000005
2023-03-09 00:23:15,652 epoch 12 - iter 31653/35170 - loss 1.14693189 - time (sec): 17024.27 - samples/sec: 137.81 - lr: 0.000500,0.000005
2023-03-09 00:54:39,354 epoch 12 - iter 35170/35170 - loss 1.15439639 - time (sec): 18907.97 - samples/sec: 137.89 - lr: 0.000500,0.000005
2023-03-09 00:54:39,355 saving model of current epoch
2023-03-09 00:54:46,219 ----------------------------------------------------------------------------------------------------
2023-03-09 00:54:46,220 EPOCH 12 done: loss 1.1544 - lr 0.000500,0.000005
2023-03-09 00:56:18,221 Evaluating as a multi-label problem: False
2023-03-09 00:56:18,657 TEST : loss 1.8860255479812622 - f1-score (micro avg) 0.7558
2023-03-09 00:56:33,680 ----------------------------------------------------------------------------------------------------
2023-03-09 01:28:06,514 epoch 13 - iter 3517/35170 - loss 1.02674995 - time (sec): 1892.83 - samples/sec: 138.07 - lr: 0.000500,0.000005
2023-03-09 01:59:34,505 epoch 13 - iter 7034/35170 - loss 1.04204067 - time (sec): 3780.82 - samples/sec: 138.38 - lr: 0.000500,0.000005
2023-03-09 02:31:08,517 epoch 13 - iter 10551/35170 - loss 1.05312117 - time (sec): 5674.84 - samples/sec: 138.04 - lr: 0.000500,0.000005
2023-03-09 03:02:22,259 epoch 13 - iter 14068/35170 - loss 1.06244746 - time (sec): 7548.58 - samples/sec: 138.11 - lr: 0.000500,0.000005
2023-03-09 03:33:51,506 epoch 13 - iter 17585/35170 - loss 1.07305278 - time (sec): 9437.83 - samples/sec: 138.08 - lr: 0.000500,0.000005
2023-03-09 04:05:17,703 epoch 13 - iter 21102/35170 - loss 1.08394958 - time (sec): 11324.02 - samples/sec: 138.12 - lr: 0.000500,0.000005
2023-03-09 04:36:39,738 epoch 13 - iter 24619/35170 - loss 1.09277884 - time (sec): 13206.06 - samples/sec: 138.15 - lr: 0.000500,0.000005
2023-03-09 05:08:01,802 epoch 13 - iter 28136/35170 - loss 1.10052816 - time (sec): 15088.12 - samples/sec: 138.19 - lr: 0.000500,0.000005
2023-03-09 05:39:39,859 epoch 13 - iter 31653/35170 - loss 1.10887488 - time (sec): 16986.18 - samples/sec: 138.07 - lr: 0.000500,0.000005
2023-03-09 06:10:11,794 epoch 13 - iter 35170/35170 - loss 1.11587508 - time (sec): 18818.11 - samples/sec: 138.54 - lr: 0.000500,0.000005
2023-03-09 06:10:11,795 saving model of current epoch
2023-03-09 06:10:18,020 ----------------------------------------------------------------------------------------------------
2023-03-09 06:10:18,021 EPOCH 13 done: loss 1.1159 - lr 0.000500,0.000005
2023-03-09 06:11:49,693 Evaluating as a multi-label problem: False
2023-03-09 06:11:50,137 TEST : loss 1.885928988456726 - f1-score (micro avg) 0.7596
2023-03-09 06:12:05,102 ----------------------------------------------------------------------------------------------------
2023-03-09 06:42:17,322 epoch 14 - iter 3517/35170 - loss 0.98226997 - time (sec): 1812.22 - samples/sec: 143.04 - lr: 0.000500,0.000005
2023-03-09 07:12:44,773 epoch 14 - iter 7034/35170 - loss 0.99806430 - time (sec): 3639.67 - samples/sec: 143.57 - lr: 0.000500,0.000005
2023-03-09 07:43:07,838 epoch 14 - iter 10551/35170 - loss 1.01612563 - time (sec): 5462.74 - samples/sec: 143.60 - lr: 0.000500,0.000005
2023-03-09 08:14:45,811 epoch 14 - iter 14068/35170 - loss 1.02778409 - time (sec): 7360.71 - samples/sec: 141.77 - lr: 0.000500,0.000005
2023-03-09 08:46:08,637 epoch 14 - iter 17585/35170 - loss 1.03981984 - time (sec): 9243.53 - samples/sec: 141.19 - lr: 0.000500,0.000005
2023-03-09 09:17:43,678 epoch 14 - iter 21102/35170 - loss 1.05046260 - time (sec): 11138.57 - samples/sec: 140.62 - lr: 0.000500,0.000005
2023-03-09 09:49:06,740 epoch 14 - iter 24619/35170 - loss 1.06175204 - time (sec): 13021.64 - samples/sec: 140.20 - lr: 0.000500,0.000005
2023-03-09 10:20:33,687 epoch 14 - iter 28136/35170 - loss 1.07035243 - time (sec): 14908.58 - samples/sec: 140.01 - lr: 0.000500,0.000005
2023-03-09 10:52:07,562 epoch 14 - iter 31653/35170 - loss 1.07819039 - time (sec): 16802.46 - samples/sec: 139.68 - lr: 0.000500,0.000005
2023-03-09 11:23:00,029 epoch 14 - iter 35170/35170 - loss 1.08540837 - time (sec): 18654.93 - samples/sec: 139.76 - lr: 0.000500,0.000005
2023-03-09 11:23:00,030 saving model of current epoch
2023-03-09 11:23:06,337 ----------------------------------------------------------------------------------------------------
2023-03-09 11:23:06,337 EPOCH 14 done: loss 1.0854 - lr 0.000500,0.000005
2023-03-09 11:24:38,080 Evaluating as a multi-label problem: False
2023-03-09 11:24:38,515 TEST : loss 1.9420851469039917 - f1-score (micro avg) 0.7527
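Note that the learning-rate pair stays at 0.000500,0.000005 through all of these epochs. With patience "3" and anneal_factor "0.5", the scheduler only halves the rates once its annealing metric has stopped improving for more than three epochs, and because train_with_dev is True that metric is the training loss, which is still falling here. The behavior is analogous to torch's ReduceLROnPlateau (a sketch of the plateau logic, not Flair's own scheduler class):

    import torch
    from torch.optim.lr_scheduler import ReduceLROnPlateau

    model = torch.nn.Linear(768, 2)  # stand-in model
    optimizer = torch.optim.SGD(model.parameters(), lr=0.0005)
    scheduler = ReduceLROnPlateau(optimizer, mode="min", factor=0.5, patience=3)

    # Per-epoch losses from the log so far: each improves on the last,
    # so the plateau counter never trips and the LR is never halved.
    for epoch_loss in [6.3055, 4.1616, 3.0222, 2.3818, 1.9868, 1.7282]:
        scheduler.step(epoch_loss)
    print(optimizer.param_groups[0]["lr"])  # still 0.0005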
2023-03-09 11:24:53,682 ----------------------------------------------------------------------------------------------------
2023-03-09 11:56:15,566 epoch 15 - iter 3517/35170 - loss 0.96768369 - time (sec): 1881.88 - samples/sec: 139.37 - lr: 0.000500,0.000005
2023-03-09 12:27:33,024 epoch 15 - iter 7034/35170 - loss 0.98695102 - time (sec): 3759.34 - samples/sec: 139.31 - lr: 0.000500,0.000005
2023-03-09 12:58:50,283 epoch 15 - iter 10551/35170 - loss 0.99719236 - time (sec): 5636.60 - samples/sec: 138.64 - lr: 0.000500,0.000005
2023-03-09 13:29:54,359 epoch 15 - iter 14068/35170 - loss 1.01028884 - time (sec): 7500.68 - samples/sec: 138.78 - lr: 0.000500,0.000005
2023-03-09 14:01:22,824 epoch 15 - iter 17585/35170 - loss 1.01895935 - time (sec): 9389.14 - samples/sec: 138.66 - lr: 0.000500,0.000005
2023-03-09 14:32:50,201 epoch 15 - iter 21102/35170 - loss 1.02760237 - time (sec): 11276.52 - samples/sec: 138.64 - lr: 0.000500,0.000005
2023-03-09 15:04:05,065 epoch 15 - iter 24619/35170 - loss 1.03706006 - time (sec): 13151.38 - samples/sec: 138.70 - lr: 0.000500,0.000005
2023-03-09 15:35:27,902 epoch 15 - iter 28136/35170 - loss 1.04607397 - time (sec): 15034.22 - samples/sec: 138.70 - lr: 0.000500,0.000005
2023-03-09 16:07:01,804 epoch 15 - iter 31653/35170 - loss 1.05400717 - time (sec): 16928.12 - samples/sec: 138.64 - lr: 0.000500,0.000005
2023-03-09 16:38:15,607 epoch 15 - iter 35170/35170 - loss 1.06115452 - time (sec): 18801.92 - samples/sec: 138.66 - lr: 0.000500,0.000005
2023-03-09 16:38:15,608 saving model of current epoch
2023-03-09 16:38:21,744 ----------------------------------------------------------------------------------------------------
2023-03-09 16:38:21,745 EPOCH 15 done: loss 1.0612 - lr 0.000500,0.000005
2023-03-09 16:39:54,060 Evaluating as a multi-label problem: False
2023-03-09 16:39:54,496 TEST : loss 1.9173129796981812 - f1-score (micro avg) 0.7586
2023-03-09 16:40:09,518 ----------------------------------------------------------------------------------------------------
2023-03-09 17:11:23,637 epoch 16 - iter 3517/35170 - loss 0.94413688 - time (sec): 1874.12 - samples/sec: 139.38 - lr: 0.000500,0.000005
2023-03-09 17:42:47,299 epoch 16 - iter 7034/35170 - loss 0.96269610 - time (sec): 3757.78 - samples/sec: 138.82 - lr: 0.000500,0.000005
2023-03-09 18:14:21,901 epoch 16 - iter 10551/35170 - loss 0.97523268 - time (sec): 5652.38 - samples/sec: 138.61 - lr: 0.000500,0.000005
2023-03-09 18:46:00,937 epoch 16 - iter 14068/35170 - loss 0.98993541 - time (sec): 7551.42 - samples/sec: 138.48 - lr: 0.000500,0.000005
2023-03-09 19:17:29,069 epoch 16 - iter 17585/35170 - loss 0.99930330 - time (sec): 9439.55 - samples/sec: 138.20 - lr: 0.000500,0.000005
2023-03-09 19:48:50,805 epoch 16 - iter 21102/35170 - loss 1.00880767 - time (sec): 11321.29 - samples/sec: 138.29 - lr: 0.000500,0.000005
2023-03-09 20:20:18,364 epoch 16 - iter 24619/35170 - loss 1.01870589 - time (sec): 13208.85 - samples/sec: 138.31 - lr: 0.000500,0.000005
2023-03-09 20:51:47,809 epoch 16 - iter 28136/35170 - loss 1.02720699 - time (sec): 15098.29 - samples/sec: 138.24 - lr: 0.000500,0.000005
2023-03-09 21:23:21,055 epoch 16 - iter 31653/35170 - loss 1.03612060 - time (sec): 16991.54 - samples/sec: 138.22 - lr: 0.000500,0.000005
2023-03-09 21:54:52,421 epoch 16 - iter 35170/35170 - loss 1.04272276 - time (sec): 18882.90 - samples/sec: 138.07 - lr: 0.000500,0.000005
2023-03-09 21:54:52,422 saving model of current epoch
2023-03-09 21:54:59,226 ----------------------------------------------------------------------------------------------------
2023-03-09 21:54:59,226 EPOCH 16 done: loss 1.0427 - lr 0.000500,0.000005
2023-03-09 21:56:35,810 Evaluating as a multi-label problem: False
2023-03-09 21:56:36,256 TEST : loss 1.9607620239257812 - f1-score (micro avg) 0.7583
2023-03-09 21:56:46,881 ----------------------------------------------------------------------------------------------------
2023-03-09 22:28:25,333 epoch 17 - iter 3517/35170 - loss 0.93312020 - time (sec): 1898.45 - samples/sec: 138.03 - lr: 0.000500,0.000005
2023-03-09 23:00:13,406 epoch 17 - iter 7034/35170 - loss 0.94638753 - time (sec): 3806.53 - samples/sec: 137.84 - lr: 0.000500,0.000005
2023-03-09 23:31:40,950 epoch 17 - iter 10551/35170 - loss 0.95881393 - time (sec): 5694.07 - samples/sec: 137.60 - lr: 0.000500,0.000005
2023-03-10 00:03:18,971 epoch 17 - iter 14068/35170 - loss 0.97020914 - time (sec): 7592.09 - samples/sec: 137.33 - lr: 0.000500,0.000005
2023-03-10 00:34:45,702 epoch 17 - iter 17585/35170 - loss 0.98020132 - time (sec): 9478.82 - samples/sec: 137.42 - lr: 0.000500,0.000005
2023-03-10 01:06:21,135 epoch 17 - iter 21102/35170 - loss 0.99002787 - time (sec): 11374.25 - samples/sec: 137.67 - lr: 0.000500,0.000005
2023-03-10 01:37:59,325 epoch 17 - iter 24619/35170 - loss 0.99994198 - time (sec): 13272.44 - samples/sec: 137.60 - lr: 0.000500,0.000005
2023-03-10 02:09:30,619 epoch 17 - iter 28136/35170 - loss 1.00943037 - time (sec): 15163.74 - samples/sec: 137.57 - lr: 0.000500,0.000005
2023-03-10 02:40:58,381 epoch 17 - iter 31653/35170 - loss 1.01881330 - time (sec): 17051.50 - samples/sec: 137.58 - lr: 0.000500,0.000005
2023-03-10 03:12:27,490 epoch 17 - iter 35170/35170 - loss 1.02650989 - time (sec): 18940.61 - samples/sec: 137.65 - lr: 0.000500,0.000005
2023-03-10 03:12:27,491 saving model of current epoch
2023-03-10 03:12:34,553 ----------------------------------------------------------------------------------------------------
2023-03-10 03:12:34,554 EPOCH 17 done: loss 1.0265 - lr 0.000500,0.000005
2023-03-10 03:14:07,360 Evaluating as a multi-label problem: False
2023-03-10 03:14:07,807 TEST : loss 1.9712779521942139 - f1-score (micro avg) 0.7544
2023-03-10 03:14:18,735 ----------------------------------------------------------------------------------------------------
2023-03-10 03:46:01,042 epoch 18 - iter 3517/35170 - loss 0.91075228 - time (sec): 1902.31 - samples/sec: 136.76 - lr: 0.000500,0.000005
2023-03-10 04:17:34,595 epoch 18 - iter 7034/35170 - loss 0.92448904 - time (sec): 3795.86 - samples/sec: 136.73 - lr: 0.000500,0.000005
2023-03-10 04:49:13,728 epoch 18 - iter 10551/35170 - loss 0.93869160 - time (sec): 5694.99 - samples/sec: 137.31 - lr: 0.000500,0.000005
2023-03-10 05:20:33,030 epoch 18 - iter 14068/35170 - loss 0.95222432 - time (sec): 7574.29 - samples/sec: 137.62 - lr: 0.000500,0.000005
2023-03-10 05:52:02,703 epoch 18 - iter 17585/35170 - loss 0.96378830 - time (sec): 9463.97 - samples/sec: 137.74 - lr: 0.000500,0.000005
2023-03-10 06:23:37,779 epoch 18 - iter 21102/35170 - loss 0.97574844 - time (sec): 11359.04 - samples/sec: 137.80 - lr: 0.000500,0.000005
2023-03-10 06:55:07,967 epoch 18 - iter 24619/35170 - loss 0.98494412 - time (sec): 13249.23 - samples/sec: 137.88 - lr: 0.000500,0.000005
2023-03-10 07:26:31,410 epoch 18 - iter 28136/35170 - loss 0.99386105 - time (sec): 15132.67 - samples/sec: 137.90 - lr: 0.000500,0.000005
2023-03-10 07:57:54,655 epoch 18 - iter 31653/35170 - loss 1.00299973 - time (sec): 17015.92 - samples/sec: 137.89 - lr: 0.000500,0.000005
2023-03-10 08:29:27,977 epoch 18 - iter 35170/35170 - loss 1.01105020 - time (sec): 18909.24 - samples/sec: 137.88 - lr: 0.000500,0.000005
2023-03-10 08:29:27,978 saving model of current epoch
2023-03-10 08:29:34,589 ----------------------------------------------------------------------------------------------------
2023-03-10 08:29:34,590 EPOCH 18 done: loss 1.0111 - lr 0.000500,0.000005
2023-03-10 08:31:06,879 Evaluating as a multi-label problem: False
2023-03-10 08:31:07,324 TEST : loss 1.9697309732437134 - f1-score (micro avg) 0.7601
2023-03-10 08:31:18,150 ----------------------------------------------------------------------------------------------------
2023-03-10 09:02:51,600 epoch 19 - iter 3517/35170 - loss 0.89109001 - time (sec): 1893.45 - samples/sec: 136.96 - lr: 0.000500,0.000005
2023-03-10 09:34:13,852 epoch 19 - iter 7034/35170 - loss 0.91109720 - time (sec): 3775.70 - samples/sec: 137.80 - lr: 0.000500,0.000005
2023-03-10 10:05:35,591 epoch 19 - iter 10551/35170 - loss 0.92479799 - time (sec): 5657.44 - samples/sec: 137.72 - lr: 0.000500,0.000005
2023-03-10 10:37:14,908 epoch 19 - iter 14068/35170 - loss 0.93831726 - time (sec): 7556.76 - samples/sec: 137.65 - lr: 0.000500,0.000005
2023-03-10 11:08:50,756 epoch 19 - iter 17585/35170 - loss 0.94988053 - time (sec): 9452.61 - samples/sec: 137.73 - lr: 0.000500,0.000005
2023-03-10 11:40:24,861 epoch 19 - iter 21102/35170 - loss 0.95966249 - time (sec): 11346.71 - samples/sec: 137.88 - lr: 0.000500,0.000005
2023-03-10 12:11:55,389 epoch 19 - iter 24619/35170 - loss 0.97052343 - time (sec): 13237.24 - samples/sec: 137.81 - lr: 0.000500,0.000005
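To recover the learning curve from a log in this format, the per-epoch TEST lines can be scraped with a few lines of Python (a sketch that assumes exactly the line format shown above, e.g. Flair's default training.log):

    import re

    # Matches lines like:
    # "TEST : loss 1.9697309732437134 - f1-score (micro avg) 0.7601"
    PATTERN = re.compile(r"TEST : loss ([\d.]+) - f1-score \(micro avg\) ([\d.]+)")

    def parse_test_scores(log_path):
        """Return one (test_loss, micro_f1) pair per completed epoch."""
        scores = []
        with open(log_path) as log_file:
            for line in log_file:
                match = PATTERN.search(line)
                if match:
                    scores.append((float(match.group(1)), float(match.group(2))))
        return scores

    # e.g. parse_test_scores("training.log")
    # -> [(6.2561, 0.2969), (4.2571, 0.4316), (3.1518, 0.5313), ...] (values rounded)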