pipeline_tag (stringclasses, 48 values) | library_name (stringclasses, 205 values) | text (stringlengths, 0–18.3M) | metadata (stringlengths, 2–1.07B) | id (stringlengths, 5–122) | last_modified (null) | tags (sequencelengths, 1–1.84k) | sha (null) | created_at (stringlengths, 25–25)
---|---|---|---|---|---|---|---|---|
question-answering | transformers | {} | AnonymousSub/rule_based_roberta_bert_triplet_epochs_1_shard_1_squad2.0 | null | ["transformers", "pytorch", "roberta", "question-answering", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | AnonymousSub/rule_based_roberta_bert_triplet_epochs_1_shard_1_wikiqa | null | ["transformers", "pytorch", "roberta", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_bert_triplet_epochs_1_shard_1_wikiqa_copy | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_hier_quadruplet_0.1_epochs_1_shard_1 | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
question-answering | transformers | {} | AnonymousSub/rule_based_roberta_hier_quadruplet_0.1_epochs_1_shard_1_squad2.0 | null | ["transformers", "pytorch", "roberta", "question-answering", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_hier_quadruplet_epochs_1_shard_1 | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_hier_quadruplet_epochs_1_shard_10 | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
question-answering | transformers | {} | AnonymousSub/rule_based_roberta_hier_quadruplet_epochs_1_shard_1_squad2.0 | null | ["transformers", "pytorch", "roberta", "question-answering", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | AnonymousSub/rule_based_roberta_hier_quadruplet_epochs_1_shard_1_wikiqa | null | ["transformers", "pytorch", "roberta", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_hier_triplet_0.1_epochs_1_shard_1 | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
question-answering | transformers | {} | AnonymousSub/rule_based_roberta_hier_triplet_0.1_epochs_1_shard_1_squad2.0 | null | ["transformers", "pytorch", "roberta", "question-answering", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_hier_triplet_epochs_1_shard_1 | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_hier_triplet_epochs_1_shard_10 | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
question-answering | transformers | {} | AnonymousSub/rule_based_roberta_hier_triplet_epochs_1_shard_1_squad2.0 | null | ["transformers", "pytorch", "roberta", "question-answering", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | AnonymousSub/rule_based_roberta_hier_triplet_epochs_1_shard_1_wikiqa | null | ["transformers", "pytorch", "roberta", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_hier_triplet_epochs_1_shard_1_wikiqa_copy | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_only_classfn_epochs_1_shard_1 | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_only_classfn_epochs_1_shard_10 | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
question-answering | transformers | {} | AnonymousSub/rule_based_roberta_only_classfn_epochs_1_shard_1_squad2.0 | null | ["transformers", "pytorch", "roberta", "question-answering", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | AnonymousSub/rule_based_roberta_only_classfn_epochs_1_shard_1_wikiqa | null | ["transformers", "pytorch", "roberta", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_only_classfn_twostage_epochs_1_shard_1 | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_only_classfn_twostage_epochs_1_shard_10 | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
question-answering | transformers | {} | AnonymousSub/rule_based_roberta_only_classfn_twostage_epochs_1_shard_1_squad2.0 | null | ["transformers", "pytorch", "roberta", "question-answering", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | AnonymousSub/rule_based_roberta_only_classfn_twostage_epochs_1_shard_1_wikiqa | null | ["transformers", "pytorch", "roberta", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_twostage_quadruplet_epochs_1_shard_1 | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_twostage_quadruplet_epochs_1_shard_10 | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
question-answering | transformers | {} | AnonymousSub/rule_based_roberta_twostage_quadruplet_epochs_1_shard_1_squad2.0 | null | ["transformers", "pytorch", "roberta", "question-answering", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | AnonymousSub/rule_based_roberta_twostage_quadruplet_epochs_1_shard_1_wikiqa | null | ["transformers", "pytorch", "roberta", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_1 | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_10 | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
question-answering | transformers | {} | AnonymousSub/rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_1_squad2.0 | null | ["transformers", "pytorch", "roberta", "question-answering", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | AnonymousSub/rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_1_wikiqa | null | ["transformers", "pytorch", "roberta", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_twostagetriplet_epochs_1_shard_1 | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_twostagetriplet_epochs_1_shard_10 | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
question-answering | transformers | {} | AnonymousSub/rule_based_roberta_twostagetriplet_epochs_1_shard_1_squad2.0 | null | ["transformers", "pytorch", "roberta", "question-answering", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | AnonymousSub/rule_based_roberta_twostagetriplet_epochs_1_shard_1_wikiqa | null | ["transformers", "pytorch", "roberta", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_twostagetriplet_hier_epochs_1_shard_1 | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_roberta_twostagetriplet_hier_epochs_1_shard_10 | null | ["transformers", "pytorch", "roberta", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
question-answering | transformers | {} | AnonymousSub/rule_based_roberta_twostagetriplet_hier_epochs_1_shard_1_squad2.0 | null | ["transformers", "pytorch", "roberta", "question-answering", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | AnonymousSub/rule_based_roberta_twostagetriplet_hier_epochs_1_shard_1_wikiqa | null | ["transformers", "pytorch", "roberta", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_twostage_quadruplet_epochs_1_shard_1 | null | ["transformers", "pytorch", "bert", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | AnonymousSub/rule_based_twostage_quadruplet_epochs_1_shard_1_wikiqa | null | ["transformers", "pytorch", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_twostagequadruplet_hier_epochs_1_shard_1 | null | ["transformers", "pytorch", "bert", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | AnonymousSub/rule_based_twostagequadruplet_hier_epochs_1_shard_1_wikiqa | null | ["transformers", "pytorch", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_twostagetriplet_epochs_1_shard_1 | null | ["transformers", "pytorch", "bert", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | AnonymousSub/rule_based_twostagetriplet_epochs_1_shard_1_wikiqa | null | ["transformers", "pytorch", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/rule_based_twostagetriplet_hier_epochs_1_shard_1 | null | ["transformers", "pytorch", "bert", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | AnonymousSub/rule_based_twostagetriplet_hier_epochs_1_shard_1_wikiqa | null | ["transformers", "pytorch", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/specter-bert-model | null | ["transformers", "pytorch", "bert", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/specter-bert-model_copy | null | ["transformers", "pytorch", "bert", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | AnonymousSub/specter-bert-model_copy_wikiqa | null | ["transformers", "pytorch", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
question-answering | transformers | {} | AnonymousSub/specter-bert-model_squad2.0 | null | ["transformers", "pytorch", "bert", "question-answering", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/specter-emanuals-model | null | ["transformers", "pytorch", "bert", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/unsup-consert-base | null | ["transformers", "pytorch", "bert", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/unsup-consert-base_copy | null | ["transformers", "pytorch", "bert", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | AnonymousSub/unsup-consert-base_copy_wikiqa | null | ["transformers", "pytorch", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
question-answering | transformers | {} | AnonymousSub/unsup-consert-base_squad2.0 | null | ["transformers", "pytorch", "bert", "question-answering", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/unsup-consert-emanuals | null | ["transformers", "pytorch", "bert", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/unsup-consert-papers-bert | null | ["transformers", "pytorch", "bert", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
feature-extraction | transformers | {} | AnonymousSub/unsup-consert-papers | null | ["transformers", "pytorch", "bert", "feature-extraction", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | AnonymousSubmission/pretrained-model-1 | null | ["region:us"] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Anonymreign/savagebeta | null | ["region:us"] | null | 2022-03-02T23:29:04+00:00 |
text2text-generation | transformers |
# Model Trained Using AutoNLP
- Problem type: Summarization
- Model ID: 20384195
- CO2 Emissions (in grams): 4.214012748213151
## Validation Metrics
- Loss: 1.0120062828063965
- Rouge1: 41.1808
- Rouge2: 26.2564
- RougeL: 31.3106
- RougeLsum: 38.9991
- Gen Len: 58.45
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/Anorak/autonlp-Niravana-test2-20384195
``` | {"language": "unk", "tags": "autonlp", "datasets": ["Anorak/autonlp-data-Niravana-test2"], "widget": [{"text": "I love AutoNLP \ud83e\udd17"}], "co2_eq_emissions": 4.214012748213151} | Anorak/nirvana | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autonlp",
"unk",
"dataset:Anorak/autonlp-data-Niravana-test2",
"co2_eq_emissions",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
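The card above exposes the model through the hosted Inference API; a minimal local-inference sketch for the same checkpoint could look like the following. The model id `Anorak/nirvana` comes from the row, while the pipeline task and example input are assumptions based on the card's summarization problem type and widget text.

```python
# Minimal sketch, assuming the checkpoint works with the standard
# text2text-generation pipeline; not part of the original card.
from transformers import pipeline

summarizer = pipeline("text2text-generation", model="Anorak/nirvana")
print(summarizer("I love AutoNLP")[0]["generated_text"])
```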
text-generation | transformers |
# Rick Sanchez DialoGPT Model | {"tags": ["conversational"]} | AnthonyNelson/DialoGPT-small-ricksanchez | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | Anthos23/FS-distilroberta-fine-tuned | null | ["transformers", "pytorch", "roberta", "text-classification", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers |
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# Anthos23/distilbert-base-uncased-finetuned-sst2
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 0.0662
- Validation Loss: 0.2623
- Train Accuracy: 0.9083
- Epoch: 2
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: {'name': 'Adam', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 21045, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False}
- training_precision: float32
### Training results
| Train Loss | Validation Loss | Train Accuracy | Epoch |
|:----------:|:---------------:|:--------------:|:-----:|
| 0.2101 | 0.2373 | 0.9083 | 0 |
| 0.1065 | 0.2645 | 0.9060 | 1 |
| 0.0662 | 0.2623 | 0.9083 | 2 |
### Framework versions
- Transformers 4.17.0.dev0
- TensorFlow 2.5.0
- Datasets 1.18.3
- Tokenizers 0.11.0
| {"license": "apache-2.0", "tags": ["generated_from_keras_callback"], "model-index": [{"name": "Anthos23/distilbert-base-uncased-finetuned-sst2", "results": []}]} | Anthos23/distilbert-base-uncased-finetuned-sst2 | null | [
"transformers",
"tf",
"tensorboard",
"distilbert",
"text-classification",
"generated_from_keras_callback",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
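The optimizer dict in the card above maps one-to-one onto Keras objects; here is a minimal reconstruction sketch. Only the values listed in the card are used, and wiring the optimizer into an actual training loop is omitted.

```python
# Minimal sketch reconstructing the card's optimizer: Adam on a PolynomialDecay
# schedule from 2e-05 down to 0.0 over 21045 steps. Values are copied from the card.
import tensorflow as tf

lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=2e-05,
    decay_steps=21045,
    end_learning_rate=0.0,
    power=1.0,
    cycle=False,
)
optimizer = tf.keras.optimizers.Adam(
    learning_rate=lr_schedule, beta_1=0.9, beta_2=0.999, epsilon=1e-08, amsgrad=False
)
```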
text-classification | transformers | {} | Anthos23/my-awesome-model | null | ["transformers", "pytorch", "tf", "roberta", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Anthos23/sentiment-roberta-large-english-finetuned-sentiment-analysis | null | ["region:us"] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Anthos23/test_trainer | null | ["region:us"] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | AntonClaesson/finetuning_test | null | ["region:us"] | null | 2022-03-02T23:29:04+00:00 |
text-generation | transformers | {} | AntonClaesson/movie-plot-generator | null | ["transformers", "pytorch", "gpt2", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Antony/mint_model | null | ["region:us"] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Anubhav23/IndianlegalBert | null | ["region:us"] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Anubhav23/indianlegal | null | ["region:us"] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Anubhav23/model_name | null | ["region:us"] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Anupam/QuestionClassifier | null | ["region:us"] | null | 2022-03-02T23:29:04+00:00 |
text-generation | transformers |
# Jordan DialoGPT Model | {"tags": ["conversational"]} | Apisate/DialoGPT-small-jordan | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
text-generation | transformers | {} | Apisate/Discord-Ai-Bot | null | ["transformers", "pytorch", "gpt2", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Aplinxy9plin/toxic-detection-rus | null | ["region:us"] | null | 2022-03-02T23:29:04+00:00 |
text2text-generation | transformers |
The idea is to build a model that takes keywords as input and generates sentences as output.
Potential use cases include:
- Marketing
- Search Engine Optimization
- Topic generation etc.
- Fine-tuning of topic modeling models | {"language": "en", "tags": ["keytotext", "k2t", "Keywords to Sentences"], "thumbnail": "Keywords to Sentences"} | Apoorva/k2t-test | null | [
"transformers",
"pytorch",
"t5",
"text2text-generation",
"keytotext",
"k2t",
"Keywords to Sentences",
"en",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
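A minimal sketch of the keywords-to-sentences idea described in the card above. The model id comes from the row; the keyword separator and example keywords are assumptions, since the card does not document the expected input encoding.

```python
# Minimal sketch, assuming pipe-separated keywords as input; the separator
# and the example keywords are illustrative, not documented in the card.
from transformers import pipeline

k2t = pipeline("text2text-generation", model="Apoorva/k2t-test")
print(k2t("marketing | search engine | topic")[0]["generated_text"])
```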
null | null | {} | Appolo/TestModel | null | ["region:us"] | null | 2022-03-02T23:29:04+00:00 |
token-classification | transformers | {} | ArBert/albert-base-v2-finetuned-ner-agglo-twitter | null | ["transformers", "pytorch", "tensorboard", "albert", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
token-classification | transformers | {} | ArBert/albert-base-v2-finetuned-ner-agglo | null | ["transformers", "pytorch", "tensorboard", "albert", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
token-classification | transformers | {} | ArBert/albert-base-v2-finetuned-ner-gmm-twitter | null | ["transformers", "pytorch", "tensorboard", "albert", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
token-classification | transformers | {} | ArBert/albert-base-v2-finetuned-ner-gmm | null | ["transformers", "pytorch", "tensorboard", "albert", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
token-classification | transformers | {} | ArBert/albert-base-v2-finetuned-ner-kmeans-twitter | null | ["transformers", "pytorch", "tensorboard", "albert", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
token-classification | transformers | {} | ArBert/albert-base-v2-finetuned-ner-kmeans | null | ["transformers", "pytorch", "tensorboard", "albert", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us"] | null | 2022-03-02T23:29:04+00:00 |
token-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# albert-base-v2-finetuned-ner
This model is a fine-tuned version of [albert-base-v2](https://huggingface.co/albert-base-v2) on the conll2003 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0700
- Precision: 0.9301
- Recall: 0.9376
- F1: 0.9338
- Accuracy: 0.9852
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.096 | 1.0 | 1756 | 0.0752 | 0.9163 | 0.9201 | 0.9182 | 0.9811 |
| 0.0481 | 2.0 | 3512 | 0.0761 | 0.9169 | 0.9293 | 0.9231 | 0.9830 |
| 0.0251 | 3.0 | 5268 | 0.0700 | 0.9301 | 0.9376 | 0.9338 | 0.9852 |
### Framework versions
- Transformers 4.14.1
- Pytorch 1.10.1
- Datasets 1.17.0
- Tokenizers 0.10.3
| {"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["conll2003"], "metrics": ["precision", "recall", "f1", "accuracy"], "model-index": [{"name": "albert-base-v2-finetuned-ner", "results": [{"task": {"type": "token-classification", "name": "Token Classification"}, "dataset": {"name": "conll2003", "type": "conll2003", "args": "conll2003"}, "metrics": [{"type": "precision", "value": 0.9301181102362205, "name": "Precision"}, {"type": "recall", "value": 0.9376033513394334, "name": "Recall"}, {"type": "f1", "value": 0.9338457315399397, "name": "F1"}, {"type": "accuracy", "value": 0.9851613086447802, "name": "Accuracy"}]}]}]} | ArBert/albert-base-v2-finetuned-ner | null | [
"transformers",
"pytorch",
"tensorboard",
"albert",
"token-classification",
"generated_from_trainer",
"dataset:conll2003",
"license:apache-2.0",
"model-index",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
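A minimal inference sketch for the conll2003 NER model documented above. The task follows the row's token-classification tag; the aggregation strategy and the example sentence are illustrative choices, not from the card.

```python
# Minimal sketch: run the fine-tuned ALBERT NER model on one sentence.
# aggregation_strategy="simple" merges word pieces into whole entity spans.
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="ArBert/albert-base-v2-finetuned-ner",
    aggregation_strategy="simple",
)
print(ner("Hugging Face is based in New York City."))
```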
null | null | {} | ArBert/bert-base-uncased-finetuned-ner-agglo | null | ["region:us"] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | ArBert/bert-base-uncased-finetuned-ner-gmm | null | ["region:us"] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | ArBert/bert-base-uncased-finetuned-ner-kmeans-twitter | null | ["region:us"] | null | 2022-03-02T23:29:04+00:00 |
token-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-base-uncased-finetuned-ner-kmeans
This model is a fine-tuned version of [ArBert/bert-base-uncased-finetuned-ner](https://huggingface.co/ArBert/bert-base-uncased-finetuned-ner) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1169
- Precision: 0.9084
- Recall: 0.9245
- F1: 0.9164
- Accuracy: 0.9792
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.036 | 1.0 | 1123 | 0.1010 | 0.9086 | 0.9117 | 0.9101 | 0.9779 |
| 0.0214 | 2.0 | 2246 | 0.1094 | 0.9033 | 0.9199 | 0.9115 | 0.9784 |
| 0.014 | 3.0 | 3369 | 0.1169 | 0.9084 | 0.9245 | 0.9164 | 0.9792 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
| {"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["precision", "recall", "f1", "accuracy"], "model-index": [{"name": "bert-base-uncased-finetuned-ner-kmeans", "results": []}]} | ArBert/bert-base-uncased-finetuned-ner-kmeans | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"token-classification",
"generated_from_trainer",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
token-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-base-uncased-finetuned-ner
This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0905
- Precision: 0.9068
- Recall: 0.9200
- F1: 0.9133
- Accuracy: 0.9787
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.1266 | 1.0 | 1123 | 0.0952 | 0.8939 | 0.8869 | 0.8904 | 0.9742 |
| 0.0741 | 2.0 | 2246 | 0.0866 | 0.8936 | 0.9247 | 0.9089 | 0.9774 |
| 0.0496 | 3.0 | 3369 | 0.0905 | 0.9068 | 0.9200 | 0.9133 | 0.9787 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
| {"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["precision", "recall", "f1", "accuracy"], "model-index": [{"name": "bert-base-uncased-finetuned-ner", "results": []}]} | ArBert/bert-base-uncased-finetuned-ner | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"token-classification",
"generated_from_trainer",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
token-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# roberta-base-finetuned-ner-agglo-twitter
This model is a fine-tuned version of [ArBert/roberta-base-finetuned-ner](https://huggingface.co/ArBert/roberta-base-finetuned-ner) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6645
- Precision: 0.6885
- Recall: 0.7665
- F1: 0.7254
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 20
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|
| No log | 1.0 | 245 | 0.2820 | 0.6027 | 0.7543 | 0.6700 |
| No log | 2.0 | 490 | 0.2744 | 0.6308 | 0.7864 | 0.7000 |
| 0.2301 | 3.0 | 735 | 0.2788 | 0.6433 | 0.7637 | 0.6984 |
| 0.2301 | 4.0 | 980 | 0.3255 | 0.6834 | 0.7221 | 0.7022 |
| 0.1153 | 5.0 | 1225 | 0.3453 | 0.6686 | 0.7439 | 0.7043 |
| 0.1153 | 6.0 | 1470 | 0.3988 | 0.6797 | 0.7420 | 0.7094 |
| 0.0617 | 7.0 | 1715 | 0.4711 | 0.6702 | 0.7259 | 0.6969 |
| 0.0617 | 8.0 | 1960 | 0.4904 | 0.6904 | 0.7505 | 0.7192 |
| 0.0328 | 9.0 | 2205 | 0.5088 | 0.6591 | 0.7713 | 0.7108 |
| 0.0328 | 10.0 | 2450 | 0.5709 | 0.6468 | 0.7788 | 0.7067 |
| 0.019 | 11.0 | 2695 | 0.5570 | 0.6642 | 0.7533 | 0.7059 |
| 0.019 | 12.0 | 2940 | 0.5574 | 0.6899 | 0.7656 | 0.7258 |
| 0.0131 | 13.0 | 3185 | 0.5858 | 0.6952 | 0.7609 | 0.7265 |
| 0.0131 | 14.0 | 3430 | 0.6239 | 0.6556 | 0.7826 | 0.7135 |
| 0.0074 | 15.0 | 3675 | 0.5931 | 0.6825 | 0.7599 | 0.7191 |
| 0.0074 | 16.0 | 3920 | 0.6364 | 0.6785 | 0.7580 | 0.7161 |
| 0.005 | 17.0 | 4165 | 0.6437 | 0.6855 | 0.7580 | 0.7199 |
| 0.005 | 18.0 | 4410 | 0.6610 | 0.6779 | 0.7599 | 0.7166 |
| 0.0029 | 19.0 | 4655 | 0.6625 | 0.6853 | 0.7656 | 0.7232 |
| 0.0029 | 20.0 | 4900 | 0.6645 | 0.6885 | 0.7665 | 0.7254 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
| {"license": "mit", "tags": ["generated_from_trainer"], "metrics": ["precision", "recall", "f1"], "model-index": [{"name": "roberta-base-finetuned-ner-agglo-twitter", "results": []}]} | ArBert/roberta-base-finetuned-ner-agglo-twitter | null | [
"transformers",
"pytorch",
"tensorboard",
"roberta",
"token-classification",
"generated_from_trainer",
"license:mit",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | ArBert/roberta-base-finetuned-ner-agglo | null | ["region:us"] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | ArBert/roberta-base-finetuned-ner-gmm-twitter | null | ["region:us"] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | ArBert/roberta-base-finetuned-ner-gmm | null | ["region:us"] | null | 2022-03-02T23:29:04+00:00 |
token-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# roberta-base-finetuned-ner-kmeans-twitter
This model is a fine-tuned version of [ArBert/roberta-base-finetuned-ner](https://huggingface.co/ArBert/roberta-base-finetuned-ner) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6645
- Precision: 0.6885
- Recall: 0.7665
- F1: 0.7254
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 20
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|
| No log | 1.0 | 245 | 0.2820 | 0.6027 | 0.7543 | 0.6700 |
| No log | 2.0 | 490 | 0.2744 | 0.6308 | 0.7864 | 0.7000 |
| 0.2301 | 3.0 | 735 | 0.2788 | 0.6433 | 0.7637 | 0.6984 |
| 0.2301 | 4.0 | 980 | 0.3255 | 0.6834 | 0.7221 | 0.7022 |
| 0.1153 | 5.0 | 1225 | 0.3453 | 0.6686 | 0.7439 | 0.7043 |
| 0.1153 | 6.0 | 1470 | 0.3988 | 0.6797 | 0.7420 | 0.7094 |
| 0.0617 | 7.0 | 1715 | 0.4711 | 0.6702 | 0.7259 | 0.6969 |
| 0.0617 | 8.0 | 1960 | 0.4904 | 0.6904 | 0.7505 | 0.7192 |
| 0.0328 | 9.0 | 2205 | 0.5088 | 0.6591 | 0.7713 | 0.7108 |
| 0.0328 | 10.0 | 2450 | 0.5709 | 0.6468 | 0.7788 | 0.7067 |
| 0.019 | 11.0 | 2695 | 0.5570 | 0.6642 | 0.7533 | 0.7059 |
| 0.019 | 12.0 | 2940 | 0.5574 | 0.6899 | 0.7656 | 0.7258 |
| 0.0131 | 13.0 | 3185 | 0.5858 | 0.6952 | 0.7609 | 0.7265 |
| 0.0131 | 14.0 | 3430 | 0.6239 | 0.6556 | 0.7826 | 0.7135 |
| 0.0074 | 15.0 | 3675 | 0.5931 | 0.6825 | 0.7599 | 0.7191 |
| 0.0074 | 16.0 | 3920 | 0.6364 | 0.6785 | 0.7580 | 0.7161 |
| 0.005 | 17.0 | 4165 | 0.6437 | 0.6855 | 0.7580 | 0.7199 |
| 0.005 | 18.0 | 4410 | 0.6610 | 0.6779 | 0.7599 | 0.7166 |
| 0.0029 | 19.0 | 4655 | 0.6625 | 0.6853 | 0.7656 | 0.7232 |
| 0.0029 | 20.0 | 4900 | 0.6645 | 0.6885 | 0.7665 | 0.7254 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
| {"license": "mit", "tags": ["generated_from_trainer"], "metrics": ["precision", "recall", "f1"], "model-index": [{"name": "roberta-base-finetuned-ner-kmeans-twitter", "results": []}]} | ArBert/roberta-base-finetuned-ner-kmeans-twitter | null | [
"transformers",
"pytorch",
"tensorboard",
"roberta",
"token-classification",
"generated_from_trainer",
"license:mit",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
token-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# roberta-base-finetuned-ner-kmeans
This model is a fine-tuned version of [ArBert/roberta-base-finetuned-ner](https://huggingface.co/ArBert/roberta-base-finetuned-ner) on the conll2003 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0592
- Precision: 0.9559
- Recall: 0.9615
- F1: 0.9587
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|
| 0.0248 | 1.0 | 878 | 0.0609 | 0.9507 | 0.9561 | 0.9534 |
| 0.0163 | 2.0 | 1756 | 0.0640 | 0.9515 | 0.9578 | 0.9546 |
| 0.0089 | 3.0 | 2634 | 0.0592 | 0.9559 | 0.9615 | 0.9587 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
| {"license": "mit", "tags": ["generated_from_trainer"], "datasets": ["conll2003"], "metrics": ["precision", "recall", "f1"], "model-index": [{"name": "roberta-base-finetuned-ner-kmeans", "results": [{"task": {"type": "token-classification", "name": "Token Classification"}, "dataset": {"name": "conll2003", "type": "conll2003", "args": "conll2003"}, "metrics": [{"type": "precision", "value": 0.955868544600939, "name": "Precision"}, {"type": "recall", "value": 0.9614658103513412, "name": "Recall"}, {"type": "f1", "value": 0.9586590074394953, "name": "F1"}]}]}]} | ArBert/roberta-base-finetuned-ner-kmeans | null | [
"transformers",
"pytorch",
"tensorboard",
"roberta",
"token-classification",
"generated_from_trainer",
"dataset:conll2003",
"license:mit",
"model-index",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
token-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# roberta-base-finetuned-ner
This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0738
- Precision: 0.9232
- Recall: 0.9437
- F1: 0.9333
- Accuracy: 0.9825
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.1397 | 1.0 | 1368 | 0.0957 | 0.9141 | 0.9048 | 0.9094 | 0.9753 |
| 0.0793 | 2.0 | 2736 | 0.0728 | 0.9274 | 0.9324 | 0.9299 | 0.9811 |
| 0.0499 | 3.0 | 4104 | 0.0738 | 0.9232 | 0.9437 | 0.9333 | 0.9825 |
### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.0+cu111
- Datasets 1.17.0
- Tokenizers 0.10.3
| {"license": "mit", "tags": ["generated_from_trainer"], "metrics": ["precision", "recall", "f1", "accuracy"], "model-index": [{"name": "roberta-base-finetuned-ner", "results": []}]} | ArBert/roberta-base-finetuned-ner | null | [
"transformers",
"pytorch",
"tensorboard",
"roberta",
"token-classification",
"generated_from_trainer",
"license:mit",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
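The training-hyperparameter lists repeated across the ArBert cards above translate directly into `transformers` `TrainingArguments`; a minimal sketch using the values from the roberta-base-finetuned-ner card follows. The output directory name is illustrative, model/dataset/`Trainer` wiring is omitted, and the Adam betas, epsilon, and linear scheduler listed in the cards are the library defaults.

```python
# Minimal sketch of the card's training configuration; only the values listed
# in the card are set explicitly, everything else stays at Trainer defaults.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="roberta-base-finetuned-ner",  # illustrative name
    learning_rate=2e-05,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=3,
)
```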