Xenova committed
Commit bf8b5d6
1 Parent(s): 4dd947e

Upload folder using huggingface_hub
config.json ADDED
@@ -0,0 +1,49 @@
+{
+  "_attn_implementation_autoset": true,
+  "_name_or_path": "ibm-granite/granite-timeseries-patchtsmixer",
+  "architectures": [
+    "PatchTSMixerForPrediction"
+  ],
+  "channel_consistent_masking": true,
+  "context_length": 512,
+  "d_model": 48,
+  "distribution_output": "student_t",
+  "dropout": 0.5,
+  "expansion_factor": 3,
+  "gated_attn": true,
+  "head_aggregation": "max_pool",
+  "head_dropout": 0.5,
+  "init_std": 0.02,
+  "loss": "mse",
+  "mask_type": "forecast",
+  "mask_value": 0,
+  "masked_loss": true,
+  "mode": "common_channel",
+  "model_type": "patchtsmixer",
+  "norm_eps": 1e-05,
+  "norm_mlp": "LayerNorm",
+  "num_forecast_mask_patches": [
+    2
+  ],
+  "num_input_channels": 7,
+  "num_layers": 2,
+  "num_parallel_samples": 100,
+  "num_patches": 32,
+  "num_targets": 3,
+  "output_range": null,
+  "patch_last": true,
+  "patch_length": 16,
+  "patch_stride": 16,
+  "positional_encoding_type": "sincos",
+  "post_init": false,
+  "prediction_channel_indices": null,
+  "prediction_length": 96,
+  "random_mask_ratio": 0.5,
+  "scaling": true,
+  "self_attn": false,
+  "self_attn_heads": 1,
+  "torch_dtype": "float32",
+  "transformers_version": "4.46.1",
+  "unmasked_channel_indices": null,
+  "use_positional_encoding": false
+}
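For context, a minimal sketch (not part of this commit) of how the exported graph in onnx/model.onnx could be exercised with onnxruntime. The input shape (batch, context_length=512, num_input_channels=7) and the expected prediction_length=96 forecast are taken from the config above; the assumption that the graph takes a single float32 tensor, and the dummy data, are illustrative only.

```python
# Sketch (assumption, not part of this commit): probe onnx/model.onnx with
# onnxruntime, using shapes taken from config.json above.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("onnx/model.onnx")

# Inspect the exported inputs/outputs instead of hard-coding graph names.
for inp in session.get_inputs():
    print("input:", inp.name, inp.shape, inp.type)
for out in session.get_outputs():
    print("output:", out.name, out.shape, out.type)

# Dummy history: batch of 1, context_length=512 steps, num_input_channels=7.
past_values = np.random.randn(1, 512, 7).astype(np.float32)

# Assumes a single float32 input; per config.json the forecast head should
# produce prediction_length=96 steps for each of the 7 channels.
outputs = session.run(None, {session.get_inputs()[0].name: past_values})
print([o.shape for o in outputs])
```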
generation_config.json ADDED
@@ -0,0 +1,4 @@
+{
+  "_from_model_config": true,
+  "transformers_version": "4.46.1"
+}
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50f10dbbc5be19a8cdca403d176257e2a168794936769ca604b487a77a02fb11
+size 850461
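The three lines above are a Git LFS pointer: the 850461-byte ONNX binary is stored out of band and identified by the SHA-256 in the oid field. A small sketch (assuming the file has already been downloaded locally) of checking a local copy against the pointer's oid and size:

```python
# Sketch (not part of this commit): verify a downloaded file against the
# Git LFS pointer shown above, using its oid (SHA-256) and size fields.
import hashlib
from pathlib import Path

path = Path("onnx/model.onnx")  # local copy of the LFS object (assumed downloaded)
expected_oid = "50f10dbbc5be19a8cdca403d176257e2a168794936769ca604b487a77a02fb11"
expected_size = 850461

data = path.read_bytes()
assert len(data) == expected_size, "size mismatch with LFS pointer"
assert hashlib.sha256(data).hexdigest() == expected_oid, "oid mismatch with LFS pointer"
print("onnx/model.onnx matches its LFS pointer")
```

The remaining onnx/* entries below are pointers of the same form for the quantized variants.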
onnx/model_bnb4.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e1d880a11e5666a41219749707c80a1c618e47e2d7d01b3af7131d0b520a6fe
+size 182902
onnx/model_fp16.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ad5487926c2de86d334855c7417898d28df41cde75fd94100429e3a2979802b
+size 458375
onnx/model_int8.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75295e623f152c4e3e62c2cf5c98bbc40b292de6f05714926fb905a42810933a
+size 293870
onnx/model_q4.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abef84ca8524d4696bfb9218a61d76f6b8312ae91c59666c100e7e77cce31882
+size 200259
onnx/model_q4f16.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:674d02d26e71ea7624453562ae8f3424178e0c43858fb7b6b27b662dd2e2a419
+size 185101
onnx/model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75295e623f152c4e3e62c2cf5c98bbc40b292de6f05714926fb905a42810933a
+size 293870
onnx/model_uint8.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23280fef23b526f8606af5d9a207eae1c78f7f352e69f14424290287c27861d8
+size 293870
quantize_config.json ADDED
@@ -0,0 +1,17 @@
+{
+  "modes": [
+    "fp16",
+    "q8",
+    "int8",
+    "uint8",
+    "q4",
+    "q4f16",
+    "bnb4"
+  ],
+  "per_channel": true,
+  "reduce_range": true,
+  "block_size": null,
+  "is_symmetric": true,
+  "accuracy_level": null,
+  "quant_type": 1
+}
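quantize_config.json records which quantized variants were produced (fp16, q8, int8, uint8, q4, q4f16, bnb4) and the settings used (per-channel, reduced range, symmetric). A hedged sketch, assuming onnxruntime is used to run the exported files, of comparing one quantized variant against the full-precision graph on the same dummy input; the shapes again come from config.json and nothing here is prescribed by the commit itself:

```python
# Sketch (assumption, not part of this commit): compare one of the quantized
# exports listed above against the fp32 graph on the same dummy input.
import numpy as np
import onnxruntime as ort

x = np.random.randn(1, 512, 7).astype(np.float32)  # shapes from config.json

fp32 = ort.InferenceSession("onnx/model.onnx")
q8 = ort.InferenceSession("onnx/model_quantized.onnx")

ref = fp32.run(None, {fp32.get_inputs()[0].name: x})[0]
out = q8.run(None, {q8.get_inputs()[0].name: x})[0]

# Some quantization error is expected; there is no pass/fail threshold here,
# the printout is purely illustrative.
print("max abs diff:", float(np.max(np.abs(ref - out))))
```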