amamrnaf committed on
Commit 1123dbf · verified · 1 Parent(s): cc86ab7

Update metaVoice.py

Files changed (1)
  1. metaVoice.py +2 -2
metaVoice.py CHANGED
@@ -137,7 +137,7 @@ class Model:
         tokenizer_cls: Type[TrainedBPETokeniser],
         decoder_cls: Type[Decoder],
         data_adapter_fn,
-        use_kv_cache: None,
+        use_kv_cache= None,
     ):
         # TODO: disentangle the encodec stuff and numbers etc with rest of this code (esp at encoder-only / second stage model inference)
         # TODO: remove magic number
@@ -724,7 +724,7 @@ class SamplingControllerConfig:
     init_from: str = "resume"
     """Either 'resume' (from an out_dir) or a gpt2 variant (e.g. 'gpt2-xl')."""

-    use_kv_cache: None
+    use_kv_cache: Optional[None] = None
     # """Type of kv caching to use for inference: 1) [none] no kv caching, 2) [flash_decoding] use the
     # flash decoding kernel, 3) [vanilla] use torch attention with hand implemented kv-cache."""
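
For context, both hunks make the same kind of fix: a bare `use_kv_cache: None` annotation becomes a name with an actual default value. Below is a minimal, self-contained sketch of why that matters, using hypothetical stand-ins (`build_model`, `SamplingConfig`) rather than the real metaVoice.py code:

from dataclasses import dataclass
from typing import Optional

# Parameter form. Before the patch, `use_kv_cache: None` was only an
# annotation with no default, so the argument was required (and typed
# as NoneType). Adding the default lets callers omit it.
def build_model(data_adapter_fn, use_kv_cache=None):
    return data_adapter_fn, use_kv_cache

# Dataclass form. An annotated field without a default is required, and
# because it follows the defaulted `init_from` (as in the hunk above),
# @dataclass raises "non-default argument 'use_kv_cache' follows
# default argument" at class-definition time. The `= None` default
# fixes both issues.
@dataclass
class SamplingConfig:
    init_from: str = "resume"
    # Optional[None] is just NoneType; judging by the commented-out
    # docstring ("none" / "flash_decoding" / "vanilla"), Optional[str]
    # may have been intended, but the commit keeps None here.
    use_kv_cache: Optional[None] = None

cfg = SamplingConfig()  # constructible with no arguments now
assert cfg.use_kv_cache is None

To the extent the shown hunks reflect the full class, the dataclass change is the critical one: without a default, the module would fail at import time, not merely at call time.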