[mob][photos] Use quantized text model
@@ -10,7 +10,8 @@ import 'package:photos/services/machine_learning/semantic_search/clip/clip_text_
 import "package:photos/utils/ml_util.dart";
 
 class ClipTextEncoder extends MlModel {
-  static const _kRemoteBucketModelPath = "clip-text-vit-32-float32-int32.onnx";
+  // static const _kRemoteBucketModelPath = "clip-text-vit-32-float32-int32.onnx"; // Unquantized model
+  static const _kRemoteBucketModelPath = "clip-text-vit-32-uint8.onnx"; // Quantized model
   static const _kVocabRemotePath = "bpe_simple_vocab_16e6.txt";
 
-  // static const kRemoteBucketModelPath = "clip-text-vit-32-uint8.onnx";
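
For context, a minimal sketch of how a remote bucket model path like this is typically consumed: the ONNX file is downloaded once, cached locally, and opened as an inference session. This uses the community onnxruntime Dart package, which may differ from the app's actual runtime integration; modelBaseUrl, the cache layout, and loadClipTextModel are hypothetical names for illustration, not code from this repository.

// Sketch only, assuming the community `onnxruntime` Dart binding.
import 'dart:io';

import 'package:http/http.dart' as http;
import 'package:onnxruntime/onnxruntime.dart';

const modelBaseUrl = 'https://models.example.com/'; // hypothetical bucket URL
const kRemoteBucketModelPath = 'clip-text-vit-32-uint8.onnx';

Future<OrtSession> loadClipTextModel(Directory cacheDir) async {
  final file = File('${cacheDir.path}/$kRemoteBucketModelPath');
  if (!await file.exists()) {
    // Download the quantized model once and cache it on disk.
    final response =
        await http.get(Uri.parse('$modelBaseUrl$kRemoteBucketModelPath'));
    await file.writeAsBytes(response.bodyBytes);
  }
  OrtEnv.instance.init();
  final options = OrtSessionOptions();
  // A uint8-quantized model loads exactly like the float32 one; the
  // quantization only changes the weights stored inside the .onnx file.
  return OrtSession.fromFile(file, options);
}

Because only the constant changes, the loading code stays untouched while the download shrinks substantially (8-bit weights are roughly a quarter the size of float32), at the cost of the small accuracy loss typical of weight quantization.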