224 => 256
https://github.com/apple/ml-mobileclip/blob/main/mobileclip/configs/mobileclip_s2.json
@@ -220,7 +220,7 @@ const cachedCLIPImageSession = makeCachedInferenceSession(
 export const computeCLIPImageEmbedding = async (input: Float32Array) => {
     const session = await cachedCLIPImageSession();
     const feeds = {
-        input: new ort.Tensor("float32", input, [1, 3, 224, 224]),
+        input: new ort.Tensor("float32", input, [1, 3, 256, 256]),
     };
     const t = Date.now();
     const results = await session.run(feeds);
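For context, a minimal usage sketch (not part of this commit, and assuming the module exports the function shown above): callers must now supply a Float32Array holding 1 * 3 * 256 * 256 values in CHW order, matching the updated tensor shape.

// Usage sketch only; computeCLIPImageEmbedding is the function changed above.
// The buffer length must match the new [1, 3, 256, 256] input tensor.
const clipInput = new Float32Array(1 * 3 * 256 * 256); // zero-filled placeholder pixels
const imageEmbedding = await computeCLIPImageEmbedding(clipInput);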
@@ -120,8 +120,7 @@ const computeEmbedding = async (
  * Convert {@link imageData} into the format that the CLIP model expects.
  */
 const convertToCLIPInput = (imageData: ImageData) => {
-    const requiredWidth = 224;
-    const requiredHeight = 224;
+    const [requiredWidth, requiredHeight] = [256, 256];
 
     const mean = [0.48145466, 0.4578275, 0.40821073] as const;
     const std = [0.26862954, 0.26130258, 0.27577711] as const;
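The hunk above only shows the start of the conversion helper. As a rough sketch of what a convertToCLIPInput compatible with the new 256 x 256 input could look like (not this repository's actual implementation, which is only partially visible here; it assumes nearest-neighbour resizing, whereas the real code may resize differently):

// Sketch: resize the RGBA ImageData to 256 x 256 with nearest-neighbour
// sampling, then normalize each channel with the CLIP mean/std into a CHW
// Float32Array of length 3 * 256 * 256 (the batch dimension of 1 is implicit).
const convertToCLIPInputSketch = (imageData: ImageData): Float32Array => {
    const [requiredWidth, requiredHeight] = [256, 256];

    const mean = [0.48145466, 0.4578275, 0.40821073] as const;
    const std = [0.26862954, 0.26130258, 0.27577711] as const;

    const { data, width, height } = imageData;
    const clipInput = new Float32Array(3 * requiredWidth * requiredHeight);
    const planeSize = requiredWidth * requiredHeight;

    for (let y = 0; y < requiredHeight; y++) {
        // Map each output pixel back to the nearest source pixel.
        const srcY = Math.min(height - 1, Math.floor((y * height) / requiredHeight));
        for (let x = 0; x < requiredWidth; x++) {
            const srcX = Math.min(width - 1, Math.floor((x * width) / requiredWidth));
            const srcOffset = (srcY * width + srcX) * 4; // RGBA source layout
            const dstOffset = y * requiredWidth + x;
            for (let c = 0; c < 3; c++) {
                const value = data[srcOffset + c] / 255;
                clipInput[c * planeSize + dstOffset] = (value - mean[c]) / std[c];
            }
        }
    }

    return clipInput;
};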