Merge remote-tracking branch 'origin/memories_widget_api' into widget-superpowered
@@ -798,8 +798,8 @@ const uploadVideoSegments = async (
         // Success.
         return;
     }
-    if (res.status >= 400 && res.status < 500) {
-        // HTTP 4xx.
+    if (res.status >= 400 && res.status < 500 && res.status != 429) {
+        // HTTP 4xx, except potentially transient 429 rate limits.
         abort = true;
     }
     throw new Error(

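The change above narrows the abort condition so that HTTP 429 is treated as transient. Read in isolation (a minimal sketch; `shouldAbortRetries` is a hypothetical name, not part of this diff):

```ts
// 4xx responses are terminal (retrying the same request won't help),
// except 429, a rate limit that may well clear on a later attempt.
const shouldAbortRetries = (status: number): boolean =>
    status >= 400 && status < 500 && status != 429;

// shouldAbortRetries(404) → true; shouldAbortRetries(429) → false;
// shouldAbortRetries(503) → false (5xx stays retryable, as before).
```
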
@@ -345,6 +345,26 @@ class MemoriesCacheService {
       return;
     }
+
+  Future<List<SmartMemory>> getMemoriesForWidget({
+    required bool pastYears,
+    required bool smart,
+  }) async {
+    if (!pastYears && !smart) {
+      return [];
+    }
+    final allMemories = await getMemories();
+    if (pastYears && smart) {
+      return allMemories;
+    }
+    final filteredMemories = <SmartMemory>[];
+    for (final memory in allMemories) {
+      if (!pastYears && memory.type == MemoryType.filler) continue;
+      if (!smart && memory.type != MemoryType.filler) continue;
+      filteredMemories.add(memory);
+    }
+    return filteredMemories;
+  }
 
   Future<List<SmartMemory>> getMemories() async {
     if (!showAnyMemories) {
       _logger.info('Showing memories is disabled in settings, showing none');

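The new method's filter reduces to one predicate per memory. A transliteration of the Dart logic into TypeScript, purely for illustration (the hunk implies that "filler" memories correspond to the past-years toggle and all other types to the smart toggle):

```ts
type MemoryKind = "filler" | "other";

// Keep a memory iff the toggle governing its kind is on.
const keep = (kind: MemoryKind, pastYears: boolean, smart: boolean) =>
    kind == "filler" ? pastYears : smart;

// keep("filler", true, false) → true; keep("other", true, false) → false.
```
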
@@ -2,7 +2,6 @@ package filedata
 
 import (
     "context"
-    "database/sql"
     "errors"
     "github.com/aws/aws-sdk-go/aws/awserr"
     "github.com/aws/aws-sdk-go/service/s3"

@@ -139,12 +138,12 @@ func (c *Controller) GetFileData(ctx *gin.Context, actorUser int64, req fileData
     }
     doRows, err := c.Repo.GetFilesData(ctx, req.Type, []int64{req.FileID})
     if err != nil {
-        if errors.Is(err, sql.ErrNoRows) && req.PreferNoContent {
-            return nil, nil
-        }
         return nil, stacktrace.Propagate(err, "")
     }
     if len(doRows) == 0 || doRows[0].IsDeleted {
+        if req.PreferNoContent {
+            return nil, nil
+        }
         return nil, stacktrace.Propagate(&ente.ErrNotFoundError, "")
     }
     ctxLogger := log.WithFields(log.Fields{

@@ -26,15 +26,13 @@ import ItemList from "components/ItemList";
 import { FilledIconButton } from "ente-base/components/mui";
 import { useBaseContext } from "ente-base/context";
 import { formattedListJoin } from "ente-base/i18n";
-import {
-    type UploadPhase,
-    type UploadResult,
-} from "ente-gallery/services/upload";
+import { type UploadPhase } from "ente-gallery/services/upload";
 import { SpaceBetweenFlex } from "ente-shared/components/Container";
 import { t } from "i18next";
 import React, { createContext, useContext, useEffect, useState } from "react";
 import { Trans } from "react-i18next";
 import type {
+    FinishedUploadResult,
     InProgressUpload,
     SegregatedFinishedUploads,
     UploadCounter,

@@ -251,7 +249,6 @@ const uploadedFileCount = (
     let c = 0;
     c += finishedUploads.get("uploaded")?.length ?? 0;
     c += finishedUploads.get("uploadedWithStaticThumbnail")?.length ?? 0;
-    c += finishedUploads.get("addedSymlink")?.length ?? 0;
 
     return c;
 };

@@ -474,7 +471,7 @@ const NotUploadSectionHeader = styled("div")(
 );
 
 interface ResultSectionProps {
-    uploadResult: UploadResult;
+    uploadResult: FinishedUploadResult;
     sectionTitle: string;
     sectionInfo?: React.ReactNode;
 }

@@ -177,18 +177,18 @@ const NoWatches: React.FC = () => (
         <Typography variant="small" sx={{ py: 1, color: "text.muted" }}>
             {t("watch_folders_hint_1")}
         </Typography>
-        <Typography variant="small" sx={{ color: "text.muted" }}>
-            <Stack direction="row" sx={{ gap: 1 }}>
-                <Check />
+        <Stack direction="row" sx={{ gap: 1 }}>
+            <Check />
+            <Typography variant="small" sx={{ color: "text.muted" }}>
                 {t("watch_folders_hint_2")}
-            </Stack>
-        </Typography>
-        <Typography variant="small" sx={{ color: "text.muted" }}>
-            <Stack direction="row" sx={{ gap: 1 }}>
-                <Check />
+            </Typography>
+        </Stack>
+        <Stack direction="row" sx={{ gap: 1 }}>
+            <Check />
+            <Typography variant="small" sx={{ color: "text.muted" }}>
                 {t("watch_folders_hint_3")}
-            </Stack>
-        </Typography>
+            </Typography>
+        </Stack>
     </Stack>
 </CenteredFill>
 );

@@ -64,16 +64,23 @@ export interface InProgressUpload {
     progress: PercentageUploaded;
 }
 
+/**
+ * A variant of {@link UploadResult} used when segregating finished uploads in
+ * the UI. "addedSymlink" is treated as "uploaded", everything else remains as
+ * it was.
+ */
+export type FinishedUploadResult = Exclude<UploadResult, "addedSymlink">;
+
 export interface FinishedUpload {
     localFileID: FileID;
-    result: UploadResult;
+    result: FinishedUploadResult;
 }
 
 export type InProgressUploads = Map<FileID, PercentageUploaded>;
 
-export type FinishedUploads = Map<FileID, UploadResult>;
+export type FinishedUploads = Map<FileID, FinishedUploadResult>;
 
-export type SegregatedFinishedUploads = Map<UploadResult, FileID[]>;
+export type SegregatedFinishedUploads = Map<FinishedUploadResult, FileID[]>;
 
 export interface ProgressUpdater {
     setPercentComplete: React.Dispatch<React.SetStateAction<number>>;

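The `Exclude` utility type does the heavy lifting here: once "addedSymlink" is mapped to "uploaded" at the boundary, the compiler guarantees it cannot reach the maps typed with FinishedUploadResult. A self-contained sketch (the stand-in union below is abbreviated; the real one has more members):

```ts
type UploadResult = "failed" | "uploaded" | "addedSymlink";
type FinishedUploadResult = Exclude<UploadResult, "addedSymlink">;

// Map once at the boundary; downstream code sees only the narrower type.
const toFinished = (r: UploadResult): FinishedUploadResult =>
    r == "addedSymlink" ? "uploaded" : r;

const segregated = new Map<FinishedUploadResult, number[]>();
// segregated.set("addedSymlink", []); // would now be a compile-time error
```
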
@@ -158,7 +165,7 @@ class UIService {
         this.setTotalFileCount(count);
         this.filesUploadedCount = 0;
         this.inProgressUploads = new Map<number, number>();
-        this.finishedUploads = new Map<number, UploadResult>();
+        this.finishedUploads = new Map<number, FinishedUploadResult>();
         this.updateProgressBarUI();
     }
 

@@ -202,7 +209,7 @@ class UIService {
         this.updateProgressBarUI();
     }
 
-    moveFileToResultList(key: number, uploadResult: UploadResult) {
+    moveFileToResultList(key: number, uploadResult: FinishedUploadResult) {
         this.finishedUploads.set(key, uploadResult);
         this.inProgressUploads.delete(key);
         this.updateProgressBarUI();

@@ -595,8 +602,10 @@ class UploadManager {
         uploadableItem: UploadableUploadItem,
         uploadResult: UploadResult,
         uploadedFile: EncryptedEnteFile | EnteFile | undefined,
-    ) {
+    ): Promise<FinishedUploadResult> {
         log.info(`Upload ${uploadableItem.fileName} | ${uploadResult}`);
+        const finishedUploadResult =
+            uploadResult == "addedSymlink" ? "uploaded" : uploadResult;
         try {
             const processableUploadItem =
                 await markUploadedAndObtainProcessableItem(uploadableItem);

@@ -608,11 +617,8 @@
                     this.failedItems.push(uploadableItem);
                     break;
                 case "alreadyUploaded":
-                    decryptedFile = uploadedFile as EnteFile;
-                    break;
                 case "addedSymlink":
                     decryptedFile = uploadedFile as EnteFile;
-                    uploadResult = "uploaded";
                     break;
                 case "uploaded":
                 case "uploadedWithStaticThumbnail":

@@ -653,7 +659,7 @@
                 uploadableItem,
                 uploadedFile as EncryptedEnteFile,
             );
-            return uploadResult;
+            return finishedUploadResult;
         } catch (e) {
             log.error("Post file upload action failed", e);
             return "failed";

@@ -136,7 +136,7 @@ export const getKV = async (key: string) => {
     return db.get("kv", key);
 };
 
-export const _getKV = async <T extends string | number | boolean>(
+const _getKV = async <T extends string | number | boolean>(
     key: string,
     type: string,
 ): Promise<T | undefined> => {

@@ -26,7 +26,9 @@ export type LocalUser = z.infer<typeof LocalUser>;
  * Return the logged-in user, if someone is indeed logged in. Otherwise return
  * `undefined`.
  *
- * The user's data is stored in the browser's localStorage.
+ * The user's data is stored in the browser's localStorage. Thus, this function
+ * only works from the main thread, not from web workers (local storage is not
+ * accessible to web workers).
  */
 export const localUser = (): LocalUser | undefined => {
     // TODO: duplicate of getData("user")

@@ -182,7 +182,7 @@ const isHDRVideo = async (ffmpeg: FFmpeg, inputFilePath: string) => {
         // correct in a multi stream file because the ffmpeg automatic
         // mapping will use the highest resolution stream, but short of
         // reinventing ffmpeg's resolution mechanism, it is a reasonable
-        // assumption for our current, heuristic, check.
+        // assumption for our current heuristic check.
         ["-select_streams", "v:0"],
         // Output JSON
         ["-of", "json"],

@@ -100,7 +100,13 @@ export const fetchFileData = async (
     fileID: number,
     publicAlbumsCredentials?: PublicAlbumsCredentials,
 ): Promise<RemoteFileData | undefined> => {
-    const params = new URLSearchParams({ type, fileID: fileID.toString() });
+    const params = new URLSearchParams({
+        type,
+        fileID: fileID.toString(),
+        // Ask museum to respond with 204 instead of 404 if no playlist exists
+        // for the given file.
+        preferNoContent: "true",
+    });
 
     let res: Response;
     if (publicAlbumsCredentials) {

@@ -116,6 +122,11 @@ export const fetchFileData = async (
         });
     }
 
+    if (res.status == 204) return undefined;
+    // We're passing `preferNoContent` so the expected response is 204, but this
+    // might be a self hoster running an older museum that does not recognize
+    // that flag, so retain the old behavior. This fallback can be removed in a
+    // few months (tag: Migration, note added May 2025).
     if (res.status == 404) return undefined;
     ensureOk(res);
     return z.object({ data: RemoteFileData }).parse(await res.json()).data;

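Together with the museum-side change in the filedata controller above, the status handling forms a small compatibility matrix; callers of fetchFileData see `undefined` either way. As a sketch:

```ts
// 204: new museum honoring preferNoContent → no data exists.
// 404: older museum that ignores the flag → same meaning, legacy fallback.
// 200: data exists and is returned in the body.
const hasNoFileData = (status: number): boolean =>
    status == 204 || status == 404;
```
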
@@ -126,9 +137,21 @@ export const fetchFileData = async (
  * structure has more fields, there are just the fields we are interested in.
  */
 const RemoteFDStatus = z.object({
+    /**
+     * The ID of the file whose file data we're querying.
+     */
     fileID: z.number(),
-    /** Expected to be one of {@link FileDataType} */
+    /**
+     * Expected to be one of {@link FileDataType}
+     */
     type: z.string(),
+    /**
+     * `true` if the file data has been deleted.
+     *
+     * This can be true in the in-progress partial deletion case, in which the
+     * file data deletion has been processed but the file deletion has not yet
+     * been processed.
+     */
     isDeleted: z.boolean(),
     /**
      * The epoch microseconds when this file data entry was added or updated.

@@ -147,7 +170,8 @@ export interface UpdatedFileDataFileIDsPage {
     fileIDs: Set<number>;
     /**
      * The latest updatedAt (epoch microseconds) time obtained from remote in
-     * this batch this sync from amongst all of these files.
+     * this batch of sync (from amongst all of the files in the batch, not just
+     * those that were filtered to be part of {@link fileIDs}).
      */
     lastUpdatedAt: number;
 }

@@ -167,18 +191,25 @@ export interface UpdatedFileDataFileIDsPage {
  * Set this to zero to start from the beginning.
  *
  * @param onPage A callback invoked for each page of results received from
- * remote. It is passed both the fileIDs received in the batch under
- * consideration, and the largest of the updated time for all entries
- * (irrespective of {@link type}) in that batch.
+ * remote. It is passed the fileIDs received in the batch under consideration,
+ * and the largest of the updated time for all entries (irrespective of
+ * {@link type}) in that batch.
  *
  * ----
  *
- * Implementation notes:
+ * [Note: Pruning stale status-diff entries]
  *
  * Unlike other "diff" APIs, the diff API used here won't return tombstone
  * entries for deleted files. This is not a problem because there are no current
  * cases where existing playlists or ML indexes get deleted (unless the
  * underlying file is deleted). See: [Note: Caching HLS playlist data].
+ *
+ * Note that the "/files/data/status-diff" includes entries for files that are
+ * in trash. This means that, while not a practical problem (because it's just
+ * numeric ids), the number of fileIDs we store locally can grow unbounded as
+ * files move to trash and then get deleted. So to prune them, we also add a
+ * hook to the /trash/v2/diff processing, and prune any locally saved file IDs
+ * which have been deleted from trash.
  */
 export const syncUpdatedFileDataFileIDs = async (
     type: FileDataType,

@@ -199,6 +230,9 @@ export const syncUpdatedFileDataFileIDs = async (
         const fileIDs = new Set<number>();
         for (const fd of diff) {
             lastUpdatedAt = Math.max(lastUpdatedAt, fd.updatedAt);
+            // While we could prune isDeleted entries here, we can also rely
+            // on the pruning that happens when the trash gets synced.
+            // See: [Note: Pruning stale status-diff entries]
             if (fd.type == type && !fd.isDeleted) {
                 fileIDs.add(fd.fileID);
             }

@@ -3,12 +3,19 @@ import { assertionFailed } from "ente-base/assert";
 import { decryptBlob } from "ente-base/crypto";
 import type { EncryptedBlob } from "ente-base/crypto/types";
 import { ensureElectron } from "ente-base/electron";
-import { isHTTP4xxError, type PublicAlbumsCredentials } from "ente-base/http";
+import {
+    isHTTP4xxError,
+    retryAsyncOperation,
+    type PublicAlbumsCredentials,
+} from "ente-base/http";
+import { getKV, getKVN, setKV } from "ente-base/kv";
+import { ensureLocalUser } from "ente-base/local-user";
 import log from "ente-base/log";
 import { fileLogID, type EnteFile } from "ente-media/file";
 import { FileType } from "ente-media/file-type";
 import {
     getAllLocalFiles,
+    getLocalTrashFileIDs,
     uniqueFilesByID,
 } from "ente-new/photos/services/files";
 import { settingsSnapshot } from "ente-new/photos/services/settings";

@@ -21,6 +28,7 @@ import {
     initiateGenerateHLS,
     readVideoStream,
     videoStreamDone,
+    type GenerateHLSResult,
 } from "../utils/native-stream";
 import { downloadManager, isNetworkDownloadError } from "./download";
 import {

@@ -351,11 +359,6 @@ const blobToDataURL = (blob: Blob) =>
         reader.readAsDataURL(blob);
     });
 
-// TODO(HLS): Store this in DB.
-let _videoPreviewProcessedFileIDs: number[] = [];
-let _videoPreviewFailedFileIDs: number[] = [];
-let _videoPreviewSyncLastUpdatedAt: number | undefined;
-
 /**
  * Return the (persistent) {@link Set} containing the ids of the files which
  * have already been processed for generating their streaming variant.

@@ -368,11 +371,18 @@ let _videoPreviewSyncLastUpdatedAt: number | undefined;
  * The data is retrieved from persistent storage (KV DB), where it is stored as
  * an array.
  */
-const savedProcessedVideoFileIDs = async () => {
-    // TODO(HLS): make async
-    await wait(0);
-    return new Set(_videoPreviewProcessedFileIDs);
-};
+const savedProcessedVideoFileIDs = () =>
+    // [Note: Avoiding zod parsing overhead for DB arrays]
+    //
+    // Validating that the value we read from the DB is indeed the same as the
+    // type we expect can be done using zod, but for potentially very large
+    // arrays, this has an overhead that is perhaps not justified when dealing
+    // with DB entries we ourselves wrote.
+    //
+    // As an optimization, we skip the runtime check here and cast. This might
+    // not be the most optimal choice in the future, so (a) use it sparingly,
+    // and (b) mark all such cases with the title of this note.
+    getKV("videoPreviewProcessedFileIDs").then((v) => new Set(v as number[]));
 
 /**
  * Return the (persistent) {@link Set} containing the ids of the files for which

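To make the trade-off in the note concrete, here are the validated and cast variants side by side (a sketch; `getKV` returning an unvalidated value is assumed from its usage above):

```ts
import { z } from "zod";

declare const getKV: (key: string) => Promise<unknown>;

// Validated: zod walks the entire array at runtime, O(n) per read.
const readValidated = (key: string) =>
    getKV(key).then((v) => new Set(z.number().array().parse(v)));

// Cast: trust values we ourselves wrote, skipping the runtime check.
const readCast = (key: string) =>
    getKV(key).then((v) => new Set(v as number[]));
```
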
@@ -380,11 +390,9 @@ const savedProcessedVideoFileIDs = async () => {
  *
  * @see also {@link savedProcessedVideoFileIDs}.
  */
-const savedFailedVideoFileIDs = async () => {
-    // TODO(HLS): make async
-    await wait(0);
-    return new Set(_videoPreviewFailedFileIDs);
-};
+const savedFailedVideoFileIDs = () =>
+    // See: [Note: Avoiding zod parsing overhead for DB arrays]
+    getKV("videoPreviewFailedFileIDs").then((v) => new Set(v as number[]));
 
 /**
  * Update the persisted set of IDs of files which have already been processed

@@ -392,11 +400,8 @@
  *
  * @see also {@link savedProcessedVideoFileIDs}.
  */
-const saveProcessedVideoFileIDs = async (videoFileIDs: Set<number>) => {
-    // TODO(HLS): make async
-    await wait(0);
-    _videoPreviewProcessedFileIDs = Array.from(videoFileIDs);
-};
+const saveProcessedVideoFileIDs = (videoFileIDs: Set<number>) =>
+    setKV("videoPreviewProcessedFileIDs", Array.from(videoFileIDs));
 
 /**
  * Update the persisted set of IDs of files for which attempt to generate a

@@ -404,11 +409,8 @@
  *
  * @see also {@link savedProcessedVideoFileIDs}.
  */
-const saveFailedVideoFileIDs = async (videoFileIDs: Set<number>) => {
-    // TODO(HLS): make async
-    await wait(0);
-    _videoPreviewFailedFileIDs = Array.from(videoFileIDs);
-};
+const saveFailedVideoFileIDs = (videoFileIDs: Set<number>) =>
+    setKV("videoPreviewFailedFileIDs", Array.from(videoFileIDs));
 
 /**
  * Mark the provided file ID as having been processed to generate a video

@@ -466,11 +468,7 @@ const markFailedVideoFileID = async (fileID: number) => {
  * The returned value is an epoch millisecond value suitable to be passed to
  * {@link syncUpdatedFileDataFileIDs}.
  */
-const savedSyncLastUpdatedAt = async () => {
-    // TODO(HLS): make async
-    await wait(0);
-    return _videoPreviewSyncLastUpdatedAt;
-};
+const savedSyncLastUpdatedAt = () => getKVN("videoPreviewSyncLastUpdatedAt");
 
 /**
  * Update the persisted timestamp used for syncing processed file IDs with

@@ -478,11 +476,8 @@
  *
  * Use {@link savedSyncLastUpdatedAt} to get the persisted value back.
  */
-const saveSyncLastUpdatedAt = async (lastUpdatedAt: number) => {
-    // TODO(HLS): make async
-    await wait(0);
-    _videoPreviewSyncLastUpdatedAt = lastUpdatedAt;
-};
+const saveSyncLastUpdatedAt = (lastUpdatedAt: number) =>
+    setKV("videoPreviewSyncLastUpdatedAt", lastUpdatedAt);
 
 /**
  * Fetch IDs of files from remote that have been processed by other clients

@@ -500,6 +495,33 @@ const syncProcessedFileIDs = async () =>
         },
     );
 
+/**
+ * Remove any saved entries for file IDs which were previously in trash but now
+ * have been permanently deleted.
+ *
+ * This is called when processing the trash diff. It gives us a hook to clear
+ * these IDs from our video processing related local state.
+ *
+ * See: [Note: Pruning stale status-diff entries]
+ *
+ * It is a no-op when we're running in the context of the web app (since it
+ * doesn't currently process videos, so doesn't need to keep any local state for
+ * that purpose).
+ */
+export const videoPrunePermanentlyDeletedFileIDsIfNeeded = async (
+    deletedFileIDs: Set<number>,
+) => {
+    if (!isDesktop) return;
+
+    const existing = await savedProcessedVideoFileIDs();
+    if (existing.size > 0) {
+        const updated = existing.difference(deletedFileIDs);
+        if (updated.size != existing.size) {
+            await saveProcessedVideoFileIDs(updated);
+        }
+    }
+};
+
 /**
  * If video processing is enabled, trigger a sync with remote and any subsequent
  * backfill queue processing for pending videos.

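The prune helper relies on `Set.prototype.difference` (and `backfillQueue` further below on `Set.prototype.union`), ES2024 set methods available in current Chromium and Node.js runtimes. Their semantics, as a quick sketch:

```ts
const existing = new Set([1, 2, 3, 4]);
const deleted = new Set([2, 4, 5]);

const updated = existing.difference(deleted); // Set {1, 3}

// Persist only when something was actually removed, mirroring the
// size comparison in the function above.
const changed = updated.size != existing.size; // true
```
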
@@ -647,13 +669,15 @@ const processQueue = async () => {
         return;
     }
 
+    const userID = ensureLocalUser().id;
+
     let bq: typeof _state.liveQueue | undefined;
     while (isVideoProcessingEnabled()) {
         let item = _state.liveQueue.shift();
         if (!item) {
             if (!bq && _state.haveSyncedOnce) {
                 /* initialize */
-                bq = await backfillQueue();
+                bq = await backfillQueue(userID);
             }
             if (bq) {
                 switch (bq.length) {

@@ -662,7 +686,7 @@
                         break;
                     case 1 /* last item. take it, and refill queue */:
                         item = bq.pop();
-                        bq = await backfillQueue();
+                        bq = await backfillQueue(userID);
                         break;
                     default:
                         /* more than one item. take it */

@@ -707,16 +731,29 @@ const processQueue = async () => {
  * Return the next batch of videos that need to be processed.
  *
  * If there is nothing pending, return an empty array.
+ *
+ * @param userID The ID of the currently logged in user. This is used to filter
+ * the files to only include those that are owned by the user.
  */
-const backfillQueue = async (): Promise<VideoProcessingQueueItem[]> => {
+const backfillQueue = async (
+    userID: number,
+): Promise<VideoProcessingQueueItem[]> => {
     const allCollectionFiles = await getAllLocalFiles();
+    const localTrashFileIDs = await getLocalTrashFileIDs();
+    const videoFiles = uniqueFilesByID(
+        allCollectionFiles.filter(
+            (f) =>
+                f.ownerID == userID &&
+                f.metadata.fileType == FileType.video &&
+                !localTrashFileIDs.has(f.id),
+        ),
+    );
+
     const doneIDs = (await savedProcessedVideoFileIDs()).union(
         await savedFailedVideoFileIDs(),
     );
-    const videoFiles = uniqueFilesByID(
-        allCollectionFiles.filter((f) => f.metadata.fileType == FileType.video),
-    );
     const pendingVideoFiles = videoFiles.filter((f) => !doneIDs.has(f.id));
+
     const batch = randomSample(pendingVideoFiles, 50);
     return batch.map((file) => ({ file }));
 };

@@ -798,12 +835,24 @@ const processQueueItem = async ({
 
     log.info(`Generate HLS for ${fileLogID(file)} | start`);
 
-    // TODO(HLS): Inside this needs to be more granular in case of errors.
-    const res = await initiateGenerateHLS(
-        electron,
-        sourceVideo,
-        objectUploadURL,
-    );
+    let res: GenerateHLSResult | undefined;
+    try {
+        res = await initiateGenerateHLS(electron, sourceVideo, objectUploadURL);
+    } catch (e) {
+        // Failures during stream generation on the native side are expected to
+        // happen in two cases:
+        //
+        // 1. There is something specific to this video that doesn't work with
+        //    the current HLS generation pipeline (the ffmpeg invocation).
+        //
+        // 2. The upload of the generated video fails.
+        //
+        // The native side code already retries failures for case 2 (except HTTP
+        // 4xx errors). Thus, usually we should come here only for case 1, and
+        // retrying the same video again will not work either.
+        await markFailedVideoFileID(file.id);
+        throw e;
+    }
 
     if (!res) {
         log.info(`Generate HLS for ${fileLogID(file)} | not-required`);

@@ -824,7 +873,9 @@
     });
 
     try {
-        await putVideoData(file, playlistData, objectID, videoSize);
+        await retryAsyncOperation(() =>
+            putVideoData(file, playlistData, objectID, videoSize),
+        );
     } catch (e) {
         if (isHTTP4xxError(e)) await markFailedVideoFileID(file.id);
         throw e;

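`retryAsyncOperation` comes from ente-base/http; its exact policy is not visible in this diff. A typical shape for such a helper, shown purely as an illustration with hypothetical defaults, is retry with exponential backoff:

```ts
// Sketch only: retry `op` a few times, backing off 1s, 2s, 4s, ...
const retrySketch = async <T>(
    op: () => Promise<T>,
    attempts = 3,
): Promise<T> => {
    for (let i = 0; ; i++) {
        try {
            return await op();
        } catch (e) {
            if (i == attempts - 1) throw e; // out of attempts, rethrow
            await new Promise((r) => setTimeout(r, 1000 * 2 ** i));
        }
    }
};
```

Note that the HTTP 4xx classification stays in the caller's catch: a 4xx surfacing from putVideoData marks the file as failed rather than being retried indefinitely.
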
@@ -246,7 +246,8 @@ export const initiateGenerateHLS = async (
 /**
  * Variant of {@link readStream} tailored for video conversion.
  *
- * @param token A token obtained from {@link writeVideoStream}.
+ * @param token A token obtained from a video conversion operation like
+ * {@link initiateConvertToMP4} or {@link initiateGenerateHLS}.
  *
  * @returns a Response that contains the data associated with the provided
  * token.

@@ -270,10 +271,8 @@ export const readVideoStream = async (
 };
 
 /**
- * Sibling of {@link readConvertToMP4Stream} to let the native side know when we
- * are done reading the response, so it can dispose any temporary resources.
- *
- * @param token A token obtained from {@link writeVideoStream}.
+ * Sibling of {@link readVideoStream} to let the native side know when we are
+ * done reading the response, so it can dispose any temporary resources.
  */
 export const videoStreamDone = async (
     _: Electron,

@@ -343,9 +343,29 @@ export async function cleanTrashCollections(fileTrash: Trash) {
 async function getLastTrashSyncTime() {
     return (await localForage.getItem<number>(TRASH_TIME)) ?? 0;
 }
+
+/**
+ * Update our locally saved data about the files and collections in trash by
+ * syncing with remote.
+ *
+ * The sync uses a diff-based mechanism that syncs forward from the last sync
+ * time (also persisted).
+ *
+ * @param onUpdateTrashFiles A callback invoked when the locally persisted trash
+ * items are updated. This can be used for the UI to also update its state. This
+ * callback can be invoked multiple times during the sync (once for each batch
+ * that gets processed).
+ *
+ * @param onPruneDeletedFileIDs A callback invoked when files that were
+ * previously in trash have now been permanently deleted. This can be used by
+ * other subsystems to prune data referring to files that now have been deleted
+ * permanently. This callback can be invoked multiple times during the sync
+ * (once for each batch that gets processed).
+ */
 export async function syncTrash(
     collections: Collection[],
-    setTrashedFiles: ((fs: EnteFile[]) => void) | undefined,
+    onUpdateTrashFiles: ((files: EnteFile[]) => void) | undefined,
+    onPruneDeletedFileIDs: (deletedFileIDs: Set<number>) => Promise<void>,
 ): Promise<void> {
     const trash = await getLocalTrash();
     collections = [...collections, ...(await getLocalDeletedCollections())];

@@ -359,21 +379,23 @@
 
     const updatedTrash = await updateTrash(
         collectionMap,
-        lastSyncTime,
-        setTrashedFiles,
         trash,
+        lastSyncTime,
+        onUpdateTrashFiles,
+        onPruneDeletedFileIDs,
     );
     await cleanTrashCollections(updatedTrash);
 }
 
-export const updateTrash = async (
+const updateTrash = async (
     collections: Map<number, Collection>,
-    sinceTime: number,
-    setTrashedFiles: ((fs: EnteFile[]) => void) | undefined,
     currentTrash: Trash,
+    sinceTime: number,
+    onUpdateTrashFiles: ((files: EnteFile[]) => void) | undefined,
+    onPruneDeletedFileIDs: (deletedFileIDs: Set<number>) => Promise<void>,
 ): Promise<Trash> => {
+    let updatedTrash: Trash = [...currentTrash];
     try {
-        let updatedTrash: Trash = [...currentTrash];
         let time = sinceTime;
 
         let resp;

@@ -387,6 +409,7 @@ export const updateTrash = async (
                 { sinceTime: time },
                 { "X-Auth-Token": token },
             );
+            const deletedFileIDs = new Set<number>();
             // #Perf: This can be optimized by running the decryption in parallel
             for (const trashItem of resp.data.diff as EncryptedTrashItem[]) {
                 const collectionID = trashItem.file.collectionID;

@@ -398,6 +421,9 @@
                         ...collections.values(),
                     ]);
                 }
+                if (trashItem.isDeleted) {
+                    deletedFileIDs.add(trashItem.file.id);
+                }
                 if (!trashItem.isDeleted && !trashItem.isRestored) {
                     const decryptedFile = await decryptFile(
                         trashItem.file,

@@ -415,15 +441,17 @@
                 time = resp.data.diff.slice(-1)[0].updatedAt;
             }
 
-            setTrashedFiles?.(getTrashedFiles(updatedTrash));
+            onUpdateTrashFiles?.(getTrashedFiles(updatedTrash));
+            if (deletedFileIDs.size > 0) {
+                await onPruneDeletedFileIDs(deletedFileIDs);
+            }
             await localForage.setItem(TRASH, updatedTrash);
             await localForage.setItem(TRASH_TIME, time);
         } while (resp.data.hasMore);
-        return updatedTrash;
     } catch (e) {
         log.error("Get trash files failed", e);
     }
-    return currentTrash;
+    return updatedTrash;
 };
 
 export const emptyTrash = async () => {

@@ -242,6 +242,13 @@ export function getTrashedFiles(trash: Trash): EnteFile[] {
     );
 }
 
+/**
+ * Return the IDs of all the files that are part of the trash as per our local
+ * database.
+ */
+export const getLocalTrashFileIDs = () =>
+    getLocalTrash().then((trash) => new Set(trash.map((f) => f.file.id)));
+
 const sortTrashFiles = (files: EnteFile[]) => {
     return files.sort((a, b) => {
         if (a.deleteBy === b.deleteBy) {

@@ -255,7 +255,7 @@ export const addFileEntry = async (fileID: number) => {
  */
 export const updateAssumingLocalFiles = async (
     localFileIDs: number[],
-    localTrashFilesIDs: number[],
+    localTrashFilesIDs: Set<number>,
 ) => {
     const db = await mlDB();
     const tx = db.transaction(

@@ -268,14 +268,13 @@ export const updateAssumingLocalFiles = async (
         .getAllKeys(IDBKeyRange.only("indexed"));
 
     const local = new Set(localFileIDs);
-    const localTrash = new Set(localTrashFilesIDs);
     const fdb = new Set(fdbFileIDs);
     const fdbIndexed = new Set(fdbIndexedFileIDs);
 
     const newFileIDs = localFileIDs.filter((id) => !fdb.has(id));
     const removedFileIDs = fdbFileIDs.filter((id) => {
         if (local.has(id)) return false; // Still exists.
-        if (localTrash.has(id)) {
+        if (localTrashFilesIDs.has(id)) {
             // Exists in trash.
             if (fdbIndexed.has(id)) {
                 // But is already indexed, so let it be.

@@ -9,7 +9,7 @@ import { isNetworkDownloadError } from "ente-gallery/services/download";
 import type { ProcessableUploadItem } from "ente-gallery/services/upload";
 import { fileLogID, type EnteFile } from "ente-media/file";
 import { wait } from "ente-utils/promise";
-import { getAllLocalFiles, getLocalTrashedFiles } from "../files";
+import { getAllLocalFiles, getLocalTrashFileIDs } from "../files";
 import {
     createImageBitmapAndData,
     fetchRenderableBlob,

@@ -438,11 +438,9 @@ const syncWithLocalFilesAndGetFilesToIndex = async (
     const localFiles = await getAllLocalFiles();
     const localFileByID = new Map(localFiles.map((f) => [f.id, f]));
 
-    const localTrashFileIDs = (await getLocalTrashedFiles()).map((f) => f.id);
-
     await updateAssumingLocalFiles(
         Array.from(localFileByID.keys()),
-        localTrashFileIDs,
+        await getLocalTrashFileIDs(),
     );
 
     const fileIDsToIndex = await getIndexableFileIDs(count);

@@ -1,5 +1,8 @@
 import { resetFileViewerDataSourceOnClose } from "ente-gallery/components/viewer/data-source";
-import { videoProcessingSyncIfNeeded } from "ente-gallery/services/video";
+import {
+    videoProcessingSyncIfNeeded,
+    videoPrunePermanentlyDeletedFileIDsIfNeeded,
+} from "ente-gallery/services/video";
 import type { Collection } from "ente-media/collection";
 import type { EnteFile } from "ente-media/file";
 import { isHiddenCollection } from "ente-new/photos/services/collection";

@@ -141,7 +144,11 @@ export const syncCollectionAndFiles = async (
         opts?.onResetHiddenFiles,
         opts?.onFetchHiddenFiles,
     );
-    await syncTrash(collections, opts?.onResetTrashedFiles);
+    await syncTrash(
+        collections,
+        opts?.onResetTrashedFiles,
+        videoPrunePermanentlyDeletedFileIDsIfNeeded,
+    );
     if (didUpdateNormalFiles || didUpdateHiddenFiles) {
         // TODO: Ok for now since its is only commented for the deduper (gallery
         // does this on the return value), but still needs fixing instead of a