[server] Lint fix

This commit is contained in:
Neeraj Gupta
2024-08-10 17:20:03 +05:30
parent 97c9253127
commit bac660f7a0
4 changed files with 8 additions and 13 deletions

View File

@@ -24,10 +24,6 @@ import (
gTime "time"
)
const (
embeddingFetchTimeout = 10 * gTime.Second
)
// _fetchConfig is the configuration for fetching objects from S3
type _fetchConfig struct {
RetryCount int
@@ -173,7 +169,7 @@ func (c *Controller) GetFilesData(ctx *gin.Context, req fileData.GetFilesData) (
errFileIds := make([]int64, 0)
for i := range doRows {
dbFileIds = append(dbFileIds, doRows[i].FileID)
if doRows[i].IsDeleted == false {
if !doRows[i].IsDeleted {
activeRows = append(activeRows, doRows[i])
}
}
@@ -209,7 +205,7 @@ func (c *Controller) GetFilesData(ctx *gin.Context, req fileData.GetFilesData) (
func (c *Controller) getS3FileMetadataParallel(dbRows []fileData.Row) ([]bulkS3MetaFetchResult, error) {
var wg sync.WaitGroup
embeddingObjects := make([]bulkS3MetaFetchResult, len(dbRows))
for i, _ := range dbRows {
for i := range dbRows {
dbRow := dbRows[i]
wg.Add(1)
globalFileFetchSemaphore <- struct{}{} // Acquire from global semaphore

View File

@@ -8,7 +8,7 @@ import (
"github.com/ente-io/museum/ente/filedata"
fileDataRepo "github.com/ente-io/museum/pkg/repo/filedata"
enteTime "github.com/ente-io/museum/pkg/utils/time"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
"time"
)
@@ -80,8 +80,7 @@ func (c *Controller) deleteFileRow(fileDataRow filedata.Row) error {
}
ctxLogger := log.WithField("file_id", fileDataRow.DeleteFromBuckets).WithField("type", fileDataRow.Type).WithField("user_id", fileDataRow.UserID)
objectKeys := filedata.AllObjects(fileID, ownerID, fileDataRow.Type)
bucketColumnMap := make(map[string]string)
bucketColumnMap, err = getMapOfBucketItToColumn(fileDataRow)
bucketColumnMap, err := getMapOfBucketItToColumn(fileDataRow)
if err != nil {
ctxLogger.WithError(err).Error("Failed to get bucketColumnMap")
return err
@@ -91,7 +90,7 @@ func (c *Controller) deleteFileRow(fileDataRow filedata.Row) error {
for _, objectKey := range objectKeys {
err := c.ObjectCleanupController.DeleteObjectFromDataCenter(objectKey, bucketID)
if err != nil {
ctxLogger.WithError(err).WithFields(logrus.Fields{
ctxLogger.WithError(err).WithFields(log.Fields{
"bucketID": bucketID,
"column": columnName,
"objectKey": objectKey,
@@ -101,7 +100,7 @@ func (c *Controller) deleteFileRow(fileDataRow filedata.Row) error {
}
dbErr := c.Repo.RemoveBucket(fileDataRow, bucketID, columnName)
if dbErr != nil {
ctxLogger.WithError(dbErr).WithFields(logrus.Fields{
ctxLogger.WithError(dbErr).WithFields(log.Fields{
"bucketID": bucketID,
"column": columnName,
}).Error("Failed to remove bucket from db")

View File

@@ -209,7 +209,7 @@ func (r *Repository) RegisterReplicationAttempt(ctx context.Context, row filedat
if array.StringInList(dstBucketID, row.DeleteFromBuckets) {
return r.MoveBetweenBuckets(row, dstBucketID, DeletionColumn, InflightRepColumn)
}
if array.StringInList(dstBucketID, row.InflightReplicas) == false {
if !array.StringInList(dstBucketID, row.InflightReplicas) {
return r.AddBucket(row, dstBucketID, InflightRepColumn)
}
return nil

View File

@@ -155,7 +155,7 @@ func (config *S3Config) initialize() {
}
if err := viper.Sub("s3").Unmarshal(&config.fileDataConfig); err != nil {
log.Fatal("Unable to decode into struct: %v\n", err)
log.Fatalf("Unable to decode into struct: %v\n", err)
return
}