forgejo/cmd/migrate_storage.go
Peter Gardfjäll 4562d40fce
fix hard-coded timeout and error panic in API archive download endpoint (#20925)

This commit updates the `GET /api/v1/repos/{owner}/{repo}/archive/{archive}`
endpoint, which prior to this PR had a couple of issues.

1. The endpoint had a hard-coded 20s timeout for the archiver to complete, after
   which a 500 (Internal Server Error) was returned to the client. For a scripted
   API client there was no clear way of telling that the operation timed out and
   that it should retry.

2. Whenever the timeout _did occur_, the code used to panic. This was caused by
   the API endpoint "delegating" to the same call path as the web UI, which uses a
   slightly different way of reporting errors (HTML rather than JSON, for
   example).

   More specifically, `api/v1/repo/file.go#GetArchive` just called through to
   `web/repo/repo.go#Download`, which expects the `Context` to have a `Render`
   field set; for API calls that field is `nil`. Hence, a `nil` pointer error.

The code addresses (1) by dropping the hard-coded timeout. Instead, any
timeout/cancellation on the incoming `Context` is used.
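
As a minimal sketch (not the actual Forgejo/Gitea code; the package and function
names here are illustrative), the change amounts to selecting on the request's
own context instead of a fixed timer:

package archiveapi

import "context"

// waitForArchive blocks until the archiver signals completion or the caller's
// context is done. The old behaviour was a fixed `case <-time.After(20 * time.Second)`
// branch in place of the ctx.Done() case, so the deadline could not come from the request.
func waitForArchive(ctx context.Context, done <-chan struct{}) error {
	select {
	case <-done:
		return nil // archive finished
	case <-ctx.Done():
		return ctx.Err() // caller cancelled or its deadline expired
	}
}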

The code addresses (2) by updating the API endpoint to use a separate call path
for the API-triggered archive download. This avoids producing HTML error
responses; errors are now reported as JSON.
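
Again as an illustrative sketch using only the standard library (the real change
goes through Gitea's API context helpers, not `net/http` directly), reporting the
error as JSON rather than rendering an HTML page looks roughly like this:

package archiveapi

import (
	"encoding/json"
	"net/http"
)

// apiError reports an error as a JSON body, which a scripted API client can
// parse, instead of rendering an HTML error page (the web path relies on a
// Render field that is nil for API requests, hence the old panic).
func apiError(w http.ResponseWriter, status int, msg string) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	_ = json.NewEncoder(w).Encode(map[string]string{"message": msg})
}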

Signed-off-by: Peter Gardfjäll <peter.gardfjall.work@gmail.com>
2022-08-29 11:45:20 +02:00


// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package cmd

import (
	"context"
	"fmt"
	"strings"

	"code.gitea.io/gitea/models/db"
	git_model "code.gitea.io/gitea/models/git"
	"code.gitea.io/gitea/models/migrations"
	packages_model "code.gitea.io/gitea/models/packages"
	repo_model "code.gitea.io/gitea/models/repo"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/log"
	packages_module "code.gitea.io/gitea/modules/packages"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"

	"github.com/urfave/cli"
)

// CmdMigrateStorage represents the available migrate storage sub-command.
var CmdMigrateStorage = cli.Command{
	Name:        "migrate-storage",
	Usage:       "Migrate the storage",
	Description: "Copies stored files from storage configured in app.ini to parameter-configured storage",
	Action:      runMigrateStorage,
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "type, t",
			Value: "",
			Usage: "Type of stored files to copy. Allowed types: 'attachments', 'lfs', 'avatars', 'repo-avatars', 'repo-archivers', 'packages'",
		},
		cli.StringFlag{
			Name:  "storage, s",
			Value: "",
			Usage: "New storage type: local (default) or minio",
		},
		cli.StringFlag{
			Name:  "path, p",
			Value: "",
			Usage: "New storage placement if store is local (leave blank for default)",
		},
		cli.StringFlag{
			Name:  "minio-endpoint",
			Value: "",
			Usage: "Minio storage endpoint",
		},
		cli.StringFlag{
			Name:  "minio-access-key-id",
			Value: "",
			Usage: "Minio storage accessKeyID",
		},
		cli.StringFlag{
			Name:  "minio-secret-access-key",
			Value: "",
			Usage: "Minio storage secretAccessKey",
		},
		cli.StringFlag{
			Name:  "minio-bucket",
			Value: "",
			Usage: "Minio storage bucket",
		},
		cli.StringFlag{
			Name:  "minio-location",
			Value: "",
			Usage: "Minio storage location to create bucket",
		},
		cli.StringFlag{
			Name:  "minio-base-path",
			Value: "",
			Usage: "Minio storage basepath on the bucket",
		},
		cli.BoolFlag{
			Name:  "minio-use-ssl",
			Usage: "Enable SSL for minio",
		},
	},
}
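
// Example invocation (values are illustrative; the binary name depends on how
// the project is built and installed, e.g. gitea or forgejo):
//
//	./gitea migrate-storage --type=attachments --storage=minio \
//		--minio-endpoint=minio.example.com:9000 \
//		--minio-access-key-id=KEY --minio-secret-access-key=SECRET \
//		--minio-bucket=gitea

// migrateAttachments copies every stored attachment blob into dstStorage.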
func migrateAttachments(ctx context.Context, dstStorage storage.ObjectStorage) error {
	return db.IterateObjects(ctx, func(attach *repo_model.Attachment) error {
		_, err := storage.Copy(dstStorage, attach.RelativePath(), storage.Attachments, attach.RelativePath())
		return err
	})
}
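
// migrateLFS copies every Git LFS object from the configured LFS storage into dstStorage.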
func migrateLFS(ctx context.Context, dstStorage storage.ObjectStorage) error {
	return db.IterateObjects(ctx, func(mo *git_model.LFSMetaObject) error {
		_, err := storage.Copy(dstStorage, mo.RelativePath(), storage.LFS, mo.RelativePath())
		return err
	})
}
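
// migrateAvatars copies every custom user avatar into dstStorage.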
func migrateAvatars(ctx context.Context, dstStorage storage.ObjectStorage) error {
	return db.IterateObjects(ctx, func(user *user_model.User) error {
		_, err := storage.Copy(dstStorage, user.CustomAvatarRelativePath(), storage.Avatars, user.CustomAvatarRelativePath())
		return err
	})
}
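
// migrateRepoAvatars copies every custom repository avatar into dstStorage.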
func migrateRepoAvatars(ctx context.Context, dstStorage storage.ObjectStorage) error {
	return db.IterateObjects(ctx, func(repo *repo_model.Repository) error {
		_, err := storage.Copy(dstStorage, repo.CustomAvatarRelativePath(), storage.RepoAvatars, repo.CustomAvatarRelativePath())
		return err
	})
}
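
// migrateRepoArchivers copies every generated repository archive into dstStorage.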
func migrateRepoArchivers(ctx context.Context, dstStorage storage.ObjectStorage) error {
	return db.IterateObjects(ctx, func(archiver *repo_model.RepoArchiver) error {
		p := archiver.RelativePath()
		_, err := storage.Copy(dstStorage, p, storage.RepoArchives, p)
		return err
	})
}
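
// migratePackages copies every package blob, addressed by its SHA-256 hash, into dstStorage.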
func migratePackages(ctx context.Context, dstStorage storage.ObjectStorage) error {
	return db.IterateObjects(ctx, func(pb *packages_model.PackageBlob) error {
		p := packages_module.KeyToRelativePath(packages_module.BlobHash256Key(pb.HashSHA256))
		_, err := storage.Copy(dstStorage, p, storage.Packages, p)
		return err
	})
}
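
// runMigrateStorage initializes the database and the source storage from app.ini,
// builds the destination storage from the CLI flags, and copies all objects of the
// requested --type into it.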
func runMigrateStorage(ctx *cli.Context) error {
	stdCtx, cancel := installSignals()
	defer cancel()

	if err := initDB(stdCtx); err != nil {
		return err
	}

	log.Info("AppPath: %s", setting.AppPath)
	log.Info("AppWorkPath: %s", setting.AppWorkPath)
	log.Info("Custom path: %s", setting.CustomPath)
	log.Info("Log path: %s", setting.LogRootPath)
	log.Info("Configuration file: %s", setting.CustomConf)

	if err := db.InitEngineWithMigration(context.Background(), migrations.Migrate); err != nil {
		log.Fatal("Failed to initialize ORM engine: %v", err)
		return err
	}

	if err := storage.Init(); err != nil {
		return err
	}

	var dstStorage storage.ObjectStorage
	var err error
	switch strings.ToLower(ctx.String("storage")) {
	case "":
		fallthrough
	case string(storage.LocalStorageType):
		p := ctx.String("path")
		if p == "" {
log.Fatal("Path must be given when storage is loal")
			return nil
		}
		dstStorage, err = storage.NewLocalStorage(
			stdCtx,
			storage.LocalStorageConfig{
				Path: p,
			})
	case string(storage.MinioStorageType):
		dstStorage, err = storage.NewMinioStorage(
			stdCtx,
			storage.MinioStorageConfig{
				Endpoint:        ctx.String("minio-endpoint"),
				AccessKeyID:     ctx.String("minio-access-key-id"),
				SecretAccessKey: ctx.String("minio-secret-access-key"),
				Bucket:          ctx.String("minio-bucket"),
				Location:        ctx.String("minio-location"),
				BasePath:        ctx.String("minio-base-path"),
				UseSSL:          ctx.Bool("minio-use-ssl"),
			})
	default:
		return fmt.Errorf("unsupported storage type: %s", ctx.String("storage"))
	}
	if err != nil {
		return err
	}

	migratedMethods := map[string]func(context.Context, storage.ObjectStorage) error{
		"attachments":    migrateAttachments,
		"lfs":            migrateLFS,
		"avatars":        migrateAvatars,
		"repo-avatars":   migrateRepoAvatars,
		"repo-archivers": migrateRepoArchivers,
		"packages":       migratePackages,
	}

	tp := strings.ToLower(ctx.String("type"))
	if m, ok := migratedMethods[tp]; ok {
		if err := m(stdCtx, dstStorage); err != nil {
			return err
		}
		log.Info("%s files have successfully been copied to the new storage.", tp)
		return nil
	}
return fmt.Errorf("unsupported storage: %s", ctx.String("type"))
}