4562d40fce
fix hard-coded timeout and error panic in API archive download endpoint

This commit updates the `GET /api/v1/repos/{owner}/{repo}/archive/{archive}` endpoint, which prior to this PR had a couple of issues:

1. The endpoint had a hard-coded 20s timeout for the archiver to complete, after which a 500 (Internal Server Error) was returned to the client. For a scripted API client there was no clear way of telling that the operation timed out and that it should retry.

2. Whenever the timeout _did occur_, the code used to panic. This was caused by the API endpoint "delegating" to the same call path as the web, which uses a slightly different way of reporting errors (HTML rather than JSON, for example). More specifically, `api/v1/repo/file.go#GetArchive` just called through to `web/repo/repo.go#Download`, which expects the `Context` to have a `Render` field set, but which is `nil` for API calls. Hence, a `nil` pointer error.

The code addresses (1) by dropping the hard-coded timeout; instead, any timeout/cancellation on the incoming `Context` is used. It addresses (2) by updating the API endpoint to use a separate call path for the API-triggered archive download, so that errors are reported as JSON rather than HTML.

Signed-off-by: Peter Gardfjäll <peter.gardfjall.work@gmail.com>
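To make the described pattern concrete, here is a minimal, self-contained sketch: the handler derives its timeout behaviour from the incoming request `Context` rather than a hard-coded limit, and reports failures as JSON instead of delegating to an HTML-rendering path. This is not the actual Forgejo/Gitea code; the route, the `waitForArchive` helper, and the status codes below are illustrative assumptions.

```go
package main

import (
	"context"
	"encoding/json"
	"errors"
	"net/http"
	"time"
)

// apiError is the kind of JSON error body a scripted client can act on.
type apiError struct {
	Message string `json:"message"`
}

func writeJSONError(w http.ResponseWriter, status int, msg string) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	_ = json.NewEncoder(w).Encode(apiError{Message: msg})
}

// waitForArchive stands in for the real archiver service (hypothetical helper):
// it blocks until the archive is ready or the context is done. It imposes no
// timeout of its own; the caller's context decides how long to wait.
func waitForArchive(ctx context.Context, repo, commitID string) (string, error) {
	select {
	case <-time.After(2 * time.Second): // pretend the archive just finished
		return "/tmp/" + repo + "-" + commitID + ".tar.gz", nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func downloadArchive(w http.ResponseWriter, r *http.Request) {
	// The request context carries any client cancellation or server-side
	// deadline; the handler itself no longer hard-codes a 20s limit.
	path, err := waitForArchive(r.Context(), "repo", "abc123")
	if err != nil {
		if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
			// JSON, not an HTML error page, so an API client can tell it
			// should retry later.
			writeJSONError(w, http.StatusRequestTimeout, "archive not ready: "+err.Error())
			return
		}
		writeJSONError(w, http.StatusInternalServerError, err.Error())
		return
	}
	http.ServeFile(w, r, path)
}

func main() {
	http.HandleFunc("/api/v1/repos/owner/repo/archive/", downloadArchive)
	_ = http.ListenAndServe(":8080", nil)
}
```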
116 lines
3.4 KiB
Go
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package repo

import (
	"context"
	"fmt"
	"time"

	"code.gitea.io/gitea/models/db"
	"code.gitea.io/gitea/modules/git"
	"code.gitea.io/gitea/modules/timeutil"

	"xorm.io/builder"
)

// ArchiverStatus represents repo archive status
type ArchiverStatus int

// enumerate all repo archive statuses
const (
	ArchiverGenerating = iota // the archiver is generating
	ArchiverReady             // it's ready
)

// RepoArchiver represents a repository archive record
type RepoArchiver struct { //revive:disable-line:exported
	ID          int64              `xorm:"pk autoincr"`
	RepoID      int64              `xorm:"index unique(s)"`
	Type        git.ArchiveType    `xorm:"unique(s)"`
	Status      ArchiverStatus
	CommitID    string             `xorm:"VARCHAR(40) unique(s)"`
	CreatedUnix timeutil.TimeStamp `xorm:"INDEX NOT NULL created"`
}

func init() {
	db.RegisterModel(new(RepoArchiver))
}

// RelativePath returns the archive path relative to the archive storage root.
func (archiver *RepoArchiver) RelativePath() string {
	return fmt.Sprintf("%d/%s/%s.%s", archiver.RepoID, archiver.CommitID[:2], archiver.CommitID, archiver.Type.String())
}

var delRepoArchiver = new(RepoArchiver)

// DeleteRepoArchiver deletes the given archiver record
func DeleteRepoArchiver(ctx context.Context, archiver *RepoArchiver) error {
	_, err := db.GetEngine(ctx).ID(archiver.ID).Delete(delRepoArchiver)
	return err
}

// GetRepoArchiver gets the archiver for the given repo, type and commit, or nil if none exists
func GetRepoArchiver(ctx context.Context, repoID int64, tp git.ArchiveType, commitID string) (*RepoArchiver, error) {
	var archiver RepoArchiver
	has, err := db.GetEngine(ctx).Where("repo_id=?", repoID).And("`type`=?", tp).And("commit_id=?", commitID).Get(&archiver)
	if err != nil {
		return nil, err
	}
	if has {
		return &archiver, nil
	}
	return nil, nil
}

// AddRepoArchiver adds an archiver
func AddRepoArchiver(ctx context.Context, archiver *RepoArchiver) error {
	_, err := db.GetEngine(ctx).Insert(archiver)
	return err
}

// UpdateRepoArchiverStatus updates archiver's status
func UpdateRepoArchiverStatus(ctx context.Context, archiver *RepoArchiver) error {
	_, err := db.GetEngine(ctx).ID(archiver.ID).Cols("status").Update(archiver)
	return err
}

// DeleteAllRepoArchives deletes all repo archive records
func DeleteAllRepoArchives() error {
	_, err := db.GetEngine(db.DefaultContext).Where("1=1").Delete(new(RepoArchiver))
	return err
}

// FindRepoArchiversOption represents the options to find archivers
type FindRepoArchiversOption struct {
	db.ListOptions
	OlderThan time.Duration
}

// toConds builds the query conditions for the options (currently only the
// OlderThan cutoff on created_unix).
func (opts FindRepoArchiversOption) toConds() builder.Cond {
	cond := builder.NewCond()
	if opts.OlderThan > 0 {
		cond = cond.And(builder.Lt{"created_unix": time.Now().Add(-opts.OlderThan).Unix()})
	}
	return cond
}

// FindRepoArchives finds repo archivers
func FindRepoArchives(opts FindRepoArchiversOption) ([]*RepoArchiver, error) {
	archivers := make([]*RepoArchiver, 0, opts.PageSize)
	start, limit := opts.GetSkipTake()
	err := db.GetEngine(db.DefaultContext).Where(opts.toConds()).
		Asc("created_unix").
		Limit(limit, start).
		Find(&archivers)
	return archivers, err
}

// SetArchiveRepoState sets whether a repo is archived
func SetArchiveRepoState(repo *Repository, isArchived bool) (err error) {
	repo.IsArchived = isArchived
	_, err = db.GetEngine(db.DefaultContext).Where("id = ?", repo.ID).Cols("is_archived").NoAutoTime().Update(repo)
	return err
}