// Copyright 2014 The Gogs Authors. All rights reserved.
// Copyright 2018 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package repo

import (
	"bytes"
	"encoding/base64"
	"errors"
	"fmt"
	"io"
	"net/http"
	"path"
	"strings"
	"time"

	"code.gitea.io/gitea/models"
	git_model "code.gitea.io/gitea/models/git"
	repo_model "code.gitea.io/gitea/models/repo"
	"code.gitea.io/gitea/models/unit"
	"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/gitrepo"
|
2022-06-04 09:17:53 -04:00
|
|
|
|
"code.gitea.io/gitea/modules/httpcache"
|
|
|
|
|
"code.gitea.io/gitea/modules/lfs"
|
|
|
|
|
"code.gitea.io/gitea/modules/log"
|
2022-05-09 11:54:51 -04:00
|
|
|
|
"code.gitea.io/gitea/modules/setting"
|
2022-06-04 09:17:53 -04:00
|
|
|
|
"code.gitea.io/gitea/modules/storage"
|
2019-05-11 06:21:34 -04:00
|
|
|
|
api "code.gitea.io/gitea/modules/structs"
|
2021-01-26 10:36:53 -05:00
|
|
|
|
"code.gitea.io/gitea/modules/web"
|
2021-06-08 19:33:54 -04:00
|
|
|
|
"code.gitea.io/gitea/routers/common"
|
2024-02-27 02:12:22 -05:00
|
|
|
|
"code.gitea.io/gitea/services/context"
|
2022-08-29 05:45:20 -04:00
|
|
|
|
archiver_service "code.gitea.io/gitea/services/repository/archiver"
|
2021-11-24 02:56:24 -05:00
|
|
|
|
files_service "code.gitea.io/gitea/services/repository/files"
|
2014-11-16 21:32:26 -05:00
|
|
|
|
)

const (
	giteaObjectTypeHeader   = "X-Gitea-Object-Type"
	forgejoObjectTypeHeader = "X-Forgejo-Object-Type"
)
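
// The raw-file handlers below set both headers to the same object-type value,
// so clients written against either the Gitea or the Forgejo API keep working.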

// GetRawFile gets a file by path from a repository
func GetRawFile(ctx *context.APIContext) {
	// swagger:operation GET /repos/{owner}/{repo}/raw/{filepath} repository repoGetRawFile
	// ---
	// summary: Get a file from a repository
	// produces:
	// - application/octet-stream
	// parameters:
	// - name: owner
	//   in: path
	//   description: owner of the repo
	//   type: string
	//   required: true
	// - name: repo
	//   in: path
	//   description: name of the repo
	//   type: string
	//   required: true
	// - name: filepath
	//   in: path
	//   description: filepath of the file to get
	//   type: string
	//   required: true
	// - name: ref
	//   in: query
	//   description: "The name of the commit/branch/tag. Defaults to the repository’s default branch (usually master)"
	//   type: string
	//   required: false
	// responses:
	//   200:
	//     description: Returns raw file content.
	//     schema:
	//       type: file
	//   "404":
	//     "$ref": "#/responses/notFound"

	if ctx.Repo.Repository.IsEmpty {
		ctx.NotFound()
		return
	}

	blob, entry, lastModified := getBlobForEntry(ctx)
	if ctx.Written() {
		return
	}

	ctx.RespHeader().Set(giteaObjectTypeHeader, string(files_service.GetObjectTypeFromTreeEntry(entry)))
	ctx.RespHeader().Set(forgejoObjectTypeHeader, string(files_service.GetObjectTypeFromTreeEntry(entry)))

	if err := common.ServeBlob(ctx.Base, ctx.Repo.TreePath, blob, lastModified); err != nil {
		ctx.Error(http.StatusInternalServerError, "ServeBlob", err)
	}
}

// GetRawFileOrLFS gets a file by repo's path, redirecting to LFS if necessary.
func GetRawFileOrLFS(ctx *context.APIContext) {
	// swagger:operation GET /repos/{owner}/{repo}/media/{filepath} repository repoGetRawFileOrLFS
	// ---
	// summary: Get a file or its LFS object from a repository
	// produces:
	// - application/octet-stream
	// parameters:
	// - name: owner
	//   in: path
	//   description: owner of the repo
	//   type: string
	//   required: true
	// - name: repo
	//   in: path
	//   description: name of the repo
	//   type: string
	//   required: true
	// - name: filepath
	//   in: path
	//   description: filepath of the file to get
	//   type: string
	//   required: true
	// - name: ref
	//   in: query
	//   description: "The name of the commit/branch/tag. Defaults to the repository’s default branch (usually master)"
	//   type: string
	//   required: false
	// responses:
	//   200:
	//     description: Returns raw file content.
	//     schema:
	//       type: file
	//   "404":
	//     "$ref": "#/responses/notFound"

	if ctx.Repo.Repository.IsEmpty {
		ctx.NotFound()
		return
	}

	blob, entry, lastModified := getBlobForEntry(ctx)
	if ctx.Written() {
		return
	}

	ctx.RespHeader().Set(giteaObjectTypeHeader, string(files_service.GetObjectTypeFromTreeEntry(entry)))
	ctx.RespHeader().Set(forgejoObjectTypeHeader, string(files_service.GetObjectTypeFromTreeEntry(entry)))

	// LFS Pointer files are at most 1024 bytes - so any blob greater than 1024 bytes cannot be an LFS file
	if blob.Size() > 1024 {
		// First handle caching for the blob
		if httpcache.HandleGenericETagTimeCache(ctx.Req, ctx.Resp, `"`+blob.ID.String()+`"`, lastModified) {
			return
		}

		// OK not cached - serve!
		if err := common.ServeBlob(ctx.Base, ctx.Repo.TreePath, blob, lastModified); err != nil {
			ctx.ServerError("ServeBlob", err)
		}
		return
	}

	// OK, now the blob is known to have at most 1024 bytes, so we can simply read it in one go (this saves reading it twice)
	dataRc, err := blob.DataAsync()
	if err != nil {
		ctx.ServerError("DataAsync", err)
		return
	}

	// FIXME: code from #19689, what if the file is large ... OOM ...
	buf, err := io.ReadAll(dataRc)
	if err != nil {
		_ = dataRc.Close()
		ctx.ServerError("DataAsync", err)
		return
	}

	if err := dataRc.Close(); err != nil {
		log.Error("Error whilst closing blob %s reader in %-v. Error: %v", blob.ID, ctx.Repo.Repository, err)
	}

	// Check if the blob represents a pointer
	pointer, _ := lfs.ReadPointer(bytes.NewReader(buf))
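
	// For reference, an LFS pointer file is a tiny text blob of this shape
	// (illustrative values):
	//
	//	version https://git-lfs.github.com/spec/v1
	//	oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393
	//	size 12345
	//
	// ReadPointer yields an invalid pointer for anything that does not match
	// this layout, in which case the blob is served as-is below.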

	// if it's not a pointer, just serve the data directly
	if !pointer.IsValid() {
		// First handle caching for the blob
		if httpcache.HandleGenericETagTimeCache(ctx.Req, ctx.Resp, `"`+blob.ID.String()+`"`, lastModified) {
			return
		}

		// OK not cached - serve!
		common.ServeContentByReader(ctx.Base, ctx.Repo.TreePath, blob.Size(), bytes.NewReader(buf))
		return
	}

	// Now check if there is a MetaObject for this pointer
	meta, err := git_model.GetLFSMetaObjectByOid(ctx, ctx.Repo.Repository.ID, pointer.Oid)

	// If there isn't one, just serve the data directly
	if err == git_model.ErrLFSObjectNotExist {
		// Handle caching for the blob SHA (not the LFS object OID)
		if httpcache.HandleGenericETagTimeCache(ctx.Req, ctx.Resp, `"`+blob.ID.String()+`"`, lastModified) {
			return
		}

		common.ServeContentByReader(ctx.Base, ctx.Repo.TreePath, blob.Size(), bytes.NewReader(buf))
		return
	} else if err != nil {
		ctx.ServerError("GetLFSMetaObjectByOid", err)
		return
	}

	// Handle caching for the LFS object OID
	if httpcache.HandleGenericETagCache(ctx.Req, ctx.Resp, `"`+pointer.Oid+`"`) {
		return
	}

	if setting.LFS.Storage.MinioConfig.ServeDirect {
		// If we have a signed url (S3, object storage), redirect to this directly.
		u, err := storage.LFS.URL(pointer.RelativePath(), blob.Name(), nil)
		if u != nil && err == nil {
			ctx.Redirect(u.String())
			return
		}
	}

	lfsDataRc, err := lfs.ReadMetaObject(meta.Pointer)
	if err != nil {
		ctx.ServerError("ReadMetaObject", err)
		return
	}
	defer lfsDataRc.Close()

	common.ServeContentByReadSeeker(ctx.Base, ctx.Repo.TreePath, lastModified, lfsDataRc)
}

func getBlobForEntry(ctx *context.APIContext) (blob *git.Blob, entry *git.TreeEntry, lastModified *time.Time) {
	entry, err := ctx.Repo.Commit.GetTreeEntryByPath(ctx.Repo.TreePath)
	if err != nil {
		if git.IsErrNotExist(err) {
			ctx.NotFound()
		} else {
			ctx.Error(http.StatusInternalServerError, "GetTreeEntryByPath", err)
		}
		return nil, nil, nil
	}

	if entry.IsDir() || entry.IsSubModule() {
		ctx.NotFound("getBlobForEntry", nil)
		return nil, nil, nil
	}

	info, _, err := git.Entries([]*git.TreeEntry{entry}).GetCommitsInfo(ctx, ctx.Repo.Commit, path.Dir("/"+ctx.Repo.TreePath)[1:])
	if err != nil {
		ctx.Error(http.StatusInternalServerError, "GetCommitsInfo", err)
		return nil, nil, nil
	}
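
	// lastModified stays nil when no commit info comes back, so callers fall
	// back to serving the blob without a known modification time.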
	if len(info) == 1 {
		// Not Modified
		lastModified = &info[0].Commit.Committer.When
	}
	blob = entry.Blob()

	return blob, entry, lastModified
}

// GetArchive gets an archive of a repository
func GetArchive(ctx *context.APIContext) {
	// swagger:operation GET /repos/{owner}/{repo}/archive/{archive} repository repoGetArchive
	// ---
	// summary: Get an archive of a repository
	// produces:
	// - application/octet-stream
	// - application/zip
	// - application/gzip
	// parameters:
	// - name: owner
	//   in: path
	//   description: owner of the repo
	//   type: string
	//   required: true
	// - name: repo
	//   in: path
	//   description: name of the repo
	//   type: string
	//   required: true
	// - name: archive
	//   in: path
	//   description: the git reference for download with attached archive format (e.g. master.zip)
	//   type: string
	//   required: true
	// responses:
	//   200:
	//     description: success
	//   "404":
	//     "$ref": "#/responses/notFound"

	if ctx.Repo.GitRepo == nil {
		gitRepo, err := gitrepo.OpenRepository(ctx, ctx.Repo.Repository)
		if err != nil {
			ctx.Error(http.StatusInternalServerError, "OpenRepository", err)
			return
		}
		ctx.Repo.GitRepo = gitRepo
		defer gitRepo.Close()
	}

	archiveDownload(ctx)
}

func archiveDownload(ctx *context.APIContext) {
	uri := ctx.Params("*")
	ext, tp, err := archiver_service.ParseFileName(uri)
	if err != nil {
		ctx.Error(http.StatusBadRequest, "ParseFileName", err)
		return
	}
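
	// Illustrative example: for uri "main.tar.gz", ext would be ".tar.gz" and
	// tp the tar.gz archive type, leaving "main" as the requested reference.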

	aReq, err := archiver_service.NewRequest(ctx, ctx.Repo.Repository.ID, ctx.Repo.GitRepo, strings.TrimSuffix(uri, ext), tp)
	if err != nil {
		if errors.Is(err, archiver_service.ErrUnknownArchiveFormat{}) {
			ctx.Error(http.StatusBadRequest, "unknown archive format", err)
		} else if errors.Is(err, archiver_service.RepoRefNotFoundError{}) {
			ctx.Error(http.StatusNotFound, "unrecognized reference", err)
		} else {
			ctx.ServerError("archiver_service.NewRequest", err)
		}
		return
	}

	archiver, err := aReq.Await(ctx)
	if err != nil {
		ctx.ServerError("archiver.Await", err)
		return
	}

	download(ctx, aReq.GetArchiveName(), archiver)
}

func download(ctx *context.APIContext, archiveName string, archiver *repo_model.RepoArchiver) {
	downloadName := ctx.Repo.Repository.Name + "-" + archiveName

	// Add nix format link header so tarballs lock correctly:
	// https://github.com/nixos/nix/blob/56763ff918eb308db23080e560ed2ea3e00c80a7/doc/manual/src/protocols/tarball-fetcher.md
	ctx.Resp.Header().Add("Link", fmt.Sprintf(`<%s/archive/%s.%s?rev=%s>; rel="immutable"`,
		ctx.Repo.Repository.APIURL(),
		archiver.CommitID,
		archiver.Type.String(),
		archiver.CommitID,
	))
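
	// The emitted header looks like this (illustrative host and commit):
	//
	//	Link: <https://host/api/v1/repos/owner/repo/archive/<commit>.tar.gz?rev=<commit>>; rel="immutable"
	//
	// which lets Nix-style tarball fetchers lock a branch URL to a concrete commit.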

	rPath := archiver.RelativePath()
	if setting.RepoArchive.Storage.MinioConfig.ServeDirect {
		// If we have a signed url (S3, object storage), redirect to this directly.
		u, err := storage.RepoArchives.URL(rPath, downloadName, nil)
		if u != nil && err == nil {
			ctx.Redirect(u.String())
			return
		}
	}

	// Otherwise stream the archive from the repo-archive storage.
	fr, err := storage.RepoArchives.Open(rPath)
	if err != nil {
		ctx.ServerError("Open", err)
		return
	}
	defer fr.Close()

	contentType := ""
	switch archiver.Type {
	case git.ZIP:
		contentType = "application/zip"
	case git.TARGZ:
		// Per RFC6713.
		contentType = "application/gzip"
	}

	ctx.ServeContent(fr, &context.ServeHeaderOptions{
		ContentType:  contentType,
		Filename:     downloadName,
		LastModified: archiver.CreatedUnix.AsLocalTime(),
	})
}

// GetEditorconfig gets the editor config of a repository
func GetEditorconfig(ctx *context.APIContext) {
	// swagger:operation GET /repos/{owner}/{repo}/editorconfig/{filepath} repository repoGetEditorConfig
	// ---
	// summary: Get the EditorConfig definitions of a file in a repository
	// produces:
	// - application/json
	// parameters:
	// - name: owner
	//   in: path
	//   description: owner of the repo
	//   type: string
	//   required: true
	// - name: repo
	//   in: path
	//   description: name of the repo
	//   type: string
	//   required: true
	// - name: filepath
	//   in: path
	//   description: filepath of file to get
	//   type: string
	//   required: true
	// - name: ref
	//   in: query
	//   description: "The name of the commit/branch/tag. Defaults to the repository’s default branch (usually master)"
	//   type: string
	//   required: false
	// responses:
	//   200:
	//     description: success
	//   "404":
	//     "$ref": "#/responses/notFound"

	ec, _, err := ctx.Repo.GetEditorconfig(ctx.Repo.Commit)
	if err != nil {
		if git.IsErrNotExist(err) {
			ctx.NotFound(err)
		} else {
			ctx.Error(http.StatusInternalServerError, "GetEditorconfig", err)
		}
		return
	}

	fileName := ctx.Params("filename")
	def, err := ec.GetDefinitionForFilename(fileName)
	if def == nil {
		ctx.NotFound(err)
		return
	}
	ctx.JSON(http.StatusOK, def)
}

// canWriteFiles returns true if repository is editable and user has proper access level.
func canWriteFiles(ctx *context.APIContext, branch string) bool {
	return ctx.Repo.CanWriteToBranch(ctx, ctx.Doer, branch) &&
		!ctx.Repo.Repository.IsMirror &&
		!ctx.Repo.Repository.IsArchived
}

// canReadFiles returns true if repository is readable and user has proper access level.
func canReadFiles(r *context.Repository) bool {
	return r.Permission.CanRead(unit.TypeCode)
}
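
// base64Reader decodes a standard base64 string into an in-memory ReadSeeker.
// Illustrative use: base64Reader("aGVsbG8gd29ybGQ=") yields a reader over
// "hello world"; malformed input returns the decoding error instead.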
func base64Reader(s string) (io.ReadSeeker, error) {
	b, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		return nil, err
	}
	return bytes.NewReader(b), nil
}

// ChangeFiles handles API call for modifying multiple files
func ChangeFiles(ctx *context.APIContext) {
	// swagger:operation POST /repos/{owner}/{repo}/contents repository repoChangeFiles
	// ---
	// summary: Modify multiple files in a repository
	// consumes:
	// - application/json
	// produces:
	// - application/json
	// parameters:
	// - name: owner
	//   in: path
	//   description: owner of the repo
	//   type: string
	//   required: true
	// - name: repo
	//   in: path
	//   description: name of the repo
	//   type: string
	//   required: true
	// - name: body
	//   in: body
	//   required: true
	//   schema:
	//     "$ref": "#/definitions/ChangeFilesOptions"
	// responses:
	//   "201":
	//     "$ref": "#/responses/FilesResponse"
	//   "403":
	//     "$ref": "#/responses/error"
	//   "404":
	//     "$ref": "#/responses/notFound"
	//   "413":
	//     "$ref": "#/responses/quotaExceeded"
	//   "422":
	//     "$ref": "#/responses/error"
	//   "423":
	//     "$ref": "#/responses/repoArchivedError"

	apiOpts := web.GetForm(ctx).(*api.ChangeFilesOptions)

	if apiOpts.BranchName == "" {
		apiOpts.BranchName = ctx.Repo.Repository.DefaultBranch
	}

	var files []*files_service.ChangeRepoFile
	for _, file := range apiOpts.Files {
		contentReader, err := base64Reader(file.ContentBase64)
		if err != nil {
			ctx.Error(http.StatusUnprocessableEntity, "Invalid base64 content", err)
			return
		}
		changeRepoFile := &files_service.ChangeRepoFile{
			Operation:     file.Operation,
			TreePath:      file.Path,
			FromTreePath:  file.FromPath,
			ContentReader: contentReader,
			SHA:           file.SHA,
		}
		files = append(files, changeRepoFile)
	}

	opts := &files_service.ChangeRepoFilesOptions{
		Files:     files,
		Message:   apiOpts.Message,
		OldBranch: apiOpts.BranchName,
		NewBranch: apiOpts.NewBranchName,
		Committer: &files_service.IdentityOptions{
			Name:  apiOpts.Committer.Name,
			Email: apiOpts.Committer.Email,
		},
		Author: &files_service.IdentityOptions{
			Name:  apiOpts.Author.Name,
			Email: apiOpts.Author.Email,
		},
		Dates: &files_service.CommitDateOptions{
			Author:    apiOpts.Dates.Author,
			Committer: apiOpts.Dates.Committer,
		},
		Signoff: apiOpts.Signoff,
	}
	if opts.Dates.Author.IsZero() {
		opts.Dates.Author = time.Now()
	}
	if opts.Dates.Committer.IsZero() {
		opts.Dates.Committer = time.Now()
	}

	if opts.Message == "" {
		opts.Message = changeFilesCommitMessage(ctx, files)
	}

	if filesResponse, err := createOrUpdateFiles(ctx, opts); err != nil {
		handleCreateOrUpdateFileError(ctx, err)
	} else {
		ctx.JSON(http.StatusCreated, filesResponse)
	}
}

// CreateFile handles API call for creating a file
func CreateFile(ctx *context.APIContext) {
	// swagger:operation POST /repos/{owner}/{repo}/contents/{filepath} repository repoCreateFile
	// ---
	// summary: Create a file in a repository
	// consumes:
	// - application/json
	// produces:
	// - application/json
	// parameters:
	// - name: owner
	//   in: path
	//   description: owner of the repo
	//   type: string
	//   required: true
	// - name: repo
	//   in: path
	//   description: name of the repo
	//   type: string
	//   required: true
	// - name: filepath
	//   in: path
	//   description: path of the file to create
	//   type: string
	//   required: true
	// - name: body
	//   in: body
	//   required: true
	//   schema:
	//     "$ref": "#/definitions/CreateFileOptions"
	// responses:
	//   "201":
	//     "$ref": "#/responses/FileResponse"
	//   "403":
	//     "$ref": "#/responses/error"
	//   "404":
	//     "$ref": "#/responses/notFound"
	//   "413":
	//     "$ref": "#/responses/quotaExceeded"
	//   "422":
	//     "$ref": "#/responses/error"
	//   "423":
	//     "$ref": "#/responses/repoArchivedError"

	apiOpts := web.GetForm(ctx).(*api.CreateFileOptions)

	if apiOpts.BranchName == "" {
		apiOpts.BranchName = ctx.Repo.Repository.DefaultBranch
	}

	contentReader, err := base64Reader(apiOpts.ContentBase64)
	if err != nil {
		ctx.Error(http.StatusUnprocessableEntity, "Invalid base64 content", err)
		return
	}

	opts := &files_service.ChangeRepoFilesOptions{
		Files: []*files_service.ChangeRepoFile{
			{
				Operation:     "create",
				TreePath:      ctx.Params("*"),
				ContentReader: contentReader,
			},
		},
		Message:   apiOpts.Message,
		OldBranch: apiOpts.BranchName,
		NewBranch: apiOpts.NewBranchName,
		Committer: &files_service.IdentityOptions{
			Name:  apiOpts.Committer.Name,
			Email: apiOpts.Committer.Email,
		},
		Author: &files_service.IdentityOptions{
			Name:  apiOpts.Author.Name,
			Email: apiOpts.Author.Email,
		},
		Dates: &files_service.CommitDateOptions{
			Author:    apiOpts.Dates.Author,
			Committer: apiOpts.Dates.Committer,
		},
		Signoff: apiOpts.Signoff,
	}
	if opts.Dates.Author.IsZero() {
		opts.Dates.Author = time.Now()
	}
	if opts.Dates.Committer.IsZero() {
		opts.Dates.Committer = time.Now()
	}

	if opts.Message == "" {
		opts.Message = changeFilesCommitMessage(ctx, opts.Files)
	}

	if filesResponse, err := createOrUpdateFiles(ctx, opts); err != nil {
		handleCreateOrUpdateFileError(ctx, err)
	} else {
		fileResponse := files_service.GetFileResponseFromFilesResponse(filesResponse, 0)
		ctx.JSON(http.StatusCreated, fileResponse)
	}
}

// UpdateFile handles API call for updating a file
func UpdateFile(ctx *context.APIContext) {
	// swagger:operation PUT /repos/{owner}/{repo}/contents/{filepath} repository repoUpdateFile
	// ---
	// summary: Update a file in a repository
	// consumes:
	// - application/json
	// produces:
	// - application/json
	// parameters:
	// - name: owner
	//   in: path
	//   description: owner of the repo
	//   type: string
	//   required: true
	// - name: repo
	//   in: path
	//   description: name of the repo
	//   type: string
	//   required: true
	// - name: filepath
	//   in: path
	//   description: path of the file to update
	//   type: string
	//   required: true
	// - name: body
	//   in: body
	//   required: true
	//   schema:
	//     "$ref": "#/definitions/UpdateFileOptions"
	// responses:
	//   "200":
	//     "$ref": "#/responses/FileResponse"
	//   "403":
	//     "$ref": "#/responses/error"
	//   "404":
	//     "$ref": "#/responses/notFound"
	//   "413":
	//     "$ref": "#/responses/quotaExceeded"
	//   "422":
	//     "$ref": "#/responses/error"
	//   "423":
	//     "$ref": "#/responses/repoArchivedError"

	apiOpts := web.GetForm(ctx).(*api.UpdateFileOptions)
	if ctx.Repo.Repository.IsEmpty {
		ctx.Error(http.StatusUnprocessableEntity, "RepoIsEmpty", fmt.Errorf("repo is empty"))
		return
	}

	if apiOpts.BranchName == "" {
		apiOpts.BranchName = ctx.Repo.Repository.DefaultBranch
	}

	contentReader, err := base64Reader(apiOpts.ContentBase64)
	if err != nil {
		ctx.Error(http.StatusUnprocessableEntity, "Invalid base64 content", err)
		return
	}

	opts := &files_service.ChangeRepoFilesOptions{
		Files: []*files_service.ChangeRepoFile{
			{
				Operation:     "update",
				ContentReader: contentReader,
				SHA:           apiOpts.SHA,
				FromTreePath:  apiOpts.FromPath,
				TreePath:      ctx.Params("*"),
			},
		},
		Message:   apiOpts.Message,
		OldBranch: apiOpts.BranchName,
		NewBranch: apiOpts.NewBranchName,
		Committer: &files_service.IdentityOptions{
			Name:  apiOpts.Committer.Name,
			Email: apiOpts.Committer.Email,
		},
		Author: &files_service.IdentityOptions{
			Name:  apiOpts.Author.Name,
			Email: apiOpts.Author.Email,
		},
		Dates: &files_service.CommitDateOptions{
			Author:    apiOpts.Dates.Author,
			Committer: apiOpts.Dates.Committer,
		},
		Signoff: apiOpts.Signoff,
	}
	if opts.Dates.Author.IsZero() {
		opts.Dates.Author = time.Now()
	}
	if opts.Dates.Committer.IsZero() {
		opts.Dates.Committer = time.Now()
	}

	if opts.Message == "" {
		opts.Message = changeFilesCommitMessage(ctx, opts.Files)
	}

	if filesResponse, err := createOrUpdateFiles(ctx, opts); err != nil {
		handleCreateOrUpdateFileError(ctx, err)
	} else {
		fileResponse := files_service.GetFileResponseFromFilesResponse(filesResponse, 0)
		ctx.JSON(http.StatusOK, fileResponse)
	}
}

func handleCreateOrUpdateFileError(ctx *context.APIContext, err error) {
	if models.IsErrUserCannotCommit(err) || models.IsErrFilePathProtected(err) {
		ctx.Error(http.StatusForbidden, "Access", err)
		return
	}
	if git_model.IsErrBranchAlreadyExists(err) || models.IsErrFilenameInvalid(err) || models.IsErrSHADoesNotMatch(err) ||
		models.IsErrFilePathInvalid(err) || models.IsErrRepoFileAlreadyExists(err) {
		ctx.Error(http.StatusUnprocessableEntity, "Invalid", err)
		return
	}
	if git_model.IsErrBranchNotExist(err) || git.IsErrBranchNotExist(err) {
		ctx.Error(http.StatusNotFound, "BranchDoesNotExist", err)
		return
	}

	ctx.Error(http.StatusInternalServerError, "UpdateFile", err)
}

// Called from both CreateFile or UpdateFile to handle both
func createOrUpdateFiles(ctx *context.APIContext, opts *files_service.ChangeRepoFilesOptions) (*api.FilesResponse, error) {
	if !canWriteFiles(ctx, opts.OldBranch) {
		return nil, repo_model.ErrUserDoesNotHaveAccessToRepo{
			UserID:   ctx.Doer.ID,
			RepoName: ctx.Repo.Repository.LowerName,
		}
	}

	return files_service.ChangeRepoFiles(ctx, ctx.Repo.Repository, ctx.Doer, opts)
}

// format commit message if empty
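// With the English locale (assumption: repo.editor.add/update/delete render as
// "Add %s", "Update %s" and "Delete %s"), creating a.txt and deleting b.txt
// would yield a message like:
//
//	Add a.txt
//	Delete b.txt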
func changeFilesCommitMessage(ctx *context.APIContext, files []*files_service.ChangeRepoFile) string {
	var (
		createFiles []string
		updateFiles []string
		deleteFiles []string
	)
	for _, file := range files {
		switch file.Operation {
		case "create":
			createFiles = append(createFiles, file.TreePath)
		case "update":
			updateFiles = append(updateFiles, file.TreePath)
		case "delete":
			deleteFiles = append(deleteFiles, file.TreePath)
		}
	}
	message := ""
	if len(createFiles) != 0 {
		message += ctx.Locale.TrString("repo.editor.add", strings.Join(createFiles, ", ")+"\n")
	}
	if len(updateFiles) != 0 {
		message += ctx.Locale.TrString("repo.editor.update", strings.Join(updateFiles, ", ")+"\n")
	}
	if len(deleteFiles) != 0 {
		message += ctx.Locale.TrString("repo.editor.delete", strings.Join(deleteFiles, ", "))
	}
	return strings.Trim(message, "\n")
}

// DeleteFile Delete a file in a repository
func DeleteFile(ctx *context.APIContext) {
	// swagger:operation DELETE /repos/{owner}/{repo}/contents/{filepath} repository repoDeleteFile
	// ---
	// summary: Delete a file in a repository
	// consumes:
	// - application/json
	// produces:
	// - application/json
	// parameters:
	// - name: owner
	//   in: path
	//   description: owner of the repo
	//   type: string
	//   required: true
	// - name: repo
	//   in: path
	//   description: name of the repo
	//   type: string
	//   required: true
	// - name: filepath
	//   in: path
	//   description: path of the file to delete
	//   type: string
	//   required: true
	// - name: body
	//   in: body
	//   required: true
	//   schema:
	//     "$ref": "#/definitions/DeleteFileOptions"
	// responses:
	//   "200":
	//     "$ref": "#/responses/FileDeleteResponse"
	//   "400":
	//     "$ref": "#/responses/error"
	//   "403":
	//     "$ref": "#/responses/error"
	//   "404":
	//     "$ref": "#/responses/error"
// "413":
|
|
|
|
|
// "$ref": "#/responses/quotaExceeded"
|
2023-09-21 19:43:29 -04:00
|
|
|
|
// "423":
|
|
|
|
|
// "$ref": "#/responses/repoArchivedError"
|
2019-12-20 12:07:12 -05:00
|
|
|
|
|
2021-01-26 10:36:53 -05:00
|
|
|
|
apiOpts := web.GetForm(ctx).(*api.DeleteFileOptions)
|
2022-04-28 11:45:33 -04:00
|
|
|
|
if !canWriteFiles(ctx, apiOpts.BranchName) {
|
2022-06-13 05:37:59 -04:00
|
|
|
|
ctx.Error(http.StatusForbidden, "DeleteFile", repo_model.ErrUserDoesNotHaveAccessToRepo{
|
2022-03-22 03:03:22 -04:00
|
|
|
|
UserID: ctx.Doer.ID,
|
2019-04-17 12:06:35 -04:00
|
|
|
|
RepoName: ctx.Repo.Repository.LowerName,
|
|
|
|
|
})
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-04-20 12:47:05 -04:00
|
|
|
|
if apiOpts.BranchName == "" {
|
|
|
|
|
apiOpts.BranchName = ctx.Repo.Repository.DefaultBranch
|
|
|
|
|
}
|
|
|
|
|
|
2023-05-29 05:41:35 -04:00
|
|
|
|
opts := &files_service.ChangeRepoFilesOptions{
|
|
|
|
|
Files: []*files_service.ChangeRepoFile{
|
|
|
|
|
{
|
|
|
|
|
Operation: "delete",
|
|
|
|
|
SHA: apiOpts.SHA,
|
|
|
|
|
TreePath: ctx.Params("*"),
|
|
|
|
|
},
|
|
|
|
|
},
|
2019-04-17 12:06:35 -04:00
|
|
|
|
Message: apiOpts.Message,
|
|
|
|
|
OldBranch: apiOpts.BranchName,
|
|
|
|
|
NewBranch: apiOpts.NewBranchName,
|
2021-11-24 02:56:24 -05:00
|
|
|
|
Committer: &files_service.IdentityOptions{
|
2019-04-17 12:06:35 -04:00
|
|
|
|
Name: apiOpts.Committer.Name,
|
|
|
|
|
Email: apiOpts.Committer.Email,
|
|
|
|
|
},
|
2021-11-24 02:56:24 -05:00
|
|
|
|
Author: &files_service.IdentityOptions{
|
2019-04-17 12:06:35 -04:00
|
|
|
|
Name: apiOpts.Author.Name,
|
|
|
|
|
Email: apiOpts.Author.Email,
|
|
|
|
|
},
|
2021-11-24 02:56:24 -05:00
|
|
|
|
Dates: &files_service.CommitDateOptions{
|
2019-12-23 21:33:52 -05:00
|
|
|
|
Author: apiOpts.Dates.Author,
|
|
|
|
|
Committer: apiOpts.Dates.Committer,
|
|
|
|
|
},
|
2021-01-29 03:57:45 -05:00
|
|
|
|
Signoff: apiOpts.Signoff,
|
2019-12-23 21:33:52 -05:00
|
|
|
|
}
|
|
|
|
|
if opts.Dates.Author.IsZero() {
|
|
|
|
|
opts.Dates.Author = time.Now()
|
|
|
|
|
}
|
|
|
|
|
if opts.Dates.Committer.IsZero() {
|
|
|
|
|
opts.Dates.Committer = time.Now()
|
2019-04-17 12:06:35 -04:00
|
|
|
|
}
|
|
|
|
|
|
2019-06-29 11:19:24 -04:00
|
|
|
|
if opts.Message == "" {
|
2023-05-29 05:41:35 -04:00
|
|
|
|
opts.Message = changeFilesCommitMessage(ctx, opts.Files)
|
2019-06-29 11:19:24 -04:00
|
|
|
|
}
|
|
|
|
|
|
2023-05-29 05:41:35 -04:00
|
|
|
|
if filesResponse, err := files_service.ChangeRepoFiles(ctx, ctx.Repo.Repository, ctx.Doer, opts); err != nil {
|
2020-04-15 01:18:51 -04:00
|
|
|
|
if git.IsErrBranchNotExist(err) || models.IsErrRepoFileDoesNotExist(err) || git.IsErrNotExist(err) {
|
|
|
|
|
ctx.Error(http.StatusNotFound, "DeleteFile", err)
|
|
|
|
|
return
|
2023-06-29 06:03:20 -04:00
|
|
|
|
} else if git_model.IsErrBranchAlreadyExists(err) ||
|
2020-04-15 01:18:51 -04:00
|
|
|
|
models.IsErrFilenameInvalid(err) ||
|
|
|
|
|
models.IsErrSHADoesNotMatch(err) ||
|
|
|
|
|
models.IsErrCommitIDDoesNotMatch(err) ||
|
|
|
|
|
models.IsErrSHAOrCommitIDNotProvided(err) {
|
|
|
|
|
ctx.Error(http.StatusBadRequest, "DeleteFile", err)
|
|
|
|
|
return
|
|
|
|
|
} else if models.IsErrUserCannotCommit(err) {
|
|
|
|
|
ctx.Error(http.StatusForbidden, "DeleteFile", err)
|
|
|
|
|
return
|
|
|
|
|
}
|
2019-04-17 12:06:35 -04:00
|
|
|
|
ctx.Error(http.StatusInternalServerError, "DeleteFile", err)
|
|
|
|
|
} else {
|
2023-05-29 05:41:35 -04:00
|
|
|
|
fileResponse := files_service.GetFileResponseFromFilesResponse(filesResponse, 0)
|
2020-04-15 01:18:51 -04:00
|
|
|
|
ctx.JSON(http.StatusOK, fileResponse) // FIXME on APIv2: return http.StatusNoContent
|
2019-04-17 12:06:35 -04:00
|
|
|
|
}
|
|
|
|
|
}
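
// An illustrative request, not part of the original source; owner, repo, sha
// and branch are placeholders, and the JSON field names are assumed to follow
// DeleteFileOptions:
//
//	DELETE /api/v1/repos/{owner}/{repo}/contents/docs/old.md
//	{"sha": "<blob sha of the file>", "message": "remove docs/old.md", "branch": "main"}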

// GetContents Get the metadata and contents (if a file) of an entry in a repository, or a list of entries if a dir
func GetContents(ctx *context.APIContext) {
	// swagger:operation GET /repos/{owner}/{repo}/contents/{filepath} repository repoGetContents
	// ---
	// summary: Gets the metadata and contents (if a file) of an entry in a repository, or a list of entries if a dir
	// produces:
	// - application/json
	// parameters:
	// - name: owner
	//   in: path
	//   description: owner of the repo
	//   type: string
	//   required: true
	// - name: repo
	//   in: path
	//   description: name of the repo
	//   type: string
	//   required: true
	// - name: filepath
	//   in: path
	//   description: path of the dir, file, symlink or submodule in the repo
	//   type: string
	//   required: true
	// - name: ref
	//   in: query
	//   description: "The name of the commit/branch/tag. Defaults to the repository's default branch (usually master)"
	//   type: string
	//   required: false
	// responses:
	//   "200":
	//     "$ref": "#/responses/ContentsResponse"
	//   "404":
	//     "$ref": "#/responses/notFound"

	if !canReadFiles(ctx.Repo) {
		ctx.Error(http.StatusInternalServerError, "GetContentsOrList", repo_model.ErrUserDoesNotHaveAccessToRepo{
			UserID:   ctx.Doer.ID,
			RepoName: ctx.Repo.Repository.LowerName,
		})
		return
	}

	treePath := ctx.Params("*")
	ref := ctx.FormTrim("ref")

	if fileList, err := files_service.GetContentsOrList(ctx, ctx.Repo.Repository, treePath, ref); err != nil {
		if git.IsErrNotExist(err) {
			ctx.NotFound("GetContentsOrList", err)
			return
		}
		ctx.Error(http.StatusInternalServerError, "GetContentsOrList", err)
	} else {
		ctx.JSON(http.StatusOK, fileList)
	}
}
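
// An illustrative request, not part of the original source; owner, repo, path
// and ref are placeholders:
//
//	GET /api/v1/repos/{owner}/{repo}/contents/src/main.go?ref=develop
//
// A file yields a single ContentsResponse; a directory yields a list of them.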

// GetContentsList Get the metadata of all the entries of the root dir
func GetContentsList(ctx *context.APIContext) {
	// swagger:operation GET /repos/{owner}/{repo}/contents repository repoGetContentsList
	// ---
	// summary: Gets the metadata of all the entries of the root dir
	// produces:
	// - application/json
	// parameters:
	// - name: owner
	//   in: path
	//   description: owner of the repo
	//   type: string
	//   required: true
	// - name: repo
	//   in: path
	//   description: name of the repo
	//   type: string
	//   required: true
	// - name: ref
	//   in: query
	//   description: "The name of the commit/branch/tag. Defaults to the repository's default branch (usually master)"
	//   type: string
	//   required: false
	// responses:
	//   "200":
	//     "$ref": "#/responses/ContentsListResponse"
	//   "404":
	//     "$ref": "#/responses/notFound"

	// same as GetContents(); this function exists because swagger fails if the path is empty in the GetContents() interface
	GetContents(ctx)
}
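
// An illustrative request, not part of the original source:
//
//	GET /api/v1/repos/{owner}/{repo}/contents?ref=main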