2019-02-12 08:07:31 -05:00
|
|
|
// Copyright 2019 The Gitea Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a MIT-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
2019-04-17 12:06:35 -04:00
|
|
|
package repofiles
|
2019-02-12 08:07:31 -05:00
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"os"
|
|
|
|
"path"
|
|
|
|
"strings"
|
|
|
|
|
2019-03-27 05:33:00 -04:00
|
|
|
"code.gitea.io/gitea/models"
|
2019-02-12 08:07:31 -05:00
|
|
|
"code.gitea.io/gitea/modules/lfs"
|
|
|
|
"code.gitea.io/gitea/modules/setting"
|
2020-09-08 11:45:10 -04:00
|
|
|
"code.gitea.io/gitea/modules/storage"
|
2019-02-12 08:07:31 -05:00
|
|
|
)
|
|
|
|
|
|
|
|
// UploadRepoFileOptions contains the uploaded repository file options
type UploadRepoFileOptions struct {
	LastCommitID string   // last known commit ID of the branch (not read in this file; presumably used by callers for conflict detection — verify at caller)
	OldBranch    string   // existing branch the temporary repository is cloned from
	NewBranch    string   // branch the resulting commit is pushed to (may equal OldBranch)
	TreePath     string   // directory inside the repository that receives the uploaded files
	Message      string   // commit message passed to CommitTree
	Files        []string // In UUID format.
	Signoff      bool     // forwarded to CommitTree; when true a sign-off is applied to the commit
}
|
|
|
|
|
|
|
|
// uploadInfo pairs one uploaded file record with the LFS metadata created for
// it during UploadRepoFiles; lfsMetaObject stays nil for files that are not
// routed through LFS.
type uploadInfo struct {
	upload        *models.Upload        // uploaded file record (provides Name and LocalPath)
	lfsMetaObject *models.LFSMetaObject // set only when the "filter" attribute marks the path as lfs
}
|
|
|
|
|
|
|
|
func cleanUpAfterFailure(infos *[]uploadInfo, t *TemporaryUploadRepository, original error) error {
|
|
|
|
for _, info := range *infos {
|
|
|
|
if info.lfsMetaObject == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if !info.lfsMetaObject.Existing {
|
2019-10-28 14:31:55 -04:00
|
|
|
if _, err := t.repo.RemoveLFSMetaObjectByOid(info.lfsMetaObject.Oid); err != nil {
|
2019-02-12 08:07:31 -05:00
|
|
|
original = fmt.Errorf("%v, %v", original, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return original
|
|
|
|
}
|
|
|
|
|
|
|
|
// UploadRepoFiles uploads files to the given repository
|
|
|
|
func UploadRepoFiles(repo *models.Repository, doer *models.User, opts *UploadRepoFileOptions) error {
|
|
|
|
if len(opts.Files) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
uploads, err := models.GetUploadsByUUIDs(opts.Files)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("GetUploadsByUUIDs [uuids: %v]: %v", opts.Files, err)
|
|
|
|
}
|
|
|
|
|
2019-11-02 03:37:05 -04:00
|
|
|
names := make([]string, len(uploads))
|
|
|
|
infos := make([]uploadInfo, len(uploads))
|
|
|
|
for i, upload := range uploads {
|
|
|
|
// Check file is not lfs locked, will return nil if lock setting not enabled
|
|
|
|
filepath := path.Join(opts.TreePath, upload.Name)
|
|
|
|
lfsLock, err := repo.GetTreePathLock(filepath)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if lfsLock != nil && lfsLock.OwnerID != doer.ID {
|
|
|
|
return models.ErrLFSFileLocked{RepoID: repo.ID, Path: filepath, UserName: lfsLock.Owner.Name}
|
|
|
|
}
|
|
|
|
|
|
|
|
names[i] = upload.Name
|
|
|
|
infos[i] = uploadInfo{upload: upload}
|
|
|
|
}
|
|
|
|
|
2019-02-12 08:07:31 -05:00
|
|
|
t, err := NewTemporaryUploadRepository(repo)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-06-12 15:41:28 -04:00
|
|
|
defer t.Close()
|
2019-02-12 08:07:31 -05:00
|
|
|
if err := t.Clone(opts.OldBranch); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := t.SetDefaultIndex(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-10-11 20:13:27 -04:00
|
|
|
var filename2attribute2info map[string]map[string]string
|
|
|
|
if setting.LFS.StartServer {
|
|
|
|
filename2attribute2info, err = t.CheckAttribute("filter", names...)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-02-12 08:07:31 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// Copy uploaded files into repository.
|
|
|
|
for i, uploadInfo := range infos {
|
|
|
|
file, err := os.Open(uploadInfo.upload.LocalPath())
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer file.Close()
|
|
|
|
|
|
|
|
var objectHash string
|
2019-10-11 20:13:27 -04:00
|
|
|
if setting.LFS.StartServer && filename2attribute2info[uploadInfo.upload.Name] != nil && filename2attribute2info[uploadInfo.upload.Name]["filter"] == "lfs" {
|
2019-02-12 08:07:31 -05:00
|
|
|
// Handle LFS
|
|
|
|
// FIXME: Inefficient! this should probably happen in models.Upload
|
|
|
|
oid, err := models.GenerateLFSOid(file)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
fileInfo, err := file.Stat()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
uploadInfo.lfsMetaObject = &models.LFSMetaObject{Oid: oid, Size: fileInfo.Size(), RepositoryID: t.repo.ID}
|
|
|
|
|
|
|
|
if objectHash, err = t.HashObject(strings.NewReader(uploadInfo.lfsMetaObject.Pointer())); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
infos[i] = uploadInfo
|
|
|
|
|
2019-06-12 15:41:28 -04:00
|
|
|
} else if objectHash, err = t.HashObject(file); err != nil {
|
|
|
|
return err
|
2019-02-12 08:07:31 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// Add the object to the index
|
|
|
|
if err := t.AddObjectToIndex("100644", objectHash, path.Join(opts.TreePath, uploadInfo.upload.Name)); err != nil {
|
|
|
|
return err
|
|
|
|
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now write the tree
|
|
|
|
treeHash, err := t.WriteTree()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-04-17 12:06:35 -04:00
|
|
|
// make author and committer the doer
|
|
|
|
author := doer
|
|
|
|
committer := doer
|
|
|
|
|
2019-02-12 08:07:31 -05:00
|
|
|
// Now commit the tree
|
2021-01-29 03:57:45 -05:00
|
|
|
commitHash, err := t.CommitTree(author, committer, treeHash, opts.Message, opts.Signoff)
|
2019-02-12 08:07:31 -05:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now deal with LFS objects
|
|
|
|
for _, uploadInfo := range infos {
|
|
|
|
if uploadInfo.lfsMetaObject == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
uploadInfo.lfsMetaObject, err = models.NewLFSMetaObject(uploadInfo.lfsMetaObject)
|
|
|
|
if err != nil {
|
|
|
|
// OK Now we need to cleanup
|
|
|
|
return cleanUpAfterFailure(&infos, t, err)
|
|
|
|
}
|
|
|
|
// Don't move the files yet - we need to ensure that
|
|
|
|
// everything can be inserted first
|
|
|
|
}
|
|
|
|
|
|
|
|
// OK now we can insert the data into the store - there's no way to clean up the store
|
|
|
|
// once it's in there, it's in there.
|
2020-09-08 11:45:10 -04:00
|
|
|
contentStore := &lfs.ContentStore{ObjectStorage: storage.LFS}
|
2019-02-12 08:07:31 -05:00
|
|
|
for _, uploadInfo := range infos {
|
|
|
|
if uploadInfo.lfsMetaObject == nil {
|
|
|
|
continue
|
|
|
|
}
|
2020-09-08 11:45:10 -04:00
|
|
|
exist, err := contentStore.Exists(uploadInfo.lfsMetaObject)
|
|
|
|
if err != nil {
|
|
|
|
return cleanUpAfterFailure(&infos, t, err)
|
|
|
|
}
|
|
|
|
if !exist {
|
2019-02-12 08:07:31 -05:00
|
|
|
file, err := os.Open(uploadInfo.upload.LocalPath())
|
|
|
|
if err != nil {
|
|
|
|
return cleanUpAfterFailure(&infos, t, err)
|
|
|
|
}
|
|
|
|
defer file.Close()
|
|
|
|
// FIXME: Put regenerates the hash and copies the file over.
|
|
|
|
// I guess this strictly ensures the soundness of the store but this is inefficient.
|
|
|
|
if err := contentStore.Put(uploadInfo.lfsMetaObject, file); err != nil {
|
|
|
|
// OK Now we need to cleanup
|
|
|
|
// Can't clean up the store, once uploaded there they're there.
|
|
|
|
return cleanUpAfterFailure(&infos, t, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Then push this tree to NewBranch
|
|
|
|
if err := t.Push(doer, commitHash, opts.NewBranch); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return models.DeleteUploads(uploads...)
|
|
|
|
}
|