// Copyright 2019 The Gitea Authors. All rights reserved.
// Copyright 2018 Jonas Franz. All rights reserved.
// SPDX-License-Identifier: MIT
package migrations
import (
"context"
"fmt"
"net"
"net/url"
"path/filepath"
"strings"
"code.gitea.io/gitea/models"
repo_model "code.gitea.io/gitea/models/repo"
system_model "code.gitea.io/gitea/models/system"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/hostmatcher"
"code.gitea.io/gitea/modules/log"
base "code.gitea.io/gitea/modules/migration"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/modules/util"
)
// MigrateOptions is an alias of base.MigrateOptions
type MigrateOptions = base.MigrateOptions
var (
factories []base.DownloaderFactory
allowList *hostmatcher.HostMatchList
blockList *hostmatcher.HostMatchList
)
// RegisterDownloaderFactory registers a downloader factory
func RegisterDownloaderFactory(factory base.DownloaderFactory) {
factories = append(factories, factory)
}
// IsMigrateURLAllowed checks whether a URL is allowed to be migrated from
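// For example (a sketch; the actual result depends on the configured allow/block lists):
// an https clone address such as https://example.com/org/repo.git passes the scheme check and is
// then matched against the allow/block lists, while a file:// URL or bare local path is only
// accepted when the doer may import from the local filesystem and the path is an absolute directory.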
func IsMigrateURLAllowed(remoteURL string, doer *user_model.User) error {
// The remote address can be an HTTP/HTTPS/Git URL or a local path.
u, err := url.Parse(remoteURL)
if err != nil {
return &models.ErrInvalidCloneAddr{IsURLError: true, Host: remoteURL}
}
if u.Scheme == "file" || u.Scheme == "" {
if !doer.CanImportLocal() {
return &models.ErrInvalidCloneAddr{Host: "<LOCAL_FILESYSTEM>", IsPermissionDenied: true, LocalPath: true}
}
isAbs := filepath.IsAbs(u.Host + u.Path)
if !isAbs {
return &models.ErrInvalidCloneAddr{Host: "<LOCAL_FILESYSTEM>", IsInvalidPath: true, LocalPath: true}
}
isDir, err := util.IsDir(u.Host + u.Path)
if err != nil {
log.Error("Unable to check if %s is a directory: %v", u.Host+u.Path, err)
return err
}
if !isDir {
return &models.ErrInvalidCloneAddr{Host: "<LOCAL_FILESYSTEM>", IsInvalidPath: true, LocalPath: true}
}
return nil
}
if u.Scheme == "git" && u.Port() != "" && (strings.Contains(remoteURL, "%0d") || strings.Contains(remoteURL, "%0a")) {
return &models.ErrInvalidCloneAddr{Host: u.Host, IsURLError: true}
}
if u.Opaque != "" || u.Scheme != "" && u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "git" {
return &models.ErrInvalidCloneAddr{Host: u.Host, IsProtocolInvalid: true, IsPermissionDenied: true, IsURLError: true}
}
hostName, _, err := net.SplitHostPort(u.Host)
if err != nil {
// u.Host can be "host" or "host:port"
err = nil //nolint
hostName = u.Host
}
// Some users only use a proxy and have no DNS resolver available, so it is safe to ignore the LookupIP error
addrList, _ := net.LookupIP(hostName)
return checkByAllowBlockList(hostName, addrList)
}
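// checkByAllowBlockList checks the host name and its resolved IP addresses against the configured
// allow and block lists: the block list always applies, and when an allow list is configured the
// host name (or one of its addresses) must also match it.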
func checkByAllowBlockList(hostName string, addrList []net.IP) error {
var ipAllowed bool
var ipBlocked bool
for _, addr := range addrList {
ipAllowed = ipAllowed || allowList.MatchIPAddr(addr)
ipBlocked = ipBlocked || blockList.MatchIPAddr(addr)
}
var blockedError error
if blockList.MatchHostName(hostName) || ipBlocked {
blockedError = &models.ErrInvalidCloneAddr{Host: hostName, IsPermissionDenied: true}
}
// if we have an allow-list, check it before returning so that the more accurate error is reported
if !allowList.IsEmpty() {
if !allowList.MatchHostName(hostName) && !ipAllowed {
return &models.ErrInvalidCloneAddr{Host: hostName, IsPermissionDenied: true}
}
}
// otherwise, we always follow the block list
return blockedError
}
// MigrateRepository migrates a repository according to the given MigrateOptions
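// A minimal usage sketch (hypothetical values; assumes a downloader factory is registered for the
// requested service and that the clone address passes IsMigrateURLAllowed):
//
//	repo, err := MigrateRepository(ctx, doer, "owner", base.MigrateOptions{
//		CloneAddr:      "https://example.com/owner/repo.git",
//		RepoName:       "repo",
//		GitServiceType: structs.GiteaService,
//		Issues:         true,
//	}, nil)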
func MigrateRepository(ctx context.Context, doer *user_model.User, ownerName string, opts base.MigrateOptions, messenger base.Messenger) (*repo_model.Repository, error) {
err := IsMigrateURLAllowed(opts.CloneAddr, doer)
if err != nil {
return nil, err
}
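// A custom LFS endpoint, if configured, must pass the same allow/block checks as the clone address.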
if opts.LFS && len(opts.LFSEndpoint) > 0 {
err := IsMigrateURLAllowed(opts.LFSEndpoint, doer)
if err != nil {
return nil, err
}
}
downloader, err := newDownloader(ctx, ownerName, opts)
if err != nil {
return nil, err
}
uploader := NewGiteaLocalUploader(ctx, doer, ownerName, opts.RepoName)
uploader.gitServiceType = opts.GitServiceType
if err := migrateRepository(ctx, doer, downloader, uploader, opts, messenger); err != nil {
if err1 := uploader.Rollback(); err1 != nil {
log.Error("rollback failed: %v", err1)
}
if err2 := system_model.CreateRepositoryNotice(fmt.Sprintf("Migrate repository from %s failed: %v", opts.OriginalURL, err)); err2 != nil {
log.Error("create respotiry notice failed: ", err2)
}
return nil, err
}
return uploader.repo, nil
}
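// getFactoryFromServiceType returns the registered downloader factory for the given service type,
// or nil when no factory has been registered for it.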
func getFactoryFromServiceType(serviceType structs.GitServiceType) base.DownloaderFactory {
for _, factory := range factories {
if factory.GitServiceType() == serviceType {
return factory
}
}
return nil
}
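// newDownloader creates a Downloader for the requested service type. When no matching factory is
// registered it falls back to a plain git downloader, and when retries are configured it wraps the
// result in a RetryDownloader.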
func newDownloader(ctx context.Context, ownerName string, opts base.MigrateOptions) (base.Downloader, error) {
var (
downloader base.Downloader
err error
)
if factory := getFactoryFromServiceType(opts.GitServiceType); factory != nil {
downloader, err = factory.New(ctx, opts)
if err != nil {
return nil, err
}
}
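// No factory matched the requested service type: fall back to a plain git clone, which can only
// migrate the git data (and the wiki), so disable every issue/PR related option.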
if downloader == nil {
opts.Wiki = true
opts.Milestones = false
opts.Labels = false
opts.Releases = false
opts.Comments = false
opts.Issues = false
opts.PullRequests = false
downloader = NewPlainGitDownloader(ownerName, opts.RepoName, opts.CloneAddr)
log.Trace("Will migrate from git: %s", opts.OriginalURL)
}
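// Wrap the downloader with retry behavior when more than one attempt is configured.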
if setting.Migrations.MaxAttempts > 1 {
downloader = base.NewRetryDownloader(ctx, downloader, setting.Migrations.MaxAttempts, setting.Migrations.RetryBackoff)
}
return downloader, nil
}
// migrateRepository downloads information and then uploads it to the Uploader. This simple
// process works well for a small repository; for a big repository it is better to save all
// the data to disk before uploading.
func migrateRepository(ctx context.Context, doer *user_model.User, downloader base.Downloader, uploader base.Uploader, opts base.MigrateOptions, messenger base.Messenger) error {
if messenger == nil {
messenger = base.NilMessenger
}
repo, err := downloader.GetRepoInfo()
if err != nil {
if !base.IsErrNotSupported(err) {
return err
}
log.Info("migrating repo infos is not supported, ignored")
}
repo.IsPrivate = opts.Private
repo.IsMirror = opts.Mirror
if opts.Description != "" {
repo.Description = opts.Description
}
if repo.CloneURL, err = downloader.FormatCloneURL(opts, repo.CloneURL); err != nil {
return err
}
// SECURITY: If the downloader is not a RepositoryRestorer then we need to recheck the CloneURL
if _, ok := downloader.(*RepositoryRestorer); !ok {
// Now the clone URL can be rewritten by the downloader so we must recheck
if err := IsMigrateURLAllowed(repo.CloneURL, doer); err != nil {
return err
}
// SECURITY: Ensure that we haven't been redirected from an external to a local filesystem
// Now we know all of these must parse
cloneAddrURL, _ := url.Parse(opts.CloneAddr)
cloneURL, _ := url.Parse(repo.CloneURL)
if cloneURL.Scheme == "file" || cloneURL.Scheme == "" {
if cloneAddrURL.Scheme != "file" && cloneAddrURL.Scheme != "" {
return fmt.Errorf("repo info has changed from external to local filesystem")
}
}
// We don't actually need to check the OriginalURL as it isn't used anywhere
}
log.Trace("migrating git data from %s", repo.CloneURL)
messenger("repo.migrate.migrating_git")
if err = uploader.CreateRepo(repo, opts); err != nil {
return err
}
defer uploader.Close()
log.Trace("migrating topics")
messenger("repo.migrate.migrating_topics")
topics, err := downloader.GetTopics()
if err != nil {
if !base.IsErrNotSupported(err) {
return err
}
log.Warn("migrating topics is not supported, ignored")
}
if len(topics) != 0 {
if err = uploader.CreateTopics(topics...); err != nil {
return err
}
}
if opts.Milestones {
log.Trace("migrating milestones")
messenger("repo.migrate.migrating_milestones")
milestones, err := downloader.GetMilestones()
if err != nil {
if !base.IsErrNotSupported(err) {
return err
}
log.Warn("migrating milestones is not supported, ignored")
}
msBatchSize := uploader.MaxBatchInsertSize("milestone")
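// Insert milestones in batches, never exceeding the uploader's maximum batch insert size.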
for len(milestones) > 0 {
if len(milestones) < msBatchSize {
msBatchSize = len(milestones)
}
if err := uploader.CreateMilestones(milestones[:msBatchSize]...); err != nil {
return err
}
milestones = milestones[msBatchSize:]
}
}
if opts.Labels {
log.Trace("migrating labels")
messenger("repo.migrate.migrating_labels")
labels, err := downloader.GetLabels()
if err != nil {
if !base.IsErrNotSupported(err) {
return err
}
log.Warn("migrating labels is not supported, ignored")
}
lbBatchSize := uploader.MaxBatchInsertSize("label")
for len(labels) > 0 {
if len(labels) < lbBatchSize {
lbBatchSize = len(labels)
}
if err := uploader.CreateLabels(labels[:lbBatchSize]...); err != nil {
return err
}
labels = labels[lbBatchSize:]
}
}
if opts.Releases {
log.Trace("migrating releases")
messenger("repo.migrate.migrating_releases")
releases, err := downloader.GetReleases()
if err != nil {
if !base.IsErrNotSupported(err) {
return err
}
log.Warn("migrating releases is not supported, ignored")
}
relBatchSize := uploader.MaxBatchInsertSize("release")
for len(releases) > 0 {
if len(releases) < relBatchSize {
relBatchSize = len(releases)
}
if err = uploader.CreateReleases(releases[:relBatchSize]...); err != nil {
return err
}
releases = releases[relBatchSize:]
}
// Once all releases (if any) are inserted, sync any remaining non-release tags
if err = uploader.SyncTags(); err != nil {
return err
}
}
var (
commentBatchSize = uploader.MaxBatchInsertSize("comment")
reviewBatchSize = uploader.MaxBatchInsertSize("review")
)
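// Some downloaders can list every comment of the repository in one paginated stream; in that case
// the per-issue and per-pull-request comment migration below is skipped and GetAllComments is used
// at the end instead.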
supportAllComments := downloader.SupportGetRepoComments()
if opts.Issues {
log.Trace("migrating issues and comments")
messenger("repo.migrate.migrating_issues")
issueBatchSize := uploader.MaxBatchInsertSize("issue")
for i := 1; ; i++ {
issues, isEnd, err := downloader.GetIssues(i, issueBatchSize)
if err != nil {
if !base.IsErrNotSupported(err) {
return err
}
log.Warn("migrating issues is not supported, ignored")
break
}
if err := uploader.CreateIssues(issues...); err != nil {
return err
}
if opts.Comments && !supportAllComments {
allComments := make([]*base.Comment, 0, commentBatchSize)
for _, issue := range issues {
log.Trace("migrating issue %d's comments", issue.Number)
comments, _, err := downloader.GetComments(issue)
if err != nil {
if !base.IsErrNotSupported(err) {
return err
}
log.Warn("migrating comments is not supported, ignored")
}
allComments = append(allComments, comments...)
if len(allComments) >= commentBatchSize {
if err = uploader.CreateComments(allComments[:commentBatchSize]...); err != nil {
return err
}
allComments = allComments[commentBatchSize:]
}
}
if len(allComments) > 0 {
if err = uploader.CreateComments(allComments...); err != nil {
return err
}
}
}
if isEnd {
break
}
}
}
if opts.PullRequests {
log.Trace("migrating pull requests and comments")
messenger("repo.migrate.migrating_pulls")
prBatchSize := uploader.MaxBatchInsertSize("pullrequest")
for i := 1; ; i++ {
prs, isEnd, err := downloader.GetPullRequests(i, prBatchSize)
if err != nil {
if !base.IsErrNotSupported(err) {
return err
}
log.Warn("migrating pull requests is not supported, ignored")
break
}
if err := uploader.CreatePullRequests(prs...); err != nil {
return err
}
if opts.Comments {
if !supportAllComments {
// plain comments
allComments := make([]*base.Comment, 0, commentBatchSize)
for _, pr := range prs {
log.Trace("migrating pull request %d's comments", pr.Number)
comments, _, err := downloader.GetComments(pr)
if err != nil {
if !base.IsErrNotSupported(err) {
return err
}
log.Warn("migrating comments is not supported, ignored")
}
allComments = append(allComments, comments...)
if len(allComments) >= commentBatchSize {
if err = uploader.CreateComments(allComments[:commentBatchSize]...); err != nil {
return err
}
allComments = allComments[commentBatchSize:]
}
}
if len(allComments) > 0 {
if err = uploader.CreateComments(allComments...); err != nil {
return err
}
}
}
// migrate reviews
allReviews := make([]*base.Review, 0, reviewBatchSize)
for _, pr := range prs {
reviews, err := downloader.GetReviews(pr)
if err != nil {
if !base.IsErrNotSupported(err) {
return err
}
log.Warn("migrating reviews is not supported, ignored")
break
}
allReviews = append(allReviews, reviews...)
if len(allReviews) >= reviewBatchSize {
if err = uploader.CreateReviews(allReviews[:reviewBatchSize]...); err != nil {
return err
}
allReviews = allReviews[reviewBatchSize:]
}
}
if len(allReviews) > 0 {
if err = uploader.CreateReviews(allReviews...); err != nil {
return err
}
}
}
if isEnd {
break
}
}
}
if opts.Comments && supportAllComments {
log.Trace("migrating comments")
for i := 1; ; i++ {
comments, isEnd, err := downloader.GetAllComments(i, commentBatchSize)
if err != nil {
return err
}
if err := uploader.CreateComments(comments...); err != nil {
return err
}
if isEnd {
break
}
}
}
return uploader.Finish()
}
// Init initializes the migrations service
func Init() error {
// TODO: maybe we can deprecate these legacy ALLOWED_DOMAINS/ALLOW_LOCALNETWORKS/BLOCKED_DOMAINS, use ALLOWED_HOST_LIST/BLOCKED_HOST_LIST instead
blockList = hostmatcher.ParseSimpleMatchList("migrations.BLOCKED_DOMAINS", setting.Migrations.BlockedDomains)
allowList = hostmatcher.ParseSimpleMatchList("migrations.ALLOWED_DOMAINS/ALLOW_LOCALNETWORKS", setting.Migrations.AllowedDomains)
if allowList.IsEmpty() {
// the default policy is that the migration module can access external hosts
allowList.AppendBuiltin(hostmatcher.MatchBuiltinExternal)
}
if setting.Migrations.AllowLocalNetworks {
allowList.AppendBuiltin(hostmatcher.MatchBuiltinPrivate)
allowList.AppendBuiltin(hostmatcher.MatchBuiltinLoopback)
}
// TODO: at the moment, if ALLOW_LOCALNETWORKS=false, ALLOWED_DOMAINS=domain.com, and domain.com has IP 127.0.0.1, then it is still allowed.
// If we want to block such a case, the private and loopback ranges should be added to the blockList when ALLOW_LOCALNETWORKS=false.
if setting.Proxy.Enabled && setting.Proxy.ProxyURLFixed != nil {
allowList.AppendPattern(setting.Proxy.ProxyURLFixed.Host)
}
return nil
}