Mirror of https://codeberg.org/forgejo/forgejo.git (synced 2024-12-22 12:54:53 -05:00)

Update to go-git v5.1.0 (#11936)

Signed-off-by: Andrew Thornton <art27@cantab.net>
Co-authored-by: techknowlogick <techknowlogick@gitea.io>

Parent: 6bf78d2b57
Commit: 1426126690

76 changed files with 3134 additions and 556 deletions
go.mod (8 changed lines)
@@ -39,7 +39,7 @@ require (
 	github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a // indirect
 	github.com/go-enry/go-enry/v2 v2.5.2
 	github.com/go-git/go-billy/v5 v5.0.0
-	github.com/go-git/go-git/v5 v5.0.0
+	github.com/go-git/go-git/v5 v5.1.0
 	github.com/go-openapi/jsonreference v0.19.3 // indirect
 	github.com/go-redis/redis v6.15.2+incompatible
 	github.com/go-sql-driver/mysql v1.4.1
@@ -101,10 +101,10 @@ require (
 	github.com/yohcop/openid-go v1.0.0
 	github.com/yuin/goldmark v1.1.25
 	github.com/yuin/goldmark-meta v0.0.0-20191126180153-f0638e958b60
-	golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79
-	golang.org/x/net v0.0.0-20200506145744-7e3656a0809f
+	golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9
+	golang.org/x/net v0.0.0-20200602114024-627f9648deb9
 	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
-	golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f
+	golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1
 	golang.org/x/text v0.3.2
 	golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 // indirect
 	golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224
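Not part of the diff itself, but as a hedged illustration of what a dependency bump like this means for a built binary: Go's runtime/debug package can report the module versions that were actually compiled in, which is one way to confirm the upgrade took effect. The program below is only a sketch; the module path is the real one from go.mod, everything else is illustrative.

package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// ReadBuildInfo returns the module dependency information embedded in
	// the running binary when it was built with module support.
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("binary was not built with module support")
		return
	}
	for _, dep := range info.Deps {
		if dep.Path == "github.com/go-git/go-git/v5" {
			fmt.Println("go-git version:", dep.Version) // expect v5.1.0 after this change
		}
	}
}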
go.sum (18 changed lines)
|
@ -209,8 +209,8 @@ github.com/go-git/go-billy/v5 v5.0.0 h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agR
|
|||
github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
|
||||
github.com/go-git/go-git-fixtures/v4 v4.0.1 h1:q+IFMfLx200Q3scvt2hN79JsEzy4AmBTp/pqnefH+Bc=
|
||||
github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw=
|
||||
github.com/go-git/go-git/v5 v5.0.0 h1:k5RWPm4iJwYtfWoxIJy4wJX9ON7ihPeZZYC1fLYDnpg=
|
||||
github.com/go-git/go-git/v5 v5.0.0/go.mod h1:oYD8y9kWsGINPFJoLdaScGCN6dlKg23blmClfZwtUVA=
|
||||
github.com/go-git/go-git/v5 v5.1.0 h1:HxJn9g/E7eYvKW3Fm7Jt4ee8LXfPOm/H1cdDu8vEssk=
|
||||
github.com/go-git/go-git/v5 v5.1.0/go.mod h1:ZKfuPUoY1ZqIG4QG9BDBh3G4gLM5zvPuSJAozQrZuyM=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
|
@ -381,6 +381,8 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
|||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huandu/xstrings v1.3.0 h1:gvV6jG9dTgFEncxo+AF7PH6MZXi/vZl25owA/8Dg8Wo=
|
||||
github.com/huandu/xstrings v1.3.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
|
||||
github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
|
||||
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/issue9/assert v1.3.1/go.mod h1:9Ger+iz8X7r1zMYYwEhh++2wMGWcNN2oVI+zIQXxcio=
|
||||
github.com/issue9/assert v1.3.2 h1:IaTa37u4m1fUuTH9K9ldO5IONKVDXjLiUO1T9vj0OF0=
|
||||
|
@ -707,8 +709,8 @@ golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8U
|
|||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM=
|
||||
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79 h1:IaQbIIB2X/Mp/DKctl6ROxz1KyMlKp4uyvL6+kQ7C88=
|
||||
golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9 h1:vEg9joUBmeBcK9iSJftGNf3coIG4HqZElCPehJsfAYM=
|
||||
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
|
@ -747,8 +749,8 @@ golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLL
|
|||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f h1:QBjCr1Fz5kw158VqdE9JfI9cJnl/ymnJWAdMuinqL7Y=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM=
|
||||
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/oauth2 v0.0.0-20180620175406-ef147856a6dd/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
|
@ -795,8 +797,8 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0
|
|||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f h1:mOhmO9WsBaJCNmaZHPtHs9wOcdqdKCjF6OPJlmDM3KI=
|
||||
golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 h1:ogLJMz+qpzav7lGMh10LMvAkM/fAoGlaiiHYiFYdm80=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
|
|
vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md (2 changed lines, generated, vendored)
@@ -101,7 +101,7 @@ is supported by go-git.
 | http(s):// (smart) | ✔ |
 | git:// | ✔ |
 | ssh:// | ✔ |
-| file:// | ✔ |
+| file:// | partial | Warning: this is not pure Golang. This shells out to the `git` binary. |
 | custom | ✔ |
 | **other features** |
 | gitignore | ✔ |
vendor/github.com/go-git/go-git/v5/README.md (8 changed lines, generated, vendored)
@@ -1,9 +1,9 @@
 ![go-git logo](https://cdn.rawgit.com/src-d/artwork/02036484/go-git/files/go-git-github-readme-header.png)
-[![GoDoc](https://godoc.org/github.com/go-git/go-git/v5?status.svg)](https://godoc.org/github.com/src-d/go-git) [![Build Status](https://github.com/go-git/go-git/workflows/Test%20&%20Coverage/badge.svg)](https://github.com/go-git/go-git/actions) [![Go Report Card](https://goreportcard.com/badge/github.com/src-d/go-git)](https://goreportcard.com/report/github.com/src-d/go-git)
+[![GoDoc](https://godoc.org/github.com/go-git/go-git/v5?status.svg)](https://pkg.go.dev/github.com/go-git/go-git/v5) [![Build Status](https://github.com/go-git/go-git/workflows/Test/badge.svg)](https://github.com/go-git/go-git/actions) [![Go Report Card](https://goreportcard.com/badge/github.com/go-git/go-git)](https://goreportcard.com/report/github.com/go-git/go-git)
 
 *go-git* is a highly extensible git implementation library written in **pure Go**.
 
-It can be used to manipulate git repositories at low level *(plumbing)* or high level *(porcelain)*, through an idiomatic Go API. It also supports several types of storage, such as in-memory filesystems, or custom implementations, thanks to the [`Storer`](https://godoc.org/github.com/go-git/go-git/v5/plumbing/storer) interface.
+It can be used to manipulate git repositories at low level *(plumbing)* or high level *(porcelain)*, through an idiomatic Go API. It also supports several types of storage, such as in-memory filesystems, or custom implementations, thanks to the [`Storer`](https://pkg.go.dev/github.com/go-git/go-git/v5/plumbing/storer) interface.
 
 It's being actively developed since 2015 and is being used extensively by [Keybase](https://keybase.io/blog/encrypted-git-for-everyone), [Gitea](https://gitea.io/en-us/) or [Pulumi](https://github.com/search?q=org%3Apulumi+go-git&type=Code), and by many other libraries and tools.
 
@@ -12,7 +12,7 @@ Project Status
 
 After the legal issues with the [`src-d`](https://github.com/src-d) organization, the lack of update for four months and the requirement to make a hard fork, the project is **now back to normality**.
 
-The project is currently actively maintained by individual contributors, including several of the original authors, but also backed by a new company `gitsigth` where `go-git` is a critical component used at scale.
+The project is currently actively maintained by individual contributors, including several of the original authors, but also backed by a new company, [gitsight](https://github.com/gitsight), where `go-git` is a critical component used at scale.
 
 
 Comparison with git
@@ -37,7 +37,7 @@ import "github.com/go-git/go-git" // with go modules disabled
 Examples
 --------
 
-> Please note that the `CheckIfError` and `Info` functions used in the examples are from the [examples package](https://github.com/src-d/go-git/blob/master/_examples/common.go#L17) just to be used in the examples.
+> Please note that the `CheckIfError` and `Info` functions used in the examples are from the [examples package](https://github.com/go-git/go-git/blob/master/_examples/common.go#L19) just to be used in the examples.
 
 
 ### Basic example
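The hunk above ends at the README's "Basic example" heading. For orientation only, a minimal clone along the lines of that example could look like the sketch below; the destination path and clone options are illustrative and not taken from this diff.

package main

import (
	"log"
	"os"

	git "github.com/go-git/go-git/v5"
)

func main() {
	// Clone the repository into a local directory, streaming transfer
	// progress to stdout.
	_, err := git.PlainClone("/tmp/go-git-example", false, &git.CloneOptions{
		URL:      "https://github.com/go-git/go-git",
		Progress: os.Stdout,
	})
	if err != nil {
		log.Fatal(err)
	}
}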
vendor/github.com/go-git/go-git/v5/config/config.go (157 changed lines, generated, vendored)
|
@ -5,11 +5,16 @@ import (
|
|||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"github.com/go-git/go-git/v5/internal/url"
|
||||
format "github.com/go-git/go-git/v5/plumbing/format/config"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -32,6 +37,16 @@ var (
|
|||
ErrRemoteConfigEmptyName = errors.New("remote config: empty name")
|
||||
)
|
||||
|
||||
// Scope defines the scope of a config file, such as local, global or system.
|
||||
type Scope int
|
||||
|
||||
// Available ConfigScope's
|
||||
const (
|
||||
LocalScope Scope = iota
|
||||
GlobalScope
|
||||
SystemScope
|
||||
)
|
||||
|
||||
// Config contains the repository configuration
|
||||
// https://www.kernel.org/pub/software/scm/git/docs/git-config.html#FILES
|
||||
type Config struct {
|
||||
|
@ -46,6 +61,27 @@ type Config struct {
|
|||
CommentChar string
|
||||
}
|
||||
|
||||
User struct {
|
||||
// Name is the personal name of the author and the commiter of a commit.
|
||||
Name string
|
||||
// Email is the email of the author and the commiter of a commit.
|
||||
Email string
|
||||
}
|
||||
|
||||
Author struct {
|
||||
// Name is the personal name of the author of a commit.
|
||||
Name string
|
||||
// Email is the email of the author of a commit.
|
||||
Email string
|
||||
}
|
||||
|
||||
Committer struct {
|
||||
// Name is the personal name of the commiter of a commit.
|
||||
Name string
|
||||
// Email is the email of the the commiter of a commit.
|
||||
Email string
|
||||
}
|
||||
|
||||
Pack struct {
|
||||
// Window controls the size of the sliding window for delta
|
||||
// compression. The default is 10. A value of 0 turns off
|
||||
|
@ -82,6 +118,77 @@ func NewConfig() *Config {
|
|||
return config
|
||||
}
|
||||
|
||||
// ReadConfig reads a config file from a io.Reader.
|
||||
func ReadConfig(r io.Reader) (*Config, error) {
|
||||
b, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg := NewConfig()
|
||||
if err = cfg.Unmarshal(b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// LoadConfig loads a config file from a given scope. The returned Config,
|
||||
// contains exclusively information fom the given scope. If couldn't find a
|
||||
// config file to the given scope, a empty one is returned.
|
||||
func LoadConfig(scope Scope) (*Config, error) {
|
||||
if scope == LocalScope {
|
||||
return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer.")
|
||||
}
|
||||
|
||||
files, err := Paths(scope)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
f, err := os.Open(file)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer f.Close()
|
||||
return ReadConfig(f)
|
||||
}
|
||||
|
||||
return NewConfig(), nil
|
||||
}
|
||||
|
||||
// Paths returns the config file location for a given scope.
|
||||
func Paths(scope Scope) ([]string, error) {
|
||||
var files []string
|
||||
switch scope {
|
||||
case GlobalScope:
|
||||
xdg := os.Getenv("XDG_CONFIG_HOME")
|
||||
if xdg != "" {
|
||||
files = append(files, filepath.Join(xdg, "git/config"))
|
||||
}
|
||||
|
||||
home, err := homedir.Dir()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
files = append(files,
|
||||
filepath.Join(home, ".gitconfig"),
|
||||
filepath.Join(home, ".config/git/config"),
|
||||
)
|
||||
case SystemScope:
|
||||
files = append(files, "/etc/gitconfig")
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// Validate validates the fields and sets the default values.
|
||||
func (c *Config) Validate() error {
|
||||
for name, r := range c.Remotes {
|
||||
|
@ -113,6 +220,9 @@ const (
|
|||
branchSection = "branch"
|
||||
coreSection = "core"
|
||||
packSection = "pack"
|
||||
userSection = "user"
|
||||
authorSection = "author"
|
||||
committerSection = "committer"
|
||||
fetchKey = "fetch"
|
||||
urlKey = "url"
|
||||
bareKey = "bare"
|
||||
|
@ -121,6 +231,8 @@ const (
|
|||
windowKey = "window"
|
||||
mergeKey = "merge"
|
||||
rebaseKey = "rebase"
|
||||
nameKey = "name"
|
||||
emailKey = "email"
|
||||
|
||||
// DefaultPackWindow holds the number of previous objects used to
|
||||
// generate deltas. The value 10 is the same used by git command.
|
||||
|
@ -138,6 +250,7 @@ func (c *Config) Unmarshal(b []byte) error {
|
|||
}
|
||||
|
||||
c.unmarshalCore()
|
||||
c.unmarshalUser()
|
||||
if err := c.unmarshalPack(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -160,6 +273,20 @@ func (c *Config) unmarshalCore() {
|
|||
c.Core.CommentChar = s.Options.Get(commentCharKey)
|
||||
}
|
||||
|
||||
func (c *Config) unmarshalUser() {
|
||||
s := c.Raw.Section(userSection)
|
||||
c.User.Name = s.Options.Get(nameKey)
|
||||
c.User.Email = s.Options.Get(emailKey)
|
||||
|
||||
s = c.Raw.Section(authorSection)
|
||||
c.Author.Name = s.Options.Get(nameKey)
|
||||
c.Author.Email = s.Options.Get(emailKey)
|
||||
|
||||
s = c.Raw.Section(committerSection)
|
||||
c.Committer.Name = s.Options.Get(nameKey)
|
||||
c.Committer.Email = s.Options.Get(emailKey)
|
||||
}
|
||||
|
||||
func (c *Config) unmarshalPack() error {
|
||||
s := c.Raw.Section(packSection)
|
||||
window := s.Options.Get(windowKey)
|
||||
|
@ -220,6 +347,7 @@ func (c *Config) unmarshalBranches() error {
|
|||
// Marshal returns Config encoded as a git-config file.
|
||||
func (c *Config) Marshal() ([]byte, error) {
|
||||
c.marshalCore()
|
||||
c.marshalUser()
|
||||
c.marshalPack()
|
||||
c.marshalRemotes()
|
||||
c.marshalSubmodules()
|
||||
|
@ -242,6 +370,35 @@ func (c *Config) marshalCore() {
|
|||
}
|
||||
}
|
||||
|
||||
func (c *Config) marshalUser() {
|
||||
s := c.Raw.Section(userSection)
|
||||
if c.User.Name != "" {
|
||||
s.SetOption(nameKey, c.User.Name)
|
||||
}
|
||||
|
||||
if c.User.Email != "" {
|
||||
s.SetOption(emailKey, c.User.Email)
|
||||
}
|
||||
|
||||
s = c.Raw.Section(authorSection)
|
||||
if c.Author.Name != "" {
|
||||
s.SetOption(nameKey, c.Author.Name)
|
||||
}
|
||||
|
||||
if c.Author.Email != "" {
|
||||
s.SetOption(emailKey, c.Author.Email)
|
||||
}
|
||||
|
||||
s = c.Raw.Section(committerSection)
|
||||
if c.Committer.Name != "" {
|
||||
s.SetOption(nameKey, c.Committer.Name)
|
||||
}
|
||||
|
||||
if c.Committer.Email != "" {
|
||||
s.SetOption(emailKey, c.Committer.Email)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Config) marshalPack() {
|
||||
s := c.Raw.Section(packSection)
|
||||
if c.Pack.Window != DefaultPackWindow {
|
||||
|
|
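The new scoped configuration API shown above (Scope, Paths, ReadConfig, LoadConfig and the User/Author/Committer sections) can be exercised on its own. A small sketch, assuming a global ~/.gitconfig or XDG config file exists; when none is found, LoadConfig returns an empty Config rather than an error:

package main

import (
	"fmt"
	"log"

	"github.com/go-git/go-git/v5/config"
)

func main() {
	// Load only the global-scope configuration; local scope has to be read
	// through a ConfigStorer, per the LoadConfig documentation above.
	cfg, err := config.LoadConfig(config.GlobalScope)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("user.name: ", cfg.User.Name)
	fmt.Println("user.email:", cfg.User.Email)

	// The candidate file locations for a scope are exposed directly as well.
	paths, _ := config.Paths(config.GlobalScope)
	fmt.Println("searched:", paths)
}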
vendor/github.com/go-git/go-git/v5/config/refspec.go (9 changed lines, generated, vendored)
@@ -25,7 +25,7 @@ var (
	// reference even if it isn’t a fast-forward.
	// eg.: "+refs/heads/*:refs/remotes/origin/*"
	//
	// https://git-scm.com/book/es/v2/Git-Internals-The-Refspec
	// https://git-scm.com/book/en/v2/Git-Internals-The-Refspec
type RefSpec string

// Validate validates the RefSpec
@@ -59,6 +59,11 @@ func (s RefSpec) IsDelete() bool {
	return s[0] == refSpecSeparator[0]
}

// IsExactSHA1 returns true if the source is a SHA1 hash.
func (s RefSpec) IsExactSHA1() bool {
	return plumbing.IsHash(s.Src())
}

// Src return the src side.
func (s RefSpec) Src() string {
	spec := string(s)
@@ -69,8 +74,8 @@
	} else {
		start = 0
	}
	end := strings.Index(spec, refSpecSeparator)

	end := strings.Index(spec, refSpecSeparator)
	return spec[start:end]
}
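A quick sketch of the RefSpec.IsExactSHA1 helper added above; the 40-character hex string below is arbitrary and used only for illustration.

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/config"
)

func main() {
	// The source side of this refspec is a full SHA-1, so IsExactSHA1 is true.
	byHash := config.RefSpec("0123456789abcdef0123456789abcdef01234567:refs/heads/pinned")
	fmt.Println(byHash.IsExactSHA1()) // true

	// A branch-to-remote mapping is not an exact SHA-1.
	byName := config.RefSpec("+refs/heads/*:refs/remotes/origin/*")
	fmt.Println(byName.IsExactSHA1()) // false
}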
vendor/github.com/go-git/go-git/v5/go.mod (1 changed line, generated, vendored)
@@ -10,6 +10,7 @@ require (
	github.com/go-git/go-billy/v5 v5.0.0
	github.com/go-git/go-git-fixtures/v4 v4.0.1
	github.com/google/go-cmp v0.3.0
	github.com/imdario/mergo v0.3.9
	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99
	github.com/jessevdk/go-flags v1.4.0
	github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd
vendor/github.com/go-git/go-git/v5/go.sum (2 changed lines, generated, vendored)
|
@ -22,6 +22,8 @@ github.com/go-git/go-git-fixtures/v4 v4.0.1 h1:q+IFMfLx200Q3scvt2hN79JsEzy4AmBTp
|
|||
github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw=
|
||||
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
|
||||
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
|
||||
github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
|
||||
|
|
vendor/github.com/go-git/go-git/v5/options.go (49 changed lines, generated, vendored)
|
@ -6,12 +6,12 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/openpgp"
|
||||
"github.com/go-git/go-git/v5/config"
|
||||
"github.com/go-git/go-git/v5/plumbing"
|
||||
"github.com/go-git/go-git/v5/plumbing/object"
|
||||
"github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband"
|
||||
"github.com/go-git/go-git/v5/plumbing/transport"
|
||||
"golang.org/x/crypto/openpgp"
|
||||
)
|
||||
|
||||
// SubmoduleRescursivity defines how depth will affect any submodule recursive
|
||||
|
@ -190,6 +190,9 @@ type PushOptions struct {
|
|||
// Prune specify that remote refs that match given RefSpecs and that do
|
||||
// not exist locally will be removed.
|
||||
Prune bool
|
||||
// Force allows the push to update a remote branch even when the local
|
||||
// branch does not descend from it.
|
||||
Force bool
|
||||
}
|
||||
|
||||
// Validate validates the fields and sets the default values.
|
||||
|
@ -375,7 +378,8 @@ type CommitOptions struct {
|
|||
// All automatically stage files that have been modified and deleted, but
|
||||
// new files you have not told Git about are not affected.
|
||||
All bool
|
||||
// Author is the author's signature of the commit.
|
||||
// Author is the author's signature of the commit. If Author is empty the
|
||||
// Name and Email is read from the config, and time.Now it's used as When.
|
||||
Author *object.Signature
|
||||
// Committer is the committer's signature of the commit. If Committer is
|
||||
// nil the Author signature is used.
|
||||
|
@ -392,7 +396,9 @@ type CommitOptions struct {
|
|||
// Validate validates the fields and sets the default values.
|
||||
func (o *CommitOptions) Validate(r *Repository) error {
|
||||
if o.Author == nil {
|
||||
return ErrMissingAuthor
|
||||
if err := o.loadConfigAuthorAndCommitter(r); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if o.Committer == nil {
|
||||
|
@ -413,6 +419,43 @@ func (o *CommitOptions) Validate(r *Repository) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (o *CommitOptions) loadConfigAuthorAndCommitter(r *Repository) error {
|
||||
cfg, err := r.ConfigScoped(config.SystemScope)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if o.Author == nil && cfg.Author.Email != "" && cfg.Author.Name != "" {
|
||||
o.Author = &object.Signature{
|
||||
Name: cfg.Author.Name,
|
||||
Email: cfg.Author.Email,
|
||||
When: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
if o.Committer == nil && cfg.Committer.Email != "" && cfg.Committer.Name != "" {
|
||||
o.Committer = &object.Signature{
|
||||
Name: cfg.Committer.Name,
|
||||
Email: cfg.Committer.Email,
|
||||
When: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
if o.Author == nil && cfg.User.Email != "" && cfg.User.Name != "" {
|
||||
o.Author = &object.Signature{
|
||||
Name: cfg.User.Name,
|
||||
Email: cfg.User.Email,
|
||||
When: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
if o.Author == nil {
|
||||
return ErrMissingAuthor
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
ErrMissingName = errors.New("name field is required")
|
||||
ErrMissingTagger = errors.New("tagger field is required")
|
||||
|
|
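With the change above, CommitOptions.Validate no longer fails immediately with ErrMissingAuthor when no Author is given; it first tries the [author], [committer] and [user] sections of the git configuration. A hedged sketch follows. It assumes user.name and user.email (or author.*) are set in the configuration visible to the repository; otherwise the commit still returns ErrMissingAuthor.

package main

import (
	"io/ioutil"
	"log"
	"path/filepath"

	git "github.com/go-git/go-git/v5"
)

func main() {
	dir, err := ioutil.TempDir("", "gogit-commit")
	if err != nil {
		log.Fatal(err)
	}

	repo, err := git.PlainInit(dir, false)
	if err != nil {
		log.Fatal(err)
	}

	wt, err := repo.Worktree()
	if err != nil {
		log.Fatal(err)
	}

	if err := ioutil.WriteFile(filepath.Join(dir, "hello.txt"), []byte("hello\n"), 0644); err != nil {
		log.Fatal(err)
	}
	if _, err := wt.Add("hello.txt"); err != nil {
		log.Fatal(err)
	}

	// No Author is given here: since v5.1.0 the signature is read from the
	// [author]/[committer]/[user] sections of the git configuration.
	hash, err := wt.Commit("initial commit", &git.CommitOptions{})
	if err != nil {
		log.Fatal(err) // e.g. ErrMissingAuthor when no usable config exists
	}
	log.Println("created commit", hash)
}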
vendor/github.com/go-git/go-git/v5/plumbing/color/color.go (38 changed lines, generated, vendored, new file)
|
@ -0,0 +1,38 @@
|
|||
package color
|
||||
|
||||
// TODO read colors from a github.com/go-git/go-git/plumbing/format/config.Config struct
|
||||
// TODO implement color parsing, see https://github.com/git/git/blob/v2.26.2/color.c
|
||||
|
||||
// Colors. See https://github.com/git/git/blob/v2.26.2/color.h#L24-L53.
|
||||
const (
|
||||
Normal = ""
|
||||
Reset = "\033[m"
|
||||
Bold = "\033[1m"
|
||||
Red = "\033[31m"
|
||||
Green = "\033[32m"
|
||||
Yellow = "\033[33m"
|
||||
Blue = "\033[34m"
|
||||
Magenta = "\033[35m"
|
||||
Cyan = "\033[36m"
|
||||
BoldRed = "\033[1;31m"
|
||||
BoldGreen = "\033[1;32m"
|
||||
BoldYellow = "\033[1;33m"
|
||||
BoldBlue = "\033[1;34m"
|
||||
BoldMagenta = "\033[1;35m"
|
||||
BoldCyan = "\033[1;36m"
|
||||
FaintRed = "\033[2;31m"
|
||||
FaintGreen = "\033[2;32m"
|
||||
FaintYellow = "\033[2;33m"
|
||||
FaintBlue = "\033[2;34m"
|
||||
FaintMagenta = "\033[2;35m"
|
||||
FaintCyan = "\033[2;36m"
|
||||
BgRed = "\033[41m"
|
||||
BgGreen = "\033[42m"
|
||||
BgYellow = "\033[43m"
|
||||
BgBlue = "\033[44m"
|
||||
BgMagenta = "\033[45m"
|
||||
BgCyan = "\033[46m"
|
||||
Faint = "\033[2m"
|
||||
FaintItalic = "\033[2;3m"
|
||||
Reverse = "\033[7m"
|
||||
)
|
vendor/github.com/go-git/go-git/v5/plumbing/format/diff/colorconfig.go (97 changed lines, generated, vendored, new file)
|
@ -0,0 +1,97 @@
|
|||
package diff
|
||||
|
||||
import "github.com/go-git/go-git/v5/plumbing/color"
|
||||
|
||||
// A ColorKey is a key into a ColorConfig map and also equal to the key in the
|
||||
// diff.color subsection of the config. See
|
||||
// https://github.com/git/git/blob/v2.26.2/diff.c#L83-L106.
|
||||
type ColorKey string
|
||||
|
||||
// ColorKeys.
|
||||
const (
|
||||
Context ColorKey = "context"
|
||||
Meta ColorKey = "meta"
|
||||
Frag ColorKey = "frag"
|
||||
Old ColorKey = "old"
|
||||
New ColorKey = "new"
|
||||
Commit ColorKey = "commit"
|
||||
Whitespace ColorKey = "whitespace"
|
||||
Func ColorKey = "func"
|
||||
OldMoved ColorKey = "oldMoved"
|
||||
OldMovedAlternative ColorKey = "oldMovedAlternative"
|
||||
OldMovedDimmed ColorKey = "oldMovedDimmed"
|
||||
OldMovedAlternativeDimmed ColorKey = "oldMovedAlternativeDimmed"
|
||||
NewMoved ColorKey = "newMoved"
|
||||
NewMovedAlternative ColorKey = "newMovedAlternative"
|
||||
NewMovedDimmed ColorKey = "newMovedDimmed"
|
||||
NewMovedAlternativeDimmed ColorKey = "newMovedAlternativeDimmed"
|
||||
ContextDimmed ColorKey = "contextDimmed"
|
||||
OldDimmed ColorKey = "oldDimmed"
|
||||
NewDimmed ColorKey = "newDimmed"
|
||||
ContextBold ColorKey = "contextBold"
|
||||
OldBold ColorKey = "oldBold"
|
||||
NewBold ColorKey = "newBold"
|
||||
)
|
||||
|
||||
// A ColorConfig is a color configuration. A nil or empty ColorConfig
|
||||
// corresponds to no color.
|
||||
type ColorConfig map[ColorKey]string
|
||||
|
||||
// A ColorConfigOption sets an option on a ColorConfig.
|
||||
type ColorConfigOption func(ColorConfig)
|
||||
|
||||
// WithColor sets the color for key.
|
||||
func WithColor(key ColorKey, color string) ColorConfigOption {
|
||||
return func(cc ColorConfig) {
|
||||
cc[key] = color
|
||||
}
|
||||
}
|
||||
|
||||
// defaultColorConfig is the default color configuration. See
|
||||
// https://github.com/git/git/blob/v2.26.2/diff.c#L57-L81.
|
||||
var defaultColorConfig = ColorConfig{
|
||||
Context: color.Normal,
|
||||
Meta: color.Bold,
|
||||
Frag: color.Cyan,
|
||||
Old: color.Red,
|
||||
New: color.Green,
|
||||
Commit: color.Yellow,
|
||||
Whitespace: color.BgRed,
|
||||
Func: color.Normal,
|
||||
OldMoved: color.BoldMagenta,
|
||||
OldMovedAlternative: color.BoldBlue,
|
||||
OldMovedDimmed: color.Faint,
|
||||
OldMovedAlternativeDimmed: color.FaintItalic,
|
||||
NewMoved: color.BoldCyan,
|
||||
NewMovedAlternative: color.BoldYellow,
|
||||
NewMovedDimmed: color.Faint,
|
||||
NewMovedAlternativeDimmed: color.FaintItalic,
|
||||
ContextDimmed: color.Faint,
|
||||
OldDimmed: color.FaintRed,
|
||||
NewDimmed: color.FaintGreen,
|
||||
ContextBold: color.Bold,
|
||||
OldBold: color.BoldRed,
|
||||
NewBold: color.BoldGreen,
|
||||
}
|
||||
|
||||
// NewColorConfig returns a new ColorConfig.
|
||||
func NewColorConfig(options ...ColorConfigOption) ColorConfig {
|
||||
cc := make(ColorConfig)
|
||||
for key, value := range defaultColorConfig {
|
||||
cc[key] = value
|
||||
}
|
||||
for _, option := range options {
|
||||
option(cc)
|
||||
}
|
||||
return cc
|
||||
}
|
||||
|
||||
// Reset returns the ANSI escape sequence to reset the color with key set from
|
||||
// cc. If no color was set then no reset is needed so it returns the empty
|
||||
// string.
|
||||
func (cc ColorConfig) Reset(key ColorKey) string {
|
||||
if cc[key] == "" {
|
||||
return ""
|
||||
}
|
||||
return color.Reset
|
||||
}
|
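A short sketch of how this new color configuration is meant to be combined: defaults come from NewColorConfig and individual keys can be overridden with WithColor. The particular override chosen below is arbitrary.

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/color"
	"github.com/go-git/go-git/v5/plumbing/format/diff"
)

func main() {
	// Start from the git-like defaults and override the color used for added
	// lines; any key left unset resolves to the empty string (no color).
	cc := diff.NewColorConfig(
		diff.WithColor(diff.New, color.BoldGreen),
	)

	fmt.Printf("added lines use %q, reset with %q\n", cc[diff.New], cc.Reset(diff.New))
}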
vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go (399 changed lines, generated, vendored)
|
@ -1,157 +1,158 @@
|
|||
package diff
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/go-git/go-git/v5/plumbing"
|
||||
)
|
||||
|
||||
const (
|
||||
diffInit = "diff --git a/%s b/%s\n"
|
||||
// DefaultContextLines is the default number of context lines.
|
||||
const DefaultContextLines = 3
|
||||
|
||||
chunkStart = "@@ -"
|
||||
chunkMiddle = " +"
|
||||
chunkEnd = " @@%s\n"
|
||||
chunkCount = "%d,%d"
|
||||
var (
|
||||
splitLinesRegexp = regexp.MustCompile(`[^\n]*(\n|$)`)
|
||||
|
||||
noFilePath = "/dev/null"
|
||||
aDir = "a/"
|
||||
bDir = "b/"
|
||||
operationChar = map[Operation]byte{
|
||||
Add: '+',
|
||||
Delete: '-',
|
||||
Equal: ' ',
|
||||
}
|
||||
|
||||
fPath = "--- %s\n"
|
||||
tPath = "+++ %s\n"
|
||||
binary = "Binary files %s and %s differ\n"
|
||||
|
||||
addLine = "+%s%s"
|
||||
deleteLine = "-%s%s"
|
||||
equalLine = " %s%s"
|
||||
noNewLine = "\n\\ No newline at end of file\n"
|
||||
|
||||
oldMode = "old mode %o\n"
|
||||
newMode = "new mode %o\n"
|
||||
deletedFileMode = "deleted file mode %o\n"
|
||||
newFileMode = "new file mode %o\n"
|
||||
|
||||
renameFrom = "from"
|
||||
renameTo = "to"
|
||||
renameFileMode = "rename %s %s\n"
|
||||
|
||||
indexAndMode = "index %s..%s %o\n"
|
||||
indexNoMode = "index %s..%s\n"
|
||||
|
||||
DefaultContextLines = 3
|
||||
operationColorKey = map[Operation]ColorKey{
|
||||
Add: New,
|
||||
Delete: Old,
|
||||
Equal: Context,
|
||||
}
|
||||
)
|
||||
|
||||
// UnifiedEncoder encodes an unified diff into the provided Writer.
|
||||
// There are some unsupported features:
|
||||
// - Similarity index for renames
|
||||
// - Sort hash representation
|
||||
// UnifiedEncoder encodes an unified diff into the provided Writer. It does not
|
||||
// support similarity index for renames or sorting hash representations.
|
||||
type UnifiedEncoder struct {
|
||||
io.Writer
|
||||
|
||||
// ctxLines is the count of unchanged lines that will appear
|
||||
// surrounding a change.
|
||||
ctxLines int
|
||||
// contextLines is the count of unchanged lines that will appear surrounding
|
||||
// a change.
|
||||
contextLines int
|
||||
|
||||
buf bytes.Buffer
|
||||
// colorConfig is the color configuration. The default is no color.
|
||||
color ColorConfig
|
||||
}
|
||||
|
||||
func NewUnifiedEncoder(w io.Writer, ctxLines int) *UnifiedEncoder {
|
||||
return &UnifiedEncoder{ctxLines: ctxLines, Writer: w}
|
||||
// NewUnifiedEncoder returns a new UnifiedEncoder that writes to w.
|
||||
func NewUnifiedEncoder(w io.Writer, contextLines int) *UnifiedEncoder {
|
||||
return &UnifiedEncoder{
|
||||
Writer: w,
|
||||
contextLines: contextLines,
|
||||
}
|
||||
}
|
||||
|
||||
// SetColor sets e's color configuration and returns e.
|
||||
func (e *UnifiedEncoder) SetColor(colorConfig ColorConfig) *UnifiedEncoder {
|
||||
e.color = colorConfig
|
||||
return e
|
||||
}
|
||||
|
||||
// Encode encodes patch.
|
||||
func (e *UnifiedEncoder) Encode(patch Patch) error {
|
||||
e.printMessage(patch.Message())
|
||||
sb := &strings.Builder{}
|
||||
|
||||
if err := e.encodeFilePatch(patch.FilePatches()); err != nil {
|
||||
return err
|
||||
if message := patch.Message(); message != "" {
|
||||
sb.WriteString(message)
|
||||
if !strings.HasSuffix(message, "\n") {
|
||||
sb.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
|
||||
_, err := e.buf.WriteTo(e)
|
||||
for _, filePatch := range patch.FilePatches() {
|
||||
e.writeFilePatchHeader(sb, filePatch)
|
||||
g := newHunksGenerator(filePatch.Chunks(), e.contextLines)
|
||||
for _, hunk := range g.Generate() {
|
||||
hunk.writeTo(sb, e.color)
|
||||
}
|
||||
}
|
||||
|
||||
_, err := e.Write([]byte(sb.String()))
|
||||
return err
|
||||
}
|
||||
|
||||
func (e *UnifiedEncoder) encodeFilePatch(filePatches []FilePatch) error {
|
||||
for _, p := range filePatches {
|
||||
f, t := p.Files()
|
||||
if err := e.header(f, t, p.IsBinary()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
g := newHunksGenerator(p.Chunks(), e.ctxLines)
|
||||
for _, c := range g.Generate() {
|
||||
c.WriteTo(&e.buf)
|
||||
}
|
||||
func (e *UnifiedEncoder) writeFilePatchHeader(sb *strings.Builder, filePatch FilePatch) {
|
||||
from, to := filePatch.Files()
|
||||
if from == nil && to == nil {
|
||||
return
|
||||
}
|
||||
isBinary := filePatch.IsBinary()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *UnifiedEncoder) printMessage(message string) {
|
||||
isEmpty := message == ""
|
||||
hasSuffix := strings.HasSuffix(message, "\n")
|
||||
if !isEmpty && !hasSuffix {
|
||||
message += "\n"
|
||||
}
|
||||
|
||||
e.buf.WriteString(message)
|
||||
}
|
||||
|
||||
func (e *UnifiedEncoder) header(from, to File, isBinary bool) error {
|
||||
var lines []string
|
||||
switch {
|
||||
case from == nil && to == nil:
|
||||
return nil
|
||||
case from != nil && to != nil:
|
||||
hashEquals := from.Hash() == to.Hash()
|
||||
|
||||
fmt.Fprintf(&e.buf, diffInit, from.Path(), to.Path())
|
||||
|
||||
lines = append(lines,
|
||||
fmt.Sprintf("diff --git a/%s b/%s", from.Path(), to.Path()),
|
||||
)
|
||||
if from.Mode() != to.Mode() {
|
||||
fmt.Fprintf(&e.buf, oldMode+newMode, from.Mode(), to.Mode())
|
||||
lines = append(lines,
|
||||
fmt.Sprintf("old mode %o", from.Mode()),
|
||||
fmt.Sprintf("new mode %o", to.Mode()),
|
||||
)
|
||||
}
|
||||
|
||||
if from.Path() != to.Path() {
|
||||
fmt.Fprintf(&e.buf,
|
||||
renameFileMode+renameFileMode,
|
||||
renameFrom, from.Path(), renameTo, to.Path())
|
||||
lines = append(lines,
|
||||
fmt.Sprintf("rename from %s", from.Path()),
|
||||
fmt.Sprintf("rename to %s", to.Path()),
|
||||
)
|
||||
}
|
||||
|
||||
if from.Mode() != to.Mode() && !hashEquals {
|
||||
fmt.Fprintf(&e.buf, indexNoMode, from.Hash(), to.Hash())
|
||||
lines = append(lines,
|
||||
fmt.Sprintf("index %s..%s", from.Hash(), to.Hash()),
|
||||
)
|
||||
} else if !hashEquals {
|
||||
fmt.Fprintf(&e.buf, indexAndMode, from.Hash(), to.Hash(), from.Mode())
|
||||
lines = append(lines,
|
||||
fmt.Sprintf("index %s..%s %o", from.Hash(), to.Hash(), from.Mode()),
|
||||
)
|
||||
}
|
||||
|
||||
if !hashEquals {
|
||||
e.pathLines(isBinary, aDir+from.Path(), bDir+to.Path())
|
||||
lines = e.appendPathLines(lines, "a/"+from.Path(), "b/"+to.Path(), isBinary)
|
||||
}
|
||||
case from == nil:
|
||||
fmt.Fprintf(&e.buf, diffInit, to.Path(), to.Path())
|
||||
fmt.Fprintf(&e.buf, newFileMode, to.Mode())
|
||||
fmt.Fprintf(&e.buf, indexNoMode, plumbing.ZeroHash, to.Hash())
|
||||
e.pathLines(isBinary, noFilePath, bDir+to.Path())
|
||||
lines = append(lines,
|
||||
fmt.Sprintf("diff --git a/%s b/%s", to.Path(), to.Path()),
|
||||
fmt.Sprintf("new file mode %o", to.Mode()),
|
||||
fmt.Sprintf("index %s..%s", plumbing.ZeroHash, to.Hash()),
|
||||
)
|
||||
lines = e.appendPathLines(lines, "/dev/null", "b/"+to.Path(), isBinary)
|
||||
case to == nil:
|
||||
fmt.Fprintf(&e.buf, diffInit, from.Path(), from.Path())
|
||||
fmt.Fprintf(&e.buf, deletedFileMode, from.Mode())
|
||||
fmt.Fprintf(&e.buf, indexNoMode, from.Hash(), plumbing.ZeroHash)
|
||||
e.pathLines(isBinary, aDir+from.Path(), noFilePath)
|
||||
lines = append(lines,
|
||||
fmt.Sprintf("diff --git a/%s b/%s", from.Path(), from.Path()),
|
||||
fmt.Sprintf("deleted file mode %o", from.Mode()),
|
||||
fmt.Sprintf("index %s..%s", from.Hash(), plumbing.ZeroHash),
|
||||
)
|
||||
lines = e.appendPathLines(lines, "a/"+from.Path(), "/dev/null", isBinary)
|
||||
}
|
||||
|
||||
return nil
|
||||
sb.WriteString(e.color[Meta])
|
||||
sb.WriteString(lines[0])
|
||||
for _, line := range lines[1:] {
|
||||
sb.WriteByte('\n')
|
||||
sb.WriteString(line)
|
||||
}
|
||||
sb.WriteString(e.color.Reset(Meta))
|
||||
sb.WriteByte('\n')
|
||||
}
|
||||
|
||||
func (e *UnifiedEncoder) pathLines(isBinary bool, fromPath, toPath string) {
|
||||
format := fPath + tPath
|
||||
func (e *UnifiedEncoder) appendPathLines(lines []string, fromPath, toPath string, isBinary bool) []string {
|
||||
if isBinary {
|
||||
format = binary
|
||||
return append(lines,
|
||||
fmt.Sprintf("Binary files %s and %s differ", fromPath, toPath),
|
||||
)
|
||||
}
|
||||
|
||||
fmt.Fprintf(&e.buf, format, fromPath, toPath)
|
||||
return append(lines,
|
||||
fmt.Sprintf("--- %s", fromPath),
|
||||
fmt.Sprintf("+++ %s", toPath),
|
||||
)
|
||||
}
|
||||
|
||||
type hunksGenerator struct {
|
||||
|
@ -170,84 +171,84 @@ func newHunksGenerator(chunks []Chunk, ctxLines int) *hunksGenerator {
|
|||
}
|
||||
}
|
||||
|
||||
func (c *hunksGenerator) Generate() []*hunk {
|
||||
for i, chunk := range c.chunks {
|
||||
ls := splitLines(chunk.Content())
|
||||
lsLen := len(ls)
|
||||
func (g *hunksGenerator) Generate() []*hunk {
|
||||
for i, chunk := range g.chunks {
|
||||
lines := splitLines(chunk.Content())
|
||||
nLines := len(lines)
|
||||
|
||||
switch chunk.Type() {
|
||||
case Equal:
|
||||
c.fromLine += lsLen
|
||||
c.toLine += lsLen
|
||||
c.processEqualsLines(ls, i)
|
||||
g.fromLine += nLines
|
||||
g.toLine += nLines
|
||||
g.processEqualsLines(lines, i)
|
||||
case Delete:
|
||||
if lsLen != 0 {
|
||||
c.fromLine++
|
||||
if nLines != 0 {
|
||||
g.fromLine++
|
||||
}
|
||||
|
||||
c.processHunk(i, chunk.Type())
|
||||
c.fromLine += lsLen - 1
|
||||
c.current.AddOp(chunk.Type(), ls...)
|
||||
g.processHunk(i, chunk.Type())
|
||||
g.fromLine += nLines - 1
|
||||
g.current.AddOp(chunk.Type(), lines...)
|
||||
case Add:
|
||||
if lsLen != 0 {
|
||||
c.toLine++
|
||||
if nLines != 0 {
|
||||
g.toLine++
|
||||
}
|
||||
c.processHunk(i, chunk.Type())
|
||||
c.toLine += lsLen - 1
|
||||
c.current.AddOp(chunk.Type(), ls...)
|
||||
g.processHunk(i, chunk.Type())
|
||||
g.toLine += nLines - 1
|
||||
g.current.AddOp(chunk.Type(), lines...)
|
||||
}
|
||||
|
||||
if i == len(c.chunks)-1 && c.current != nil {
|
||||
c.hunks = append(c.hunks, c.current)
|
||||
if i == len(g.chunks)-1 && g.current != nil {
|
||||
g.hunks = append(g.hunks, g.current)
|
||||
}
|
||||
}
|
||||
|
||||
return c.hunks
|
||||
return g.hunks
|
||||
}
|
||||
|
||||
func (c *hunksGenerator) processHunk(i int, op Operation) {
|
||||
if c.current != nil {
|
||||
func (g *hunksGenerator) processHunk(i int, op Operation) {
|
||||
if g.current != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var ctxPrefix string
|
||||
linesBefore := len(c.beforeContext)
|
||||
if linesBefore > c.ctxLines {
|
||||
ctxPrefix = " " + c.beforeContext[linesBefore-c.ctxLines-1]
|
||||
c.beforeContext = c.beforeContext[linesBefore-c.ctxLines:]
|
||||
linesBefore = c.ctxLines
|
||||
linesBefore := len(g.beforeContext)
|
||||
if linesBefore > g.ctxLines {
|
||||
ctxPrefix = g.beforeContext[linesBefore-g.ctxLines-1]
|
||||
g.beforeContext = g.beforeContext[linesBefore-g.ctxLines:]
|
||||
linesBefore = g.ctxLines
|
||||
}
|
||||
|
||||
c.current = &hunk{ctxPrefix: strings.TrimSuffix(ctxPrefix, "\n")}
|
||||
c.current.AddOp(Equal, c.beforeContext...)
|
||||
g.current = &hunk{ctxPrefix: strings.TrimSuffix(ctxPrefix, "\n")}
|
||||
g.current.AddOp(Equal, g.beforeContext...)
|
||||
|
||||
switch op {
|
||||
case Delete:
|
||||
c.current.fromLine, c.current.toLine =
|
||||
c.addLineNumbers(c.fromLine, c.toLine, linesBefore, i, Add)
|
||||
g.current.fromLine, g.current.toLine =
|
||||
g.addLineNumbers(g.fromLine, g.toLine, linesBefore, i, Add)
|
||||
case Add:
|
||||
c.current.toLine, c.current.fromLine =
|
||||
c.addLineNumbers(c.toLine, c.fromLine, linesBefore, i, Delete)
|
||||
g.current.toLine, g.current.fromLine =
|
||||
g.addLineNumbers(g.toLine, g.fromLine, linesBefore, i, Delete)
|
||||
}
|
||||
|
||||
c.beforeContext = nil
|
||||
g.beforeContext = nil
|
||||
}
|
||||
|
||||
// addLineNumbers obtains the line numbers in a new chunk
|
||||
func (c *hunksGenerator) addLineNumbers(la, lb int, linesBefore int, i int, op Operation) (cla, clb int) {
|
||||
// addLineNumbers obtains the line numbers in a new chunk.
|
||||
func (g *hunksGenerator) addLineNumbers(la, lb int, linesBefore int, i int, op Operation) (cla, clb int) {
|
||||
cla = la - linesBefore
|
||||
// we need to search for a reference for the next diff
|
||||
switch {
|
||||
case linesBefore != 0 && c.ctxLines != 0:
|
||||
if lb > c.ctxLines {
|
||||
clb = lb - c.ctxLines + 1
|
||||
case linesBefore != 0 && g.ctxLines != 0:
|
||||
if lb > g.ctxLines {
|
||||
clb = lb - g.ctxLines + 1
|
||||
} else {
|
||||
clb = 1
|
||||
}
|
||||
case c.ctxLines == 0:
|
||||
case g.ctxLines == 0:
|
||||
clb = lb
|
||||
case i != len(c.chunks)-1:
|
||||
next := c.chunks[i+1]
|
||||
case i != len(g.chunks)-1:
|
||||
next := g.chunks[i+1]
|
||||
if next.Type() == op || next.Type() == Equal {
|
||||
// this diff will be into this chunk
|
||||
clb = lb + 1
|
||||
|
@ -257,34 +258,32 @@ func (c *hunksGenerator) addLineNumbers(la, lb int, linesBefore int, i int, op O
|
|||
return
|
||||
}
|
||||
|
||||
func (c *hunksGenerator) processEqualsLines(ls []string, i int) {
|
||||
if c.current == nil {
|
||||
c.beforeContext = append(c.beforeContext, ls...)
|
||||
func (g *hunksGenerator) processEqualsLines(ls []string, i int) {
|
||||
if g.current == nil {
|
||||
g.beforeContext = append(g.beforeContext, ls...)
|
||||
return
|
||||
}
|
||||
|
||||
c.afterContext = append(c.afterContext, ls...)
|
||||
if len(c.afterContext) <= c.ctxLines*2 && i != len(c.chunks)-1 {
|
||||
c.current.AddOp(Equal, c.afterContext...)
|
||||
c.afterContext = nil
|
||||
g.afterContext = append(g.afterContext, ls...)
|
||||
if len(g.afterContext) <= g.ctxLines*2 && i != len(g.chunks)-1 {
|
||||
g.current.AddOp(Equal, g.afterContext...)
|
||||
g.afterContext = nil
|
||||
} else {
|
||||
ctxLines := c.ctxLines
|
||||
if ctxLines > len(c.afterContext) {
|
||||
ctxLines = len(c.afterContext)
|
||||
ctxLines := g.ctxLines
|
||||
if ctxLines > len(g.afterContext) {
|
||||
ctxLines = len(g.afterContext)
|
||||
}
|
||||
c.current.AddOp(Equal, c.afterContext[:ctxLines]...)
|
||||
c.hunks = append(c.hunks, c.current)
|
||||
g.current.AddOp(Equal, g.afterContext[:ctxLines]...)
|
||||
g.hunks = append(g.hunks, g.current)
|
||||
|
||||
c.current = nil
|
||||
c.beforeContext = c.afterContext[ctxLines:]
|
||||
c.afterContext = nil
|
||||
g.current = nil
|
||||
g.beforeContext = g.afterContext[ctxLines:]
|
||||
g.afterContext = nil
|
||||
}
|
||||
}
|
||||
|
||||
var splitLinesRE = regexp.MustCompile(`[^\n]*(\n|$)`)
|
||||
|
||||
func splitLines(s string) []string {
|
||||
out := splitLinesRE.FindAllString(s, -1)
|
||||
out := splitLinesRegexp.FindAllString(s, -1)
|
||||
if out[len(out)-1] == "" {
|
||||
out = out[:len(out)-1]
|
||||
}
|
||||
|
@ -302,44 +301,59 @@ type hunk struct {
|
|||
ops []*op
|
||||
}
|
||||
|
||||
func (c *hunk) WriteTo(buf *bytes.Buffer) {
|
||||
buf.WriteString(chunkStart)
|
||||
func (h *hunk) writeTo(sb *strings.Builder, color ColorConfig) {
|
||||
sb.WriteString(color[Frag])
|
||||
sb.WriteString("@@ -")
|
||||
|
||||
if c.fromCount == 1 {
|
||||
fmt.Fprintf(buf, "%d", c.fromLine)
|
||||
if h.fromCount == 1 {
|
||||
sb.WriteString(strconv.Itoa(h.fromLine))
|
||||
} else {
|
||||
fmt.Fprintf(buf, chunkCount, c.fromLine, c.fromCount)
|
||||
sb.WriteString(strconv.Itoa(h.fromLine))
|
||||
sb.WriteByte(',')
|
||||
sb.WriteString(strconv.Itoa(h.fromCount))
|
||||
}
|
||||
|
||||
buf.WriteString(chunkMiddle)
|
||||
sb.WriteString(" +")
|
||||
|
||||
if c.toCount == 1 {
|
||||
fmt.Fprintf(buf, "%d", c.toLine)
|
||||
if h.toCount == 1 {
|
||||
sb.WriteString(strconv.Itoa(h.toLine))
|
||||
} else {
|
||||
fmt.Fprintf(buf, chunkCount, c.toLine, c.toCount)
|
||||
sb.WriteString(strconv.Itoa(h.toLine))
|
||||
sb.WriteByte(',')
|
||||
sb.WriteString(strconv.Itoa(h.toCount))
|
||||
}
|
||||
|
||||
fmt.Fprintf(buf, chunkEnd, c.ctxPrefix)
|
||||
sb.WriteString(" @@")
|
||||
sb.WriteString(color.Reset(Frag))
|
||||
|
||||
for _, d := range c.ops {
|
||||
buf.WriteString(d.String())
|
||||
if h.ctxPrefix != "" {
|
||||
sb.WriteByte(' ')
|
||||
sb.WriteString(color[Func])
|
||||
sb.WriteString(h.ctxPrefix)
|
||||
sb.WriteString(color.Reset(Func))
|
||||
}
|
||||
|
||||
sb.WriteByte('\n')
|
||||
|
||||
for _, op := range h.ops {
|
||||
op.writeTo(sb, color)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *hunk) AddOp(t Operation, s ...string) {
|
||||
ls := len(s)
|
||||
func (h *hunk) AddOp(t Operation, ss ...string) {
|
||||
n := len(ss)
|
||||
switch t {
|
||||
case Add:
|
||||
c.toCount += ls
|
||||
h.toCount += n
|
||||
case Delete:
|
||||
c.fromCount += ls
|
||||
h.fromCount += n
|
||||
case Equal:
|
||||
c.toCount += ls
|
||||
c.fromCount += ls
|
||||
h.toCount += n
|
||||
h.fromCount += n
|
||||
}
|
||||
|
||||
for _, l := range s {
|
||||
c.ops = append(c.ops, &op{l, t})
|
||||
for _, s := range ss {
|
||||
h.ops = append(h.ops, &op{s, t})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -348,20 +362,15 @@ type op struct {
|
|||
t Operation
|
||||
}
|
||||
|
||||
func (o *op) String() string {
|
||||
var prefix, suffix string
|
||||
switch o.t {
|
||||
case Add:
|
||||
prefix = addLine
|
||||
case Delete:
|
||||
prefix = deleteLine
|
||||
case Equal:
|
||||
prefix = equalLine
|
||||
func (o *op) writeTo(sb *strings.Builder, color ColorConfig) {
|
||||
colorKey := operationColorKey[o.t]
|
||||
sb.WriteString(color[colorKey])
|
||||
sb.WriteByte(operationChar[o.t])
|
||||
if strings.HasSuffix(o.text, "\n") {
|
||||
sb.WriteString(strings.TrimSuffix(o.text, "\n"))
|
||||
} else {
|
||||
sb.WriteString(o.text + "\n\\ No newline at end of file")
|
||||
}
|
||||
n := len(o.text)
|
||||
if n > 0 && o.text[n-1] != '\n' {
|
||||
suffix = noNewLine
|
||||
}
|
||||
|
||||
return fmt.Sprintf(prefix, o.text, suffix)
|
||||
sb.WriteString(color.Reset(colorKey))
|
||||
sb.WriteByte('\n')
|
||||
}
|
||||
|
|
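Putting the rewritten encoder together with the new color support, here is a hedged end-to-end sketch: it opens the repository in the current directory (path is illustrative), diffs HEAD against its first parent, and prints a colored unified diff. object.Patch satisfies the diff.Patch interface consumed by Encode.

package main

import (
	"log"
	"os"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/color"
	"github.com/go-git/go-git/v5/plumbing/format/diff"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		log.Fatal(err)
	}

	head, err := repo.Head()
	if err != nil {
		log.Fatal(err)
	}
	commit, err := repo.CommitObject(head.Hash())
	if err != nil {
		log.Fatal(err)
	}
	parent, err := commit.Parent(0)
	if err != nil {
		log.Fatal(err)
	}

	patch, err := parent.Patch(commit)
	if err != nil {
		log.Fatal(err)
	}

	// Three context lines, red for removed lines, green for added lines.
	encoder := diff.NewUnifiedEncoder(os.Stdout, diff.DefaultContextLines).
		SetColor(diff.NewColorConfig(
			diff.WithColor(diff.Old, color.BoldRed),
			diff.WithColor(diff.New, color.BoldGreen),
		))
	if err := encoder.Encode(patch); err != nil {
		log.Fatal(err)
	}
}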
vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go (10 changed lines, generated, vendored)
|
@ -4,6 +4,7 @@ import (
|
|||
"bytes"
|
||||
|
||||
"github.com/go-git/go-git/v5/plumbing"
|
||||
"github.com/go-git/go-git/v5/utils/ioutil"
|
||||
)
|
||||
|
||||
// See https://github.com/jelmer/dulwich/blob/master/dulwich/pack.py and
|
||||
|
@ -27,17 +28,20 @@ func GetDelta(base, target plumbing.EncodedObject) (plumbing.EncodedObject, erro
|
|||
return getDelta(new(deltaIndex), base, target)
|
||||
}
|
||||
|
||||
func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (plumbing.EncodedObject, error) {
|
||||
func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (o plumbing.EncodedObject, err error) {
|
||||
br, err := base.Reader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer br.Close()
|
||||
|
||||
defer ioutil.CheckClose(br, &err)
|
||||
|
||||
tr, err := target.Reader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer tr.Close()
|
||||
|
||||
defer ioutil.CheckClose(tr, &err)
|
||||
|
||||
bb := bufPool.Get().(*bytes.Buffer)
|
||||
defer bufPool.Put(bb)
|
||||
|
|
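The recurring change in this and the following packfile files is replacing plain `defer x.Close()` with `defer ioutil.CheckClose(x, &err)` on a named error return, so that Close errors are no longer silently dropped. A standalone sketch of that pattern, using a local helper equivalent in spirit to go-git's utils/ioutil.CheckClose (the file path is illustrative):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
)

// checkClose propagates a Close error into *err, but only when no earlier
// error is already being returned.
func checkClose(c io.Closer, err *error) {
	if cerr := c.Close(); cerr != nil && *err == nil {
		*err = cerr
	}
}

// readAll demonstrates the named-return pattern used throughout the packfile
// changes: the deferred checkClose may overwrite err after the body returns.
func readAll(path string) (data []byte, err error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer checkClose(f, &err)

	return ioutil.ReadAll(f)
}

func main() {
	data, err := readAll("/etc/hostname")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("read %d bytes\n", len(data))
}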
vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go (10 changed lines, generated, vendored)
|
@ -9,6 +9,7 @@ import (
|
|||
"github.com/go-git/go-git/v5/plumbing"
|
||||
"github.com/go-git/go-git/v5/plumbing/storer"
|
||||
"github.com/go-git/go-git/v5/utils/binary"
|
||||
"github.com/go-git/go-git/v5/utils/ioutil"
|
||||
)
|
||||
|
||||
// Encoder gets the data from the storage and write it into the writer in PACK
|
||||
|
@ -80,7 +81,7 @@ func (e *Encoder) head(numEntries int) error {
|
|||
)
|
||||
}
|
||||
|
||||
func (e *Encoder) entry(o *ObjectToPack) error {
|
||||
func (e *Encoder) entry(o *ObjectToPack) (err error) {
|
||||
if o.WantWrite() {
|
||||
// A cycle exists in this delta chain. This should only occur if a
|
||||
// selected object representation disappeared during writing
|
||||
|
@ -119,17 +120,22 @@ func (e *Encoder) entry(o *ObjectToPack) error {
|
|||
}
|
||||
|
||||
e.zw.Reset(e.w)
|
||||
|
||||
defer ioutil.CheckClose(e.zw, &err)
|
||||
|
||||
or, err := o.Object.Reader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer ioutil.CheckClose(or, &err)
|
||||
|
||||
_, err = io.Copy(e.zw, or)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return e.zw.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Encoder) writeBaseIfDelta(o *ObjectToPack) error {
|
||||
|
|
vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go (5 changed lines, generated, vendored)
|
@ -10,6 +10,7 @@ import (
|
|||
"github.com/go-git/go-git/v5/plumbing/cache"
|
||||
"github.com/go-git/go-git/v5/plumbing/format/idxfile"
|
||||
"github.com/go-git/go-git/v5/plumbing/storer"
|
||||
"github.com/go-git/go-git/v5/utils/ioutil"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -307,12 +308,14 @@ func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject,
|
|||
return obj, nil
|
||||
}
|
||||
|
||||
func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) error {
|
||||
func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) (err error) {
|
||||
w, err := obj.Writer()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer ioutil.CheckClose(w, &err)
|
||||
|
||||
_, _, err = p.s.NextObject(w)
|
||||
p.cachePut(obj)
|
||||
|
||||
|
|
vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go (15 changed lines, generated, vendored)
|
@ -4,11 +4,12 @@ import (
|
|||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
stdioutil "io/ioutil"
|
||||
|
||||
"github.com/go-git/go-git/v5/plumbing"
|
||||
"github.com/go-git/go-git/v5/plumbing/cache"
|
||||
"github.com/go-git/go-git/v5/plumbing/storer"
|
||||
"github.com/go-git/go-git/v5/utils/ioutil"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -283,7 +284,7 @@ func (p *Parser) resolveDeltas() error {
|
|||
|
||||
if !obj.IsDelta() && len(obj.Children) > 0 {
|
||||
for _, child := range obj.Children {
|
||||
if err := p.resolveObject(ioutil.Discard, child, content); err != nil {
|
||||
if err := p.resolveObject(stdioutil.Discard, child, content); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -298,7 +299,7 @@ func (p *Parser) resolveDeltas() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) error {
|
||||
func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
|
||||
if !o.ExternalRef { // skip cache check for placeholder parents
|
||||
b, ok := p.cache.Get(o.Offset)
|
||||
if ok {
|
||||
|
@ -310,17 +311,21 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) error {
|
|||
// If it's not on the cache and is not a delta we can try to find it in the
|
||||
// storage, if there's one. External refs must enter here.
|
||||
if p.storage != nil && !o.Type.IsDelta() {
|
||||
e, err := p.storage.EncodedObject(plumbing.AnyObject, o.SHA1)
|
||||
var e plumbing.EncodedObject
|
||||
e, err = p.storage.EncodedObject(plumbing.AnyObject, o.SHA1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.Type = e.Type()
|
||||
|
||||
r, err := e.Reader()
|
||||
var r io.ReadCloser
|
||||
r, err = e.Reader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer ioutil.CheckClose(r, &err)
|
||||
|
||||
_, err = buf.ReadFrom(io.LimitReader(r, e.Size()))
|
||||
return err
|
||||
}
|
||||
|
|
vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go (7 changed lines, generated, vendored)
|
@ -6,6 +6,7 @@ import (
|
|||
"io"
|
||||
|
||||
"github.com/go-git/go-git/v5/plumbing"
|
||||
"github.com/go-git/go-git/v5/utils/ioutil"
|
||||
)
|
||||
|
||||
// See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h
|
||||
|
@ -16,17 +17,21 @@ import (
|
|||
const deltaSizeMin = 4
|
||||
|
||||
// ApplyDelta writes to target the result of applying the modification deltas in delta to base.
|
||||
func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) error {
|
||||
func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
|
||||
r, err := base.Reader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer ioutil.CheckClose(r, &err)
|
||||
|
||||
w, err := target.Writer()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer ioutil.CheckClose(w, &err)
|
||||
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
defer bufPool.Put(buf)
|
||||
buf.Reset()
|
||||
|
|
vendor/github.com/go-git/go-git/v5/plumbing/hash.go (10 changed lines, generated, vendored)
@@ -71,3 +71,13 @@ type HashSlice []Hash
func (p HashSlice) Len() int           { return len(p) }
func (p HashSlice) Less(i, j int) bool { return bytes.Compare(p[i][:], p[j][:]) < 0 }
func (p HashSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

// IsHash returns true if the given string is a valid hash.
func IsHash(s string) bool {
	if len(s) != 40 {
		return false
	}

	_, err := hex.DecodeString(s)
	return err == nil
}
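A two-line sketch of the new plumbing.IsHash helper; the first argument is an arbitrary 40-character hex string chosen for illustration.

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	fmt.Println(plumbing.IsHash("0123456789abcdef0123456789abcdef01234567")) // true: 40 hex characters
	fmt.Println(plumbing.IsHash("not-a-sha1"))                               // false: wrong length, not hex
}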
vendor/github.com/go-git/go-git/v5/plumbing/object/change.go (4 changed lines, generated, vendored)
@@ -18,7 +18,7 @@ type Change struct {
	To ChangeEntry
}

var empty = ChangeEntry{}
var empty ChangeEntry

// Action returns the kind of action represented by the change, an
// insertion, a deletion or a modification.
@@ -27,9 +27,11 @@ func (c *Change) Action() (merkletrie.Action, error) {
		return merkletrie.Action(0),
			fmt.Errorf("malformed change: empty from and to")
	}

	if c.From == empty {
		return merkletrie.Insert, nil
	}

	if c.To == empty {
		return merkletrie.Delete, nil
	}
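Change.Action classifies each entry as an insert, delete or modification by comparing against the zero-value ChangeEntry. A hedged sketch of consuming it together with object.DiffTree; the package and function names are illustrative, and the two trees are assumed to come from commits (for example via Commit.Tree).

package gitutil

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/object"
	"github.com/go-git/go-git/v5/utils/merkletrie"
)

// summarize prints one line per change between two trees, using
// Change.Action to classify each entry.
func summarize(from, to *object.Tree) error {
	changes, err := object.DiffTree(from, to)
	if err != nil {
		return err
	}

	for _, change := range changes {
		action, err := change.Action()
		if err != nil {
			return err
		}

		switch action {
		case merkletrie.Insert:
			fmt.Println("added:   ", change.To.Name)
		case merkletrie.Delete:
			fmt.Println("deleted: ", change.From.Name)
		case merkletrie.Modify:
			fmt.Println("modified:", change.To.Name)
		}
	}
	return nil
}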
vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go (15 changed lines, generated, vendored)
|
@ -78,21 +78,30 @@ func (c *Commit) Tree() (*Tree, error) {
|
|||
|
||||
// PatchContext returns the Patch between the actual commit and the provided one.
|
||||
// Error will be return if context expires. Provided context must be non-nil.
|
||||
//
|
||||
// NOTE: Since version 5.1.0 the renames are correctly handled, the settings
|
||||
// used are the recommended options DefaultDiffTreeOptions.
|
||||
func (c *Commit) PatchContext(ctx context.Context, to *Commit) (*Patch, error) {
|
||||
fromTree, err := c.Tree()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
toTree, err := to.Tree()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
var toTree *Tree
|
||||
if to != nil {
|
||||
toTree, err = to.Tree()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return fromTree.PatchContext(ctx, toTree)
|
||||
}
|
||||
|
||||
// Patch returns the Patch between the actual commit and the provided one.
|
||||
//
|
||||
// NOTE: Since version 5.1.0 the renames are correctly handled, the settings
|
||||
// used are the recommended options DefaultDiffTreeOptions.
|
||||
func (c *Commit) Patch(to *Commit) (*Patch, error) {
|
||||
return c.PatchContext(context.Background(), to)
|
||||
}
|
||||
|
|
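Commit.PatchContext now tolerates a nil `to` commit and, per the note in the change above, produces rename-aware patches using the recommended DefaultDiffTreeOptions. A small sketch with a deadline follows; the package and function names are illustrative, and the two *object.Commit values are assumed to be obtained elsewhere (for example as in the encoder example earlier).

package gitutil

import (
	"context"
	"fmt"
	"time"

	"github.com/go-git/go-git/v5/plumbing/object"
)

// patchWithTimeout diffs two commits but gives up after five seconds; the
// context must be non-nil, and its expiry is surfaced as an error.
func patchWithTimeout(parent, commit *object.Commit) (*object.Patch, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	patch, err := parent.PatchContext(ctx, commit)
	if err != nil {
		return nil, err
	}

	fmt.Println(len(patch.FilePatches()), "file(s) changed")
	return patch, nil
}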
vendor/github.com/go-git/go-git/v5/plumbing/object/difftree.go (67 changed lines, generated, vendored)
|
@ -10,14 +10,62 @@ import (
|
|||
|
||||
// DiffTree compares the content and mode of the blobs found via two
|
||||
// tree objects.
|
||||
// DiffTree does not perform rename detection, use DiffTreeWithOptions
|
||||
// instead to detect renames.
|
||||
func DiffTree(a, b *Tree) (Changes, error) {
|
||||
return DiffTreeContext(context.Background(), a, b)
|
||||
}
|
||||
|
||||
// DiffTree compares the content and mode of the blobs found via two
|
||||
// DiffTreeContext compares the content and mode of the blobs found via two
|
||||
// tree objects. Provided context must be non-nil.
|
||||
// An error will be return if context expires
|
||||
// An error will be returned if context expires.
|
||||
func DiffTreeContext(ctx context.Context, a, b *Tree) (Changes, error) {
|
||||
return DiffTreeWithOptions(ctx, a, b, nil)
|
||||
}
|
||||
|
||||
// DiffTreeOptions are the configurable options when performing a diff tree.
|
||||
type DiffTreeOptions struct {
|
||||
// DetectRenames is whether the diff tree will use rename detection.
|
||||
DetectRenames bool
|
||||
// RenameScore is the threshold of similarity between files needed to consider
// that a delete and insert pair is a rename. The number must be
// between 0 and 100.
|
||||
RenameScore uint
|
||||
// RenameLimit is the maximum number of files that can be compared when
// detecting renames. The number of comparisons that have to be performed
// is equal to the number of deleted files * the number of added files.
// That means that if 100 files were deleted and 50 files were added, 5000
// file comparisons may be needed. So, if the rename limit is 50, the number
// of both deleted and added files needs to be equal to or less than 50.
// A value of 0 means no limit.
|
||||
RenameLimit uint
|
||||
// OnlyExactRenames performs only detection of exact renames and will not perform
|
||||
// any detection of renames based on file similarity.
|
||||
OnlyExactRenames bool
|
||||
}
|
||||
|
||||
// DefaultDiffTreeOptions are the default and recommended options for the
|
||||
// diff tree.
|
||||
var DefaultDiffTreeOptions = &DiffTreeOptions{
|
||||
DetectRenames: true,
|
||||
RenameScore: 60,
|
||||
RenameLimit: 0,
|
||||
OnlyExactRenames: false,
|
||||
}
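A short illustrative sketch (not part of the diff) of a custom `DiffTreeOptions` value; the variable name and the specific numbers are made up, not library recommendations:

```go
package gitexample

import "github.com/go-git/go-git/v5/plumbing/object"

// exactOnlyOptions detects only exact, hash-identical renames and caps the
// number of compared files, keeping diffs cheap on large trees.
var exactOnlyOptions = &object.DiffTreeOptions{
	DetectRenames:    true,
	OnlyExactRenames: true,
	RenameLimit:      200,
}
```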
|
||||
|
||||
// DiffTreeWithOptions compares the content and mode of the blobs found
|
||||
// via two tree objects with the given options. The provided context
|
||||
// must be non-nil.
|
||||
// If no options are passed, no rename detection will be performed. The
|
||||
// recommended options are DefaultDiffTreeOptions.
|
||||
// An error will be returned if the context expires.
|
||||
// This function will be deprecated and removed in v6 so that the default
// behaviour of DiffTree will be to detect renames.
|
||||
func DiffTreeWithOptions(
|
||||
ctx context.Context,
|
||||
a, b *Tree,
|
||||
opts *DiffTreeOptions,
|
||||
) (Changes, error) {
|
||||
from := NewTreeRootNode(a)
|
||||
to := NewTreeRootNode(b)
|
||||
|
||||
|
@ -33,5 +81,18 @@ func DiffTreeContext(ctx context.Context, a, b *Tree) (Changes, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
return newChanges(merkletrieChanges)
|
||||
changes, err := newChanges(merkletrieChanges)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if opts == nil {
|
||||
opts = new(DiffTreeOptions)
|
||||
}
|
||||
|
||||
if opts.DetectRenames {
|
||||
return DetectRenames(changes, opts)
|
||||
}
|
||||
|
||||
return changes, nil
|
||||
}
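A minimal sketch (not part of the diff) of calling `DiffTreeWithOptions` with the recommended defaults and walking the resulting changes; the function name is hypothetical and the trees are assumed to come from elsewhere (e.g. `Commit.Tree()`):

```go
package gitexample

import (
	"context"
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/object"
)

// printRenameAwareDiff assumes from and to are two *object.Tree values.
func printRenameAwareDiff(from, to *object.Tree) error {
	changes, err := object.DiffTreeWithOptions(context.Background(), from, to, object.DefaultDiffTreeOptions)
	if err != nil {
		return err
	}
	for _, c := range changes {
		action, err := c.Action()
		if err != nil {
			return err
		}
		// A rename shows up as a single modification whose From and To names differ.
		fmt.Println(action, c.From.Name, "->", c.To.Name)
	}
	return nil
}
```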
|
||||
|
|
vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go (2 changes, generated, vendored)
|
@ -115,7 +115,7 @@ func fileContent(f *File) (content string, isBinary bool, err error) {
|
|||
return
|
||||
}
|
||||
|
||||
// textPatch is an implementation of fdiff.Patch interface
|
||||
// Patch is an implementation of fdiff.Patch interface
|
||||
type Patch struct {
|
||||
message string
|
||||
filePatches []fdiff.FilePatch
|
||||
|
|
vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go (new file, 813 lines, generated, vendored)
|
@ -0,0 +1,813 @@
|
|||
package object
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/go-git/go-git/v5/plumbing"
|
||||
"github.com/go-git/go-git/v5/plumbing/filemode"
|
||||
"github.com/go-git/go-git/v5/utils/ioutil"
|
||||
"github.com/go-git/go-git/v5/utils/merkletrie"
|
||||
)
|
||||
|
||||
// DetectRenames detects the renames in the given changes on two trees with
|
||||
// the given options. It will return the given changes grouping additions and
|
||||
// deletions into modifications when possible.
|
||||
// If options is nil, the default diff tree options will be used.
|
||||
func DetectRenames(
|
||||
changes Changes,
|
||||
opts *DiffTreeOptions,
|
||||
) (Changes, error) {
|
||||
if opts == nil {
|
||||
opts = DefaultDiffTreeOptions
|
||||
}
|
||||
|
||||
detector := &renameDetector{
|
||||
renameScore: int(opts.RenameScore),
|
||||
renameLimit: int(opts.RenameLimit),
|
||||
onlyExact: opts.OnlyExactRenames,
|
||||
}
|
||||
|
||||
for _, c := range changes {
|
||||
action, err := c.Action()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch action {
|
||||
case merkletrie.Insert:
|
||||
detector.added = append(detector.added, c)
|
||||
case merkletrie.Delete:
|
||||
detector.deleted = append(detector.deleted, c)
|
||||
default:
|
||||
detector.modified = append(detector.modified, c)
|
||||
}
|
||||
}
|
||||
|
||||
return detector.detect()
|
||||
}
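A minimal sketch (not part of the diff) of using the new exported `DetectRenames` entry point on an existing change set; `groupRenames` and the package name are hypothetical:

```go
package gitexample

import "github.com/go-git/go-git/v5/plumbing/object"

// groupRenames regroups matching delete/insert pairs from a plain DiffTree
// result into renames; nil options fall back to DefaultDiffTreeOptions.
func groupRenames(changes object.Changes) (object.Changes, error) {
	return object.DetectRenames(changes, nil)
}
```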
|
||||
|
||||
// renameDetector will detect and resolve renames in a set of changes.
|
||||
// see: https://github.com/eclipse/jgit/blob/master/org.eclipse.jgit/src/org/eclipse/jgit/diff/RenameDetector.java
|
||||
type renameDetector struct {
|
||||
added []*Change
|
||||
deleted []*Change
|
||||
modified []*Change
|
||||
|
||||
renameScore int
|
||||
renameLimit int
|
||||
onlyExact bool
|
||||
}
|
||||
|
||||
// detectExactRenames matches files that were deleted with files that
// were added where the hash is the same on both. If there are multiple targets
// the one with the most similar path will be chosen as the rename and the
// rest as either deletions or additions.
|
||||
func (d *renameDetector) detectExactRenames() {
|
||||
added := groupChangesByHash(d.added)
|
||||
deletes := groupChangesByHash(d.deleted)
|
||||
var uniqueAdds []*Change
|
||||
var nonUniqueAdds [][]*Change
|
||||
var addedLeft []*Change
|
||||
|
||||
for _, cs := range added {
|
||||
if len(cs) == 1 {
|
||||
uniqueAdds = append(uniqueAdds, cs[0])
|
||||
} else {
|
||||
nonUniqueAdds = append(nonUniqueAdds, cs)
|
||||
}
|
||||
}
|
||||
|
||||
for _, c := range uniqueAdds {
|
||||
hash := changeHash(c)
|
||||
deleted := deletes[hash]
|
||||
|
||||
if len(deleted) == 1 {
|
||||
if sameMode(c, deleted[0]) {
|
||||
d.modified = append(d.modified, &Change{From: deleted[0].From, To: c.To})
|
||||
delete(deletes, hash)
|
||||
} else {
|
||||
addedLeft = append(addedLeft, c)
|
||||
}
|
||||
} else if len(deleted) > 1 {
|
||||
bestMatch := bestNameMatch(c, deleted)
|
||||
if bestMatch != nil && sameMode(c, bestMatch) {
|
||||
d.modified = append(d.modified, &Change{From: bestMatch.From, To: c.To})
|
||||
delete(deletes, hash)
|
||||
|
||||
var newDeletes = make([]*Change, 0, len(deleted)-1)
|
||||
for _, d := range deleted {
|
||||
if d != bestMatch {
|
||||
newDeletes = append(newDeletes, d)
|
||||
}
|
||||
}
|
||||
deletes[hash] = newDeletes
|
||||
}
|
||||
} else {
|
||||
addedLeft = append(addedLeft, c)
|
||||
}
|
||||
}
|
||||
|
||||
for _, added := range nonUniqueAdds {
|
||||
hash := changeHash(added[0])
|
||||
deleted := deletes[hash]
|
||||
|
||||
if len(deleted) == 1 {
|
||||
deleted := deleted[0]
|
||||
bestMatch := bestNameMatch(deleted, added)
|
||||
if bestMatch != nil && sameMode(deleted, bestMatch) {
|
||||
d.modified = append(d.modified, &Change{From: deleted.From, To: bestMatch.To})
|
||||
delete(deletes, hash)
|
||||
|
||||
for _, c := range added {
|
||||
if c != bestMatch {
|
||||
addedLeft = append(addedLeft, c)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
addedLeft = append(addedLeft, added...)
|
||||
}
|
||||
} else if len(deleted) > 1 {
|
||||
maxSize := len(deleted) * len(added)
|
||||
if d.renameLimit > 0 && d.renameLimit < maxSize {
|
||||
maxSize = d.renameLimit
|
||||
}
|
||||
|
||||
matrix := make(similarityMatrix, 0, maxSize)
|
||||
|
||||
for delIdx, del := range deleted {
|
||||
deletedName := changeName(del)
|
||||
|
||||
for addIdx, add := range added {
|
||||
addedName := changeName(add)
|
||||
|
||||
score := nameSimilarityScore(addedName, deletedName)
|
||||
matrix = append(matrix, similarityPair{added: addIdx, deleted: delIdx, score: score})
|
||||
|
||||
if len(matrix) >= maxSize {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(matrix) >= maxSize {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
sort.Stable(matrix)
|
||||
|
||||
usedAdds := make(map[*Change]struct{})
|
||||
usedDeletes := make(map[*Change]struct{})
|
||||
for i := len(matrix) - 1; i >= 0; i-- {
|
||||
del := deleted[matrix[i].deleted]
|
||||
add := added[matrix[i].added]
|
||||
|
||||
if add == nil || del == nil {
|
||||
// it was already matched
|
||||
continue
|
||||
}
|
||||
|
||||
usedAdds[add] = struct{}{}
|
||||
usedDeletes[del] = struct{}{}
|
||||
d.modified = append(d.modified, &Change{From: del.From, To: add.To})
|
||||
added[matrix[i].added] = nil
|
||||
deleted[matrix[i].deleted] = nil
|
||||
}
|
||||
|
||||
for _, c := range added {
|
||||
if _, ok := usedAdds[c]; !ok && c != nil {
|
||||
addedLeft = append(addedLeft, c)
|
||||
}
|
||||
}
|
||||
|
||||
var newDeletes = make([]*Change, 0, len(deleted)-len(usedDeletes))
|
||||
for _, c := range deleted {
|
||||
if _, ok := usedDeletes[c]; !ok && c != nil {
|
||||
newDeletes = append(newDeletes, c)
|
||||
}
|
||||
}
|
||||
deletes[hash] = newDeletes
|
||||
} else {
|
||||
addedLeft = append(addedLeft, added...)
|
||||
}
|
||||
}
|
||||
|
||||
d.added = addedLeft
|
||||
d.deleted = nil
|
||||
for _, dels := range deletes {
|
||||
d.deleted = append(d.deleted, dels...)
|
||||
}
|
||||
}
|
||||
|
||||
// detectContentRenames detects renames based on the similarity of the content
|
||||
// in the files by building a matrix of pairs between sources and destinations
|
||||
// and matching by the highest score.
|
||||
// see: https://github.com/eclipse/jgit/blob/master/org.eclipse.jgit/src/org/eclipse/jgit/diff/SimilarityRenameDetector.java
|
||||
func (d *renameDetector) detectContentRenames() error {
|
||||
cnt := max(len(d.added), len(d.deleted))
|
||||
if d.renameLimit > 0 && cnt > d.renameLimit {
|
||||
return nil
|
||||
}
|
||||
|
||||
srcs, dsts := d.deleted, d.added
|
||||
matrix, err := buildSimilarityMatrix(srcs, dsts, d.renameScore)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
renames := make([]*Change, 0, min(len(matrix), len(dsts)))
|
||||
|
||||
// Match rename pairs on a first come, first serve basis until
|
||||
// we have looked at everything that is above the minimum score.
|
||||
for i := len(matrix) - 1; i >= 0; i-- {
|
||||
pair := matrix[i]
|
||||
src := srcs[pair.deleted]
|
||||
dst := dsts[pair.added]
|
||||
|
||||
if dst == nil || src == nil {
|
||||
// It was already matched before
|
||||
continue
|
||||
}
|
||||
|
||||
renames = append(renames, &Change{From: src.From, To: dst.To})
|
||||
|
||||
// Claim destination and source as matched
|
||||
dsts[pair.added] = nil
|
||||
srcs[pair.deleted] = nil
|
||||
}
|
||||
|
||||
d.modified = append(d.modified, renames...)
|
||||
d.added = compactChanges(dsts)
|
||||
d.deleted = compactChanges(srcs)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *renameDetector) detect() (Changes, error) {
|
||||
if len(d.added) > 0 && len(d.deleted) > 0 {
|
||||
d.detectExactRenames()
|
||||
|
||||
if !d.onlyExact {
|
||||
if err := d.detectContentRenames(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
result := make(Changes, 0, len(d.added)+len(d.deleted)+len(d.modified))
|
||||
result = append(result, d.added...)
|
||||
result = append(result, d.deleted...)
|
||||
result = append(result, d.modified...)
|
||||
|
||||
sort.Stable(result)
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func bestNameMatch(change *Change, changes []*Change) *Change {
|
||||
var best *Change
|
||||
var bestScore int
|
||||
|
||||
cname := changeName(change)
|
||||
|
||||
for _, c := range changes {
|
||||
score := nameSimilarityScore(cname, changeName(c))
|
||||
if score > bestScore {
|
||||
bestScore = score
|
||||
best = c
|
||||
}
|
||||
}
|
||||
|
||||
return best
|
||||
}
|
||||
|
||||
func nameSimilarityScore(a, b string) int {
|
||||
aDirLen := strings.LastIndexByte(a, '/') + 1
|
||||
bDirLen := strings.LastIndexByte(b, '/') + 1
|
||||
|
||||
dirMin := min(aDirLen, bDirLen)
|
||||
dirMax := max(aDirLen, bDirLen)
|
||||
|
||||
var dirScoreLtr, dirScoreRtl int
|
||||
if dirMax == 0 {
|
||||
dirScoreLtr = 100
|
||||
dirScoreRtl = 100
|
||||
} else {
|
||||
var dirSim int
|
||||
|
||||
for ; dirSim < dirMin; dirSim++ {
|
||||
if a[dirSim] != b[dirSim] {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
dirScoreLtr = dirSim * 100 / dirMax
|
||||
|
||||
if dirScoreLtr == 100 {
|
||||
dirScoreRtl = 100
|
||||
} else {
|
||||
for dirSim = 0; dirSim < dirMin; dirSim++ {
|
||||
if a[aDirLen-1-dirSim] != b[bDirLen-1-dirSim] {
|
||||
break
|
||||
}
|
||||
}
|
||||
dirScoreRtl = dirSim * 100 / dirMax
|
||||
}
|
||||
}
|
||||
|
||||
fileMin := min(len(a)-aDirLen, len(b)-bDirLen)
|
||||
fileMax := max(len(a)-aDirLen, len(b)-bDirLen)
|
||||
|
||||
fileSim := 0
|
||||
for ; fileSim < fileMin; fileSim++ {
|
||||
if a[len(a)-1-fileSim] != b[len(b)-1-fileSim] {
|
||||
break
|
||||
}
|
||||
}
|
||||
fileScore := fileSim * 100 / fileMax
|
||||
|
||||
return (((dirScoreLtr + dirScoreRtl) * 25) + (fileScore * 50)) / 100
|
||||
}
|
||||
|
||||
func changeName(c *Change) string {
|
||||
if c.To != empty {
|
||||
return c.To.Name
|
||||
}
|
||||
return c.From.Name
|
||||
}
|
||||
|
||||
func changeHash(c *Change) plumbing.Hash {
|
||||
if c.To != empty {
|
||||
return c.To.TreeEntry.Hash
|
||||
}
|
||||
|
||||
return c.From.TreeEntry.Hash
|
||||
}
|
||||
|
||||
func changeMode(c *Change) filemode.FileMode {
|
||||
if c.To != empty {
|
||||
return c.To.TreeEntry.Mode
|
||||
}
|
||||
|
||||
return c.From.TreeEntry.Mode
|
||||
}
|
||||
|
||||
func sameMode(a, b *Change) bool {
|
||||
return changeMode(a) == changeMode(b)
|
||||
}
|
||||
|
||||
func groupChangesByHash(changes []*Change) map[plumbing.Hash][]*Change {
|
||||
var result = make(map[plumbing.Hash][]*Change)
|
||||
for _, c := range changes {
|
||||
hash := changeHash(c)
|
||||
result[hash] = append(result[hash], c)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
type similarityMatrix []similarityPair
|
||||
|
||||
func (m similarityMatrix) Len() int { return len(m) }
|
||||
func (m similarityMatrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
|
||||
func (m similarityMatrix) Less(i, j int) bool {
|
||||
if m[i].score == m[j].score {
|
||||
if m[i].added == m[j].added {
|
||||
return m[i].deleted < m[j].deleted
|
||||
}
|
||||
return m[i].added < m[j].added
|
||||
}
|
||||
return m[i].score < m[j].score
|
||||
}
|
||||
|
||||
type similarityPair struct {
|
||||
// index of the added file
|
||||
added int
|
||||
// index of the deleted file
|
||||
deleted int
|
||||
// similarity score
|
||||
score int
|
||||
}
|
||||
|
||||
func max(a, b int) int {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func buildSimilarityMatrix(srcs, dsts []*Change, renameScore int) (similarityMatrix, error) {
|
||||
// Allocate for the worst-case scenario where every pair has a score
|
||||
// that we need to consider. We might not need that many.
|
||||
matrix := make(similarityMatrix, 0, len(srcs)*len(dsts))
|
||||
srcSizes := make([]int64, len(srcs))
|
||||
dstSizes := make([]int64, len(dsts))
|
||||
dstTooLarge := make(map[int]bool)
|
||||
|
||||
// Consider each pair of files, if the score is above the minimum
|
||||
// threshold we need to record that scoring in the matrix so we can
|
||||
// later find the best matches.
|
||||
outerLoop:
|
||||
for srcIdx, src := range srcs {
|
||||
if changeMode(src) != filemode.Regular {
|
||||
continue
|
||||
}
|
||||
|
||||
// Declare the from file and the similarity index here to be able to
|
||||
// reuse it inside the inner loop. The reason to not initialize them
|
||||
// here is so we can skip the initialization in case they happen to
|
||||
// not be needed later. They will be initialized inside the inner
|
||||
// loop if and only if they're needed and reused in subsequent passes.
|
||||
var from *File
|
||||
var s *similarityIndex
|
||||
var err error
|
||||
for dstIdx, dst := range dsts {
|
||||
if changeMode(dst) != filemode.Regular {
|
||||
continue
|
||||
}
|
||||
|
||||
if dstTooLarge[dstIdx] {
|
||||
continue
|
||||
}
|
||||
|
||||
var to *File
|
||||
srcSize := srcSizes[srcIdx]
|
||||
if srcSize == 0 {
|
||||
from, _, err = src.Files()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
srcSize = from.Size + 1
|
||||
srcSizes[srcIdx] = srcSize
|
||||
}
|
||||
|
||||
dstSize := dstSizes[dstIdx]
|
||||
if dstSize == 0 {
|
||||
_, to, err = dst.Files()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dstSize = to.Size + 1
|
||||
dstSizes[dstIdx] = dstSize
|
||||
}
|
||||
|
||||
min, max := srcSize, dstSize
|
||||
if dstSize < srcSize {
|
||||
min = dstSize
|
||||
max = srcSize
|
||||
}
|
||||
|
||||
if int(min*100/max) < renameScore {
|
||||
// File sizes are too different to be a match
|
||||
continue
|
||||
}
|
||||
|
||||
if s == nil {
|
||||
s, err = fileSimilarityIndex(from)
|
||||
if err != nil {
|
||||
if err == errIndexFull {
|
||||
continue outerLoop
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if to == nil {
|
||||
_, to, err = dst.Files()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
di, err := fileSimilarityIndex(to)
|
||||
if err != nil {
|
||||
if err == errIndexFull {
|
||||
dstTooLarge[dstIdx] = true
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
contentScore := s.score(di, 10000)
|
||||
// The name score returns a value between 0 and 100, so we need to
|
||||
// convert it to the same range as the content score.
|
||||
nameScore := nameSimilarityScore(src.From.Name, dst.To.Name) * 100
|
||||
score := (contentScore*99 + nameScore*1) / 10000
|
||||
|
||||
if score < renameScore {
|
||||
continue
|
||||
}
|
||||
|
||||
matrix = append(matrix, similarityPair{added: dstIdx, deleted: srcIdx, score: score})
|
||||
}
|
||||
}
|
||||
|
||||
sort.Stable(matrix)
|
||||
|
||||
return matrix, nil
|
||||
}
|
||||
|
||||
func compactChanges(changes []*Change) []*Change {
|
||||
var result []*Change
|
||||
for _, c := range changes {
|
||||
if c != nil {
|
||||
result = append(result, c)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
const (
|
||||
keyShift = 32
|
||||
maxCountValue = (1 << keyShift) - 1
|
||||
)
|
||||
|
||||
var errIndexFull = errors.New("index is full")
|
||||
|
||||
// similarityIndex is an index structure of lines/blocks in one file.
|
||||
// This structure can be used to compute an approximation of the similarity
|
||||
// between two files.
|
||||
// To save space in memory, this index uses a space efficient encoding which
// will not exceed 1MiB per instance. The index starts out at a smaller size
// (closer to 2KiB), but may grow as more distinct blocks within the scanned
// file are discovered.
|
||||
// see: https://github.com/eclipse/jgit/blob/master/org.eclipse.jgit/src/org/eclipse/jgit/diff/SimilarityIndex.java
|
||||
type similarityIndex struct {
|
||||
hashed uint64
|
||||
// number of non-zero entries in hashes
|
||||
numHashes int
|
||||
growAt int
|
||||
hashes []keyCountPair
|
||||
hashBits int
|
||||
}
|
||||
|
||||
func fileSimilarityIndex(f *File) (*similarityIndex, error) {
|
||||
idx := newSimilarityIndex()
|
||||
if err := idx.hash(f); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sort.Stable(keyCountPairs(idx.hashes))
|
||||
|
||||
return idx, nil
|
||||
}
|
||||
|
||||
func newSimilarityIndex() *similarityIndex {
|
||||
return &similarityIndex{
|
||||
hashBits: 8,
|
||||
hashes: make([]keyCountPair, 1<<8),
|
||||
growAt: shouldGrowAt(8),
|
||||
}
|
||||
}
|
||||
|
||||
func (i *similarityIndex) hash(f *File) error {
|
||||
isBin, err := f.IsBinary()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r, err := f.Reader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer ioutil.CheckClose(r, &err)
|
||||
|
||||
return i.hashContent(r, f.Size, isBin)
|
||||
}
|
||||
|
||||
func (i *similarityIndex) hashContent(r io.Reader, size int64, isBin bool) error {
|
||||
var buf = make([]byte, 4096)
|
||||
var ptr, cnt int
|
||||
remaining := size
|
||||
|
||||
for 0 < remaining {
|
||||
hash := 5381
|
||||
var blockHashedCnt uint64
|
||||
|
||||
// Hash one line or block, whatever happens first
|
||||
n := int64(0)
|
||||
for {
|
||||
if ptr == cnt {
|
||||
ptr = 0
|
||||
var err error
|
||||
cnt, err = io.ReadFull(r, buf)
|
||||
if err != nil && err != io.ErrUnexpectedEOF {
|
||||
return err
|
||||
}
|
||||
|
||||
if cnt == 0 {
|
||||
return io.EOF
|
||||
}
|
||||
}
|
||||
n++
|
||||
c := buf[ptr] & 0xff
|
||||
ptr++
|
||||
|
||||
// Ignore CR in CRLF sequence if it's text
|
||||
if !isBin && c == '\r' && ptr < cnt && buf[ptr] == '\n' {
|
||||
continue
|
||||
}
|
||||
blockHashedCnt++
|
||||
|
||||
if c == '\n' {
|
||||
break
|
||||
}
|
||||
|
||||
hash = (hash << 5) + hash + int(c)
|
||||
|
||||
if n >= 64 || n >= remaining {
|
||||
break
|
||||
}
|
||||
}
|
||||
i.hashed += blockHashedCnt
|
||||
if err := i.add(hash, blockHashedCnt); err != nil {
|
||||
return err
|
||||
}
|
||||
remaining -= n
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// score computes the similarity score between this index and another one.
|
||||
// A region of a file is defined as a line in a text file or a fixed-size
|
||||
// block in a binary file. To prepare an index, each region in the file is
|
||||
// hashed; the values and counts of hashes are retained in a sorted table.
|
||||
// Define the similarity fraction F as the count of matching regions between
// the two files divided by the maximum count of regions in either file.
|
||||
// The similarity score is F multiplied by the maxScore constant, yielding a
|
||||
// range [0, maxScore]. It is defined as maxScore for the degenerate case of
|
||||
// two empty files.
|
||||
// The similarity score is symmetrical; i.e. a.score(b) == b.score(a).
|
||||
func (i *similarityIndex) score(other *similarityIndex, maxScore int) int {
|
||||
var maxHashed = i.hashed
|
||||
if maxHashed < other.hashed {
|
||||
maxHashed = other.hashed
|
||||
}
|
||||
if maxHashed == 0 {
|
||||
return maxScore
|
||||
}
|
||||
|
||||
return int(i.common(other) * uint64(maxScore) / maxHashed)
|
||||
}
|
||||
|
||||
func (i *similarityIndex) common(dst *similarityIndex) uint64 {
|
||||
srcIdx, dstIdx := 0, 0
|
||||
if i.numHashes == 0 || dst.numHashes == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
var common uint64
|
||||
srcKey, dstKey := i.hashes[srcIdx].key(), dst.hashes[dstIdx].key()
|
||||
|
||||
for {
|
||||
if srcKey == dstKey {
|
||||
srcCnt, dstCnt := i.hashes[srcIdx].count(), dst.hashes[dstIdx].count()
|
||||
if srcCnt < dstCnt {
|
||||
common += srcCnt
|
||||
} else {
|
||||
common += dstCnt
|
||||
}
|
||||
|
||||
srcIdx++
|
||||
if srcIdx == len(i.hashes) {
|
||||
break
|
||||
}
|
||||
srcKey = i.hashes[srcIdx].key()
|
||||
|
||||
dstIdx++
|
||||
if dstIdx == len(dst.hashes) {
|
||||
break
|
||||
}
|
||||
dstKey = dst.hashes[dstIdx].key()
|
||||
} else if srcKey < dstKey {
|
||||
// Region of src that is not in dst
|
||||
srcIdx++
|
||||
if srcIdx == len(i.hashes) {
|
||||
break
|
||||
}
|
||||
srcKey = i.hashes[srcIdx].key()
|
||||
} else {
|
||||
// Region of dst that is not in src
|
||||
dstIdx++
|
||||
if dstIdx == len(dst.hashes) {
|
||||
break
|
||||
}
|
||||
dstKey = dst.hashes[dstIdx].key()
|
||||
}
|
||||
}
|
||||
|
||||
return common
|
||||
}
|
||||
|
||||
func (i *similarityIndex) add(key int, cnt uint64) error {
|
||||
key = int(uint32(key)*0x9e370001 >> 1)
|
||||
|
||||
j := i.slot(key)
|
||||
for {
|
||||
v := i.hashes[j]
|
||||
if v == 0 {
|
||||
// It's an empty slot, so we can store it here.
|
||||
if i.growAt <= i.numHashes {
|
||||
if err := i.grow(); err != nil {
|
||||
return err
|
||||
}
|
||||
j = i.slot(key)
|
||||
continue
|
||||
}
|
||||
|
||||
var err error
|
||||
i.hashes[j], err = newKeyCountPair(key, cnt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
i.numHashes++
|
||||
return nil
|
||||
} else if v.key() == key {
|
||||
// It's the same key, so increment the counter.
|
||||
var err error
|
||||
i.hashes[j], err = newKeyCountPair(key, v.count()+cnt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
} else if j+1 >= len(i.hashes) {
|
||||
j = 0
|
||||
} else {
|
||||
j++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type keyCountPair uint64
|
||||
|
||||
func newKeyCountPair(key int, cnt uint64) (keyCountPair, error) {
|
||||
if cnt > maxCountValue {
|
||||
return 0, errIndexFull
|
||||
}
|
||||
|
||||
return keyCountPair((uint64(key) << keyShift) | cnt), nil
|
||||
}
|
||||
|
||||
func (p keyCountPair) key() int {
|
||||
return int(p >> keyShift)
|
||||
}
|
||||
|
||||
func (p keyCountPair) count() uint64 {
|
||||
return uint64(p) & maxCountValue
|
||||
}
|
||||
|
||||
func (i *similarityIndex) slot(key int) int {
|
||||
// We use 31 - hashBits because the upper bit was already forced
|
||||
// to be 0 and we want the remaining high bits to be used as the
|
||||
// table slot.
|
||||
return int(uint32(key) >> uint(31 - i.hashBits))
|
||||
}
|
||||
|
||||
func shouldGrowAt(hashBits int) int {
|
||||
return (1 << uint(hashBits)) * (hashBits - 3) / hashBits
|
||||
}
|
||||
|
||||
func (i *similarityIndex) grow() error {
|
||||
if i.hashBits == 30 {
|
||||
return errIndexFull
|
||||
}
|
||||
|
||||
old := i.hashes
|
||||
|
||||
i.hashBits++
|
||||
i.growAt = shouldGrowAt(i.hashBits)
|
||||
|
||||
// TODO(erizocosmico): find a way to check if it will OOM and return
|
||||
// errIndexFull instead.
|
||||
i.hashes = make([]keyCountPair, 1<<uint(i.hashBits))
|
||||
|
||||
for _, v := range old {
|
||||
if v != 0 {
|
||||
j := i.slot(v.key())
|
||||
for i.hashes[j] != 0 {
|
||||
j++
|
||||
if j >= len(i.hashes) {
|
||||
j = 0
|
||||
}
|
||||
}
|
||||
i.hashes[j] = v
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type keyCountPairs []keyCountPair
|
||||
|
||||
func (p keyCountPairs) Len() int { return len(p) }
|
||||
func (p keyCountPairs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
func (p keyCountPairs) Less(i, j int) bool { return p[i] < p[j] }
|
vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go (35 changes, generated, vendored)
|
@ -304,29 +304,34 @@ func (t *Tree) buildMap() {
|
|||
}
|
||||
|
||||
// Diff returns a list of changes between this tree and the provided one
|
||||
func (from *Tree) Diff(to *Tree) (Changes, error) {
|
||||
return DiffTree(from, to)
|
||||
func (t *Tree) Diff(to *Tree) (Changes, error) {
|
||||
return t.DiffContext(context.Background(), to)
|
||||
}
|
||||
|
||||
// Diff returns a list of changes between this tree and the provided one
|
||||
// Error will be returned if context expires
|
||||
// Provided context must be non nil
|
||||
func (from *Tree) DiffContext(ctx context.Context, to *Tree) (Changes, error) {
|
||||
return DiffTreeContext(ctx, from, to)
|
||||
// DiffContext returns a list of changes between this tree and the provided one
|
||||
// Error will be returned if context expires. Provided context must be non nil.
|
||||
//
|
||||
// NOTE: Since version 5.1.0 the renames are correctly handled, the settings
|
||||
// used are the recommended options DefaultDiffTreeOptions.
|
||||
func (t *Tree) DiffContext(ctx context.Context, to *Tree) (Changes, error) {
|
||||
return DiffTreeWithOptions(ctx, t, to, DefaultDiffTreeOptions)
|
||||
}
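A minimal sketch (not part of the diff) of the rename-aware tree diff with a context deadline; the function name and the 30-second timeout are illustrative assumptions:

```go
package gitexample

import (
	"context"
	"time"

	"github.com/go-git/go-git/v5/plumbing/object"
)

// diffTreesWithTimeout assumes older and newer are *object.Tree values.
// Since v5.1.0 the returned changes already have renames grouped, because
// DiffContext delegates to DiffTreeWithOptions with DefaultDiffTreeOptions.
func diffTreesWithTimeout(older, newer *object.Tree) (object.Changes, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	return older.DiffContext(ctx, newer)
}
```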
|
||||
|
||||
// Patch returns a slice of Patch objects with all the changes between trees
|
||||
// in chunks. This representation can be used to create several diff outputs.
|
||||
func (from *Tree) Patch(to *Tree) (*Patch, error) {
|
||||
return from.PatchContext(context.Background(), to)
|
||||
func (t *Tree) Patch(to *Tree) (*Patch, error) {
|
||||
return t.PatchContext(context.Background(), to)
|
||||
}
|
||||
|
||||
// Patch returns a slice of Patch objects with all the changes between trees
|
||||
// in chunks. This representation can be used to create several diff outputs.
|
||||
// If context expires, an error will be returned
|
||||
// Provided context must be non-nil
|
||||
func (from *Tree) PatchContext(ctx context.Context, to *Tree) (*Patch, error) {
|
||||
changes, err := DiffTreeContext(ctx, from, to)
|
||||
// PatchContext returns a slice of Patch objects with all the changes between
|
||||
// trees in chunks. This representation can be used to create several diff
|
||||
// outputs. If context expires, an error will be returned. Provided context must
|
||||
// be non-nil.
|
||||
//
|
||||
// NOTE: Since version 5.1.0 the renames are correctly handled, the settings
|
||||
// used are the recommended options DefaultDiffTreeOptions.
|
||||
func (t *Tree) PatchContext(ctx context.Context, to *Tree) (*Patch, error) {
|
||||
changes, err := t.DiffContext(ctx, to)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go (8 changes, generated, vendored)
|
@ -201,3 +201,11 @@ func (a *AdvRefs) addSymbolicRefs(s storer.ReferenceStorer) error {
|
|||
func (a *AdvRefs) supportSymrefs() bool {
|
||||
return a.Capabilities.Supports(capability.SymRef)
|
||||
}
|
||||
|
||||
// IsEmpty returns true if the AdvRefs doesn't contain any reference.
|
||||
func (a *AdvRefs) IsEmpty() bool {
|
||||
return a.Head == nil &&
|
||||
len(a.References) == 0 &&
|
||||
len(a.Peeled) == 0 &&
|
||||
len(a.Shallows) == 0
|
||||
}
|
||||
|
|
vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go (7 changes, generated, vendored)
|
@ -175,6 +175,13 @@ func (s *session) AdvertisedReferences() (*packp.AdvRefs, error) {
|
|||
}
|
||||
}
|
||||
|
||||
// Some servers, like jGit, announce capabilities instead of returning a
// packp message with a flush. This verifies that we received an empty
// adv-refs, even if it contains capabilities.
|
||||
if !s.isReceivePack && ar.IsEmpty() {
|
||||
return nil, transport.ErrEmptyRemoteRepository
|
||||
}
|
||||
|
||||
transport.FilterUnsupportedCapabilities(ar.Capabilities)
|
||||
s.advRefs = ar
|
||||
return ar, nil
|
||||
|
|
vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go (12 changes, generated, vendored)
|
@ -243,11 +243,13 @@ func (s *rpSession) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateR
|
|||
|
||||
//TODO: Implement 'atomic' update of references.
|
||||
|
||||
r := ioutil.NewContextReadCloser(ctx, req.Packfile)
|
||||
if err := s.writePackfile(r); err != nil {
|
||||
s.unpackErr = err
|
||||
s.firstErr = err
|
||||
return s.reportStatus(), err
|
||||
if req.Packfile != nil {
|
||||
r := ioutil.NewContextReadCloser(ctx, req.Packfile)
|
||||
if err := s.writePackfile(r); err != nil {
|
||||
s.unpackErr = err
|
||||
s.firstErr = err
|
||||
return s.reportStatus(), err
|
||||
}
|
||||
}
|
||||
|
||||
s.updateReferences(req)
|
||||
|
|
vendor/github.com/go-git/go-git/v5/remote.go (42 changes, generated, vendored)
|
@ -29,6 +29,7 @@ var (
|
|||
NoErrAlreadyUpToDate = errors.New("already up-to-date")
|
||||
ErrDeleteRefNotSupported = errors.New("server does not support delete-refs")
|
||||
ErrForceNeeded = errors.New("some refs were not updated")
|
||||
ErrExactSHA1NotSupported = errors.New("server does not support exact SHA1 refspec")
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -122,6 +123,15 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
|
|||
return ErrDeleteRefNotSupported
|
||||
}
|
||||
|
||||
if o.Force {
|
||||
for i := 0; i < len(o.RefSpecs); i++ {
|
||||
rs := &o.RefSpecs[i]
|
||||
if !rs.IsForceUpdate() {
|
||||
o.RefSpecs[i] = config.RefSpec("+" + rs.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
localRefs, err := r.references()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -303,6 +313,10 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if err := r.isSupportedRefSpec(o.RefSpecs, ar); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
remoteRefs, err := ar.AllReferences()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -546,6 +560,7 @@ func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec,
|
|||
|
||||
func (r *Remote) references() ([]*plumbing.Reference, error) {
|
||||
var localRefs []*plumbing.Reference
|
||||
|
||||
iter, err := r.s.IterReferences()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -701,6 +716,11 @@ func doCalculateRefs(
|
|||
return err
|
||||
}
|
||||
|
||||
if s.IsExactSHA1() {
|
||||
ref := plumbing.NewHashReference(s.Dst(""), plumbing.NewHash(s.Src()))
|
||||
return refs.SetReference(ref)
|
||||
}
|
||||
|
||||
var matched bool
|
||||
err = iter.ForEach(func(ref *plumbing.Reference) error {
|
||||
if !s.Match(ref.Name()) {
|
||||
|
@ -850,6 +870,26 @@ func (r *Remote) newUploadPackRequest(o *FetchOptions,
|
|||
return req, nil
|
||||
}
|
||||
|
||||
func (r *Remote) isSupportedRefSpec(refs []config.RefSpec, ar *packp.AdvRefs) error {
|
||||
var containsIsExact bool
|
||||
for _, ref := range refs {
|
||||
if ref.IsExactSHA1() {
|
||||
containsIsExact = true
|
||||
}
|
||||
}
|
||||
|
||||
if !containsIsExact {
|
||||
return nil
|
||||
}
|
||||
|
||||
if ar.Capabilities.Supports(capability.AllowReachableSHA1InWant) ||
|
||||
ar.Capabilities.Supports(capability.AllowTipSHA1InWant) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return ErrExactSHA1NotSupported
|
||||
}
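A sketch (not part of the diff) of the new exact-SHA1 fetch path guarded by the check above. The refspec layout (full 40-character hash as the source side) and the destination ref name are assumptions for illustration; the server must advertise `allow-reachable-sha1-in-want` or `allow-tip-sha1-in-want`, otherwise the fetch fails with `ErrExactSHA1NotSupported`:

```go
package gitexample

import (
	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/config"
)

// fetchExactSHA1 assumes repo has a remote named "origin" and hash is a
// full 40-character commit hash reachable on that remote.
func fetchExactSHA1(repo *git.Repository, hash string) error {
	err := repo.Fetch(&git.FetchOptions{
		RemoteName: "origin",
		RefSpecs: []config.RefSpec{
			config.RefSpec(hash + ":refs/heads/pinned"),
		},
	})
	if err == git.NoErrAlreadyUpToDate {
		return nil
	}
	return err
}
```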
|
||||
|
||||
func buildSidebandIfSupported(l *capability.List, reader io.Reader, p sideband.Progress) io.Reader {
|
||||
var t sideband.Type
|
||||
|
||||
|
@ -883,7 +923,7 @@ func (r *Remote) updateLocalReferenceStorage(
|
|||
}
|
||||
|
||||
for _, ref := range fetchedRefs {
|
||||
if !spec.Match(ref.Name()) {
|
||||
if !spec.Match(ref.Name()) && !spec.IsExactSHA1() {
|
||||
continue
|
||||
}
|
||||
|
||||
|
|
vendor/github.com/go-git/go-git/v5/repository.go (69 changes, generated, vendored)
|
@ -13,7 +13,6 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/openpgp"
|
||||
"github.com/go-git/go-git/v5/config"
|
||||
"github.com/go-git/go-git/v5/internal/revision"
|
||||
"github.com/go-git/go-git/v5/plumbing"
|
||||
|
@ -24,6 +23,8 @@ import (
|
|||
"github.com/go-git/go-git/v5/storage"
|
||||
"github.com/go-git/go-git/v5/storage/filesystem"
|
||||
"github.com/go-git/go-git/v5/utils/ioutil"
|
||||
"github.com/imdario/mergo"
|
||||
"golang.org/x/crypto/openpgp"
|
||||
|
||||
"github.com/go-git/go-billy/v5"
|
||||
"github.com/go-git/go-billy/v5/osfs"
|
||||
|
@ -155,7 +156,7 @@ func setConfigWorktree(r *Repository, worktree, storage billy.Filesystem) error
|
|||
return nil
|
||||
}
|
||||
|
||||
cfg, err := r.Storer.Config()
|
||||
cfg, err := r.Config()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -434,14 +435,56 @@ func cleanUpDir(path string, all bool) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// Config return the repository config
|
||||
// Config returns the repository config. In a filesystem backed repository this
// means reading the `.git/config` file.
|
||||
func (r *Repository) Config() (*config.Config, error) {
|
||||
return r.Storer.Config()
|
||||
}
|
||||
|
||||
// SetConfig marshals and writes the repository config. In a filesystem backed
// repository this means writing the `.git/config` file. This function should be
// called with the result of `Repository.Config` and never with the output of
// `Repository.ConfigScoped`.
|
||||
func (r *Repository) SetConfig(cfg *config.Config) error {
|
||||
return r.Storer.SetConfig(cfg)
|
||||
}
|
||||
|
||||
// ConfigScoped returns the repository config, merged with the requested scope
// and lower. For example, if config.GlobalScope is given, the local and global
// config are returned merged into one config value.
|
||||
func (r *Repository) ConfigScoped(scope config.Scope) (*config.Config, error) {
|
||||
// TODO(mcuadros): v6, add this as ConfigOptions.Scoped
|
||||
|
||||
var err error
|
||||
system := config.NewConfig()
|
||||
if scope >= config.SystemScope {
|
||||
system, err = config.LoadConfig(config.SystemScope)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
global := config.NewConfig()
|
||||
if scope >= config.GlobalScope {
|
||||
global, err = config.LoadConfig(config.GlobalScope)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
local, err := r.Storer.Config()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_ = mergo.Merge(global, system)
|
||||
_ = mergo.Merge(local, global)
|
||||
return local, nil
|
||||
}
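A minimal sketch (not part of the diff) of the new scoped-config API; the helper name is hypothetical and it assumes an already opened repository:

```go
package gitexample

import (
	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/config"
)

// authorName reads the effective user.name by asking for the local config
// merged with the global and system scopes, which is what
// ConfigScoped(config.SystemScope) returns.
func authorName(repo *git.Repository) (string, error) {
	cfg, err := repo.ConfigScoped(config.SystemScope)
	if err != nil {
		return "", err
	}
	return cfg.User.Name, nil
}
```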
|
||||
|
||||
// Remote return a remote if exists
|
||||
func (r *Repository) Remote(name string) (*Remote, error) {
|
||||
cfg, err := r.Storer.Config()
|
||||
cfg, err := r.Config()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -456,7 +499,7 @@ func (r *Repository) Remote(name string) (*Remote, error) {
|
|||
|
||||
// Remotes returns a list with all the remotes
|
||||
func (r *Repository) Remotes() ([]*Remote, error) {
|
||||
cfg, err := r.Storer.Config()
|
||||
cfg, err := r.Config()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -480,7 +523,7 @@ func (r *Repository) CreateRemote(c *config.RemoteConfig) (*Remote, error) {
|
|||
|
||||
remote := NewRemote(r.Storer, c)
|
||||
|
||||
cfg, err := r.Storer.Config()
|
||||
cfg, err := r.Config()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -511,7 +554,7 @@ func (r *Repository) CreateRemoteAnonymous(c *config.RemoteConfig) (*Remote, err
|
|||
|
||||
// DeleteRemote delete a remote from the repository and delete the config
|
||||
func (r *Repository) DeleteRemote(name string) error {
|
||||
cfg, err := r.Storer.Config()
|
||||
cfg, err := r.Config()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -526,7 +569,7 @@ func (r *Repository) DeleteRemote(name string) error {
|
|||
|
||||
// Branch return a Branch if exists
|
||||
func (r *Repository) Branch(name string) (*config.Branch, error) {
|
||||
cfg, err := r.Storer.Config()
|
||||
cfg, err := r.Config()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -545,7 +588,7 @@ func (r *Repository) CreateBranch(c *config.Branch) error {
|
|||
return err
|
||||
}
|
||||
|
||||
cfg, err := r.Storer.Config()
|
||||
cfg, err := r.Config()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -560,7 +603,7 @@ func (r *Repository) CreateBranch(c *config.Branch) error {
|
|||
|
||||
// DeleteBranch delete a Branch from the repository and delete the config
|
||||
func (r *Repository) DeleteBranch(name string) error {
|
||||
cfg, err := r.Storer.Config()
|
||||
cfg, err := r.Config()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -835,7 +878,7 @@ func (r *Repository) cloneRefSpec(o *CloneOptions) []config.RefSpec {
|
|||
}
|
||||
|
||||
func (r *Repository) setIsBare(isBare bool) error {
|
||||
cfg, err := r.Storer.Config()
|
||||
cfg, err := r.Config()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -851,7 +894,7 @@ func (r *Repository) updateRemoteConfigIfNeeded(o *CloneOptions, c *config.Remot
|
|||
|
||||
c.Fetch = r.cloneRefSpec(o)
|
||||
|
||||
cfg, err := r.Storer.Config()
|
||||
cfg, err := r.Config()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1541,7 +1584,7 @@ func (r *Repository) createNewObjectPack(cfg *RepackConfig) (h plumbing.Hash, er
|
|||
return h, err
|
||||
}
|
||||
defer ioutil.CheckClose(wc, &err)
|
||||
scfg, err := r.Storer.Config()
|
||||
scfg, err := r.Config()
|
||||
if err != nil {
|
||||
return h, err
|
||||
}
|
||||
|
|
vendor/github.com/go-git/go-git/v5/storage/filesystem/config.go (17 changes, generated, vendored)
|
@ -1,7 +1,6 @@
|
|||
package filesystem
|
||||
|
||||
import (
|
||||
stdioutil "io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/go-git/go-git/v5/config"
|
||||
|
@ -14,29 +13,17 @@ type ConfigStorage struct {
|
|||
}
|
||||
|
||||
func (c *ConfigStorage) Config() (conf *config.Config, err error) {
|
||||
cfg := config.NewConfig()
|
||||
|
||||
f, err := c.dir.Config()
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return cfg, nil
|
||||
return config.NewConfig(), nil
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer ioutil.CheckClose(f, &err)
|
||||
|
||||
b, err := stdioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = cfg.Unmarshal(b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cfg, err
|
||||
return config.ReadConfig(f)
|
||||
}
|
||||
|
||||
func (c *ConfigStorage) SetConfig(cfg *config.Config) (err error) {
|
||||
|
|
vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go (11 changes, generated, vendored)
|
@ -57,6 +57,9 @@ var (
|
|||
// targeting a non-existing object. This usually means the repository
|
||||
// is corrupt.
|
||||
ErrSymRefTargetNotFound = errors.New("symbolic reference target not found")
|
||||
// ErrIsDir is returned when a reference file is attempting to be read,
|
||||
// but the path specified is a directory.
|
||||
ErrIsDir = errors.New("reference path is a directory")
|
||||
)
|
||||
|
||||
// Options holds configuration for the storage.
|
||||
|
@ -926,6 +929,14 @@ func (d *DotGit) addRefFromHEAD(refs *[]*plumbing.Reference) error {
|
|||
|
||||
func (d *DotGit) readReferenceFile(path, name string) (ref *plumbing.Reference, err error) {
|
||||
path = d.fs.Join(path, d.fs.Join(strings.Split(name, "/")...))
|
||||
st, err := d.fs.Stat(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if st.IsDir() {
|
||||
return nil, ErrIsDir
|
||||
}
|
||||
|
||||
f, err := d.fs.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go (2 changes, generated, vendored)
|
@ -408,6 +408,8 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb
|
|||
return nil, err
|
||||
}
|
||||
|
||||
defer ioutil.CheckClose(w, &err)
|
||||
|
||||
s.objectCache.Put(obj)
|
||||
|
||||
_, err = io.Copy(w, r)
|
||||
|
|
vendor/github.com/go-git/go-git/v5/submodule.go (2 changes, generated, vendored)
|
@ -35,7 +35,7 @@ func (s *Submodule) Config() *config.Submodule {
|
|||
// Init initializes the submodule, reading the recorded Entry in the index for
// the given submodule
|
||||
func (s *Submodule) Init() error {
|
||||
cfg, err := s.w.r.Storer.Config()
|
||||
cfg, err := s.w.r.Config()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go (12 changes, generated, vendored)
|
@ -23,7 +23,7 @@ package merkletrie
|
|||
|
||||
// # Cases
|
||||
//
|
||||
// When comparing noders in both trees you will found yourself in
|
||||
// When comparing noders in both trees you will find yourself in
|
||||
// one of 169 possible cases, but if we ignore moves, we can
|
||||
// simplify a lot the search space into the following table:
|
||||
//
|
||||
|
@ -256,17 +256,21 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
// ErrCanceled is returned whenever the operation is canceled.
|
||||
ErrCanceled = errors.New("operation canceled")
|
||||
)
|
||||
|
||||
// DiffTree calculates the list of changes between two merkletries. It
|
||||
// uses the provided hashEqual callback to compare noders.
|
||||
func DiffTree(fromTree, toTree noder.Noder,
|
||||
hashEqual noder.Equal) (Changes, error) {
|
||||
func DiffTree(
|
||||
fromTree,
|
||||
toTree noder.Noder,
|
||||
hashEqual noder.Equal,
|
||||
) (Changes, error) {
|
||||
return DiffTreeContext(context.Background(), fromTree, toTree, hashEqual)
|
||||
}
|
||||
|
||||
// DiffTree calculates the list of changes between two merkletries. It
|
||||
// DiffTreeContext calculates the list of changes between two merkletries. It
|
||||
// uses the provided hashEqual callback to compare noders.
|
||||
// Error will be returned if context expires
|
||||
// Provided context must be non nil
|
||||
|
|
vendor/github.com/imdario/mergo/.deepsource.toml (new file, 12 lines, generated, vendored)
|
@ -0,0 +1,12 @@
|
|||
version = 1
|
||||
|
||||
test_patterns = [
|
||||
"*_test.go"
|
||||
]
|
||||
|
||||
[[analyzers]]
|
||||
name = "go"
|
||||
enabled = true
|
||||
|
||||
[analyzers.meta]
|
||||
import_path = "github.com/imdario/mergo"
|
vendor/github.com/imdario/mergo/.gitignore (new file, 33 lines, generated, vendored)
|
@ -0,0 +1,33 @@
|
|||
#### joe made this: http://goel.io/joe
|
||||
|
||||
#### go ####
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, build with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
|
||||
.glide/
|
||||
|
||||
#### vim ####
|
||||
# Swap
|
||||
[._]*.s[a-v][a-z]
|
||||
[._]*.sw[a-p]
|
||||
[._]s[a-v][a-z]
|
||||
[._]sw[a-p]
|
||||
|
||||
# Session
|
||||
Session.vim
|
||||
|
||||
# Temporary
|
||||
.netrwhist
|
||||
*~
|
||||
# Auto-generated tag files
|
||||
tags
|
vendor/github.com/imdario/mergo/.travis.yml (new file, 9 lines, generated, vendored)
|
@ -0,0 +1,9 @@
|
|||
language: go
|
||||
install:
|
||||
- go get -t
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
script:
|
||||
- go test -race -v ./...
|
||||
after_script:
|
||||
- $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN
|
vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md (new file, 46 lines, generated, vendored)
|
@ -0,0 +1,46 @@
|
|||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
|
||||
|
||||
[homepage]: http://contributor-covenant.org
|
||||
[version]: http://contributor-covenant.org/version/1/4/
|
vendor/github.com/imdario/mergo/LICENSE (new file, 28 lines, generated, vendored)
|
@ -0,0 +1,28 @@
|
|||
Copyright (c) 2013 Dario Castañé. All rights reserved.
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
vendor/github.com/imdario/mergo/README.md (new file, 238 lines, generated, vendored)
|
@ -0,0 +1,238 @@
|
|||
# Mergo
|
||||
|
||||
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
|
||||
|
||||
Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
|
||||
|
||||
## Status
|
||||
|
||||
It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
|
||||
|
||||
[![GoDoc][3]][4]
|
||||
[![GoCard][5]][6]
|
||||
[![Build Status][1]][2]
|
||||
[![Coverage Status][7]][8]
|
||||
[![Sourcegraph][9]][10]
|
||||
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield)
|
||||
|
||||
[1]: https://travis-ci.org/imdario/mergo.png
|
||||
[2]: https://travis-ci.org/imdario/mergo
|
||||
[3]: https://godoc.org/github.com/imdario/mergo?status.svg
|
||||
[4]: https://godoc.org/github.com/imdario/mergo
|
||||
[5]: https://goreportcard.com/badge/imdario/mergo
|
||||
[6]: https://goreportcard.com/report/github.com/imdario/mergo
|
||||
[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
|
||||
[8]: https://coveralls.io/github/imdario/mergo?branch=master
|
||||
[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
|
||||
[10]: https://sourcegraph.com/github.com/imdario/mergo?badge
|
||||
|
||||
### Latest release
|
||||
|
||||
[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7).
|
||||
|
||||
### Important note
|
||||
|
||||
Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()` and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code.
|
||||
|
||||
If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0).
|
||||
|
||||
### Donations
|
||||
|
||||
If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes:
|
||||
|
||||
<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
|
||||
[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo)
|
||||
[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo)
|
||||
<a href="https://liberapay.com/dario/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a>
|
||||
|
||||
### Mergo in the wild
|
||||
|
||||
- [moby/moby](https://github.com/moby/moby)
|
||||
- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
|
||||
- [vmware/dispatch](https://github.com/vmware/dispatch)
|
||||
- [Shopify/themekit](https://github.com/Shopify/themekit)
|
||||
- [imdario/zas](https://github.com/imdario/zas)
|
||||
- [matcornic/hermes](https://github.com/matcornic/hermes)
|
||||
- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
|
||||
- [kataras/iris](https://github.com/kataras/iris)
|
||||
- [michaelsauter/crane](https://github.com/michaelsauter/crane)
|
||||
- [go-task/task](https://github.com/go-task/task)
|
||||
- [sensu/uchiwa](https://github.com/sensu/uchiwa)
|
||||
- [ory/hydra](https://github.com/ory/hydra)
|
||||
- [sisatech/vcli](https://github.com/sisatech/vcli)
|
||||
- [dairycart/dairycart](https://github.com/dairycart/dairycart)
|
||||
- [projectcalico/felix](https://github.com/projectcalico/felix)
|
||||
- [resin-os/balena](https://github.com/resin-os/balena)
|
||||
- [go-kivik/kivik](https://github.com/go-kivik/kivik)
|
||||
- [Telefonica/govice](https://github.com/Telefonica/govice)
|
||||
- [supergiant/supergiant](https://github.com/supergiant/supergiant)
|
||||
- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
|
||||
- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
|
||||
- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
|
||||
- [EagerIO/Stout](https://github.com/EagerIO/Stout)
|
||||
- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
|
||||
- [russross/canvasassignments](https://github.com/russross/canvasassignments)
|
||||
- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
|
||||
- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
|
||||
- [divshot/gitling](https://github.com/divshot/gitling)
|
||||
- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
|
||||
- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
|
||||
- [elwinar/rambler](https://github.com/elwinar/rambler)
|
||||
- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
|
||||
- [jfbus/impressionist](https://github.com/jfbus/impressionist)
|
||||
- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
|
||||
- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
|
||||
- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
|
||||
- [thoas/picfit](https://github.com/thoas/picfit)
|
||||
- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
|
||||
- [jnuthong/item_search](https://github.com/jnuthong/item_search)
|
||||
- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
|
||||
|
||||
## Installation
|
||||
|
||||
go get github.com/imdario/mergo
|
||||
|
||||
// use in your .go code
|
||||
import (
|
||||
"github.com/imdario/mergo"
|
||||
)
|
||||
|
||||
## Usage
|
||||
|
||||
You can only merge same-type structs whose exported fields are initialized to the zero value of their type, and same-type maps. Mergo won't merge unexported (private) fields but will recursively merge any exported one. It won't merge empty struct values either, as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value). Maps are also merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
|
||||
|
||||
```go
|
||||
if err := mergo.Merge(&dst, src); err != nil {
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
Also, you can merge while overwriting existing values by using the `WithOverride` option.
|
||||
|
||||
```go
|
||||
if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
Additionally, you can map a `map[string]interface{}` to a struct (and vice versa, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
|
||||
|
||||
```go
|
||||
if err := mergo.Map(&dst, srcMap); err != nil {
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
Warning: if you map a struct to a map, the mapping is not recursive. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`; they will simply be assigned as values.
|
||||
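A small sketch of that caveat (the `Inner` and `Outer` types are hypothetical, used only to show the shape of the result):

```go
type Inner struct{ Value int }

type Outer struct {
	Name  string
	Inner Inner
}

dst := map[string]interface{}{}
if err := mergo.Map(&dst, Outer{Name: "example", Inner: Inner{Value: 1}}); err != nil {
	// ...
}
// dst["name"] == "example"
// dst["inner"] holds an Inner value, not a nested map[string]interface{}
```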
|
||||
More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo).
|
||||
|
||||
### Nice example
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/imdario/mergo"
|
||||
)
|
||||
|
||||
type Foo struct {
|
||||
A string
|
||||
B int64
|
||||
}
|
||||
|
||||
func main() {
|
||||
src := Foo{
|
||||
A: "one",
|
||||
B: 2,
|
||||
}
|
||||
dest := Foo{
|
||||
A: "two",
|
||||
}
|
||||
mergo.Merge(&dest, src)
|
||||
fmt.Println(dest)
|
||||
// Will print
|
||||
// {two 2}
|
||||
}
|
||||
```
|
||||
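For comparison, a sketch of the same example with overriding enabled (starting from the same initial `dest` and `src` values as above):

```go
if err := mergo.Merge(&dest, src, mergo.WithOverride); err != nil {
	// ...
}
fmt.Println(dest)
// Will print
// {one 2}
```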
|
||||
Note: if tests are failing due to a missing package, please execute:
|
||||
|
||||
go get gopkg.in/yaml.v2
|
||||
|
||||
### Transformers
|
||||
|
||||
Transformers allow specific types to be merged differently from the default behavior. In other words, you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have a zero value of its own, but `IsZero` can return true because its fields hold zero values. How can we merge a non-zero `time.Time`?
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/imdario/mergo"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
type timeTransformer struct {
|
||||
}
|
||||
|
||||
func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
|
||||
if typ == reflect.TypeOf(time.Time{}) {
|
||||
return func(dst, src reflect.Value) error {
|
||||
if dst.CanSet() {
|
||||
isZero := dst.MethodByName("IsZero")
|
||||
result := isZero.Call([]reflect.Value{})
|
||||
if result[0].Bool() {
|
||||
dst.Set(src)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Snapshot struct {
|
||||
Time time.Time
|
||||
// ...
|
||||
}
|
||||
|
||||
func main() {
|
||||
src := Snapshot{time.Now()}
|
||||
dest := Snapshot{}
|
||||
mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
|
||||
fmt.Println(dest)
|
||||
// Will print
|
||||
// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## Contact me
|
||||
|
||||
If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
|
||||
|
||||
## About
|
||||
|
||||
Written by [Dario Castañé](http://dario.im).
|
||||
|
||||
## Top Contributors
|
||||
|
||||
[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0)
|
||||
[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1)
|
||||
[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2)
|
||||
[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3)
|
||||
[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4)
|
||||
[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5)
|
||||
[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6)
|
||||
[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7)
|
||||
|
||||
|
||||
## License
|
||||
|
||||
[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
|
||||
|
||||
|
||||
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
|
44
vendor/github.com/imdario/mergo/doc.go
generated
vendored
Normal file
44
vendor/github.com/imdario/mergo/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,44 @@
|
|||
// Copyright 2013 Dario Castañé. All rights reserved.
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package mergo merges same-type structs and maps by setting default values in zero-value fields.
|
||||
|
||||
Mergo won't merge unexported (private) fields but will recursively merge any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
|
||||
|
||||
Usage
|
||||
|
||||
From my own work-in-progress project:
|
||||
|
||||
type networkConfig struct {
|
||||
Protocol string
|
||||
Address string
|
||||
ServerType string `json:"server_type"`
|
||||
Port uint16
|
||||
}
|
||||
|
||||
type FssnConfig struct {
|
||||
Network networkConfig
|
||||
}
|
||||
|
||||
var fssnDefault = FssnConfig {
|
||||
networkConfig {
|
||||
"tcp",
|
||||
"127.0.0.1",
|
||||
"http",
|
||||
31560,
|
||||
},
|
||||
}
|
||||
|
||||
// Inside a function [...]
|
||||
|
||||
if err := mergo.Merge(&config, fssnDefault); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// More code [...]
|
||||
|
||||
*/
|
||||
package mergo
|
176
vendor/github.com/imdario/mergo/map.go
generated
vendored
Normal file
176
vendor/github.com/imdario/mergo/map.go
generated
vendored
Normal file
|
@ -0,0 +1,176 @@
|
|||
// Copyright 2014 Dario Castañé. All rights reserved.
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Based on src/pkg/reflect/deepequal.go from official
|
||||
// golang's stdlib.
|
||||
|
||||
package mergo
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
func changeInitialCase(s string, mapper func(rune) rune) string {
|
||||
if s == "" {
|
||||
return s
|
||||
}
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
return string(mapper(r)) + s[n:]
|
||||
}
|
||||
|
||||
func isExported(field reflect.StructField) bool {
|
||||
r, _ := utf8.DecodeRuneInString(field.Name)
|
||||
return r >= 'A' && r <= 'Z'
|
||||
}
|
||||
|
||||
// Recursively traverses both values, assigning src's field values to dst.
|
||||
// The map argument tracks comparisons that have already been seen, which allows
|
||||
// short circuiting on recursive types.
|
||||
func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
|
||||
overwrite := config.Overwrite
|
||||
if dst.CanAddr() {
|
||||
addr := dst.UnsafeAddr()
|
||||
h := 17 * addr
|
||||
seen := visited[h]
|
||||
typ := dst.Type()
|
||||
for p := seen; p != nil; p = p.next {
|
||||
if p.ptr == addr && p.typ == typ {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// Remember, remember...
|
||||
visited[h] = &visit{addr, typ, seen}
|
||||
}
|
||||
zeroValue := reflect.Value{}
|
||||
switch dst.Kind() {
|
||||
case reflect.Map:
|
||||
dstMap := dst.Interface().(map[string]interface{})
|
||||
for i, n := 0, src.NumField(); i < n; i++ {
|
||||
srcType := src.Type()
|
||||
field := srcType.Field(i)
|
||||
if !isExported(field) {
|
||||
continue
|
||||
}
|
||||
fieldName := field.Name
|
||||
fieldName = changeInitialCase(fieldName, unicode.ToLower)
|
||||
if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
|
||||
dstMap[fieldName] = src.Field(i).Interface()
|
||||
}
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if dst.IsNil() {
|
||||
v := reflect.New(dst.Type().Elem())
|
||||
dst.Set(v)
|
||||
}
|
||||
dst = dst.Elem()
|
||||
fallthrough
|
||||
case reflect.Struct:
|
||||
srcMap := src.Interface().(map[string]interface{})
|
||||
for key := range srcMap {
|
||||
config.overwriteWithEmptyValue = true
|
||||
srcValue := srcMap[key]
|
||||
fieldName := changeInitialCase(key, unicode.ToUpper)
|
||||
dstElement := dst.FieldByName(fieldName)
|
||||
if dstElement == zeroValue {
|
||||
// We discard it because the field doesn't exist.
|
||||
continue
|
||||
}
|
||||
srcElement := reflect.ValueOf(srcValue)
|
||||
dstKind := dstElement.Kind()
|
||||
srcKind := srcElement.Kind()
|
||||
if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
|
||||
srcElement = srcElement.Elem()
|
||||
srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
|
||||
} else if dstKind == reflect.Ptr {
|
||||
// Can this work? I guess it can't.
|
||||
if srcKind != reflect.Ptr && srcElement.CanAddr() {
|
||||
srcPtr := srcElement.Addr()
|
||||
srcElement = reflect.ValueOf(srcPtr)
|
||||
srcKind = reflect.Ptr
|
||||
}
|
||||
}
|
||||
|
||||
if !srcElement.IsValid() {
|
||||
continue
|
||||
}
|
||||
if srcKind == dstKind {
|
||||
if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
|
||||
return
|
||||
}
|
||||
} else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
|
||||
if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
|
||||
return
|
||||
}
|
||||
} else if srcKind == reflect.Map {
|
||||
if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Map sets fields' values in dst from src.
|
||||
// src can be a map with string keys or a struct. dst must be the opposite:
|
||||
// if src is a map, dst must be a valid pointer to struct. If src is a struct,
|
||||
// dst must be map[string]interface{}.
|
||||
// It won't merge unexported (private) fields and will recursively merge
|
||||
// any exported field.
|
||||
// If dst is a map, keys will be src fields' names in lower camel case.
|
||||
// Missing key in src that doesn't match a field in dst will be skipped. This
|
||||
// doesn't apply if dst is a map.
|
||||
// This is a separate method from Merge because it is cleaner and it keeps sane
|
||||
// semantics: merging equal types, mapping different (restricted) types.
|
||||
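// An illustrative call (not part of the upstream source; the values are hypothetical):
//
//	dst := map[string]interface{}{}
//	if err := Map(&dst, struct{ Name string }{Name: "gitea"}); err != nil {
//		// handle the error
//	}
//	// dst["name"] == "gitea"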
func Map(dst, src interface{}, opts ...func(*Config)) error {
|
||||
return _map(dst, src, opts...)
|
||||
}
|
||||
|
||||
// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
|
||||
// non-empty src attribute values.
|
||||
// Deprecated: Use Map(…) with WithOverride
|
||||
func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
|
||||
return _map(dst, src, append(opts, WithOverride)...)
|
||||
}
|
||||
|
||||
func _map(dst, src interface{}, opts ...func(*Config)) error {
|
||||
var (
|
||||
vDst, vSrc reflect.Value
|
||||
err error
|
||||
)
|
||||
config := &Config{}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(config)
|
||||
}
|
||||
|
||||
if vDst, vSrc, err = resolveValues(dst, src); err != nil {
|
||||
return err
|
||||
}
|
||||
// To be friction-less, we redirect equal-type arguments
|
||||
// to deepMerge. Only because arguments can be anything.
|
||||
if vSrc.Kind() == vDst.Kind() {
|
||||
_, err := deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
|
||||
return err
|
||||
}
|
||||
switch vSrc.Kind() {
|
||||
case reflect.Struct:
|
||||
if vDst.Kind() != reflect.Map {
|
||||
return ErrExpectedMapAsDestination
|
||||
}
|
||||
case reflect.Map:
|
||||
if vDst.Kind() != reflect.Struct {
|
||||
return ErrExpectedStructAsDestination
|
||||
}
|
||||
default:
|
||||
return ErrNotSupported
|
||||
}
|
||||
return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config)
|
||||
}
|
338
vendor/github.com/imdario/mergo/merge.go
generated
vendored
Normal file
338
vendor/github.com/imdario/mergo/merge.go
generated
vendored
Normal file
|
@ -0,0 +1,338 @@
|
|||
// Copyright 2013 Dario Castañé. All rights reserved.
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Based on src/pkg/reflect/deepequal.go from official
|
||||
// golang's stdlib.
|
||||
|
||||
package mergo
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func hasExportedField(dst reflect.Value) (exported bool) {
|
||||
for i, n := 0, dst.NumField(); i < n; i++ {
|
||||
field := dst.Type().Field(i)
|
||||
if isExportedComponent(&field) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func isExportedComponent(field *reflect.StructField) bool {
|
||||
name := field.Name
|
||||
pkgPath := field.PkgPath
|
||||
if len(pkgPath) > 0 {
|
||||
return false
|
||||
}
|
||||
c := name[0]
|
||||
if 'a' <= c && c <= 'z' || c == '_' {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
Overwrite bool
|
||||
AppendSlice bool
|
||||
TypeCheck bool
|
||||
Transformers Transformers
|
||||
overwriteWithEmptyValue bool
|
||||
overwriteSliceWithEmptyValue bool
|
||||
}
|
||||
|
||||
type Transformers interface {
|
||||
Transformer(reflect.Type) func(dst, src reflect.Value) error
|
||||
}
|
||||
|
||||
// Recursively traverses both values, assigning src's field values to dst.
|
||||
// The map argument tracks comparisons that have already been seen, which allows
|
||||
// short circuiting on recursive types.
|
||||
func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (dst reflect.Value, err error) {
|
||||
dst = dstIn
|
||||
overwrite := config.Overwrite
|
||||
typeCheck := config.TypeCheck
|
||||
overwriteWithEmptySrc := config.overwriteWithEmptyValue
|
||||
overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue
|
||||
|
||||
if !src.IsValid() {
|
||||
return
|
||||
}
|
||||
|
||||
if dst.CanAddr() {
|
||||
addr := dst.UnsafeAddr()
|
||||
h := 17 * addr
|
||||
seen := visited[h]
|
||||
typ := dst.Type()
|
||||
for p := seen; p != nil; p = p.next {
|
||||
if p.ptr == addr && p.typ == typ {
|
||||
return dst, nil
|
||||
}
|
||||
}
|
||||
// Remember, remember...
|
||||
visited[h] = &visit{addr, typ, seen}
|
||||
}
|
||||
|
||||
if config.Transformers != nil && !isEmptyValue(dst) {
|
||||
if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
|
||||
err = fn(dst, src)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if dst.IsValid() && src.IsValid() && src.Type() != dst.Type() {
|
||||
err = fmt.Errorf("cannot append two different types (%s, %s)", src.Kind(), dst.Kind())
|
||||
return
|
||||
}
|
||||
|
||||
switch dst.Kind() {
|
||||
case reflect.Struct:
|
||||
if hasExportedField(dst) {
|
||||
dstCp := reflect.New(dst.Type()).Elem()
|
||||
for i, n := 0, dst.NumField(); i < n; i++ {
|
||||
dstField := dst.Field(i)
|
||||
structField := dst.Type().Field(i)
|
||||
// copy un-exported struct fields
|
||||
if !isExportedComponent(&structField) {
|
||||
rf := dstCp.Field(i)
|
||||
rf = reflect.NewAt(rf.Type(), unsafe.Pointer(rf.UnsafeAddr())).Elem() //nolint:gosec
|
||||
dstRF := dst.Field(i)
|
||||
if !dst.Field(i).CanAddr() {
|
||||
continue
|
||||
}
|
||||
|
||||
dstRF = reflect.NewAt(dstRF.Type(), unsafe.Pointer(dstRF.UnsafeAddr())).Elem() //nolint:gosec
|
||||
rf.Set(dstRF)
|
||||
continue
|
||||
}
|
||||
dstField, err = deepMerge(dstField, src.Field(i), visited, depth+1, config)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
dstCp.Field(i).Set(dstField)
|
||||
}
|
||||
|
||||
if dst.CanSet() {
|
||||
dst.Set(dstCp)
|
||||
} else {
|
||||
dst = dstCp
|
||||
}
|
||||
return
|
||||
} else {
|
||||
if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) {
|
||||
dst = src
|
||||
}
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
if dst.IsNil() && !src.IsNil() {
|
||||
if dst.CanSet() {
|
||||
dst.Set(reflect.MakeMap(dst.Type()))
|
||||
} else {
|
||||
dst = src
|
||||
return
|
||||
}
|
||||
}
|
||||
for _, key := range src.MapKeys() {
|
||||
srcElement := src.MapIndex(key)
|
||||
dstElement := dst.MapIndex(key)
|
||||
if !srcElement.IsValid() {
|
||||
continue
|
||||
}
|
||||
if dst.MapIndex(key).IsValid() {
|
||||
k := dstElement.Interface()
|
||||
dstElement = reflect.ValueOf(k)
|
||||
}
|
||||
if isReflectNil(srcElement) {
|
||||
if overwrite || isReflectNil(dstElement) {
|
||||
dst.SetMapIndex(key, srcElement)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if !srcElement.CanInterface() {
|
||||
continue
|
||||
}
|
||||
|
||||
if srcElement.CanInterface() {
|
||||
srcElement = reflect.ValueOf(srcElement.Interface())
|
||||
if dstElement.IsValid() {
|
||||
dstElement = reflect.ValueOf(dstElement.Interface())
|
||||
}
|
||||
}
|
||||
dstElement, err = deepMerge(dstElement, srcElement, visited, depth+1, config)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
dst.SetMapIndex(key, dstElement)
|
||||
|
||||
}
|
||||
case reflect.Slice:
|
||||
newSlice := dst
|
||||
if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
|
||||
if typeCheck && src.Type() != dst.Type() {
|
||||
return dst, fmt.Errorf("cannot override two slices with different type (%s, %s)", src.Type(), dst.Type())
|
||||
}
|
||||
newSlice = src
|
||||
} else if config.AppendSlice {
|
||||
if typeCheck && src.Type() != dst.Type() {
|
||||
err = fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
|
||||
return
|
||||
}
|
||||
newSlice = reflect.AppendSlice(dst, src)
|
||||
}
|
||||
if dst.CanSet() {
|
||||
dst.Set(newSlice)
|
||||
} else {
|
||||
dst = newSlice
|
||||
}
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
if isReflectNil(src) {
|
||||
break
|
||||
}
|
||||
|
||||
if dst.Kind() != reflect.Ptr && src.Type().AssignableTo(dst.Type()) {
|
||||
if dst.IsNil() || overwrite {
|
||||
if overwrite || isEmptyValue(dst) {
|
||||
if dst.CanSet() {
|
||||
dst.Set(src)
|
||||
} else {
|
||||
dst = src
|
||||
}
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
if src.Kind() != reflect.Interface {
|
||||
if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) {
|
||||
if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
|
||||
dst.Set(src)
|
||||
}
|
||||
} else if src.Kind() == reflect.Ptr {
|
||||
if dst, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
|
||||
return
|
||||
}
|
||||
dst = dst.Addr()
|
||||
} else if dst.Elem().Type() == src.Type() {
|
||||
if dst, err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
return dst, ErrDifferentArgumentsTypes
|
||||
}
|
||||
break
|
||||
}
|
||||
if dst.IsNil() || overwrite {
|
||||
if (overwrite || isEmptyValue(dst)) && (overwriteWithEmptySrc || !isEmptyValue(src)) {
|
||||
if dst.CanSet() {
|
||||
dst.Set(src)
|
||||
} else {
|
||||
dst = src
|
||||
}
|
||||
}
|
||||
} else if _, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
|
||||
return
|
||||
}
|
||||
default:
|
||||
overwriteFull := (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst))
|
||||
if overwriteFull {
|
||||
if dst.CanSet() {
|
||||
dst.Set(src)
|
||||
} else {
|
||||
dst = src
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Merge will fill any empty value-type attributes on the dst struct using corresponding
|
||||
// src attributes if they themselves are not empty. dst and src must be valid same-type structs
|
||||
// and dst must be a pointer to struct.
|
||||
// It won't merge unexported (private) fields and will recursively merge any exported field.
|
||||
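// An illustrative call (not part of the upstream source; the config type and values are hypothetical):
//
//	type conf struct {
//		Host string
//		Port int
//	}
//	defaults := conf{Host: "localhost", Port: 3000}
//	c := conf{Port: 8080}
//	if err := Merge(&c, defaults); err != nil {
//		// handle the error
//	}
//	// c.Host == "localhost" (empty field filled), c.Port == 8080 (non-empty field kept)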
func Merge(dst, src interface{}, opts ...func(*Config)) error {
|
||||
return merge(dst, src, opts...)
|
||||
}
|
||||
|
||||
// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
|
||||
// non-empty src attribute values.
|
||||
// Deprecated: use Merge(…) with WithOverride
|
||||
func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
|
||||
return merge(dst, src, append(opts, WithOverride)...)
|
||||
}
|
||||
|
||||
// WithTransformers adds transformers to merge, allowing you to customize how some types are merged.
|
||||
func WithTransformers(transformers Transformers) func(*Config) {
|
||||
return func(config *Config) {
|
||||
config.Transformers = transformers
|
||||
}
|
||||
}
|
||||
|
||||
// WithOverride will make merge override non-empty dst attributes with non-empty src attribute values.
|
||||
func WithOverride(config *Config) {
|
||||
config.Overwrite = true
|
||||
}
|
||||
|
||||
// WithOverwriteWithEmptyValue will make merge override non-empty dst attributes with empty src attribute values.
|
||||
func WithOverwriteWithEmptyValue(config *Config) {
|
||||
config.overwriteWithEmptyValue = true
|
||||
}
|
||||
|
||||
// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice.
|
||||
func WithOverrideEmptySlice(config *Config) {
|
||||
config.overwriteSliceWithEmptyValue = true
|
||||
}
|
||||
|
||||
// WithAppendSlice will make merge append slices instead of overwriting them.
|
||||
func WithAppendSlice(config *Config) {
|
||||
config.AppendSlice = true
|
||||
}
|
||||
|
||||
// WithTypeCheck will make merge check types while overwriting (must be used with WithOverride).
|
||||
func WithTypeCheck(config *Config) {
|
||||
config.TypeCheck = true
|
||||
}
|
||||
|
||||
func merge(dst, src interface{}, opts ...func(*Config)) error {
|
||||
var (
|
||||
vDst, vSrc reflect.Value
|
||||
err error
|
||||
)
|
||||
|
||||
config := &Config{}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(config)
|
||||
}
|
||||
|
||||
if vDst, vSrc, err = resolveValues(dst, src); err != nil {
|
||||
return err
|
||||
}
|
||||
if !vDst.CanSet() {
|
||||
return fmt.Errorf("cannot set dst, needs reference")
|
||||
}
|
||||
if vDst.Type() != vSrc.Type() {
|
||||
return ErrDifferentArgumentsTypes
|
||||
}
|
||||
_, err = deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
|
||||
return err
|
||||
}
|
||||
|
||||
// isReflectNil reports whether the provided reflect value is nil.
|
||||
func isReflectNil(v reflect.Value) bool {
|
||||
k := v.Kind()
|
||||
switch k {
|
||||
case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr:
|
||||
// Both interface and slice are nil if first word is 0.
|
||||
// Both are always bigger than a word; assume flagIndir.
|
||||
return v.IsNil()
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
97
vendor/github.com/imdario/mergo/mergo.go
generated
vendored
Normal file
97
vendor/github.com/imdario/mergo/mergo.go
generated
vendored
Normal file
|
@ -0,0 +1,97 @@
|
|||
// Copyright 2013 Dario Castañé. All rights reserved.
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Based on src/pkg/reflect/deepequal.go from official
|
||||
// golang's stdlib.
|
||||
|
||||
package mergo
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// Errors reported by Mergo when it finds invalid arguments.
|
||||
var (
|
||||
ErrNilArguments = errors.New("src and dst must not be nil")
|
||||
ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
|
||||
ErrNotSupported = errors.New("only structs and maps are supported")
|
||||
ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
|
||||
ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
|
||||
)
|
||||
|
||||
// During deepMerge, must keep track of checks that are
|
||||
// in progress. The comparison algorithm assumes that all
|
||||
// checks in progress are true when it reencounters them.
|
||||
// Visited are stored in a map indexed by 17 * a1 + a2;
|
||||
type visit struct {
|
||||
ptr uintptr
|
||||
typ reflect.Type
|
||||
next *visit
|
||||
}
|
||||
|
||||
// From src/pkg/encoding/json/encode.go.
|
||||
func isEmptyValue(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
if v.IsNil() {
|
||||
return true
|
||||
}
|
||||
return isEmptyValue(v.Elem())
|
||||
case reflect.Func:
|
||||
return v.IsNil()
|
||||
case reflect.Invalid:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
|
||||
if dst == nil || src == nil {
|
||||
err = ErrNilArguments
|
||||
return
|
||||
}
|
||||
vDst = reflect.ValueOf(dst).Elem()
|
||||
if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
|
||||
err = ErrNotSupported
|
||||
return
|
||||
}
|
||||
vSrc = reflect.ValueOf(src)
|
||||
// We check if vSrc is a pointer to dereference it.
|
||||
if vSrc.Kind() == reflect.Ptr {
|
||||
vSrc = vSrc.Elem()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Recursively traverses both values, assigning src's field values to dst.
|
||||
// The map argument tracks comparisons that have already been seen, which allows
|
||||
// short circuiting on recursive types.
|
||||
func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
|
||||
if dst.CanAddr() {
|
||||
addr := dst.UnsafeAddr()
|
||||
h := 17 * addr
|
||||
seen := visited[h]
|
||||
typ := dst.Type()
|
||||
for p := seen; p != nil; p = p.next {
|
||||
if p.ptr == addr && p.typ == typ {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// Remember, remember...
|
||||
visited[h] = &visit{addr, typ, seen}
|
||||
}
|
||||
return // TODO refactor
|
||||
}
|
23
vendor/golang.org/x/crypto/ssh/mux.go
generated
vendored
23
vendor/golang.org/x/crypto/ssh/mux.go
generated
vendored
|
@ -240,7 +240,7 @@ func (m *mux) onePacket() error {
|
|||
id := binary.BigEndian.Uint32(packet[1:])
|
||||
ch := m.chanList.getChan(id)
|
||||
if ch == nil {
|
||||
return fmt.Errorf("ssh: invalid channel %d", id)
|
||||
return m.handleUnknownChannelPacket(id, packet)
|
||||
}
|
||||
|
||||
return ch.handlePacket(packet)
|
||||
|
@ -328,3 +328,24 @@ func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) {
|
|||
return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *mux) handleUnknownChannelPacket(id uint32, packet []byte) error {
|
||||
msg, err := decode(packet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch msg := msg.(type) {
|
||||
// RFC 4254 section 5.4 says unrecognized channel requests should
|
||||
// receive a failure response.
|
||||
case *channelRequestMsg:
|
||||
if msg.WantReply {
|
||||
return m.sendMessage(channelRequestFailureMsg{
|
||||
PeersID: msg.PeersID,
|
||||
})
|
||||
}
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("ssh: invalid channel %d", id)
|
||||
}
|
||||
}
|
||||
|
|
11
vendor/golang.org/x/sys/cpu/byteorder.go
generated
vendored
11
vendor/golang.org/x/sys/cpu/byteorder.go
generated
vendored
|
@ -39,20 +39,25 @@ func (bigEndian) Uint64(b []byte) uint64 {
|
|||
uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
|
||||
}
|
||||
|
||||
// hostByteOrder returns binary.LittleEndian on little-endian machines and
|
||||
// binary.BigEndian on big-endian machines.
|
||||
// hostByteOrder returns littleEndian on little-endian machines and
|
||||
// bigEndian on big-endian machines.
|
||||
func hostByteOrder() byteOrder {
|
||||
switch runtime.GOARCH {
|
||||
case "386", "amd64", "amd64p32",
|
||||
"alpha",
|
||||
"arm", "arm64",
|
||||
"mipsle", "mips64le", "mips64p32le",
|
||||
"nios2",
|
||||
"ppc64le",
|
||||
"riscv", "riscv64":
|
||||
"riscv", "riscv64",
|
||||
"sh":
|
||||
return littleEndian{}
|
||||
case "armbe", "arm64be",
|
||||
"m68k",
|
||||
"mips", "mips64", "mips64p32",
|
||||
"ppc", "ppc64",
|
||||
"s390", "s390x",
|
||||
"shbe",
|
||||
"sparc", "sparc64":
|
||||
return bigEndian{}
|
||||
}
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build aix,ppc64
|
||||
// +build aix
|
||||
|
||||
package cpu
|
||||
|
8
vendor/golang.org/x/sys/cpu/cpu_arm64.go
generated
vendored
8
vendor/golang.org/x/sys/cpu/cpu_arm64.go
generated
vendored
|
@ -10,8 +10,14 @@ const cacheLineSize = 64
|
|||
|
||||
func init() {
|
||||
switch runtime.GOOS {
|
||||
case "android", "darwin":
|
||||
case "android", "darwin", "netbsd":
|
||||
// Android and iOS don't seem to allow reading these registers.
|
||||
//
|
||||
// NetBSD:
|
||||
// ID_AA64ISAR0_EL1 is a privileged register and cannot be read from EL0.
|
||||
// It can be read via sysctl(3). Example for future implementers:
|
||||
// https://nxr.netbsd.org/xref/src/usr.sbin/cpuctl/arch/aarch64.c
|
||||
//
|
||||
// Fake the minimal features expected by
|
||||
// TestARM64minimalFeatures.
|
||||
ARM64.HasASIMD = true
|
||||
|
|
27
vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
generated
vendored
Normal file
27
vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Recreate a getsystemcfg syscall handler instead of
|
||||
// using the one provided by x/sys/unix to avoid having
|
||||
// the dependency between them. (See golang.org/issue/32102)
|
||||
// Moreover, this file will be used during the building of
|
||||
// gccgo's libgo and thus must not use a cgo method.
|
||||
|
||||
// +build aix
|
||||
// +build gccgo
|
||||
|
||||
package cpu
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
//extern getsystemcfg
|
||||
func gccgoGetsystemcfg(label uint32) (r uint64)
|
||||
|
||||
func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) {
|
||||
r1 = uintptr(gccgoGetsystemcfg(uint32(label)))
|
||||
e1 = syscall.GetErrno()
|
||||
return
|
||||
}
|
12
vendor/golang.org/x/sys/unix/syscall_linux.go
generated
vendored
12
vendor/golang.org/x/sys/unix/syscall_linux.go
generated
vendored
|
@ -97,6 +97,12 @@ func IoctlSetRTCTime(fd int, value *RTCTime) error {
|
|||
return err
|
||||
}
|
||||
|
||||
func IoctlSetRTCWkAlrm(fd int, value *RTCWkAlrm) error {
|
||||
err := ioctl(fd, RTC_WKALM_SET, uintptr(unsafe.Pointer(value)))
|
||||
runtime.KeepAlive(value)
|
||||
return err
|
||||
}
|
||||
|
||||
func IoctlGetUint32(fd int, req uint) (uint32, error) {
|
||||
var value uint32
|
||||
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
|
||||
|
@ -109,6 +115,12 @@ func IoctlGetRTCTime(fd int) (*RTCTime, error) {
|
|||
return &value, err
|
||||
}
|
||||
|
||||
func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) {
|
||||
var value RTCWkAlrm
|
||||
err := ioctl(fd, RTC_WKALM_RD, uintptr(unsafe.Pointer(&value)))
|
||||
return &value, err
|
||||
}
|
||||
|
||||
//sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error)
|
||||
|
||||
func Link(oldpath string, newpath string) (err error) {
|
||||
|
|
66
vendor/golang.org/x/sys/unix/zerrors_linux.go
generated
vendored
66
vendor/golang.org/x/sys/unix/zerrors_linux.go
generated
vendored
|
@ -160,78 +160,28 @@ const (
|
|||
BPF_A = 0x10
|
||||
BPF_ABS = 0x20
|
||||
BPF_ADD = 0x0
|
||||
BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff
|
||||
BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38
|
||||
BPF_ALU = 0x4
|
||||
BPF_ALU64 = 0x7
|
||||
BPF_AND = 0x50
|
||||
BPF_ANY = 0x0
|
||||
BPF_ARSH = 0xc0
|
||||
BPF_B = 0x10
|
||||
BPF_BUILD_ID_SIZE = 0x14
|
||||
BPF_CALL = 0x80
|
||||
BPF_DEVCG_ACC_MKNOD = 0x1
|
||||
BPF_DEVCG_ACC_READ = 0x2
|
||||
BPF_DEVCG_ACC_WRITE = 0x4
|
||||
BPF_DEVCG_DEV_BLOCK = 0x1
|
||||
BPF_DEVCG_DEV_CHAR = 0x2
|
||||
BPF_DIV = 0x30
|
||||
BPF_DW = 0x18
|
||||
BPF_END = 0xd0
|
||||
BPF_EXIST = 0x2
|
||||
BPF_EXIT = 0x90
|
||||
BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 0x1
|
||||
BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 0x4
|
||||
BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 0x2
|
||||
BPF_FROM_BE = 0x8
|
||||
BPF_FROM_LE = 0x0
|
||||
BPF_FS_MAGIC = 0xcafe4a11
|
||||
BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2
|
||||
BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4
|
||||
BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8
|
||||
BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10
|
||||
BPF_F_ADJ_ROOM_FIXED_GSO = 0x1
|
||||
BPF_F_ALLOW_MULTI = 0x2
|
||||
BPF_F_ALLOW_OVERRIDE = 0x1
|
||||
BPF_F_ANY_ALIGNMENT = 0x2
|
||||
BPF_F_CLONE = 0x200
|
||||
BPF_F_CTXLEN_MASK = 0xfffff00000000
|
||||
BPF_F_CURRENT_CPU = 0xffffffff
|
||||
BPF_F_CURRENT_NETNS = -0x1
|
||||
BPF_F_DONT_FRAGMENT = 0x4
|
||||
BPF_F_FAST_STACK_CMP = 0x200
|
||||
BPF_F_HDR_FIELD_MASK = 0xf
|
||||
BPF_F_INDEX_MASK = 0xffffffff
|
||||
BPF_F_INGRESS = 0x1
|
||||
BPF_F_INVALIDATE_HASH = 0x2
|
||||
BPF_F_LOCK = 0x4
|
||||
BPF_F_MARK_ENFORCE = 0x40
|
||||
BPF_F_MARK_MANGLED_0 = 0x20
|
||||
BPF_F_MMAPABLE = 0x400
|
||||
BPF_F_NO_COMMON_LRU = 0x2
|
||||
BPF_F_NO_PREALLOC = 0x1
|
||||
BPF_F_NUMA_NODE = 0x4
|
||||
BPF_F_PSEUDO_HDR = 0x10
|
||||
BPF_F_QUERY_EFFECTIVE = 0x1
|
||||
BPF_F_RDONLY = 0x8
|
||||
BPF_F_RDONLY_PROG = 0x80
|
||||
BPF_F_RECOMPUTE_CSUM = 0x1
|
||||
BPF_F_REPLACE = 0x4
|
||||
BPF_F_REUSE_STACKID = 0x400
|
||||
BPF_F_SEQ_NUMBER = 0x8
|
||||
BPF_F_SKIP_FIELD_MASK = 0xff
|
||||
BPF_F_STACK_BUILD_ID = 0x20
|
||||
BPF_F_STRICT_ALIGNMENT = 0x1
|
||||
BPF_F_SYSCTL_BASE_NAME = 0x1
|
||||
BPF_F_TEST_RND_HI32 = 0x4
|
||||
BPF_F_TEST_STATE_FREQ = 0x8
|
||||
BPF_F_TUNINFO_IPV6 = 0x1
|
||||
BPF_F_USER_BUILD_ID = 0x800
|
||||
BPF_F_USER_STACK = 0x100
|
||||
BPF_F_WRONLY = 0x10
|
||||
BPF_F_WRONLY_PROG = 0x100
|
||||
BPF_F_ZERO_CSUM_TX = 0x2
|
||||
BPF_F_ZERO_SEED = 0x40
|
||||
BPF_H = 0x8
|
||||
BPF_IMM = 0x0
|
||||
BPF_IND = 0x40
|
||||
|
@ -267,7 +217,6 @@ const (
|
|||
BPF_MUL = 0x20
|
||||
BPF_NEG = 0x80
|
||||
BPF_NET_OFF = -0x100000
|
||||
BPF_NOEXIST = 0x1
|
||||
BPF_OBJ_NAME_LEN = 0x10
|
||||
BPF_OR = 0x40
|
||||
BPF_PSEUDO_CALL = 0x1
|
||||
|
@ -275,12 +224,6 @@ const (
|
|||
BPF_PSEUDO_MAP_VALUE = 0x2
|
||||
BPF_RET = 0x6
|
||||
BPF_RSH = 0x70
|
||||
BPF_SK_STORAGE_GET_F_CREATE = 0x1
|
||||
BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf
|
||||
BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
|
||||
BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
|
||||
BPF_SOCK_OPS_RTT_CB_FLAG = 0x8
|
||||
BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
|
||||
BPF_ST = 0x2
|
||||
BPF_STX = 0x3
|
||||
BPF_SUB = 0x10
|
||||
|
@ -378,12 +321,14 @@ const (
|
|||
CLOCK_TXINT = 0x3
|
||||
CLONE_ARGS_SIZE_VER0 = 0x40
|
||||
CLONE_ARGS_SIZE_VER1 = 0x50
|
||||
CLONE_ARGS_SIZE_VER2 = 0x58
|
||||
CLONE_CHILD_CLEARTID = 0x200000
|
||||
CLONE_CHILD_SETTID = 0x1000000
|
||||
CLONE_CLEAR_SIGHAND = 0x100000000
|
||||
CLONE_DETACHED = 0x400000
|
||||
CLONE_FILES = 0x400
|
||||
CLONE_FS = 0x200
|
||||
CLONE_INTO_CGROUP = 0x200000000
|
||||
CLONE_IO = 0x80000000
|
||||
CLONE_NEWCGROUP = 0x2000000
|
||||
CLONE_NEWIPC = 0x8000000
|
||||
|
@ -598,7 +543,9 @@ const (
|
|||
FAN_DELETE = 0x200
|
||||
FAN_DELETE_SELF = 0x400
|
||||
FAN_DENY = 0x2
|
||||
FAN_DIR_MODIFY = 0x80000
|
||||
FAN_ENABLE_AUDIT = 0x40
|
||||
FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2
|
||||
FAN_EVENT_INFO_TYPE_FID = 0x1
|
||||
FAN_EVENT_METADATA_LEN = 0x18
|
||||
FAN_EVENT_ON_CHILD = 0x8000000
|
||||
|
@ -2108,8 +2055,6 @@ const (
|
|||
TCOFLUSH = 0x1
|
||||
TCOOFF = 0x0
|
||||
TCOON = 0x1
|
||||
TCP_BPF_IW = 0x3e9
|
||||
TCP_BPF_SNDCWND_CLAMP = 0x3ea
|
||||
TCP_CC_INFO = 0x1a
|
||||
TCP_CM_INQ = 0x24
|
||||
TCP_CONGESTION = 0xd
|
||||
|
@ -2384,8 +2329,9 @@ const (
|
|||
XDP_COPY = 0x2
|
||||
XDP_FLAGS_DRV_MODE = 0x4
|
||||
XDP_FLAGS_HW_MODE = 0x8
|
||||
XDP_FLAGS_MASK = 0xf
|
||||
XDP_FLAGS_MASK = 0x1f
|
||||
XDP_FLAGS_MODES = 0xe
|
||||
XDP_FLAGS_REPLACE = 0x10
|
||||
XDP_FLAGS_SKB_MODE = 0x2
|
||||
XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1
|
||||
XDP_MMAP_OFFSETS = 0x1
|
||||
|
|
1
vendor/golang.org/x/sys/unix/zerrors_linux_386.go
generated
vendored
1
vendor/golang.org/x/sys/unix/zerrors_linux_386.go
generated
vendored
|
@ -75,6 +75,7 @@ const (
|
|||
FP_XSTATE_MAGIC2 = 0x46505845
|
||||
FS_IOC_ENABLE_VERITY = 0x40806685
|
||||
FS_IOC_GETFLAGS = 0x80046601
|
||||
FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
|
||||
|
|
1
vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
generated
vendored
1
vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
generated
vendored
|
@ -75,6 +75,7 @@ const (
|
|||
FP_XSTATE_MAGIC2 = 0x46505845
|
||||
FS_IOC_ENABLE_VERITY = 0x40806685
|
||||
FS_IOC_GETFLAGS = 0x80086601
|
||||
FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
|
||||
|
|
1
vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
generated
vendored
1
vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
generated
vendored
|
@ -74,6 +74,7 @@ const (
|
|||
FLUSHO = 0x1000
|
||||
FS_IOC_ENABLE_VERITY = 0x40806685
|
||||
FS_IOC_GETFLAGS = 0x80046601
|
||||
FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
|
||||
|
|
1
vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
generated
vendored
1
vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
generated
vendored
|
@ -77,6 +77,7 @@ const (
|
|||
FPSIMD_MAGIC = 0x46508001
|
||||
FS_IOC_ENABLE_VERITY = 0x40806685
|
||||
FS_IOC_GETFLAGS = 0x80086601
|
||||
FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
|
||||
|
|
1
vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
generated
vendored
1
vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
generated
vendored
|
@ -74,6 +74,7 @@ const (
|
|||
FLUSHO = 0x2000
|
||||
FS_IOC_ENABLE_VERITY = 0x80806685
|
||||
FS_IOC_GETFLAGS = 0x40046601
|
||||
FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
|
||||
|
|
1
vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
generated
vendored
1
vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
generated
vendored
|
@ -74,6 +74,7 @@ const (
|
|||
FLUSHO = 0x2000
|
||||
FS_IOC_ENABLE_VERITY = 0x80806685
|
||||
FS_IOC_GETFLAGS = 0x40086601
|
||||
FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
|
||||
|
|
1
vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
generated
vendored
1
vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
generated
vendored
|
@ -74,6 +74,7 @@ const (
|
|||
FLUSHO = 0x2000
|
||||
FS_IOC_ENABLE_VERITY = 0x80806685
|
||||
FS_IOC_GETFLAGS = 0x40086601
|
||||
FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
|
||||
|
|
1
vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
generated
vendored
1
vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
generated
vendored
|
@ -74,6 +74,7 @@ const (
|
|||
FLUSHO = 0x2000
|
||||
FS_IOC_ENABLE_VERITY = 0x80806685
|
||||
FS_IOC_GETFLAGS = 0x40046601
|
||||
FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
|
||||
|
|
1
vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
generated
vendored
1
vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
generated
vendored
|
@ -74,6 +74,7 @@ const (
|
|||
FLUSHO = 0x800000
|
||||
FS_IOC_ENABLE_VERITY = 0x80806685
|
||||
FS_IOC_GETFLAGS = 0x40086601
|
||||
FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
|
||||
|
|
1
vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
generated
vendored
1
vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
generated
vendored
|
@ -74,6 +74,7 @@ const (
|
|||
FLUSHO = 0x800000
|
||||
FS_IOC_ENABLE_VERITY = 0x80806685
|
||||
FS_IOC_GETFLAGS = 0x40086601
|
||||
FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
|
||||
|
|
1
vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
generated
vendored
1
vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
generated
vendored
|
@ -74,6 +74,7 @@ const (
|
|||
FLUSHO = 0x1000
|
||||
FS_IOC_ENABLE_VERITY = 0x40806685
|
||||
FS_IOC_GETFLAGS = 0x80086601
|
||||
FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
|
||||
|
|
1
vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
generated
vendored
1
vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
generated
vendored
|
@ -74,6 +74,7 @@ const (
|
|||
FLUSHO = 0x1000
|
||||
FS_IOC_ENABLE_VERITY = 0x40806685
|
||||
FS_IOC_GETFLAGS = 0x80086601
|
||||
FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
|
||||
|
|
1
vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
generated
vendored
1
vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
generated
vendored
|
@ -78,6 +78,7 @@ const (
|
|||
FLUSHO = 0x1000
|
||||
FS_IOC_ENABLE_VERITY = 0x80806685
|
||||
FS_IOC_GETFLAGS = 0x40086601
|
||||
FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
|
||||
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
|
||||
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
|
||||
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
|
||||
|
|
3
vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
generated
vendored
3
vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
generated
vendored
|
@ -1,4 +1,4 @@
|
|||
// mksysctl_openbsd.pl
|
||||
// go run mksysctl_openbsd.go
|
||||
// Code generated by the command above; DO NOT EDIT.
|
||||
|
||||
// +build 386,openbsd
|
||||
|
@ -30,6 +30,7 @@ var sysctlMib = []mibentry{
|
|||
{"hw.model", []_C_int{6, 2}},
|
||||
{"hw.ncpu", []_C_int{6, 3}},
|
||||
{"hw.ncpufound", []_C_int{6, 21}},
|
||||
{"hw.ncpuonline", []_C_int{6, 25}},
|
||||
{"hw.pagesize", []_C_int{6, 7}},
|
||||
{"hw.physmem", []_C_int{6, 19}},
|
||||
{"hw.product", []_C_int{6, 15}},
|
||||
|
|
1
vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
generated
vendored
1
vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
generated
vendored
|
@ -31,6 +31,7 @@ var sysctlMib = []mibentry{
|
|||
{"hw.model", []_C_int{6, 2}},
|
||||
{"hw.ncpu", []_C_int{6, 3}},
|
||||
{"hw.ncpufound", []_C_int{6, 21}},
|
||||
{"hw.ncpuonline", []_C_int{6, 25}},
|
||||
{"hw.pagesize", []_C_int{6, 7}},
|
||||
{"hw.perfpolicy", []_C_int{6, 23}},
|
||||
{"hw.physmem", []_C_int{6, 19}},
|
||||
|
|
1
vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
generated
vendored
1
vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
generated
vendored
|
@ -30,6 +30,7 @@ var sysctlMib = []mibentry{
|
|||
{"hw.model", []_C_int{6, 2}},
|
||||
{"hw.ncpu", []_C_int{6, 3}},
|
||||
{"hw.ncpufound", []_C_int{6, 21}},
|
||||
{"hw.ncpuonline", []_C_int{6, 25}},
|
||||
{"hw.pagesize", []_C_int{6, 7}},
|
||||
{"hw.physmem", []_C_int{6, 19}},
|
||||
{"hw.product", []_C_int{6, 15}},
|
||||
|
|
416
vendor/golang.org/x/sys/unix/ztypes_linux.go
generated
vendored
416
vendor/golang.org/x/sys/unix/ztypes_linux.go
generated
vendored
|
@ -1871,175 +1871,249 @@ const (
|
|||
)
|
||||
|
||||
const (
|
||||
BPF_REG_0 = 0x0
|
||||
BPF_REG_1 = 0x1
|
||||
BPF_REG_2 = 0x2
|
||||
BPF_REG_3 = 0x3
|
||||
BPF_REG_4 = 0x4
|
||||
BPF_REG_5 = 0x5
|
||||
BPF_REG_6 = 0x6
|
||||
BPF_REG_7 = 0x7
|
||||
BPF_REG_8 = 0x8
|
||||
BPF_REG_9 = 0x9
|
||||
BPF_REG_10 = 0xa
|
||||
BPF_MAP_CREATE = 0x0
|
||||
BPF_MAP_LOOKUP_ELEM = 0x1
|
||||
BPF_MAP_UPDATE_ELEM = 0x2
|
||||
BPF_MAP_DELETE_ELEM = 0x3
|
||||
BPF_MAP_GET_NEXT_KEY = 0x4
|
||||
BPF_PROG_LOAD = 0x5
|
||||
BPF_OBJ_PIN = 0x6
|
||||
BPF_OBJ_GET = 0x7
|
||||
BPF_PROG_ATTACH = 0x8
|
||||
BPF_PROG_DETACH = 0x9
|
||||
BPF_PROG_TEST_RUN = 0xa
|
||||
BPF_PROG_GET_NEXT_ID = 0xb
|
||||
BPF_MAP_GET_NEXT_ID = 0xc
|
||||
BPF_PROG_GET_FD_BY_ID = 0xd
|
||||
BPF_MAP_GET_FD_BY_ID = 0xe
|
||||
BPF_OBJ_GET_INFO_BY_FD = 0xf
|
||||
BPF_PROG_QUERY = 0x10
|
||||
BPF_RAW_TRACEPOINT_OPEN = 0x11
|
||||
BPF_BTF_LOAD = 0x12
|
||||
BPF_BTF_GET_FD_BY_ID = 0x13
|
||||
BPF_TASK_FD_QUERY = 0x14
|
||||
BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
|
||||
BPF_MAP_FREEZE = 0x16
|
||||
BPF_BTF_GET_NEXT_ID = 0x17
|
||||
BPF_MAP_TYPE_UNSPEC = 0x0
|
||||
BPF_MAP_TYPE_HASH = 0x1
|
||||
BPF_MAP_TYPE_ARRAY = 0x2
|
||||
BPF_MAP_TYPE_PROG_ARRAY = 0x3
|
||||
BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
|
||||
BPF_MAP_TYPE_PERCPU_HASH = 0x5
|
||||
BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
|
||||
BPF_MAP_TYPE_STACK_TRACE = 0x7
|
||||
BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
|
||||
BPF_MAP_TYPE_LRU_HASH = 0x9
|
||||
BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
|
||||
BPF_MAP_TYPE_LPM_TRIE = 0xb
|
||||
BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
|
||||
BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
|
||||
BPF_MAP_TYPE_DEVMAP = 0xe
|
||||
BPF_MAP_TYPE_SOCKMAP = 0xf
|
||||
BPF_MAP_TYPE_CPUMAP = 0x10
|
||||
BPF_MAP_TYPE_XSKMAP = 0x11
|
||||
BPF_MAP_TYPE_SOCKHASH = 0x12
|
||||
BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
|
||||
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
|
||||
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
|
||||
BPF_MAP_TYPE_QUEUE = 0x16
|
||||
BPF_MAP_TYPE_STACK = 0x17
|
||||
BPF_MAP_TYPE_SK_STORAGE = 0x18
|
||||
BPF_MAP_TYPE_DEVMAP_HASH = 0x19
|
||||
BPF_PROG_TYPE_UNSPEC = 0x0
|
||||
BPF_PROG_TYPE_SOCKET_FILTER = 0x1
|
||||
BPF_PROG_TYPE_KPROBE = 0x2
|
||||
BPF_PROG_TYPE_SCHED_CLS = 0x3
|
||||
BPF_PROG_TYPE_SCHED_ACT = 0x4
|
||||
BPF_PROG_TYPE_TRACEPOINT = 0x5
|
||||
BPF_PROG_TYPE_XDP = 0x6
|
||||
BPF_PROG_TYPE_PERF_EVENT = 0x7
|
||||
BPF_PROG_TYPE_CGROUP_SKB = 0x8
|
||||
BPF_PROG_TYPE_CGROUP_SOCK = 0x9
|
||||
BPF_PROG_TYPE_LWT_IN = 0xa
|
||||
BPF_PROG_TYPE_LWT_OUT = 0xb
|
||||
BPF_PROG_TYPE_LWT_XMIT = 0xc
|
||||
BPF_PROG_TYPE_SOCK_OPS = 0xd
|
||||
BPF_PROG_TYPE_SK_SKB = 0xe
|
||||
BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
|
||||
BPF_PROG_TYPE_SK_MSG = 0x10
|
||||
BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
|
||||
BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
|
||||
BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
|
||||
BPF_PROG_TYPE_LIRC_MODE2 = 0x14
|
||||
BPF_PROG_TYPE_SK_REUSEPORT = 0x15
|
||||
BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
|
||||
BPF_PROG_TYPE_CGROUP_SYSCTL = 0x17
|
||||
BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 0x18
|
||||
BPF_PROG_TYPE_CGROUP_SOCKOPT = 0x19
|
||||
BPF_PROG_TYPE_TRACING = 0x1a
|
||||
BPF_CGROUP_INET_INGRESS = 0x0
|
||||
BPF_CGROUP_INET_EGRESS = 0x1
|
||||
BPF_CGROUP_INET_SOCK_CREATE = 0x2
|
||||
BPF_CGROUP_SOCK_OPS = 0x3
|
||||
BPF_SK_SKB_STREAM_PARSER = 0x4
|
||||
BPF_SK_SKB_STREAM_VERDICT = 0x5
|
||||
BPF_CGROUP_DEVICE = 0x6
|
||||
BPF_SK_MSG_VERDICT = 0x7
|
||||
BPF_CGROUP_INET4_BIND = 0x8
|
||||
BPF_CGROUP_INET6_BIND = 0x9
|
||||
BPF_CGROUP_INET4_CONNECT = 0xa
|
||||
BPF_CGROUP_INET6_CONNECT = 0xb
|
||||
BPF_CGROUP_INET4_POST_BIND = 0xc
|
||||
BPF_CGROUP_INET6_POST_BIND = 0xd
|
||||
BPF_CGROUP_UDP4_SENDMSG = 0xe
|
||||
BPF_CGROUP_UDP6_SENDMSG = 0xf
|
||||
BPF_LIRC_MODE2 = 0x10
|
||||
BPF_FLOW_DISSECTOR = 0x11
|
||||
BPF_CGROUP_SYSCTL = 0x12
|
||||
BPF_CGROUP_UDP4_RECVMSG = 0x13
|
||||
BPF_CGROUP_UDP6_RECVMSG = 0x14
|
||||
BPF_CGROUP_GETSOCKOPT = 0x15
|
||||
BPF_CGROUP_SETSOCKOPT = 0x16
|
||||
BPF_TRACE_RAW_TP = 0x17
|
||||
BPF_TRACE_FENTRY = 0x18
|
||||
BPF_TRACE_FEXIT = 0x19
|
||||
BPF_STACK_BUILD_ID_EMPTY = 0x0
|
||||
BPF_STACK_BUILD_ID_VALID = 0x1
|
||||
BPF_STACK_BUILD_ID_IP = 0x2
|
||||
BPF_ADJ_ROOM_NET = 0x0
|
||||
BPF_ADJ_ROOM_MAC = 0x1
|
||||
BPF_HDR_START_MAC = 0x0
|
||||
BPF_HDR_START_NET = 0x1
|
||||
BPF_LWT_ENCAP_SEG6 = 0x0
|
||||
BPF_LWT_ENCAP_SEG6_INLINE = 0x1
|
||||
BPF_LWT_ENCAP_IP = 0x2
|
||||
BPF_OK = 0x0
|
||||
BPF_DROP = 0x2
|
||||
BPF_REDIRECT = 0x7
|
||||
BPF_LWT_REROUTE = 0x80
|
||||
BPF_SOCK_OPS_VOID = 0x0
|
||||
BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
|
||||
BPF_SOCK_OPS_RWND_INIT = 0x2
|
||||
BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
|
||||
BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
|
||||
BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
|
||||
BPF_SOCK_OPS_NEEDS_ECN = 0x6
|
||||
BPF_SOCK_OPS_BASE_RTT = 0x7
|
||||
BPF_SOCK_OPS_RTO_CB = 0x8
|
||||
BPF_SOCK_OPS_RETRANS_CB = 0x9
|
||||
BPF_SOCK_OPS_STATE_CB = 0xa
|
||||
BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
|
||||
BPF_SOCK_OPS_RTT_CB = 0xc
BPF_TCP_ESTABLISHED = 0x1
BPF_TCP_SYN_SENT = 0x2
BPF_TCP_SYN_RECV = 0x3
BPF_TCP_FIN_WAIT1 = 0x4
BPF_TCP_FIN_WAIT2 = 0x5
BPF_TCP_TIME_WAIT = 0x6
BPF_TCP_CLOSE = 0x7
BPF_TCP_CLOSE_WAIT = 0x8
BPF_TCP_LAST_ACK = 0x9
BPF_TCP_LISTEN = 0xa
BPF_TCP_CLOSING = 0xb
BPF_TCP_NEW_SYN_RECV = 0xc
BPF_TCP_MAX_STATES = 0xd
BPF_FIB_LKUP_RET_SUCCESS = 0x0
BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
BPF_FIB_LKUP_RET_PROHIBIT = 0x3
BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
BPF_FD_TYPE_TRACEPOINT = 0x1
BPF_FD_TYPE_KPROBE = 0x2
BPF_FD_TYPE_KRETPROBE = 0x3
BPF_FD_TYPE_UPROBE = 0x4
BPF_FD_TYPE_URETPROBE = 0x5
BPF_REG_0 = 0x0
BPF_REG_1 = 0x1
BPF_REG_2 = 0x2
BPF_REG_3 = 0x3
BPF_REG_4 = 0x4
BPF_REG_5 = 0x5
BPF_REG_6 = 0x6
BPF_REG_7 = 0x7
BPF_REG_8 = 0x8
BPF_REG_9 = 0x9
BPF_REG_10 = 0xa
BPF_MAP_CREATE = 0x0
BPF_MAP_LOOKUP_ELEM = 0x1
BPF_MAP_UPDATE_ELEM = 0x2
BPF_MAP_DELETE_ELEM = 0x3
BPF_MAP_GET_NEXT_KEY = 0x4
BPF_PROG_LOAD = 0x5
BPF_OBJ_PIN = 0x6
BPF_OBJ_GET = 0x7
BPF_PROG_ATTACH = 0x8
BPF_PROG_DETACH = 0x9
BPF_PROG_TEST_RUN = 0xa
BPF_PROG_GET_NEXT_ID = 0xb
BPF_MAP_GET_NEXT_ID = 0xc
BPF_PROG_GET_FD_BY_ID = 0xd
BPF_MAP_GET_FD_BY_ID = 0xe
BPF_OBJ_GET_INFO_BY_FD = 0xf
BPF_PROG_QUERY = 0x10
BPF_RAW_TRACEPOINT_OPEN = 0x11
BPF_BTF_LOAD = 0x12
BPF_BTF_GET_FD_BY_ID = 0x13
BPF_TASK_FD_QUERY = 0x14
BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15
BPF_MAP_FREEZE = 0x16
BPF_BTF_GET_NEXT_ID = 0x17
BPF_MAP_LOOKUP_BATCH = 0x18
BPF_MAP_LOOKUP_AND_DELETE_BATCH = 0x19
BPF_MAP_UPDATE_BATCH = 0x1a
BPF_MAP_DELETE_BATCH = 0x1b
BPF_LINK_CREATE = 0x1c
BPF_LINK_UPDATE = 0x1d
BPF_MAP_TYPE_UNSPEC = 0x0
BPF_MAP_TYPE_HASH = 0x1
BPF_MAP_TYPE_ARRAY = 0x2
BPF_MAP_TYPE_PROG_ARRAY = 0x3
BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4
BPF_MAP_TYPE_PERCPU_HASH = 0x5
BPF_MAP_TYPE_PERCPU_ARRAY = 0x6
BPF_MAP_TYPE_STACK_TRACE = 0x7
BPF_MAP_TYPE_CGROUP_ARRAY = 0x8
BPF_MAP_TYPE_LRU_HASH = 0x9
BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa
BPF_MAP_TYPE_LPM_TRIE = 0xb
BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc
BPF_MAP_TYPE_HASH_OF_MAPS = 0xd
BPF_MAP_TYPE_DEVMAP = 0xe
BPF_MAP_TYPE_SOCKMAP = 0xf
BPF_MAP_TYPE_CPUMAP = 0x10
BPF_MAP_TYPE_XSKMAP = 0x11
BPF_MAP_TYPE_SOCKHASH = 0x12
BPF_MAP_TYPE_CGROUP_STORAGE = 0x13
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15
BPF_MAP_TYPE_QUEUE = 0x16
BPF_MAP_TYPE_STACK = 0x17
BPF_MAP_TYPE_SK_STORAGE = 0x18
BPF_MAP_TYPE_DEVMAP_HASH = 0x19
BPF_MAP_TYPE_STRUCT_OPS = 0x1a
BPF_PROG_TYPE_UNSPEC = 0x0
BPF_PROG_TYPE_SOCKET_FILTER = 0x1
BPF_PROG_TYPE_KPROBE = 0x2
BPF_PROG_TYPE_SCHED_CLS = 0x3
BPF_PROG_TYPE_SCHED_ACT = 0x4
BPF_PROG_TYPE_TRACEPOINT = 0x5
BPF_PROG_TYPE_XDP = 0x6
BPF_PROG_TYPE_PERF_EVENT = 0x7
BPF_PROG_TYPE_CGROUP_SKB = 0x8
BPF_PROG_TYPE_CGROUP_SOCK = 0x9
BPF_PROG_TYPE_LWT_IN = 0xa
BPF_PROG_TYPE_LWT_OUT = 0xb
BPF_PROG_TYPE_LWT_XMIT = 0xc
BPF_PROG_TYPE_SOCK_OPS = 0xd
BPF_PROG_TYPE_SK_SKB = 0xe
BPF_PROG_TYPE_CGROUP_DEVICE = 0xf
BPF_PROG_TYPE_SK_MSG = 0x10
BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11
BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12
BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13
BPF_PROG_TYPE_LIRC_MODE2 = 0x14
BPF_PROG_TYPE_SK_REUSEPORT = 0x15
BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16
BPF_PROG_TYPE_CGROUP_SYSCTL = 0x17
BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 0x18
BPF_PROG_TYPE_CGROUP_SOCKOPT = 0x19
BPF_PROG_TYPE_TRACING = 0x1a
BPF_PROG_TYPE_STRUCT_OPS = 0x1b
BPF_PROG_TYPE_EXT = 0x1c
BPF_PROG_TYPE_LSM = 0x1d
BPF_CGROUP_INET_INGRESS = 0x0
BPF_CGROUP_INET_EGRESS = 0x1
BPF_CGROUP_INET_SOCK_CREATE = 0x2
BPF_CGROUP_SOCK_OPS = 0x3
BPF_SK_SKB_STREAM_PARSER = 0x4
BPF_SK_SKB_STREAM_VERDICT = 0x5
BPF_CGROUP_DEVICE = 0x6
BPF_SK_MSG_VERDICT = 0x7
BPF_CGROUP_INET4_BIND = 0x8
BPF_CGROUP_INET6_BIND = 0x9
BPF_CGROUP_INET4_CONNECT = 0xa
BPF_CGROUP_INET6_CONNECT = 0xb
BPF_CGROUP_INET4_POST_BIND = 0xc
BPF_CGROUP_INET6_POST_BIND = 0xd
BPF_CGROUP_UDP4_SENDMSG = 0xe
BPF_CGROUP_UDP6_SENDMSG = 0xf
BPF_LIRC_MODE2 = 0x10
BPF_FLOW_DISSECTOR = 0x11
BPF_CGROUP_SYSCTL = 0x12
BPF_CGROUP_UDP4_RECVMSG = 0x13
BPF_CGROUP_UDP6_RECVMSG = 0x14
BPF_CGROUP_GETSOCKOPT = 0x15
BPF_CGROUP_SETSOCKOPT = 0x16
BPF_TRACE_RAW_TP = 0x17
BPF_TRACE_FENTRY = 0x18
BPF_TRACE_FEXIT = 0x19
BPF_MODIFY_RETURN = 0x1a
BPF_LSM_MAC = 0x1b
BPF_ANY = 0x0
BPF_NOEXIST = 0x1
BPF_EXIST = 0x2
BPF_F_LOCK = 0x4
BPF_F_NO_PREALLOC = 0x1
BPF_F_NO_COMMON_LRU = 0x2
BPF_F_NUMA_NODE = 0x4
BPF_F_RDONLY = 0x8
BPF_F_WRONLY = 0x10
BPF_F_STACK_BUILD_ID = 0x20
BPF_F_ZERO_SEED = 0x40
BPF_F_RDONLY_PROG = 0x80
BPF_F_WRONLY_PROG = 0x100
BPF_F_CLONE = 0x200
BPF_F_MMAPABLE = 0x400
BPF_STACK_BUILD_ID_EMPTY = 0x0
BPF_STACK_BUILD_ID_VALID = 0x1
BPF_STACK_BUILD_ID_IP = 0x2
BPF_F_RECOMPUTE_CSUM = 0x1
BPF_F_INVALIDATE_HASH = 0x2
BPF_F_HDR_FIELD_MASK = 0xf
BPF_F_PSEUDO_HDR = 0x10
BPF_F_MARK_MANGLED_0 = 0x20
BPF_F_MARK_ENFORCE = 0x40
BPF_F_INGRESS = 0x1
BPF_F_TUNINFO_IPV6 = 0x1
BPF_F_SKIP_FIELD_MASK = 0xff
BPF_F_USER_STACK = 0x100
BPF_F_FAST_STACK_CMP = 0x200
BPF_F_REUSE_STACKID = 0x400
BPF_F_USER_BUILD_ID = 0x800
BPF_F_ZERO_CSUM_TX = 0x2
BPF_F_DONT_FRAGMENT = 0x4
BPF_F_SEQ_NUMBER = 0x8
BPF_F_INDEX_MASK = 0xffffffff
BPF_F_CURRENT_CPU = 0xffffffff
BPF_F_CTXLEN_MASK = 0xfffff00000000
BPF_F_CURRENT_NETNS = -0x1
BPF_F_ADJ_ROOM_FIXED_GSO = 0x1
BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2
BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4
BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8
BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10
BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff
BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38
BPF_F_SYSCTL_BASE_NAME = 0x1
BPF_SK_STORAGE_GET_F_CREATE = 0x1
BPF_F_GET_BRANCH_RECORDS_SIZE = 0x1
BPF_ADJ_ROOM_NET = 0x0
BPF_ADJ_ROOM_MAC = 0x1
BPF_HDR_START_MAC = 0x0
BPF_HDR_START_NET = 0x1
BPF_LWT_ENCAP_SEG6 = 0x0
BPF_LWT_ENCAP_SEG6_INLINE = 0x1
BPF_LWT_ENCAP_IP = 0x2
BPF_OK = 0x0
BPF_DROP = 0x2
BPF_REDIRECT = 0x7
BPF_LWT_REROUTE = 0x80
BPF_SOCK_OPS_RTO_CB_FLAG = 0x1
BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2
BPF_SOCK_OPS_STATE_CB_FLAG = 0x4
BPF_SOCK_OPS_RTT_CB_FLAG = 0x8
BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf
BPF_SOCK_OPS_VOID = 0x0
BPF_SOCK_OPS_TIMEOUT_INIT = 0x1
BPF_SOCK_OPS_RWND_INIT = 0x2
BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3
BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4
BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5
BPF_SOCK_OPS_NEEDS_ECN = 0x6
BPF_SOCK_OPS_BASE_RTT = 0x7
BPF_SOCK_OPS_RTO_CB = 0x8
BPF_SOCK_OPS_RETRANS_CB = 0x9
BPF_SOCK_OPS_STATE_CB = 0xa
BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb
BPF_SOCK_OPS_RTT_CB = 0xc
BPF_TCP_ESTABLISHED = 0x1
BPF_TCP_SYN_SENT = 0x2
BPF_TCP_SYN_RECV = 0x3
BPF_TCP_FIN_WAIT1 = 0x4
BPF_TCP_FIN_WAIT2 = 0x5
BPF_TCP_TIME_WAIT = 0x6
BPF_TCP_CLOSE = 0x7
BPF_TCP_CLOSE_WAIT = 0x8
BPF_TCP_LAST_ACK = 0x9
BPF_TCP_LISTEN = 0xa
BPF_TCP_CLOSING = 0xb
BPF_TCP_NEW_SYN_RECV = 0xc
BPF_TCP_MAX_STATES = 0xd
TCP_BPF_IW = 0x3e9
TCP_BPF_SNDCWND_CLAMP = 0x3ea
BPF_DEVCG_ACC_MKNOD = 0x1
BPF_DEVCG_ACC_READ = 0x2
BPF_DEVCG_ACC_WRITE = 0x4
BPF_DEVCG_DEV_BLOCK = 0x1
BPF_DEVCG_DEV_CHAR = 0x2
BPF_FIB_LOOKUP_DIRECT = 0x1
BPF_FIB_LOOKUP_OUTPUT = 0x2
BPF_FIB_LKUP_RET_SUCCESS = 0x0
BPF_FIB_LKUP_RET_BLACKHOLE = 0x1
BPF_FIB_LKUP_RET_UNREACHABLE = 0x2
BPF_FIB_LKUP_RET_PROHIBIT = 0x3
BPF_FIB_LKUP_RET_NOT_FWDED = 0x4
BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5
BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6
BPF_FIB_LKUP_RET_NO_NEIGH = 0x7
BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8
BPF_FD_TYPE_RAW_TRACEPOINT = 0x0
BPF_FD_TYPE_TRACEPOINT = 0x1
BPF_FD_TYPE_KPROBE = 0x2
BPF_FD_TYPE_KRETPROBE = 0x3
BPF_FD_TYPE_UPROBE = 0x4
BPF_FD_TYPE_URETPROBE = 0x5
BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 0x1
BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 0x2
BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 0x4
)

const (
@@ -2205,7 +2279,7 @@ const (
DEVLINK_CMD_DPIPE_ENTRIES_GET = 0x20
DEVLINK_CMD_DPIPE_HEADERS_GET = 0x21
DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET = 0x22
DEVLINK_CMD_MAX = 0x44
DEVLINK_CMD_MAX = 0x48
DEVLINK_PORT_TYPE_NOTSET = 0x0
DEVLINK_PORT_TYPE_AUTO = 0x1
DEVLINK_PORT_TYPE_ETH = 0x2
@@ -2285,7 +2359,7 @@ const (
DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE = 0x3c
DEVLINK_ATTR_PAD = 0x3d
DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 0x3e
DEVLINK_ATTR_MAX = 0x8c
DEVLINK_ATTR_MAX = 0x90
DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0
DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1
DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0
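The constants above are generated into golang.org/x/sys/unix (ztypes_linux.go) and mirror the enum values in linux/bpf.h. As a minimal, Linux-only sketch (not part of the diff), assuming the vendored module version above, they can be inspected directly from Go:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// The generated values match the kernel's linux/bpf.h enums, so they can be
	// passed straight through to bpf(2) attributes or compared against kernel output.
	fmt.Printf("BPF_MAP_TYPE_HASH = %#x\n", unix.BPF_MAP_TYPE_HASH)
	fmt.Printf("BPF_PROG_TYPE_XDP = %#x\n", unix.BPF_PROG_TYPE_XDP)
}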
11 vendor/golang.org/x/sys/windows/env_windows.go generated vendored
@@ -8,7 +8,6 @@ package windows
import (
"syscall"
"unicode/utf16"
"unsafe"
)

@@ -40,17 +39,11 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) {
defer DestroyEnvironmentBlock(block)
blockp := uintptr(unsafe.Pointer(block))
for {
entry := (*[(1 << 30) - 1]uint16)(unsafe.Pointer(blockp))[:]
for i, v := range entry {
if v == 0 {
entry = entry[:i]
break
}
}
entry := UTF16PtrToString((*uint16)(unsafe.Pointer(blockp)))
if len(entry) == 0 {
break
}
env = append(env, string(utf16.Decode(entry)))
env = append(env, entry)
blockp += 2 * (uintptr(len(entry)) + 1)
}
return env, nil
5 vendor/golang.org/x/sys/windows/memory_windows.go generated vendored
@@ -23,4 +23,9 @@ const (
PAGE_EXECUTE_READ = 0x20
PAGE_EXECUTE_READWRITE = 0x40
PAGE_EXECUTE_WRITECOPY = 0x80

QUOTA_LIMITS_HARDWS_MIN_DISABLE = 0x00000002
QUOTA_LIMITS_HARDWS_MIN_ENABLE = 0x00000001
QUOTA_LIMITS_HARDWS_MAX_DISABLE = 0x00000008
QUOTA_LIMITS_HARDWS_MAX_ENABLE = 0x00000004
)
20 vendor/golang.org/x/sys/windows/security_windows.go generated vendored
@@ -7,6 +7,8 @@ package windows
import (
"syscall"
"unsafe"

"golang.org/x/sys/internal/unsafeheader"
)

const (
@@ -1229,7 +1231,7 @@ func (sd *SECURITY_DESCRIPTOR) String() string {
return ""
}
defer LocalFree(Handle(unsafe.Pointer(sddl)))
return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(sddl))[:])
return UTF16PtrToString(sddl)
}

// ToAbsolute converts a self-relative security descriptor into an absolute one.
@@ -1307,9 +1309,17 @@ func (absoluteSD *SECURITY_DESCRIPTOR) ToSelfRelative() (selfRelativeSD *SECURIT
}

func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() *SECURITY_DESCRIPTOR {
sdBytes := make([]byte, selfRelativeSD.Length())
copy(sdBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(selfRelativeSD))[:len(sdBytes)])
return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&sdBytes[0]))
sdLen := (int)(selfRelativeSD.Length())

var src []byte
h := (*unsafeheader.Slice)(unsafe.Pointer(&src))
h.Data = unsafe.Pointer(selfRelativeSD)
h.Len = sdLen
h.Cap = sdLen

dst := make([]byte, sdLen)
copy(dst, src)
return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0]))
}

// SecurityDescriptorFromString converts an SDDL string describing a security descriptor into a
@@ -1391,6 +1401,6 @@ func ACLFromEntries(explicitEntries []EXPLICIT_ACCESS, mergedACL *ACL) (acl *ACL
}
defer LocalFree(Handle(unsafe.Pointer(winHeapACL)))
aclBytes := make([]byte, winHeapACL.aclSize)
copy(aclBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(winHeapACL))[:len(aclBytes)])
copy(aclBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(winHeapACL))[:len(aclBytes):len(aclBytes)])
return (*ACL)(unsafe.Pointer(&aclBytes[0])), nil
}
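The rewritten copySelfRelativeSecurityDescriptor above replaces the old fixed-size-array cast with a slice header built via the x/sys-internal unsafeheader package. That internal package cannot be imported outside the x/sys module; a minimal, self-contained sketch of the same copy-out pattern using reflect.SliceHeader instead (copyFromPointer is a hypothetical helper, not part of the diff):

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// copyFromPointer copies n bytes starting at p into a fresh Go-managed slice,
// mirroring what copySelfRelativeSecurityDescriptor now does with unsafeheader.Slice.
func copyFromPointer(p unsafe.Pointer, n int) []byte {
	var src []byte
	h := (*reflect.SliceHeader)(unsafe.Pointer(&src))
	h.Data = uintptr(p)
	h.Len = n
	h.Cap = n

	dst := make([]byte, n)
	copy(dst, src)
	return dst
}

func main() {
	buf := [4]byte{1, 2, 3, 4}
	fmt.Println(copyFromPointer(unsafe.Pointer(&buf[0]), len(buf))) // [1 2 3 4]
}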
7 vendor/golang.org/x/sys/windows/svc/security.go generated vendored
@@ -7,8 +7,6 @@
package svc

import (
"unsafe"

"golang.org/x/sys/windows"
)

@@ -48,9 +46,8 @@ func IsAnInteractiveSession() (bool, error) {
if err != nil {
return false, err
}
p := unsafe.Pointer(&gs.Groups[0])
groups := (*[2 << 20]windows.SIDAndAttributes)(p)[:gs.GroupCount]
for _, g := range groups {

for _, g := range gs.AllGroups() {
if windows.EqualSid(g.Sid, interSid) {
return true, nil
}
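The hunk above only changes how IsAnInteractiveSession walks the token groups (gs.AllGroups() instead of a hand-rolled array cast); the exported behaviour is unchanged. A minimal, Windows-only usage sketch (not part of the diff):

package main

import (
	"fmt"

	"golang.org/x/sys/windows/svc"
)

func main() {
	// Returns true when the process runs in an interactive session rather than as a service.
	interactive, err := svc.IsAnInteractiveSession()
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("interactive session:", interactive)
}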
11 vendor/golang.org/x/sys/windows/svc/service.go generated vendored
@@ -14,6 +14,7 @@ import (
"syscall"
"unsafe"

"golang.org/x/sys/internal/unsafeheader"
"golang.org/x/sys/windows"
)

@@ -224,10 +225,16 @@ const (
func (s *service) run() {
s.goWaits.Wait()
s.h = windows.Handle(ssHandle)
argv := (*[100]*int16)(unsafe.Pointer(sArgv))[:sArgc]

var argv []*uint16
hdr := (*unsafeheader.Slice)(unsafe.Pointer(&argv))
hdr.Data = unsafe.Pointer(sArgv)
hdr.Len = int(sArgc)
hdr.Cap = int(sArgc)

args := make([]string, len(argv))
for i, a := range argv {
args[i] = syscall.UTF16ToString((*[1 << 20]uint16)(unsafe.Pointer(a))[:])
args[i] = windows.UTF16PtrToString(a)
}

cmdsToHandler := make(chan ChangeRequest)
39 vendor/golang.org/x/sys/windows/syscall_windows.go generated vendored
@@ -13,6 +13,8 @@ import (
"time"
"unicode/utf16"
"unsafe"

"golang.org/x/sys/internal/unsafeheader"
)

type Handle uintptr
@@ -117,6 +119,32 @@ func UTF16PtrFromString(s string) (*uint16, error) {
return &a[0], nil
}

// UTF16PtrToString takes a pointer to a UTF-16 sequence and returns the corresponding UTF-8 encoded string.
// If the pointer is nil, this returns the empty string. This assumes that the UTF-16 sequence is terminated
// at a zero word; if the zero word is not present, the program may crash.
func UTF16PtrToString(p *uint16) string {
if p == nil {
return ""
}
if *p == 0 {
return ""
}

// Find NUL terminator.
n := 0
for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ {
ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p))
}

var s []uint16
h := (*unsafeheader.Slice)(unsafe.Pointer(&s))
h.Data = unsafe.Pointer(p)
h.Len = n
h.Cap = n

return string(utf16.Decode(s))
}

func Getpagesize() int { return 4096 }

// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention.
@@ -280,6 +308,8 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys GetProcessId(process Handle) (id uint32, err error)
//sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error)
//sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost
//sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32)
//sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error)

// Volume Management Functions
//sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW
@@ -1181,7 +1211,12 @@ type IPv6Mreq struct {
Interface uint32
}

func GetsockoptInt(fd Handle, level, opt int) (int, error) { return -1, syscall.EWINDOWS }
func GetsockoptInt(fd Handle, level, opt int) (int, error) {
v := int32(0)
l := int32(unsafe.Sizeof(v))
err := Getsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), &l)
return int(v), err
}

func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) {
sys := sysLinger{Onoff: uint16(l.Onoff), Linger: uint16(l.Linger)}
@@ -1378,7 +1413,7 @@ func (t Token) KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, e
return "", err
}
defer CoTaskMemFree(unsafe.Pointer(p))
return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(p))[:]), nil
return UTF16PtrToString(p), nil
}

// RtlGetVersion returns the version of the underlying operating system, ignoring
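UTF16PtrToString is the newly exported helper that the hunks above (env_windows.go, security_windows.go, svc/service.go, KnownFolderPath) now rely on instead of fixed-size-array casts. A minimal, Windows-only round-trip sketch (not part of the diff), assuming golang.org/x/sys/windows at the version vendored above:

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Convert a Go string to a NUL-terminated UTF-16 pointer and back.
	p, err := windows.UTF16PtrFromString("hello, windows")
	if err != nil {
		panic(err)
	}
	fmt.Println(windows.UTF16PtrToString(p)) // prints "hello, windows"
}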
19 vendor/golang.org/x/sys/windows/zsyscall_windows.go generated vendored
@@ -217,6 +217,8 @@ var (
procGetProcessId = modkernel32.NewProc("GetProcessId")
procOpenThread = modkernel32.NewProc("OpenThread")
procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost")
procGetProcessWorkingSetSizeEx = modkernel32.NewProc("GetProcessWorkingSetSizeEx")
procSetProcessWorkingSetSizeEx = modkernel32.NewProc("SetProcessWorkingSetSizeEx")
procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW")
procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW")
procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW")
@@ -2414,6 +2416,23 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) {
return
}

func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) {
syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0)
return
}

func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}

func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) {
r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)))
if r1 == 0 {
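The two working-set wrappers added above pair with the QUOTA_LIMITS_HARDWS_* constants from memory_windows.go earlier in this diff. A minimal, Windows-only sketch of how a caller might use them (not part of the diff; the doubling of the maximum is an arbitrary illustration):

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	h := windows.CurrentProcess() // pseudo-handle for the current process
	var min, max uintptr
	var flags uint32

	// Read the current working-set limits and their hard/soft flags.
	windows.GetProcessWorkingSetSizeEx(h, &min, &max, &flags)
	fmt.Printf("working set: min=%d max=%d flags=%#x\n", min, max, flags)

	// Raise the maximum while keeping both limits soft.
	err := windows.SetProcessWorkingSetSizeEx(h, min, max*2,
		windows.QUOTA_LIMITS_HARDWS_MIN_DISABLE|windows.QUOTA_LIMITS_HARDWS_MAX_DISABLE)
	fmt.Println("SetProcessWorkingSetSizeEx:", err)
}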
11 vendor/modules.txt vendored
@@ -233,7 +233,7 @@ github.com/go-git/go-billy/v5/helper/chroot
github.com/go-git/go-billy/v5/helper/polyfill
github.com/go-git/go-billy/v5/osfs
github.com/go-git/go-billy/v5/util
# github.com/go-git/go-git/v5 v5.0.0
# github.com/go-git/go-git/v5 v5.1.0
## explicit
github.com/go-git/go-git/v5
github.com/go-git/go-git/v5/config
@@ -241,6 +241,7 @@ github.com/go-git/go-git/v5/internal/revision
github.com/go-git/go-git/v5/internal/url
github.com/go-git/go-git/v5/plumbing
github.com/go-git/go-git/v5/plumbing/cache
github.com/go-git/go-git/v5/plumbing/color
github.com/go-git/go-git/v5/plumbing/filemode
github.com/go-git/go-git/v5/plumbing/format/commitgraph
github.com/go-git/go-git/v5/plumbing/format/config
@@ -399,6 +400,8 @@ github.com/hashicorp/hcl/json/token
# github.com/huandu/xstrings v1.3.0
## explicit
github.com/huandu/xstrings
# github.com/imdario/mergo v0.3.9
github.com/imdario/mergo
# github.com/issue9/assert v1.3.2
## explicit
# github.com/issue9/identicon v1.0.1
@@ -697,7 +700,7 @@ go.mongodb.org/mongo-driver/bson/bsonrw
go.mongodb.org/mongo-driver/bson/bsontype
go.mongodb.org/mongo-driver/bson/primitive
go.mongodb.org/mongo-driver/x/bsonx/bsoncore
# golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79
# golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9
## explicit
golang.org/x/crypto/acme
golang.org/x/crypto/acme/autocert
@@ -728,7 +731,7 @@ golang.org/x/crypto/ssh/knownhosts
# golang.org/x/mod v0.2.0
golang.org/x/mod/module
golang.org/x/mod/semver
# golang.org/x/net v0.0.0-20200506145744-7e3656a0809f
# golang.org/x/net v0.0.0-20200602114024-627f9648deb9
## explicit
golang.org/x/net/context
golang.org/x/net/context/ctxhttp
@@ -745,7 +748,7 @@ golang.org/x/oauth2/google
golang.org/x/oauth2/internal
golang.org/x/oauth2/jws
golang.org/x/oauth2/jwt
# golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f
# golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1
## explicit
golang.org/x/sys/cpu
golang.org/x/sys/internal/unsafeheader