
Add gitea-vet (#10948)

* Add copyright

Signed-off-by: jolheiser <john.olheiser@gmail.com>

* Add gitea-vet and fix non-compliance

Signed-off-by: jolheiser <john.olheiser@gmail.com>

* Combine tools.go into build.go and clean up

Signed-off-by: jolheiser <john.olheiser@gmail.com>

* Remove extra GO111MODULE=on

Signed-off-by: jolheiser <john.olheiser@gmail.com>
John Olheiser 2020-04-05 01:20:50 -05:00 committed by GitHub
parent b74d30ae27
commit baadb51445
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
55 changed files with 2990 additions and 101 deletions

1
.gitignore vendored
View file

@@ -44,6 +44,7 @@ coverage.all
*.log
/gitea
/gitea-vet
/debug
/integrations.test

View file

@@ -193,7 +193,11 @@ fmt:
.PHONY: vet
vet:
# Default vet
$(GO) vet $(GO_PACKAGES)
# Custom vet
$(GO) build -mod=vendor gitea.com/jolheiser/gitea-vet
$(GO) vet -vettool=gitea-vet $(GO_PACKAGES)
.PHONY: $(TAGS_EVIDENCE)
$(TAGS_EVIDENCE):
@@ -264,7 +268,7 @@ fmt-check:
lint: lint-backend lint-frontend
.PHONY: lint-backend
lint-backend: golangci-lint revive swagger-check swagger-validate test-vendor
lint-backend: golangci-lint revive vet swagger-check swagger-validate test-vendor
.PHONY: lint-frontend
lint-frontend: node_modules
@@ -301,7 +305,7 @@ unit-test-coverage:
.PHONY: vendor
vendor:
$(GO) mod tidy && TAGS="$(TAGS) vendor" $(GO) mod vendor
$(GO) mod tidy && $(GO) mod vendor
.PHONY: test-vendor
test-vendor: vendor

View file

@@ -23,4 +23,10 @@ import (
// for cover merge
_ "golang.org/x/tools/cover"
// for vet
_ "gitea.com/jolheiser/gitea-vet"
// for swagger
_ "github.com/go-swagger/go-swagger/cmd/swagger"
)

View file

@@ -1,3 +1,7 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package main
/*

3
go.mod
View file

@@ -4,6 +4,7 @@ go 1.14
require (
cloud.google.com/go v0.45.0 // indirect
gitea.com/jolheiser/gitea-vet v0.1.0
gitea.com/lunny/levelqueue v0.2.0
gitea.com/macaron/binding v0.0.0-20190822013154-a5f53841ed2b
gitea.com/macaron/cache v0.0.0-20190822004001-a6e7fee4ee76
@@ -111,7 +112,7 @@ require (
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527
golang.org/x/text v0.3.2
golang.org/x/tools v0.0.0-20200225230052-807dcd883420
golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
gopkg.in/asn1-ber.v1 v1.0.0-20150924051756-4e86f4367175 // indirect
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df

11
go.sum
View file

@@ -9,6 +9,8 @@ cloud.google.com/go v0.45.0 h1:bALuGBSgE+BD4rxsopAYlqjcwqcQtye6pWG4bC3N/k0=
cloud.google.com/go v0.45.0/go.mod h1:452BcPOeI9AZfbvDw0Tbo7D32wA+WX9WME8AZwMEDZU=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
gitea.com/jolheiser/gitea-vet v0.1.0 h1:gJEms9YWbIcrPOEmDOJ+5JZXCYFxNpwxlI73uRulAi4=
gitea.com/jolheiser/gitea-vet v0.1.0/go.mod h1:2Oa6TAdEp1N/38oBNh3ZeiSEER60D/CeDaBFv2sdH58=
gitea.com/lunny/levelqueue v0.2.0 h1:lR/5EAwQtFcn5YvPEkNMw0p9pAy2/O2nSP5ImECLA2E=
gitea.com/lunny/levelqueue v0.2.0/go.mod h1:G7hVb908t0Bl0uk7zGSg14fyzNtxgtD9Shf04wkMK7s=
gitea.com/macaron/binding v0.0.0-20190822013154-a5f53841ed2b h1:vXt85uYV17KURaUlhU7v4GbCShkqRZDSfo0TkC0YCjQ=
@@ -651,6 +653,8 @@ golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -672,6 +676,7 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180620175406-ef147856a6dd/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -684,6 +689,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180824143301-4910a1d54f87/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -734,11 +740,16 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200225230052-807dcd883420 h1:4RJNOV+2rLxMEfr6QIpC7GEv9MjD6ApGXTCLrNF9+eA=
golang.org/x/tools v0.0.0-20200225230052-807dcd883420/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224 h1:azwY/v0y0K4mFHVsg5+UrTgchqALYWpqVo6vL5OmkmI=
golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=

View file

@@ -1,3 +1,7 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (

View file

@@ -1,3 +1,7 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (

View file

@@ -1,3 +1,7 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (

View file

@@ -1,3 +1,7 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (

View file

@@ -1,3 +1,7 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (

View file

@@ -1,3 +1,7 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (

View file

@@ -1,3 +1,7 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (

View file

@@ -1,3 +1,7 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package base
import (

View file

@@ -1,3 +1,7 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package generate
import (

View file

@@ -1,3 +1,7 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package git
import (

View file

@@ -1,3 +1,7 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package lfs
import (

View file

@@ -1,3 +1,7 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package lfs
import (

View file

@@ -1,3 +1,7 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package process
import (

View file

@@ -1,3 +1,7 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package user
import (

View file

@@ -1,3 +1,7 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package misc
import (

View file

@@ -1,3 +1,7 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package misc
import (

View file

@@ -1,3 +1,7 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repo
import (

View file

@@ -1,3 +1,7 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package routers
import (

View file

@@ -1,10 +0,0 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
// +build tools
package tools
import (
_ "github.com/go-swagger/go-swagger/cmd/swagger"
)

5
vendor/gitea.com/jolheiser/gitea-vet/.gitignore generated vendored Normal file
View file

@@ -0,0 +1,5 @@
# GoLand
.idea/
# Binaries
/gitea-vet*

19
vendor/gitea.com/jolheiser/gitea-vet/LICENSE generated vendored Normal file
View file

@@ -0,0 +1,19 @@
Copyright (c) 2020 The Gitea Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

7
vendor/gitea.com/jolheiser/gitea-vet/Makefile generated vendored Normal file
View file

@@ -0,0 +1,7 @@
.PHONY: build
build:
go build
.PHONY: fmt
fmt:
go fmt ./...

7
vendor/gitea.com/jolheiser/gitea-vet/README.md generated vendored Normal file
View file

@@ -0,0 +1,7 @@
# gitea-vet
`go vet` tool for Gitea
| Analyzer | Description |
|----------|---------------------------------------------------------------------|
| Imports | Checks for import sorting. stdlib->code.gitea.io->other |
| License | Checks file headers for some form of `Copyright...YYYY...Gitea/Gogs`|
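For illustration, a minimal sketch of a file layout the Imports analyzer accepts (hypothetical package, and import paths chosen only to show the three groups): standard library first, then code.gitea.io packages, then imports from any other host.

// Package example is hypothetical; its imports are grouped the way the
// Imports analyzer expects: standard library, then code.gitea.io, then other hosts.
package example

import (
	"fmt"     // level 1: standard library (import path contains no dot)
	"strings" // level 1

	_ "code.gitea.io/gitea/modules/log" // level 2: code.gitea.io/...

	_ "github.com/stretchr/testify/assert" // level 3: any other dotted host
)

// Greet exists only so the standard-library imports are used.
func Greet(name string) string {
	return fmt.Sprintf("Hello, %s!", strings.TrimSpace(name))
}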

54
vendor/gitea.com/jolheiser/gitea-vet/checks/imports.go generated vendored Normal file
View file

@@ -0,0 +1,54 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package checks
import (
"strings"
"golang.org/x/tools/go/analysis"
)
var Imports = &analysis.Analyzer{
Name: "imports",
Doc: "check for import order.",
Run: runImports,
}
func runImports(pass *analysis.Pass) (interface{}, error) {
for _, file := range pass.Files {
level := 0
for _, im := range file.Imports {
var lvl int
val := im.Path.Value
if importHasPrefix(val, "code.gitea.io") {
lvl = 2
} else if strings.Contains(val, ".") {
lvl = 3
} else {
lvl = 1
}
if lvl < level {
pass.Reportf(file.Pos(), "Imports are sorted wrong")
break
}
level = lvl
}
}
return nil, nil
}
func importHasPrefix(s, p string) bool {
return strings.HasPrefix(s, "\""+p)
}
func sliceHasPrefix(s string, prefixes ...string) bool {
for _, p := range prefixes {
if importHasPrefix(s, p) {
return true
}
}
return false
}
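To make the level assignment above concrete, here is a small self-contained sketch (a hypothetical level helper mirroring runImports and importHasPrefix, not part of the tool) that classifies a few representative paths; the check reports "Imports are sorted wrong" as soon as a later import has a lower level than an earlier one.

package main

import (
	"fmt"
	"strings"
)

// level mirrors the classification in runImports above; it operates on the
// quoted import path exactly as it appears in the AST (im.Path.Value).
func level(quotedPath string) int {
	switch {
	case strings.HasPrefix(quotedPath, `"code.gitea.io`):
		return 2
	case strings.Contains(quotedPath, "."):
		return 3
	default:
		return 1
	}
}

func main() {
	paths := []string{`"fmt"`, `"code.gitea.io/gitea/models"`, `"github.com/stretchr/testify/assert"`}
	for _, p := range paths {
		fmt.Println(p, "->", level(p)) // prints 1, 2 and 3 respectively
	}
}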

73
vendor/gitea.com/jolheiser/gitea-vet/checks/license.go generated vendored Normal file
View file

@@ -0,0 +1,73 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package checks
import (
"regexp"
"strings"
"golang.org/x/tools/go/analysis"
)
var (
header = regexp.MustCompile(`.*Copyright.*\d{4}.*(Gitea|Gogs)`)
goGenerate = "//go:generate"
buildTag = "// +build"
)
var License = &analysis.Analyzer{
Name: "license",
Doc: "check for a copyright header.",
Run: runLicense,
}
func runLicense(pass *analysis.Pass) (interface{}, error) {
for _, file := range pass.Files {
if len(file.Comments) == 0 {
pass.Reportf(file.Pos(), "Copyright not found")
continue
}
if len(file.Comments[0].List) == 0 {
pass.Reportf(file.Pos(), "Copyright not found or wrong")
continue
}
commentGroup := 0
if strings.HasPrefix(file.Comments[0].List[0].Text, goGenerate) {
if len(file.Comments[0].List) > 1 {
pass.Reportf(file.Pos(), "Must be an empty line between the go:generate and the Copyright")
continue
}
commentGroup++
}
if strings.HasPrefix(file.Comments[0].List[0].Text, buildTag) {
commentGroup++
}
if len(file.Comments) < commentGroup+1 {
pass.Reportf(file.Pos(), "Copyright not found")
continue
}
if len(file.Comments[commentGroup].List) < 1 {
pass.Reportf(file.Pos(), "Copyright not found or wrong")
continue
}
var check bool
for _, comment := range file.Comments[commentGroup].List {
if header.MatchString(comment.Text) {
check = true
}
}
if !check {
pass.Reportf(file.Pos(), "Copyright did not match check")
}
}
return nil, nil
}
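For reference, a minimal sketch (hypothetical file and generator command) of a header layout the License analyzer accepts when a //go:generate directive comes first: the directive must form its own comment group, i.e. be separated from the Copyright block by a blank line, and the Copyright block must match `Copyright...YYYY...Gitea/Gogs`.

//go:generate echo hypothetical-generator-command

// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

// Package example is hypothetical and exists only to illustrate the check.
package example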

5
vendor/gitea.com/jolheiser/gitea-vet/go.mod generated vendored Normal file
View file

@@ -0,0 +1,5 @@
module gitea.com/jolheiser/gitea-vet
go 1.14
require golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224

20
vendor/gitea.com/jolheiser/gitea-vet/go.sum generated vendored Normal file
View file

@@ -0,0 +1,20 @@
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224 h1:azwY/v0y0K4mFHVsg5+UrTgchqALYWpqVo6vL5OmkmI=
golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

17
vendor/gitea.com/jolheiser/gitea-vet/main.go generated vendored Normal file
View file

@@ -0,0 +1,17 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package main
import (
"gitea.com/jolheiser/gitea-vet/checks"
"golang.org/x/tools/go/analysis/unitchecker"
)
func main() {
unitchecker.Main(
checks.Imports,
checks.License,
)
}
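The binary built from this main speaks the unitchecker protocol, which is what `go vet -vettool=` expects: the Makefile's vet target above first runs the default `go vet` and then re-runs it with -vettool pointing at this binary, so the stock analyzers and the two custom checks both apply under lint-backend.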

View file

@@ -123,8 +123,12 @@ type Version struct {
Version string `json:",omitempty"`
}
// String returns the module version syntax Path@Version.
// String returns a representation of the Version suitable for logging
// (Path@Version, or just Path if Version is empty).
func (m Version) String() string {
if m.Version == "" {
return m.Path
}
return m.Path + "@" + m.Version
}

221
vendor/golang.org/x/tools/go/analysis/analysis.go generated vendored Normal file
View file

@@ -0,0 +1,221 @@
package analysis
import (
"flag"
"fmt"
"go/ast"
"go/token"
"go/types"
"reflect"
)
// An Analyzer describes an analysis function and its options.
type Analyzer struct {
// The Name of the analyzer must be a valid Go identifier
// as it may appear in command-line flags, URLs, and so on.
Name string
// Doc is the documentation for the analyzer.
// The part before the first "\n\n" is the title
// (no capital or period, max ~60 letters).
Doc string
// Flags defines any flags accepted by the analyzer.
// The manner in which these flags are exposed to the user
// depends on the driver which runs the analyzer.
Flags flag.FlagSet
// Run applies the analyzer to a package.
// It returns an error if the analyzer failed.
//
// On success, the Run function may return a result
// computed by the Analyzer; its type must match ResultType.
// The driver makes this result available as an input to
// another Analyzer that depends directly on this one (see
// Requires) when it analyzes the same package.
//
// To pass analysis results between packages (and thus
// potentially between address spaces), use Facts, which are
// serializable.
Run func(*Pass) (interface{}, error)
// RunDespiteErrors allows the driver to invoke
// the Run method of this analyzer even on a
// package that contains parse or type errors.
RunDespiteErrors bool
// Requires is a set of analyzers that must run successfully
// before this one on a given package. This analyzer may inspect
// the outputs produced by each analyzer in Requires.
// The graph over analyzers implied by Requires edges must be acyclic.
//
// Requires establishes a "horizontal" dependency between
// analysis passes (different analyzers, same package).
Requires []*Analyzer
// ResultType is the type of the optional result of the Run function.
ResultType reflect.Type
// FactTypes indicates that this analyzer imports and exports
// Facts of the specified concrete types.
// An analyzer that uses facts may assume that its import
// dependencies have been similarly analyzed before it runs.
// Facts must be pointers.
//
// FactTypes establishes a "vertical" dependency between
// analysis passes (same analyzer, different packages).
FactTypes []Fact
}
func (a *Analyzer) String() string { return a.Name }
// A Pass provides information to the Run function that
// applies a specific analyzer to a single Go package.
//
// It forms the interface between the analysis logic and the driver
// program, and has both input and output components.
//
// As in a compiler, one pass may depend on the result computed by another.
//
// The Run function should not call any of the Pass functions concurrently.
type Pass struct {
Analyzer *Analyzer // the identity of the current analyzer
// syntax and type information
Fset *token.FileSet // file position information
Files []*ast.File // the abstract syntax tree of each file
OtherFiles []string // names of non-Go files of this package
Pkg *types.Package // type information about the package
TypesInfo *types.Info // type information about the syntax trees
TypesSizes types.Sizes // function for computing sizes of types
// Report reports a Diagnostic, a finding about a specific location
// in the analyzed source code such as a potential mistake.
// It may be called by the Run function.
Report func(Diagnostic)
// ResultOf provides the inputs to this analysis pass, which are
// the corresponding results of its prerequisite analyzers.
// The map keys are the elements of Analyzer.Requires,
// and the type of each corresponding value is the required
// analysis's ResultType.
ResultOf map[*Analyzer]interface{}
// -- facts --
// ImportObjectFact retrieves a fact associated with obj.
// Given a value ptr of type *T, where *T satisfies Fact,
// ImportObjectFact copies the value to *ptr.
//
// ImportObjectFact panics if called after the pass is complete.
// ImportObjectFact is not concurrency-safe.
ImportObjectFact func(obj types.Object, fact Fact) bool
// ImportPackageFact retrieves a fact associated with package pkg,
// which must be this package or one of its dependencies.
// See comments for ImportObjectFact.
ImportPackageFact func(pkg *types.Package, fact Fact) bool
// ExportObjectFact associates a fact of type *T with the obj,
// replacing any previous fact of that type.
//
// ExportObjectFact panics if it is called after the pass is
// complete, or if obj does not belong to the package being analyzed.
// ExportObjectFact is not concurrency-safe.
ExportObjectFact func(obj types.Object, fact Fact)
// ExportPackageFact associates a fact with the current package.
// See comments for ExportObjectFact.
ExportPackageFact func(fact Fact)
// AllPackageFacts returns a new slice containing all package facts of the analysis's FactTypes
// in unspecified order.
// WARNING: This is an experimental API and may change in the future.
AllPackageFacts func() []PackageFact
// AllObjectFacts returns a new slice containing all object facts of the analysis's FactTypes
// in unspecified order.
// WARNING: This is an experimental API and may change in the future.
AllObjectFacts func() []ObjectFact
/* Further fields may be added in future. */
// For example, suggested or applied refactorings.
}
// PackageFact is a package together with an associated fact.
// WARNING: This is an experimental API and may change in the future.
type PackageFact struct {
Package *types.Package
Fact Fact
}
// ObjectFact is an object together with an associated fact.
// WARNING: This is an experimental API and may change in the future.
type ObjectFact struct {
Object types.Object
Fact Fact
}
// Reportf is a helper function that reports a Diagnostic using the
// specified position and formatted error message.
func (pass *Pass) Reportf(pos token.Pos, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
pass.Report(Diagnostic{Pos: pos, Message: msg})
}
// The Range interface provides a range. It's equivalent to and satisfied by
// ast.Node.
type Range interface {
Pos() token.Pos // position of first character belonging to the node
End() token.Pos // position of first character immediately after the node
}
// ReportRangef is a helper function that reports a Diagnostic using the
// range provided. ast.Node values can be passed in as the range because
// they satisfy the Range interface.
func (pass *Pass) ReportRangef(rng Range, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
pass.Report(Diagnostic{Pos: rng.Pos(), End: rng.End(), Message: msg})
}
func (pass *Pass) String() string {
return fmt.Sprintf("%s@%s", pass.Analyzer.Name, pass.Pkg.Path())
}
// A Fact is an intermediate fact produced during analysis.
//
// Each fact is associated with a named declaration (a types.Object) or
// with a package as a whole. A single object or package may have
// multiple associated facts, but only one of any particular fact type.
//
// A Fact represents a predicate such as "never returns", but does not
// represent the subject of the predicate such as "function F" or "package P".
//
// Facts may be produced in one analysis pass and consumed by another
// analysis pass even if these are in different address spaces.
// If package P imports Q, all facts about Q produced during
// analysis of that package will be available during later analysis of P.
// Facts are analogous to type export data in a build system:
// just as export data enables separate compilation of several passes,
// facts enable "separate analysis".
//
// Each pass (a, p) starts with the set of facts produced by the
// same analyzer a applied to the packages directly imported by p.
// The analysis may add facts to the set, and they may be exported in turn.
// An analysis's Run function may retrieve facts by calling
// Pass.Import{Object,Package}Fact and update them using
// Pass.Export{Object,Package}Fact.
//
// A fact is logically private to its Analysis. To pass values
// between different analyzers, use the results mechanism;
// see Analyzer.Requires, Analyzer.ResultType, and Pass.ResultOf.
//
// A Fact type must be a pointer.
// Facts are encoded and decoded using encoding/gob.
// A Fact may implement the GobEncoder/GobDecoder interfaces
// to customize its encoding. Fact encoding should not fail.
//
// A Fact should not be modified once exported.
type Fact interface {
AFact() // dummy method to avoid type errors
}

61
vendor/golang.org/x/tools/go/analysis/diagnostic.go generated vendored Normal file
View file

@@ -0,0 +1,61 @@
package analysis
import "go/token"
// A Diagnostic is a message associated with a source location or range.
//
// An Analyzer may return a variety of diagnostics; the optional Category,
// which should be a constant, may be used to classify them.
// It is primarily intended to make it easy to look up documentation.
//
// If End is provided, the diagnostic is specified to apply to the range between
// Pos and End.
type Diagnostic struct {
Pos token.Pos
End token.Pos // optional
Category string // optional
Message string
// SuggestedFixes contains suggested fixes for a diagnostic which can be used to perform
// edits to a file that address the diagnostic.
// TODO(matloob): Should multiple SuggestedFixes be allowed for a diagnostic?
// Diagnostics should not contain SuggestedFixes that overlap.
// Experimental: This API is experimental and may change in the future.
SuggestedFixes []SuggestedFix // optional
// Experimental: This API is experimental and may change in the future.
Related []RelatedInformation // optional
}
// RelatedInformation contains information related to a diagnostic.
// For example, a diagnostic that flags duplicated declarations of a
// variable may include one RelatedInformation per existing
// declaration.
type RelatedInformation struct {
Pos token.Pos
End token.Pos
Message string
}
// A SuggestedFix is a code change associated with a Diagnostic that a user can choose
// to apply to their code. Usually the SuggestedFix is meant to fix the issue flagged
// by the diagnostic.
// TextEdits for a SuggestedFix should not overlap. TextEdits for a SuggestedFix
// should not contain edits for other packages.
// Experimental: This API is experimental and may change in the future.
type SuggestedFix struct {
// A description for this suggested fix to be shown to a user deciding
// whether to accept it.
Message string
TextEdits []TextEdit
}
// A TextEdit represents the replacement of the code between Pos and End with the new text.
// Each TextEdit should apply to a single file. End should not be earlier in the file than Pos.
// Experimental: This API is experimental and may change in the future.
type TextEdit struct {
// For a pure insertion, End can either be set to Pos or token.NoPos.
Pos token.Pos
End token.Pos
NewText []byte
}
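As a rough, hypothetical illustration (not part of this change), an analyzer's Run function could attach a SuggestedFix to a Diagnostic like this, replacing the source range of an AST node with new text:

package example

import (
	"go/ast"

	"golang.org/x/tools/go/analysis"
)

// reportRename is a hypothetical helper: it flags node and suggests replacing
// its source range with newName, using the Diagnostic, SuggestedFix and
// TextEdit types defined above.
func reportRename(pass *analysis.Pass, node ast.Node, newName string) {
	pass.Report(analysis.Diagnostic{
		Pos:     node.Pos(),
		End:     node.End(),
		Message: "identifier should be renamed",
		SuggestedFixes: []analysis.SuggestedFix{{
			Message: "rename to " + newName,
			TextEdits: []analysis.TextEdit{{
				Pos:     node.Pos(),
				End:     node.End(),
				NewText: []byte(newName),
			}},
		}},
	})
}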

301
vendor/golang.org/x/tools/go/analysis/doc.go generated vendored Normal file
View file

@@ -0,0 +1,301 @@
/*
Package analysis defines the interface between a modular static
analysis and an analysis driver program.
Background
A static analysis is a function that inspects a package of Go code and
reports a set of diagnostics (typically mistakes in the code), and
perhaps produces other results as well, such as suggested refactorings
or other facts. An analysis that reports mistakes is informally called a
"checker". For example, the printf checker reports mistakes in
fmt.Printf format strings.
A "modular" analysis is one that inspects one package at a time but can
save information from a lower-level package and use it when inspecting a
higher-level package, analogous to separate compilation in a toolchain.
The printf checker is modular: when it discovers that a function such as
log.Fatalf delegates to fmt.Printf, it records this fact, and checks
calls to that function too, including calls made from another package.
By implementing a common interface, checkers from a variety of sources
can be easily selected, incorporated, and reused in a wide range of
driver programs including command-line tools (such as vet), text editors and
IDEs, build and test systems (such as go build, Bazel, or Buck), test
frameworks, code review tools, code-base indexers (such as SourceGraph),
documentation viewers (such as godoc), batch pipelines for large code
bases, and so on.
Analyzer
The primary type in the API is Analyzer. An Analyzer statically
describes an analysis function: its name, documentation, flags,
relationship to other analyzers, and of course, its logic.
To define an analysis, a user declares a (logically constant) variable
of type Analyzer. Here is a typical example from one of the analyzers in
the go/analysis/passes/ subdirectory:
package unusedresult
var Analyzer = &analysis.Analyzer{
Name: "unusedresult",
Doc: "check for unused results of calls to some functions",
Run: run,
...
}
func run(pass *analysis.Pass) (interface{}, error) {
...
}
An analysis driver is a program such as vet that runs a set of
analyses and prints the diagnostics that they report.
The driver program must import the list of Analyzers it needs.
Typically each Analyzer resides in a separate package.
To add a new Analyzer to an existing driver, add another item to the list:
import ( "unusedresult"; "nilness"; "printf" )
var analyses = []*analysis.Analyzer{
unusedresult.Analyzer,
nilness.Analyzer,
printf.Analyzer,
}
A driver may use the name, flags, and documentation to provide on-line
help that describes the analyses it performs.
The doc comment contains a brief one-line summary,
optionally followed by paragraphs of explanation.
The Analyzer type has more fields besides those shown above:
type Analyzer struct {
Name string
Doc string
Flags flag.FlagSet
Run func(*Pass) (interface{}, error)
RunDespiteErrors bool
ResultType reflect.Type
Requires []*Analyzer
FactTypes []Fact
}
The Flags field declares a set of named (global) flag variables that
control analysis behavior. Unlike vet, analysis flags are not declared
directly in the command line FlagSet; it is up to the driver to set the
flag variables. A driver for a single analysis, a, might expose its flag
f directly on the command line as -f, whereas a driver for multiple
analyses might prefix the flag name by the analysis name (-a.f) to avoid
ambiguity. An IDE might expose the flags through a graphical interface,
and a batch pipeline might configure them from a config file.
See the "findcall" analyzer for an example of flags in action.
The RunDespiteErrors flag indicates whether the analysis is equipped to
handle ill-typed code. If not, the driver will skip the analysis if
there were parse or type errors.
The optional ResultType field specifies the type of the result value
computed by this analysis and made available to other analyses.
The Requires field specifies a list of analyses upon which
this one depends and whose results it may access, and it constrains the
order in which a driver may run analyses.
The FactTypes field is discussed in the section on Modularity.
The analysis package provides a Validate function to perform basic
sanity checks on an Analyzer, such as that its Requires graph is
acyclic, its fact and result types are unique, and so on.
Finally, the Run field contains a function to be called by the driver to
execute the analysis on a single package. The driver passes it an
instance of the Pass type.
Pass
A Pass describes a single unit of work: the application of a particular
Analyzer to a particular package of Go code.
The Pass provides information to the Analyzer's Run function about the
package being analyzed, and provides operations to the Run function for
reporting diagnostics and other information back to the driver.
type Pass struct {
Fset *token.FileSet
Files []*ast.File
OtherFiles []string
Pkg *types.Package
TypesInfo *types.Info
ResultOf map[*Analyzer]interface{}
Report func(Diagnostic)
...
}
The Fset, Files, Pkg, and TypesInfo fields provide the syntax trees,
type information, and source positions for a single package of Go code.
The OtherFiles field provides the names, but not the contents, of non-Go
files such as assembly that are part of this package. See the "asmdecl"
or "buildtags" analyzers for examples of loading non-Go files and reporting
diagnostics against them.
The ResultOf field provides the results computed by the analyzers
required by this one, as expressed in its Analyzer.Requires field. The
driver runs the required analyzers first and makes their results
available in this map. Each Analyzer must return a value of the type
described in its Analyzer.ResultType field.
For example, the "ctrlflow" analyzer returns a *ctrlflow.CFGs, which
provides a control-flow graph for each function in the package (see
golang.org/x/tools/go/cfg); the "inspect" analyzer returns a value that
enables other Analyzers to traverse the syntax trees of the package more
efficiently; and the "buildssa" analyzer constructs an SSA-form
intermediate representation.
Each of these Analyzers extends the capabilities of later Analyzers
without adding a dependency to the core API, so an analysis tool pays
only for the extensions it needs.
The Report function emits a diagnostic, a message associated with a
source position. For most analyses, diagnostics are their primary
result.
For convenience, Pass provides a helper method, Reportf, to report a new
diagnostic by formatting a string.
Diagnostic is defined as:
type Diagnostic struct {
Pos token.Pos
Category string // optional
Message string
}
The optional Category field is a short identifier that classifies the
kind of message when an analysis produces several kinds of diagnostic.
Most Analyzers inspect typed Go syntax trees, but a few, such as asmdecl
and buildtag, inspect the raw text of Go source files or even non-Go
files such as assembly. To report a diagnostic against a line of a
raw text file, use the following sequence:
content, err := ioutil.ReadFile(filename)
if err != nil { ... }
tf := fset.AddFile(filename, -1, len(content))
tf.SetLinesForContent(content)
...
pass.Reportf(tf.LineStart(line), "oops")
Modular analysis with Facts
To improve efficiency and scalability, large programs are routinely
built using separate compilation: units of the program are compiled
separately, and recompiled only when one of their dependencies changes;
independent modules may be compiled in parallel. The same technique may
be applied to static analyses, for the same benefits. Such analyses are
described as "modular".
A compiler's type checker is an example of a modular static analysis.
Many other checkers we would like to apply to Go programs can be
understood as alternative or non-standard type systems. For example,
vet's printf checker infers whether a function has the "printf wrapper"
type, and it applies stricter checks to calls of such functions. In
addition, it records which functions are printf wrappers for use by
later analysis passes to identify other printf wrappers by induction.
A result such as “f is a printf wrapper” that is not interesting by
itself but serves as a stepping stone to an interesting result (such as
a diagnostic) is called a "fact".
The analysis API allows an analysis to define new types of facts, to
associate facts of these types with objects (named entities) declared
within the current package, or with the package as a whole, and to query
for an existing fact of a given type associated with an object or
package.
An Analyzer that uses facts must declare their types:
var Analyzer = &analysis.Analyzer{
Name: "printf",
FactTypes: []analysis.Fact{new(isWrapper)},
...
}
type isWrapper struct{} // => *types.Func f “is a printf wrapper”
The driver program ensures that facts for a pass's dependencies are
generated before analyzing the package and is responsible for propagating
facts from one package to another, possibly across address spaces.
Consequently, Facts must be serializable. The API requires that drivers
use the gob encoding, an efficient, robust, self-describing binary
protocol. A fact type may implement the GobEncoder/GobDecoder interfaces
if the default encoding is unsuitable. Facts should be stateless.
The Pass type has functions to import and export facts,
associated either with an object or with a package:
type Pass struct {
...
ExportObjectFact func(types.Object, Fact)
ImportObjectFact func(types.Object, Fact) bool
ExportPackageFact func(fact Fact)
ImportPackageFact func(*types.Package, Fact) bool
}
An Analyzer may only export facts associated with the current package or
its objects, though it may import facts from any package or object that
is an import dependency of the current package.
Conceptually, ExportObjectFact(obj, fact) inserts fact into a hidden map keyed by
the pair (obj, TypeOf(fact)), and the ImportObjectFact function
retrieves the entry from this map and copies its value into the variable
pointed to by fact. This scheme assumes that the concrete type of fact
is a pointer; this assumption is checked by the Validate function.
See the "printf" analyzer for an example of object facts in action.
Some driver implementations (such as those based on Bazel and Blaze) do
not currently apply analyzers to packages of the standard library.
Therefore, for best results, analyzer authors should not rely on
analysis facts being available for standard packages.
For example, although the printf checker is capable of deducing during
analysis of the log package that log.Printf is a printf wrapper,
this fact is built in to the analyzer so that it correctly checks
calls to log.Printf even when run in a driver that does not apply
it to standard packages. We would like to remove this limitation in future.
Testing an Analyzer
The analysistest subpackage provides utilities for testing an Analyzer.
In a few lines of code, it is possible to run an analyzer on a package
of testdata files and check that it reported all the expected
diagnostics and facts (and no more). Expectations are expressed using
"// want ..." comments in the input code.
Standalone commands
Analyzers are provided in the form of packages that a driver program is
expected to import. The vet command imports a set of several analyzers,
but users may wish to define their own analysis commands that perform
additional checks. To simplify the task of creating an analysis command,
either for a single analyzer or for a whole suite, we provide the
singlechecker and multichecker subpackages.
The singlechecker package provides the main function for a command that
runs one analyzer. By convention, each analyzer such as
go/passes/findcall should be accompanied by a singlechecker-based
command such as go/analysis/passes/findcall/cmd/findcall, defined in its
entirety as:
package main
import (
"golang.org/x/tools/go/analysis/passes/findcall"
"golang.org/x/tools/go/analysis/singlechecker"
)
func main() { singlechecker.Main(findcall.Analyzer) }
A tool that provides multiple analyzers can use multichecker in a
similar way, giving it the list of Analyzers.
*/
package analysis
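Tying the package documentation above back to this change, here is a self-contained sketch of what another analyzer in the same shape as gitea-vet's Imports and License checks could look like (hypothetical; not part of this commit):

package checks

import (
	"go/ast"

	"golang.org/x/tools/go/analysis"
)

// DocComments is a hypothetical analyzer: it reports exported functions that
// have no doc comment, using the Analyzer/Pass API described above.
var DocComments = &analysis.Analyzer{
	Name: "doccomments",
	Doc:  "check that exported functions have doc comments.",
	Run:  runDocComments,
}

func runDocComments(pass *analysis.Pass) (interface{}, error) {
	for _, file := range pass.Files {
		for _, decl := range file.Decls {
			fn, ok := decl.(*ast.FuncDecl)
			if !ok || !fn.Name.IsExported() || fn.Doc != nil {
				continue
			}
			pass.Reportf(fn.Pos(), "exported function %s has no doc comment", fn.Name.Name)
		}
	}
	return nil, nil
}

It would be wired in by adding checks.DocComments to the unitchecker.Main call in gitea-vet's main.go shown earlier.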

View file

@@ -0,0 +1,388 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package analysisflags defines helpers for processing flags of
// analysis driver tools.
package analysisflags
import (
"crypto/sha256"
"encoding/gob"
"encoding/json"
"flag"
"fmt"
"go/token"
"io"
"io/ioutil"
"log"
"os"
"strconv"
"strings"
"golang.org/x/tools/go/analysis"
)
// flags common to all {single,multi,unit}checkers.
var (
JSON = false // -json
Context = -1 // -c=N: if N>0, display offending line plus N lines of context
)
// Parse creates a flag for each of the analyzer's flags,
// including (in multi mode) a flag named after the analyzer,
// parses the flags, then filters and returns the list of
// analyzers enabled by flags.
//
// The result is intended to be passed to unitchecker.Run or checker.Run.
// Use in unitchecker.Run will gob.Register all fact types for the returned
// graph of analyzers but of course not the ones only reachable from
// dropped analyzers. To avoid inconsistency about which gob types are
// registered from run to run, Parse itself gob.Registers all the facts
// only reachable from dropped analyzers.
// This is not a particularly elegant API, but this is an internal package.
func Parse(analyzers []*analysis.Analyzer, multi bool) []*analysis.Analyzer {
// Connect each analysis flag to the command line as -analysis.flag.
enabled := make(map[*analysis.Analyzer]*triState)
for _, a := range analyzers {
var prefix string
// Add -NAME flag to enable it.
if multi {
prefix = a.Name + "."
enable := new(triState)
enableUsage := "enable " + a.Name + " analysis"
flag.Var(enable, a.Name, enableUsage)
enabled[a] = enable
}
a.Flags.VisitAll(func(f *flag.Flag) {
if !multi && flag.Lookup(f.Name) != nil {
log.Printf("%s flag -%s would conflict with driver; skipping", a.Name, f.Name)
return
}
name := prefix + f.Name
flag.Var(f.Value, name, f.Usage)
})
}
// standard flags: -flags, -V.
printflags := flag.Bool("flags", false, "print analyzer flags in JSON")
addVersionFlag()
// flags common to all checkers
flag.BoolVar(&JSON, "json", JSON, "emit JSON output")
flag.IntVar(&Context, "c", Context, `display offending line with this many lines of context`)
// Add shims for legacy vet flags to enable existing
// scripts that run vet to continue to work.
_ = flag.Bool("source", false, "no effect (deprecated)")
_ = flag.Bool("v", false, "no effect (deprecated)")
_ = flag.Bool("all", false, "no effect (deprecated)")
_ = flag.String("tags", "", "no effect (deprecated)")
for old, new := range vetLegacyFlags {
newFlag := flag.Lookup(new)
if newFlag != nil && flag.Lookup(old) == nil {
flag.Var(newFlag.Value, old, "deprecated alias for -"+new)
}
}
flag.Parse() // (ExitOnError)
// -flags: print flags so that go vet knows which ones are legitimate.
if *printflags {
printFlags()
os.Exit(0)
}
everything := expand(analyzers)
// If any -NAME flag is true, run only those analyzers. Otherwise,
// if any -NAME flag is false, run all but those analyzers.
if multi {
var hasTrue, hasFalse bool
for _, ts := range enabled {
switch *ts {
case setTrue:
hasTrue = true
case setFalse:
hasFalse = true
}
}
var keep []*analysis.Analyzer
if hasTrue {
for _, a := range analyzers {
if *enabled[a] == setTrue {
keep = append(keep, a)
}
}
analyzers = keep
} else if hasFalse {
for _, a := range analyzers {
if *enabled[a] != setFalse {
keep = append(keep, a)
}
}
analyzers = keep
}
}
// Register fact types of skipped analyzers
// in case we encounter them in imported files.
kept := expand(analyzers)
for a := range everything {
if !kept[a] {
for _, f := range a.FactTypes {
gob.Register(f)
}
}
}
return analyzers
}
func expand(analyzers []*analysis.Analyzer) map[*analysis.Analyzer]bool {
seen := make(map[*analysis.Analyzer]bool)
var visitAll func([]*analysis.Analyzer)
visitAll = func(analyzers []*analysis.Analyzer) {
for _, a := range analyzers {
if !seen[a] {
seen[a] = true
visitAll(a.Requires)
}
}
}
visitAll(analyzers)
return seen
}
func printFlags() {
type jsonFlag struct {
Name string
Bool bool
Usage string
}
var flags []jsonFlag = nil
flag.VisitAll(func(f *flag.Flag) {
// Don't report {single,multi}checker debugging
// flags or fix as these have no effect on unitchecker
// (as invoked by 'go vet').
switch f.Name {
case "debug", "cpuprofile", "memprofile", "trace", "fix":
return
}
b, ok := f.Value.(interface{ IsBoolFlag() bool })
isBool := ok && b.IsBoolFlag()
flags = append(flags, jsonFlag{f.Name, isBool, f.Usage})
})
data, err := json.MarshalIndent(flags, "", "\t")
if err != nil {
log.Fatal(err)
}
os.Stdout.Write(data)
}
// addVersionFlag registers a -V flag that, if set,
// prints the executable version and exits 0.
//
// If the -V flag already exists — for example, because it was already
// registered by a call to cmd/internal/objabi.AddVersionFlag — then
// addVersionFlag does nothing.
func addVersionFlag() {
if flag.Lookup("V") == nil {
flag.Var(versionFlag{}, "V", "print version and exit")
}
}
// versionFlag minimally complies with the -V protocol required by "go vet".
type versionFlag struct{}
func (versionFlag) IsBoolFlag() bool { return true }
func (versionFlag) Get() interface{} { return nil }
func (versionFlag) String() string { return "" }
func (versionFlag) Set(s string) error {
if s != "full" {
log.Fatalf("unsupported flag value: -V=%s", s)
}
// This replicates the minimal subset of
// cmd/internal/objabi.AddVersionFlag, which is private to the
// go tool yet forms part of our command-line interface.
// TODO(adonovan): clarify the contract.
// Print the tool version so the build system can track changes.
// Formats:
// $progname version devel ... buildID=...
// $progname version go1.9.1
progname := os.Args[0]
f, err := os.Open(progname)
if err != nil {
log.Fatal(err)
}
h := sha256.New()
if _, err := io.Copy(h, f); err != nil {
log.Fatal(err)
}
f.Close()
fmt.Printf("%s version devel comments-go-here buildID=%02x\n",
progname, string(h.Sum(nil)))
os.Exit(0)
return nil
}
// A triState is a boolean that knows whether
// it has been set to either true or false.
// It is used to identify whether a flag appears;
// the standard boolean flag cannot
// distinguish missing from unset.
// It also satisfies flag.Value.
type triState int
const (
unset triState = iota
setTrue
setFalse
)
func triStateFlag(name string, value triState, usage string) *triState {
flag.Var(&value, name, usage)
return &value
}
// triState implements flag.Value, flag.Getter, and flag.boolFlag.
// They work like boolean flags: we can say vet -printf as well as vet -printf=true
func (ts *triState) Get() interface{} {
return *ts == setTrue
}
func (ts triState) isTrue() bool {
return ts == setTrue
}
func (ts *triState) Set(value string) error {
b, err := strconv.ParseBool(value)
if err != nil {
// This error message looks poor but package "flag" adds
// "invalid boolean value %q for -NAME: %s"
return fmt.Errorf("want true or false")
}
if b {
*ts = setTrue
} else {
*ts = setFalse
}
return nil
}
func (ts *triState) String() string {
switch *ts {
case unset:
return "true"
case setTrue:
return "true"
case setFalse:
return "false"
}
panic("not reached")
}
func (ts triState) IsBoolFlag() bool {
return true
}
// Legacy flag support
// vetLegacyFlags maps flags used by legacy vet to their corresponding
// new names. The old names will continue to work.
var vetLegacyFlags = map[string]string{
// Analyzer name changes
"bool": "bools",
"buildtags": "buildtag",
"methods": "stdmethods",
"rangeloops": "loopclosure",
// Analyzer flags
"compositewhitelist": "composites.whitelist",
"printfuncs": "printf.funcs",
"shadowstrict": "shadow.strict",
"unusedfuncs": "unusedresult.funcs",
"unusedstringmethods": "unusedresult.stringmethods",
}
// ---- output helpers common to all drivers ----
// PrintPlain prints a diagnostic in plain text form,
// with context specified by the -c flag.
func PrintPlain(fset *token.FileSet, diag analysis.Diagnostic) {
posn := fset.Position(diag.Pos)
fmt.Fprintf(os.Stderr, "%s: %s\n", posn, diag.Message)
// -c=N: show offending line plus N lines of context.
if Context >= 0 {
posn := fset.Position(diag.Pos)
end := fset.Position(diag.End)
if !end.IsValid() {
end = posn
}
data, _ := ioutil.ReadFile(posn.Filename)
lines := strings.Split(string(data), "\n")
for i := posn.Line - Context; i <= end.Line+Context; i++ {
if 1 <= i && i <= len(lines) {
fmt.Fprintf(os.Stderr, "%d\t%s\n", i, lines[i-1])
}
}
}
}
// A JSONTree is a mapping from package ID to analysis name to result.
// Each result is either a jsonError or a list of jsonDiagnostic.
type JSONTree map[string]map[string]interface{}
// Add adds the result of analysis 'name' on package 'id'.
// The result is either a list of diagnostics or an error.
func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis.Diagnostic, err error) {
var v interface{}
if err != nil {
type jsonError struct {
Err string `json:"error"`
}
v = jsonError{err.Error()}
} else if len(diags) > 0 {
type jsonDiagnostic struct {
Category string `json:"category,omitempty"`
Posn string `json:"posn"`
Message string `json:"message"`
}
var diagnostics []jsonDiagnostic
// TODO(matloob): Should the JSON diagnostics contain ranges?
// If so, how should they be formatted?
for _, f := range diags {
diagnostics = append(diagnostics, jsonDiagnostic{
Category: f.Category,
Posn: fset.Position(f.Pos).String(),
Message: f.Message,
})
}
v = diagnostics
}
if v != nil {
m, ok := tree[id]
if !ok {
m = make(map[string]interface{})
tree[id] = m
}
m[name] = v
}
}
func (tree JSONTree) Print() {
data, err := json.MarshalIndent(tree, "", "\t")
if err != nil {
log.Panicf("internal error: JSON marshalling failed: %v", err)
}
fmt.Printf("%s\n", data)
}

View file

@@ -0,0 +1,92 @@
package analysisflags
import (
"flag"
"fmt"
"log"
"os"
"sort"
"strings"
"golang.org/x/tools/go/analysis"
)
const help = `PROGNAME is a tool for static analysis of Go programs.
PROGNAME examines Go source code and reports suspicious constructs,
such as Printf calls whose arguments do not align with the format
string. It uses heuristics that do not guarantee all reports are
genuine problems, but it can find errors not caught by the compilers.
`
// Help implements the help subcommand for a multichecker or unitchecker
// style command. The optional args specify the analyzers to describe.
// Help calls log.Fatal if no such analyzer exists.
func Help(progname string, analyzers []*analysis.Analyzer, args []string) {
// No args: show summary of all analyzers.
if len(args) == 0 {
fmt.Println(strings.Replace(help, "PROGNAME", progname, -1))
fmt.Println("Registered analyzers:")
fmt.Println()
sort.Slice(analyzers, func(i, j int) bool {
return analyzers[i].Name < analyzers[j].Name
})
for _, a := range analyzers {
title := strings.Split(a.Doc, "\n\n")[0]
fmt.Printf(" %-12s %s\n", a.Name, title)
}
fmt.Println("\nBy default all analyzers are run.")
fmt.Println("To select specific analyzers, use the -NAME flag for each one,")
fmt.Println(" or -NAME=false to run all analyzers not explicitly disabled.")
// Show only the core command-line flags.
fmt.Println("\nCore flags:")
fmt.Println()
fs := flag.NewFlagSet("", flag.ExitOnError)
flag.VisitAll(func(f *flag.Flag) {
if !strings.Contains(f.Name, ".") {
fs.Var(f.Value, f.Name, f.Usage)
}
})
fs.SetOutput(os.Stdout)
fs.PrintDefaults()
fmt.Printf("\nTo see details and flags of a specific analyzer, run '%s help name'.\n", progname)
return
}
// Show help on specific analyzer(s).
outer:
for _, arg := range args {
for _, a := range analyzers {
if a.Name == arg {
paras := strings.Split(a.Doc, "\n\n")
title := paras[0]
fmt.Printf("%s: %s\n", a.Name, title)
// Show only the flags relating to this analysis,
// properly prefixed.
first := true
fs := flag.NewFlagSet(a.Name, flag.ExitOnError)
a.Flags.VisitAll(func(f *flag.Flag) {
if first {
first = false
fmt.Println("\nAnalyzer flags:")
fmt.Println()
}
fs.Var(f.Value, a.Name+"."+f.Name, f.Usage)
})
fs.SetOutput(os.Stdout)
fs.PrintDefaults()
if len(paras) > 1 {
fmt.Printf("\n%s\n", strings.Join(paras[1:], "\n\n"))
}
continue outer
}
}
log.Fatalf("Analyzer %q not registered", arg)
}
}

View file

@@ -0,0 +1,323 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package facts defines a serializable set of analysis.Fact.
//
// It provides a partial implementation of the Fact-related parts of the
// analysis.Pass interface for use in analysis drivers such as "go vet"
// and other build systems.
//
// The serial format is unspecified and may change, so the same version
// of this package must be used for reading and writing serialized facts.
//
// The handling of facts in the analysis system parallels the handling
// of type information in the compiler: during compilation of package P,
// the compiler emits an export data file that describes the type of
// every object (named thing) defined in package P, plus every object
// indirectly reachable from one of those objects. Thus the downstream
// compiler of package Q need only load one export data file per direct
// import of Q, and it will learn everything about the API of package P
// and everything it needs to know about the API of P's dependencies.
//
// Similarly, analysis of package P emits a fact set containing facts
// about all objects exported from P, plus additional facts about only
// those objects of P's dependencies that are reachable from the API of
// package P; the downstream analysis of Q need only load one fact set
// per direct import of Q.
//
// The notion of "exportedness" that matters here is that of the
// compiler. According to the language spec, a method pkg.T.f is
// unexported simply because its name starts with lowercase. But the
// compiler must nonetheless export f so that downstream compilations can
// accurately ascertain whether pkg.T implements an interface pkg.I
// defined as interface{f()}. Exported thus means "described in export
// data".
//
package facts
import (
"bytes"
"encoding/gob"
"fmt"
"go/types"
"io/ioutil"
"log"
"reflect"
"sort"
"sync"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/types/objectpath"
)
const debug = false
// A Set is a set of analysis.Facts.
//
// Decode creates a Set of facts by reading from the imports of a given
// package, and Encode writes out the set. Between these operations,
// the Import and Export methods will query and update the set.
//
// All of Set's methods except String are safe to call concurrently.
type Set struct {
pkg *types.Package
mu sync.Mutex
m map[key]analysis.Fact
}
type key struct {
pkg *types.Package
obj types.Object // (object facts only)
t reflect.Type
}
// ImportObjectFact implements analysis.Pass.ImportObjectFact.
func (s *Set) ImportObjectFact(obj types.Object, ptr analysis.Fact) bool {
if obj == nil {
panic("nil object")
}
key := key{pkg: obj.Pkg(), obj: obj, t: reflect.TypeOf(ptr)}
s.mu.Lock()
defer s.mu.Unlock()
if v, ok := s.m[key]; ok {
reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
return true
}
return false
}
// ExportObjectFact implements analysis.Pass.ExportObjectFact.
func (s *Set) ExportObjectFact(obj types.Object, fact analysis.Fact) {
if obj.Pkg() != s.pkg {
log.Panicf("in package %s: ExportObjectFact(%s, %T): can't set fact on object belonging to another package",
s.pkg, obj, fact)
}
key := key{pkg: obj.Pkg(), obj: obj, t: reflect.TypeOf(fact)}
s.mu.Lock()
s.m[key] = fact // clobber any existing entry
s.mu.Unlock()
}
func (s *Set) AllObjectFacts(filter map[reflect.Type]bool) []analysis.ObjectFact {
var facts []analysis.ObjectFact
s.mu.Lock()
for k, v := range s.m {
if k.obj != nil && filter[k.t] {
facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: v})
}
}
s.mu.Unlock()
return facts
}
// ImportPackageFact implements analysis.Pass.ImportPackageFact.
func (s *Set) ImportPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
if pkg == nil {
panic("nil package")
}
key := key{pkg: pkg, t: reflect.TypeOf(ptr)}
s.mu.Lock()
defer s.mu.Unlock()
if v, ok := s.m[key]; ok {
reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
return true
}
return false
}
// ExportPackageFact implements analysis.Pass.ExportPackageFact.
func (s *Set) ExportPackageFact(fact analysis.Fact) {
key := key{pkg: s.pkg, t: reflect.TypeOf(fact)}
s.mu.Lock()
s.m[key] = fact // clobber any existing entry
s.mu.Unlock()
}
func (s *Set) AllPackageFacts(filter map[reflect.Type]bool) []analysis.PackageFact {
var facts []analysis.PackageFact
s.mu.Lock()
for k, v := range s.m {
if k.obj == nil && filter[k.t] {
facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: v})
}
}
s.mu.Unlock()
return facts
}
// gobFact is the Gob declaration of a serialized fact.
type gobFact struct {
PkgPath string // path of package
Object objectpath.Path // optional path of object relative to package itself
Fact analysis.Fact // type and value of user-defined Fact
}
// Decode decodes all the facts relevant to the analysis of package pkg.
// The read function reads serialized fact data from an external source
// for one of pkg's direct imports. The empty file is a valid
// encoding of an empty fact set.
//
// It is the caller's responsibility to call gob.Register on all
// necessary fact types.
func Decode(pkg *types.Package, read func(packagePath string) ([]byte, error)) (*Set, error) {
// Compute the import map for this package.
// See the package doc comment.
packages := importMap(pkg.Imports())
// Read facts from imported packages.
// Facts may describe indirectly imported packages, or their objects.
m := make(map[key]analysis.Fact) // one big bucket
for _, imp := range pkg.Imports() {
logf := func(format string, args ...interface{}) {
if debug {
prefix := fmt.Sprintf("in %s, importing %s: ",
pkg.Path(), imp.Path())
log.Print(prefix, fmt.Sprintf(format, args...))
}
}
// Read the gob-encoded facts.
data, err := read(imp.Path())
if err != nil {
return nil, fmt.Errorf("in %s, can't import facts for package %q: %v",
pkg.Path(), imp.Path(), err)
}
if len(data) == 0 {
continue // no facts
}
var gobFacts []gobFact
if err := gob.NewDecoder(bytes.NewReader(data)).Decode(&gobFacts); err != nil {
return nil, fmt.Errorf("decoding facts for %q: %v", imp.Path(), err)
}
if debug {
logf("decoded %d facts: %v", len(gobFacts), gobFacts)
}
// Parse each one into a key and a Fact.
for _, f := range gobFacts {
factPkg := packages[f.PkgPath]
if factPkg == nil {
// Fact relates to a dependency that was
// unused in this translation unit. Skip.
logf("no package %q; discarding %v", f.PkgPath, f.Fact)
continue
}
key := key{pkg: factPkg, t: reflect.TypeOf(f.Fact)}
if f.Object != "" {
// object fact
obj, err := objectpath.Object(factPkg, f.Object)
if err != nil {
// (most likely due to unexported object)
// TODO(adonovan): audit for other possibilities.
logf("no object for path: %v; discarding %s", err, f.Fact)
continue
}
key.obj = obj
logf("read %T fact %s for %v", f.Fact, f.Fact, key.obj)
} else {
// package fact
logf("read %T fact %s for %v", f.Fact, f.Fact, factPkg)
}
m[key] = f.Fact
}
}
return &Set{pkg: pkg, m: m}, nil
}
// Encode encodes a set of facts to a memory buffer.
//
// It may fail if one of the Facts could not be gob-encoded, but this is
// a sign of a bug in an Analyzer.
func (s *Set) Encode() []byte {
// TODO(adonovan): opt: use a more efficient encoding
// that avoids repeating PkgPath for each fact.
// Gather all facts, including those from imported packages.
var gobFacts []gobFact
s.mu.Lock()
for k, fact := range s.m {
if debug {
log.Printf("%v => %s\n", k, fact)
}
var object objectpath.Path
if k.obj != nil {
path, err := objectpath.For(k.obj)
if err != nil {
if debug {
log.Printf("discarding fact %s about %s\n", fact, k.obj)
}
continue // object not accessible from package API; discard fact
}
object = path
}
gobFacts = append(gobFacts, gobFact{
PkgPath: k.pkg.Path(),
Object: object,
Fact: fact,
})
}
s.mu.Unlock()
// Sort facts by (package, object, type) for determinism.
sort.Slice(gobFacts, func(i, j int) bool {
x, y := gobFacts[i], gobFacts[j]
if x.PkgPath != y.PkgPath {
return x.PkgPath < y.PkgPath
}
if x.Object != y.Object {
return x.Object < y.Object
}
tx := reflect.TypeOf(x.Fact)
ty := reflect.TypeOf(y.Fact)
if tx != ty {
return tx.String() < ty.String()
}
return false // equal
})
var buf bytes.Buffer
if len(gobFacts) > 0 {
if err := gob.NewEncoder(&buf).Encode(gobFacts); err != nil {
// Fact encoding should never fail. Identify the culprit.
for _, gf := range gobFacts {
if err := gob.NewEncoder(ioutil.Discard).Encode(gf); err != nil {
fact := gf.Fact
pkgpath := reflect.TypeOf(fact).Elem().PkgPath()
log.Panicf("internal error: gob encoding of analysis fact %s failed: %v; please report a bug against fact %T in package %q",
fact, err, fact, pkgpath)
}
}
}
}
if debug {
log.Printf("package %q: encode %d facts, %d bytes\n",
s.pkg.Path(), len(gobFacts), buf.Len())
}
return buf.Bytes()
}
// String is provided only for debugging, and must not be called
// concurrently with any Import/Export method.
func (s *Set) String() string {
var buf bytes.Buffer
buf.WriteString("{")
for k, f := range s.m {
if buf.Len() > 1 {
buf.WriteString(", ")
}
if k.obj != nil {
buf.WriteString(k.obj.String())
} else {
buf.WriteString(k.pkg.Path())
}
fmt.Fprintf(&buf, ": %v", f)
}
buf.WriteString("}")
return buf.String()
}
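For orientation, a rough sketch of the analyzer-side API that this Set backs via analysis.Pass. The purity analyzer and its isPure fact are hypothetical; the point is that a fact is a pointer type listed in FactTypes (so drivers can gob-register it), exported for objects of the current package, and read back downstream with ImportObjectFact once Encode and Decode have done their work:

package purity

import (
	"go/types"

	"golang.org/x/tools/go/analysis"
)

// isPure is a hypothetical fact attached to functions assumed to be side-effect free.
type isPure struct{}

func (*isPure) AFact() {} // marks the type as an analysis.Fact

var Analyzer = &analysis.Analyzer{
	Name:      "purity",
	Doc:       "illustrative only: records functions assumed to be side-effect free",
	Run:       run,
	FactTypes: []analysis.Fact{new(isPure)},
}

func run(pass *analysis.Pass) (interface{}, error) {
	for _, obj := range pass.TypesInfo.Defs {
		if fn, ok := obj.(*types.Func); ok {
			// Stored in the Set above and serialized by Encode; a downstream
			// package would read it back with pass.ImportObjectFact(fn, new(isPure)).
			pass.ExportObjectFact(fn, new(isPure))
		}
	}
	return nil, nil
}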

View file

@ -0,0 +1,88 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package facts
import "go/types"
// importMap computes the import map for a package by traversing the
// entire exported API of each of its imports.
//
// This is a workaround for the fact that we cannot access the map used
// internally by the types.Importer returned by go/importer. The entries
// in this map are the packages and objects that may be relevant to the
// current analysis unit.
//
// Packages in the map that are only indirectly imported may be
// incomplete (!pkg.Complete()).
//
func importMap(imports []*types.Package) map[string]*types.Package {
objects := make(map[types.Object]bool)
packages := make(map[string]*types.Package)
var addObj func(obj types.Object) bool
var addType func(T types.Type)
addObj = func(obj types.Object) bool {
if !objects[obj] {
objects[obj] = true
addType(obj.Type())
if pkg := obj.Pkg(); pkg != nil {
packages[pkg.Path()] = pkg
}
return true
}
return false
}
addType = func(T types.Type) {
switch T := T.(type) {
case *types.Basic:
// nop
case *types.Named:
if addObj(T.Obj()) {
for i := 0; i < T.NumMethods(); i++ {
addObj(T.Method(i))
}
}
case *types.Pointer:
addType(T.Elem())
case *types.Slice:
addType(T.Elem())
case *types.Array:
addType(T.Elem())
case *types.Chan:
addType(T.Elem())
case *types.Map:
addType(T.Key())
addType(T.Elem())
case *types.Signature:
addType(T.Params())
addType(T.Results())
case *types.Struct:
for i := 0; i < T.NumFields(); i++ {
addObj(T.Field(i))
}
case *types.Tuple:
for i := 0; i < T.Len(); i++ {
addObj(T.At(i))
}
case *types.Interface:
for i := 0; i < T.NumMethods(); i++ {
addObj(T.Method(i))
}
}
}
for _, imp := range imports {
packages[imp.Path()] = imp
scope := imp.Scope()
for _, name := range scope.Names() {
addObj(scope.Lookup(name))
}
}
return packages
}

View file

@ -0,0 +1,396 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The unitchecker package defines the main function for an analysis
// driver that analyzes a single compilation unit during a build.
// It is invoked by a build system such as "go vet":
//
// $ go vet -vettool=$(which vet)
//
// It supports the following command-line protocol:
//
// -V=full describe executable (to the build tool)
// -flags describe flags (to the build tool)
// foo.cfg description of compilation unit (from the build tool)
//
// This package does not depend on go/packages.
// If you need a standalone tool, use multichecker,
// which supports this mode but can also load packages
// from source using go/packages.
package unitchecker
// TODO(adonovan):
// - with gccgo, go build does not build standard library,
// so we will not get to analyze it. Yet we must in order
// to create base facts for, say, the fmt package for the
// printf checker.
import (
"encoding/gob"
"encoding/json"
"flag"
"fmt"
"go/ast"
"go/build"
"go/importer"
"go/parser"
"go/token"
"go/types"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
"sync"
"time"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/internal/analysisflags"
"golang.org/x/tools/go/analysis/internal/facts"
)
// A Config describes a compilation unit to be analyzed.
// It is provided to the tool in a JSON-encoded file
// whose name ends with ".cfg".
type Config struct {
ID string // e.g. "fmt [fmt.test]"
Compiler string
Dir string
ImportPath string
GoFiles []string
NonGoFiles []string
ImportMap map[string]string
PackageFile map[string]string
Standard map[string]bool
PackageVetx map[string]string
VetxOnly bool
VetxOutput string
SucceedOnTypecheckFailure bool
}
// Main is the main function of a vet-like analysis tool that must be
// invoked by a build system to analyze a single package.
//
// The protocol required by 'go vet -vettool=...' is that the tool must support:
//
// -flags describe flags in JSON
// -V=full describe executable for build caching
// foo.cfg perform separate modular analysis on the single
// unit described by a JSON config file foo.cfg.
//
func Main(analyzers ...*analysis.Analyzer) {
progname := filepath.Base(os.Args[0])
log.SetFlags(0)
log.SetPrefix(progname + ": ")
if err := analysis.Validate(analyzers); err != nil {
log.Fatal(err)
}
flag.Usage = func() {
fmt.Fprintf(os.Stderr, `%[1]s is a tool for static analysis of Go programs.
Usage of %[1]s:
%.16[1]s unit.cfg # execute analysis specified by config file
%.16[1]s help # general help
%.16[1]s help name # help on specific analyzer and its flags
`, progname)
os.Exit(1)
}
analyzers = analysisflags.Parse(analyzers, true)
args := flag.Args()
if len(args) == 0 {
flag.Usage()
}
if args[0] == "help" {
analysisflags.Help(progname, analyzers, args[1:])
os.Exit(0)
}
if len(args) != 1 || !strings.HasSuffix(args[0], ".cfg") {
log.Fatalf(`invoking "go tool vet" directly is unsupported; use "go vet"`)
}
Run(args[0], analyzers)
}
// Run reads the *.cfg file, runs the analysis,
// and calls os.Exit with an appropriate error code.
// It assumes flags have already been set.
func Run(configFile string, analyzers []*analysis.Analyzer) {
cfg, err := readConfig(configFile)
if err != nil {
log.Fatal(err)
}
fset := token.NewFileSet()
results, err := run(fset, cfg, analyzers)
if err != nil {
log.Fatal(err)
}
// In VetxOnly mode, the analysis is run only for facts.
if !cfg.VetxOnly {
if analysisflags.JSON {
// JSON output
tree := make(analysisflags.JSONTree)
for _, res := range results {
tree.Add(fset, cfg.ID, res.a.Name, res.diagnostics, res.err)
}
tree.Print()
} else {
// plain text
exit := 0
for _, res := range results {
if res.err != nil {
log.Println(res.err)
exit = 1
}
}
for _, res := range results {
for _, diag := range res.diagnostics {
analysisflags.PrintPlain(fset, diag)
exit = 1
}
}
os.Exit(exit)
}
}
os.Exit(0)
}
func readConfig(filename string) (*Config, error) {
data, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
cfg := new(Config)
if err := json.Unmarshal(data, cfg); err != nil {
return nil, fmt.Errorf("cannot decode JSON config file %s: %v", filename, err)
}
if len(cfg.GoFiles) == 0 {
// The go command disallows packages with no files.
// The only exception is unsafe, but the go command
// doesn't call vet on it.
return nil, fmt.Errorf("package has no files: %s", cfg.ImportPath)
}
return cfg, nil
}
var importerForCompiler = func(_ *token.FileSet, compiler string, lookup importer.Lookup) types.Importer {
// broken legacy implementation (https://golang.org/issue/28995)
return importer.For(compiler, lookup)
}
func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]result, error) {
// Load, parse, typecheck.
var files []*ast.File
for _, name := range cfg.GoFiles {
f, err := parser.ParseFile(fset, name, nil, parser.ParseComments)
if err != nil {
if cfg.SucceedOnTypecheckFailure {
// Silently succeed; let the compiler
// report parse errors.
err = nil
}
return nil, err
}
files = append(files, f)
}
compilerImporter := importerForCompiler(fset, cfg.Compiler, func(path string) (io.ReadCloser, error) {
// path is a resolved package path, not an import path.
file, ok := cfg.PackageFile[path]
if !ok {
if cfg.Compiler == "gccgo" && cfg.Standard[path] {
return nil, nil // fall back to default gccgo lookup
}
return nil, fmt.Errorf("no package file for %q", path)
}
return os.Open(file)
})
importer := importerFunc(func(importPath string) (*types.Package, error) {
path, ok := cfg.ImportMap[importPath] // resolve vendoring, etc
if !ok {
return nil, fmt.Errorf("can't resolve import %q", path)
}
return compilerImporter.Import(path)
})
tc := &types.Config{
Importer: importer,
Sizes: types.SizesFor("gc", build.Default.GOARCH), // assume gccgo ≡ gc?
}
info := &types.Info{
Types: make(map[ast.Expr]types.TypeAndValue),
Defs: make(map[*ast.Ident]types.Object),
Uses: make(map[*ast.Ident]types.Object),
Implicits: make(map[ast.Node]types.Object),
Scopes: make(map[ast.Node]*types.Scope),
Selections: make(map[*ast.SelectorExpr]*types.Selection),
}
pkg, err := tc.Check(cfg.ImportPath, fset, files, info)
if err != nil {
if cfg.SucceedOnTypecheckFailure {
// Silently succeed; let the compiler
// report type errors.
err = nil
}
return nil, err
}
// Register fact types with gob.
// In VetxOnly mode, analyzers are only for their facts,
// so we can skip any analysis that neither produces facts
// nor depends on any analysis that produces facts.
// Also build a map to hold working state and result.
type action struct {
once sync.Once
result interface{}
err error
usesFacts bool // (transitively uses)
diagnostics []analysis.Diagnostic
}
actions := make(map[*analysis.Analyzer]*action)
var registerFacts func(a *analysis.Analyzer) bool
registerFacts = func(a *analysis.Analyzer) bool {
act, ok := actions[a]
if !ok {
act = new(action)
var usesFacts bool
for _, f := range a.FactTypes {
usesFacts = true
gob.Register(f)
}
for _, req := range a.Requires {
if registerFacts(req) {
usesFacts = true
}
}
act.usesFacts = usesFacts
actions[a] = act
}
return act.usesFacts
}
var filtered []*analysis.Analyzer
for _, a := range analyzers {
if registerFacts(a) || !cfg.VetxOnly {
filtered = append(filtered, a)
}
}
analyzers = filtered
// Read facts from imported packages.
read := func(path string) ([]byte, error) {
if vetx, ok := cfg.PackageVetx[path]; ok {
return ioutil.ReadFile(vetx)
}
return nil, nil // no .vetx file, no facts
}
facts, err := facts.Decode(pkg, read)
if err != nil {
return nil, err
}
// In parallel, execute the DAG of analyzers.
var exec func(a *analysis.Analyzer) *action
var execAll func(analyzers []*analysis.Analyzer)
exec = func(a *analysis.Analyzer) *action {
act := actions[a]
act.once.Do(func() {
execAll(a.Requires) // prefetch dependencies in parallel
// The inputs to this analysis are the
// results of its prerequisites.
inputs := make(map[*analysis.Analyzer]interface{})
var failed []string
for _, req := range a.Requires {
reqact := exec(req)
if reqact.err != nil {
failed = append(failed, req.String())
continue
}
inputs[req] = reqact.result
}
// Report an error if any dependency failed.
if failed != nil {
sort.Strings(failed)
act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", "))
return
}
factFilter := make(map[reflect.Type]bool)
for _, f := range a.FactTypes {
factFilter[reflect.TypeOf(f)] = true
}
pass := &analysis.Pass{
Analyzer: a,
Fset: fset,
Files: files,
OtherFiles: cfg.NonGoFiles,
Pkg: pkg,
TypesInfo: info,
TypesSizes: tc.Sizes,
ResultOf: inputs,
Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
ImportObjectFact: facts.ImportObjectFact,
ExportObjectFact: facts.ExportObjectFact,
AllObjectFacts: func() []analysis.ObjectFact { return facts.AllObjectFacts(factFilter) },
ImportPackageFact: facts.ImportPackageFact,
ExportPackageFact: facts.ExportPackageFact,
AllPackageFacts: func() []analysis.PackageFact { return facts.AllPackageFacts(factFilter) },
}
t0 := time.Now()
act.result, act.err = a.Run(pass)
if false {
log.Printf("analysis %s = %s", pass, time.Since(t0))
}
})
return act
}
execAll = func(analyzers []*analysis.Analyzer) {
var wg sync.WaitGroup
for _, a := range analyzers {
wg.Add(1)
go func(a *analysis.Analyzer) {
_ = exec(a)
wg.Done()
}(a)
}
wg.Wait()
}
execAll(analyzers)
// Return diagnostics and errors from root analyzers.
results := make([]result, len(analyzers))
for i, a := range analyzers {
act := actions[a]
results[i].a = a
results[i].err = act.err
results[i].diagnostics = act.diagnostics
}
data := facts.Encode()
if err := ioutil.WriteFile(cfg.VetxOutput, data, 0666); err != nil {
return nil, fmt.Errorf("failed to write analysis facts: %v", err)
}
return results, nil
}
type result struct {
a *analysis.Analyzer
diagnostics []analysis.Diagnostic
err error
}
type importerFunc func(path string) (*types.Package, error)
func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
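For context, a vettool built on this package is little more than a main function that hands its analyzers to Main; gitea-vet, vendored by this commit, is a tool of this kind. A minimal sketch, using the stock printf pass purely as an example analyzer:

package main

import (
	"golang.org/x/tools/go/analysis/passes/printf"
	"golang.org/x/tools/go/analysis/unitchecker"
)

func main() {
	// "go vet -vettool=<this binary> ./..." drives the registered analyzers
	// one compilation unit at a time via the .cfg protocol described above.
	unitchecker.Main(printf.Analyzer)
}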

View file

@ -0,0 +1,9 @@
// +build go1.12
package unitchecker
import "go/importer"
func init() {
importerForCompiler = importer.ForCompiler
}

97
vendor/golang.org/x/tools/go/analysis/validate.go generated vendored Normal file
View file

@ -0,0 +1,97 @@
package analysis
import (
"fmt"
"reflect"
"unicode"
)
// Validate reports an error if any of the analyzers are misconfigured.
// Checks include:
// that the name is a valid identifier;
// that the Requires graph is acyclic;
// that analyzer fact types are unique;
// that each fact type is a pointer.
func Validate(analyzers []*Analyzer) error {
// Map each fact type to its sole generating analyzer.
factTypes := make(map[reflect.Type]*Analyzer)
// Traverse the Requires graph, depth first.
const (
white = iota
grey
black
finished
)
color := make(map[*Analyzer]uint8)
var visit func(a *Analyzer) error
visit = func(a *Analyzer) error {
if a == nil {
return fmt.Errorf("nil *Analyzer")
}
if color[a] == white {
color[a] = grey
// names
if !validIdent(a.Name) {
return fmt.Errorf("invalid analyzer name %q", a)
}
if a.Doc == "" {
return fmt.Errorf("analyzer %q is undocumented", a)
}
// fact types
for _, f := range a.FactTypes {
if f == nil {
return fmt.Errorf("analyzer %s has nil FactType", a)
}
t := reflect.TypeOf(f)
if prev := factTypes[t]; prev != nil {
return fmt.Errorf("fact type %s registered by two analyzers: %v, %v",
t, a, prev)
}
if t.Kind() != reflect.Ptr {
return fmt.Errorf("%s: fact type %s is not a pointer", a, t)
}
factTypes[t] = a
}
// recursion
for i, req := range a.Requires {
if err := visit(req); err != nil {
return fmt.Errorf("%s.Requires[%d]: %v", a.Name, i, err)
}
}
color[a] = black
}
return nil
}
for _, a := range analyzers {
if err := visit(a); err != nil {
return err
}
}
// Reject duplicates among analyzers.
// Precondition: color[a] == black.
// Postcondition: color[a] == finished.
for _, a := range analyzers {
if color[a] == finished {
return fmt.Errorf("duplicate analyzer: %s", a.Name)
}
color[a] = finished
}
return nil
}
func validIdent(name string) bool {
for i, r := range name {
if !(r == '_' || unicode.IsLetter(r) || i > 0 && unicode.IsDigit(r)) {
return false
}
}
return name != ""
}
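Main in the unitchecker file above calls Validate before doing any other work; a small hypothetical example of the kind of misconfiguration it rejects:

package main

import (
	"log"

	"golang.org/x/tools/go/analysis"
)

func main() {
	bad := &analysis.Analyzer{
		Name: "nodoc", // a valid identifier, but Doc is left empty
		Run:  func(*analysis.Pass) (interface{}, error) { return nil, nil },
	}
	if err := analysis.Validate([]*analysis.Analyzer{bad}); err != nil {
		log.Fatal(err) // analyzer "nodoc" is undocumented
	}
}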

View file

@ -0,0 +1,523 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package objectpath defines a naming scheme for types.Objects
// (that is, named entities in Go programs) relative to their enclosing
// package.
//
// Type-checker objects are canonical, so they are usually identified by
// their address in memory (a pointer), but a pointer has meaning only
// within one address space. By contrast, objectpath names allow the
// identity of an object to be sent from one program to another,
// establishing a correspondence between types.Object variables that are
// distinct but logically equivalent.
//
// A single object may have multiple paths. In this example,
// type A struct{ X int }
// type B A
// the field X has two paths due to its membership of both A and B.
// The For(obj) function always returns one of these paths, arbitrarily
// but consistently.
package objectpath
import (
"fmt"
"strconv"
"strings"
"go/types"
)
// A Path is an opaque name that identifies a types.Object
// relative to its package. Conceptually, the name consists of a
// sequence of destructuring operations applied to the package scope
// to obtain the original object.
// The name does not include the package itself.
type Path string
// Encoding
//
// An object path is a textual and (with training) human-readable encoding
// of a sequence of destructuring operators, starting from a types.Package.
// The sequences represent a path through the package/object/type graph.
// We classify these operators by their type:
//
// PO package->object Package.Scope.Lookup
// OT object->type Object.Type
// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU]
// TO type->object Type.{At,Field,Method,Obj} [AFMO]
//
// All valid paths start with a package and end at an object
// and thus may be defined by the regular language:
//
// objectpath = PO (OT TT* TO)*
//
// The concrete encoding follows directly:
// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
// - The only OT operator is Object.Type,
// which we encode as '.' because dot cannot appear in an identifier.
// - The TT operators are encoded as [EKPRU].
// - The TO operators are encoded as [AFMO];
// three of these (At,Field,Method) require an integer operand,
// which is encoded as a string of decimal digits.
// These indices are stable across different representations
// of the same package, even source and export data.
//
// In the example below,
//
// package p
//
// type T interface {
// f() (a string, b struct{ X int })
// }
//
// field X has the path "T.UM0.RA1.F0",
// representing the following sequence of operations:
//
// p.Lookup("T") T
// .Type().Underlying().Method(0). f
// .Type().Results().At(1) b
// .Type().Field(0) X
//
// The encoding is not maximally compact---every R or P is
// followed by an A, for example---but this simplifies the
// encoder and decoder.
//
const (
// object->type operators
opType = '.' // .Type() (Object)
// type->type operators
opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
opKey = 'K' // .Key() (Map)
opParams = 'P' // .Params() (Signature)
opResults = 'R' // .Results() (Signature)
opUnderlying = 'U' // .Underlying() (Named)
// type->object operators
opAt = 'A' // .At(i) (Tuple)
opField = 'F' // .Field(i) (Struct)
opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
opObj = 'O' // .Obj() (Named)
)
// The For function returns the path to an object relative to its package,
// or an error if the object is not accessible from the package's Scope.
//
// The For function guarantees to return a path only for the following objects:
// - package-level types
// - exported package-level non-types
// - methods
// - parameter and result variables
// - struct fields
// These objects are sufficient to define the API of their package.
// The objects described by a package's export data are drawn from this set.
//
// For does not return a path for predeclared names, imported package
// names, local names, and unexported package-level names (except
// types).
//
// Example: given this definition,
//
// package p
//
// type T interface {
// f() (a string, b struct{ X int })
// }
//
// For(X) would return a path that denotes the following sequence of operations:
//
// p.Scope().Lookup("T") (TypeName T)
// .Type().Underlying().Method(0). (method Func f)
// .Type().Results().At(1) (field Var b)
// .Type().Field(0) (field Var X)
//
// where p is the package (*types.Package) to which X belongs.
func For(obj types.Object) (Path, error) {
pkg := obj.Pkg()
// This table lists the cases of interest.
//
// Object Action
// ------ ------
// nil reject
// builtin reject
// pkgname reject
// label reject
// var
// package-level accept
// func param/result accept
// local reject
// struct field accept
// const
// package-level accept
// local reject
// func
// package-level accept
// init functions reject
// concrete method accept
// interface method accept
// type
// package-level accept
// local reject
//
// The only accessible package-level objects are members of pkg itself.
//
// The cases are handled in four steps:
//
// 1. reject nil and builtin
// 2. accept package-level objects
// 3. reject obviously invalid objects
// 4. search the API for the path to the param/result/field/method.
// 1. reference to nil or builtin?
if pkg == nil {
return "", fmt.Errorf("predeclared %s has no path", obj)
}
scope := pkg.Scope()
// 2. package-level object?
if scope.Lookup(obj.Name()) == obj {
// Only exported objects (and non-exported types) have a path.
// Non-exported types may be referenced by other objects.
if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() {
return "", fmt.Errorf("no path for non-exported %v", obj)
}
return Path(obj.Name()), nil
}
// 3. Not a package-level object.
// Reject obviously non-viable cases.
switch obj := obj.(type) {
case *types.Const, // Only package-level constants have a path.
*types.TypeName, // Only package-level types have a path.
*types.Label, // Labels are function-local.
*types.PkgName: // PkgNames are file-local.
return "", fmt.Errorf("no path for %v", obj)
case *types.Var:
// Could be:
// - a field (obj.IsField())
// - a func parameter or result
// - a local var.
// Sadly there is no way to distinguish
// a param/result from a local
// so we must proceed to the find.
case *types.Func:
// A func, if not package-level, must be a method.
if recv := obj.Type().(*types.Signature).Recv(); recv == nil {
return "", fmt.Errorf("func is not a method: %v", obj)
}
// TODO(adonovan): opt: if the method is concrete,
// do a specialized version of the rest of this function so
// that it's O(1) not O(|scope|). Basically 'find' is needed
// only for struct fields and interface methods.
default:
panic(obj)
}
// 4. Search the API for the path to the var (field/param/result) or method.
// First inspect package-level named types.
// In the presence of path aliases, these give
// the best paths because non-types may
// refer to types, but not the reverse.
empty := make([]byte, 0, 48) // initial space
for _, name := range scope.Names() {
o := scope.Lookup(name)
tname, ok := o.(*types.TypeName)
if !ok {
continue // handle non-types in second pass
}
path := append(empty, name...)
path = append(path, opType)
T := o.Type()
if tname.IsAlias() {
// type alias
if r := find(obj, T, path); r != nil {
return Path(r), nil
}
} else {
// defined (named) type
if r := find(obj, T.Underlying(), append(path, opUnderlying)); r != nil {
return Path(r), nil
}
}
}
// Then inspect everything else:
// non-types, and declared methods of defined types.
for _, name := range scope.Names() {
o := scope.Lookup(name)
path := append(empty, name...)
if _, ok := o.(*types.TypeName); !ok {
if o.Exported() {
// exported non-type (const, var, func)
if r := find(obj, o.Type(), append(path, opType)); r != nil {
return Path(r), nil
}
}
continue
}
// Inspect declared methods of defined types.
if T, ok := o.Type().(*types.Named); ok {
path = append(path, opType)
for i := 0; i < T.NumMethods(); i++ {
m := T.Method(i)
path2 := appendOpArg(path, opMethod, i)
if m == obj {
return Path(path2), nil // found declared method
}
if r := find(obj, m.Type(), append(path2, opType)); r != nil {
return Path(r), nil
}
}
}
}
return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path())
}
func appendOpArg(path []byte, op byte, arg int) []byte {
path = append(path, op)
path = strconv.AppendInt(path, int64(arg), 10)
return path
}
// find finds obj within type T, returning the path to it, or nil if not found.
func find(obj types.Object, T types.Type, path []byte) []byte {
switch T := T.(type) {
case *types.Basic, *types.Named:
// Named types belonging to pkg were handled already,
// so T must belong to another package. No path.
return nil
case *types.Pointer:
return find(obj, T.Elem(), append(path, opElem))
case *types.Slice:
return find(obj, T.Elem(), append(path, opElem))
case *types.Array:
return find(obj, T.Elem(), append(path, opElem))
case *types.Chan:
return find(obj, T.Elem(), append(path, opElem))
case *types.Map:
if r := find(obj, T.Key(), append(path, opKey)); r != nil {
return r
}
return find(obj, T.Elem(), append(path, opElem))
case *types.Signature:
if r := find(obj, T.Params(), append(path, opParams)); r != nil {
return r
}
return find(obj, T.Results(), append(path, opResults))
case *types.Struct:
for i := 0; i < T.NumFields(); i++ {
f := T.Field(i)
path2 := appendOpArg(path, opField, i)
if f == obj {
return path2 // found field var
}
if r := find(obj, f.Type(), append(path2, opType)); r != nil {
return r
}
}
return nil
case *types.Tuple:
for i := 0; i < T.Len(); i++ {
v := T.At(i)
path2 := appendOpArg(path, opAt, i)
if v == obj {
return path2 // found param/result var
}
if r := find(obj, v.Type(), append(path2, opType)); r != nil {
return r
}
}
return nil
case *types.Interface:
for i := 0; i < T.NumMethods(); i++ {
m := T.Method(i)
path2 := appendOpArg(path, opMethod, i)
if m == obj {
return path2 // found interface method
}
if r := find(obj, m.Type(), append(path2, opType)); r != nil {
return r
}
}
return nil
}
panic(T)
}
// Object returns the object denoted by path p within the package pkg.
func Object(pkg *types.Package, p Path) (types.Object, error) {
if p == "" {
return nil, fmt.Errorf("empty path")
}
pathstr := string(p)
var pkgobj, suffix string
if dot := strings.IndexByte(pathstr, opType); dot < 0 {
pkgobj = pathstr
} else {
pkgobj = pathstr[:dot]
suffix = pathstr[dot:] // suffix starts with "."
}
obj := pkg.Scope().Lookup(pkgobj)
if obj == nil {
return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj)
}
// abstraction of *types.{Pointer,Slice,Array,Chan,Map}
type hasElem interface {
Elem() types.Type
}
// abstraction of *types.{Interface,Named}
type hasMethods interface {
Method(int) *types.Func
NumMethods() int
}
// The loop state is the pair (t, obj),
// exactly one of which is non-nil, initially obj.
// All suffixes start with '.' (the only object->type operation),
// followed by optional type->type operations,
// then a type->object operation.
// The cycle then repeats.
var t types.Type
for suffix != "" {
code := suffix[0]
suffix = suffix[1:]
// Codes [AFM] have an integer operand.
var index int
switch code {
case opAt, opField, opMethod:
rest := strings.TrimLeft(suffix, "0123456789")
numerals := suffix[:len(suffix)-len(rest)]
suffix = rest
i, err := strconv.Atoi(numerals)
if err != nil {
return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code)
}
index = int(i)
case opObj:
// no operand
default:
// The suffix must end with a type->object operation.
if suffix == "" {
return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code)
}
}
if code == opType {
if t != nil {
return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType)
}
t = obj.Type()
obj = nil
continue
}
if t == nil {
return nil, fmt.Errorf("invalid path: code %q in object context", code)
}
// Inv: t != nil, obj == nil
switch code {
case opElem:
hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t)
}
t = hasElem.Elem()
case opKey:
mapType, ok := t.(*types.Map)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t)
}
t = mapType.Key()
case opParams:
sig, ok := t.(*types.Signature)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
}
t = sig.Params()
case opResults:
sig, ok := t.(*types.Signature)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
}
t = sig.Results()
case opUnderlying:
named, ok := t.(*types.Named)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t)
}
t = named.Underlying()
case opAt:
tuple, ok := t.(*types.Tuple)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %s, want tuple)", code, t, t)
}
if n := tuple.Len(); index >= n {
return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
}
obj = tuple.At(index)
t = nil
case opField:
structType, ok := t.(*types.Struct)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t)
}
if n := structType.NumFields(); index >= n {
return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n)
}
obj = structType.Field(index)
t = nil
case opMethod:
hasMethods, ok := t.(hasMethods) // Interface or Named
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %s, want interface or named)", code, t, t)
}
if n := hasMethods.NumMethods(); index >= n {
return nil, fmt.Errorf("method index %d out of range [0-%d)", index, n)
}
obj = hasMethods.Method(index)
t = nil
case opObj:
named, ok := t.(*types.Named)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t)
}
obj = named.Obj()
t = nil
default:
return nil, fmt.Errorf("invalid path: unknown code %q", code)
}
}
if obj.Pkg() != pkg {
return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
}
return obj, nil // success
}
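A short sketch of the round trip that facts.Encode and facts.Decode above rely on. equivalentObject is a hypothetical helper mapping an object from one in-memory copy of a package to the corresponding object in another copy (say, one loaded from source and one from export data):

package example

import (
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

// equivalentObject maps obj, defined in one copy of a package, to the logically
// equivalent object in dest, another copy of the same package.
func equivalentObject(obj types.Object, dest *types.Package) (types.Object, error) {
	p, err := objectpath.For(obj) // e.g. "T.UM0.RA1.F0" for field X in the doc comment
	if err != nil {
		return nil, err // obj is not reachable from the package API, so it has no path
	}
	return objectpath.Object(dest, p)
}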

View file

@ -4,6 +4,7 @@ package imports // import "golang.org/x/tools/imports"
import (
"go/build"
"log"
"os"
intimp "golang.org/x/tools/internal/imports"
@ -47,7 +48,6 @@ func Process(filename string, src []byte, opt *Options) ([]byte, error) {
GO111MODULE: os.Getenv("GO111MODULE"),
GOPROXY: os.Getenv("GOPROXY"),
GOSUMDB: os.Getenv("GOSUMDB"),
Debug: Debug,
LocalPrefix: LocalPrefix,
},
AllErrors: opt.AllErrors,
@ -57,6 +57,9 @@ func Process(filename string, src []byte, opt *Options) ([]byte, error) {
TabIndent: opt.TabIndent,
TabWidth: opt.TabWidth,
}
if Debug {
intopt.Env.Logf = log.Printf
}
return intimp.Process(filename, src, intopt)
}
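The hunks above only change how the package-level Debug flag reaches the internal options (as a log.Printf-backed Logf); the exported entry point is unchanged. A rough usage sketch of that entry point:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/imports"
)

func main() {
	src := []byte("package main\n\nfunc main() { fmt.Println(\"hi\") }\n")
	out, err := imports.Process("main.go", src, &imports.Options{Comments: true, TabIndent: true, TabWidth: 8})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out)) // the missing "fmt" import has been added
}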

View file

@ -76,8 +76,9 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e
}
func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) {
// golang.org/issue/15653
dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
// golang.org/issue/37269
dirent := &syscall.Dirent{}
copy((*[unsafe.Sizeof(syscall.Dirent{})]byte)(unsafe.Pointer(dirent))[:], buf)
if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v {
panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v))
}

View file

@ -5,6 +5,7 @@ import (
"bytes"
"context"
"fmt"
"io"
"os"
"os/exec"
"strings"
@ -28,9 +29,27 @@ func (i *Invocation) Run(ctx context.Context) (*bytes.Buffer, error) {
return stdout, friendly
}
// RunRaw is like Run, but also returns the raw stderr and error for callers
// RunRaw is like RunPiped, but also returns the raw stderr and error for callers
// that want to do low-level error handling/recovery.
func (i *Invocation) RunRaw(ctx context.Context) (stdout *bytes.Buffer, stderr *bytes.Buffer, friendlyError error, rawError error) {
stdout = &bytes.Buffer{}
stderr = &bytes.Buffer{}
rawError = i.RunPiped(ctx, stdout, stderr)
if rawError != nil {
// Check for 'go' executable not being found.
if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
friendlyError = fmt.Errorf("go command required, not found: %v", ee)
}
if ctx.Err() != nil {
friendlyError = ctx.Err()
}
friendlyError = fmt.Errorf("err: %v: stderr: %s", rawError, stderr)
}
return
}
// RunPiped is like Run, but relies on the given stdout/stderr
func (i *Invocation) RunPiped(ctx context.Context, stdout, stderr io.Writer) error {
log := i.Logf
if log == nil {
log = func(string, ...interface{}) {}
@ -51,8 +70,6 @@ func (i *Invocation) RunRaw(ctx context.Context) (stdout *bytes.Buffer, stderr *
goArgs = append(goArgs, i.Args...)
}
cmd := exec.Command("go", goArgs...)
stdout = &bytes.Buffer{}
stderr = &bytes.Buffer{}
cmd.Stdout = stdout
cmd.Stderr = stderr
// On darwin the cwd gets resolved to the real path, which breaks anything that
@ -66,19 +83,7 @@ func (i *Invocation) RunRaw(ctx context.Context) (stdout *bytes.Buffer, stderr *
defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
rawError = runCmdContext(ctx, cmd)
friendlyError = rawError
if rawError != nil {
// Check for 'go' executable not being found.
if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
friendlyError = fmt.Errorf("go command required, not found: %v", ee)
}
if ctx.Err() != nil {
friendlyError = ctx.Err()
}
friendlyError = fmt.Errorf("err: %v: stderr: %s", rawError, stderr)
}
return
return runCmdContext(ctx, cmd)
}
// runCmdContext is like exec.CommandContext except it sends os.Interrupt

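After this refactor RunRaw delegates to RunPiped, which writes to caller-supplied writers. A hedged sketch of a call site inside x/tools (the package is internal, and the Verb, Args and WorkingDir field names are assumptions about Invocation rather than taken from the hunk above):

package main

import (
	"bytes"
	"context"
	"fmt"
	"log"

	"golang.org/x/tools/internal/gocommand"
)

func main() {
	inv := gocommand.Invocation{
		Verb:       "env",             // assumed field: the go subcommand to run
		Args:       []string{"GOMOD"}, // assumed field: its arguments
		WorkingDir: ".",
		Logf:       log.Printf, // optional debug logging, as in the diff above
	}
	var stdout, stderr bytes.Buffer
	if err := inv.RunPiped(context.Background(), &stdout, &stderr); err != nil {
		log.Fatalf("%v: %s", err, stderr.String())
	}
	fmt.Print(stdout.String())
}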
View file

@ -23,8 +23,10 @@ import (
// Options controls the behavior of a Walk call.
type Options struct {
Debug bool // Enable debug logging
ModulesEnabled bool // Search module caches. Also disables legacy goimports ignore rules.
// If Logf is non-nil, debug logging is enabled through this function.
Logf func(format string, args ...interface{})
// Search module caches. Also disables legacy goimports ignore rules.
ModulesEnabled bool
}
// RootType indicates the type of a Root.
@ -80,14 +82,14 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root
// walkDir creates a walker and starts fastwalk with this walker.
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) {
if _, err := os.Stat(root.Path); os.IsNotExist(err) {
if opts.Debug {
log.Printf("skipping nonexistent directory: %v", root.Path)
if opts.Logf != nil {
opts.Logf("skipping nonexistent directory: %v", root.Path)
}
return
}
start := time.Now()
if opts.Debug {
log.Printf("gopathwalk: scanning %s", root.Path)
if opts.Logf != nil {
opts.Logf("gopathwalk: scanning %s", root.Path)
}
w := &walker{
root: root,
@ -100,8 +102,8 @@ func walkDir(root Root, add func(Root, string), skip func(root Root, dir string)
log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err)
}
if opts.Debug {
log.Printf("gopathwalk: scanned %s in %v", root.Path, time.Since(start))
if opts.Logf != nil {
opts.Logf("gopathwalk: scanned %s in %v", root.Path, time.Since(start))
}
}
@ -130,11 +132,11 @@ func (w *walker) init() {
full := filepath.Join(w.root.Path, p)
if fi, err := os.Stat(full); err == nil {
w.ignoredDirs = append(w.ignoredDirs, fi)
if w.opts.Debug {
log.Printf("Directory added to ignore list: %s", full)
if w.opts.Logf != nil {
w.opts.Logf("Directory added to ignore list: %s", full)
}
} else if w.opts.Debug {
log.Printf("Error statting ignored directory: %v", err)
} else if w.opts.Logf != nil {
w.opts.Logf("Error statting ignored directory: %v", err)
}
}
}
@ -145,11 +147,11 @@ func (w *walker) init() {
func (w *walker) getIgnoredDirs(path string) []string {
file := filepath.Join(path, ".goimportsignore")
slurp, err := ioutil.ReadFile(file)
if w.opts.Debug {
if w.opts.Logf != nil {
if err != nil {
log.Print(err)
w.opts.Logf("%v", err)
} else {
log.Printf("Read %s", file)
w.opts.Logf("Read %s", file)
}
}
if err != nil {

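Callers that previously set Debug: true now pass a Logf function instead. A sketch of the new Options shape (gopathwalk is internal to x/tools, so this is illustrative only, and the Root fields and RootGOPATH constant are assumptions based on the surrounding code):

package main

import (
	"go/build"
	"log"
	"path/filepath"

	"golang.org/x/tools/internal/gopathwalk"
)

func main() {
	roots := []gopathwalk.Root{{
		Path: filepath.Join(build.Default.GOPATH, "src"),
		Type: gopathwalk.RootGOPATH,
	}}
	gopathwalk.Walk(roots, func(root gopathwalk.Root, dir string) {
		log.Println("candidate package directory:", dir)
	}, gopathwalk.Options{Logf: log.Printf, ModulesEnabled: false})
}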
View file

@ -262,7 +262,7 @@ type pass struct {
// loadPackageNames saves the package names for everything referenced by imports.
func (p *pass) loadPackageNames(imports []*ImportInfo) error {
if p.env.Debug {
if p.env.Logf != nil {
p.env.Logf("loading package names for %v packages", len(imports))
defer func() {
p.env.Logf("done loading package names for %v packages", len(imports))
@ -334,7 +334,7 @@ func (p *pass) load() ([]*ImportFix, bool) {
if p.loadRealPackageNames {
err := p.loadPackageNames(append(imports, p.candidates...))
if err != nil {
if p.env.Debug {
if p.env.Logf != nil {
p.env.Logf("loading package names: %v", err)
}
return nil, false
@ -528,7 +528,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv
return nil, err
}
srcDir := filepath.Dir(abs)
if env.Debug {
if env.Logf != nil {
env.Logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir)
}
@ -746,7 +746,6 @@ func getPackageExports(ctx context.Context, wrapped func(PackageExport), searchP
// the go command, the go/build package, etc.
type ProcessEnv struct {
LocalPrefix string
Debug bool
BuildFlags []string
@ -755,7 +754,7 @@ type ProcessEnv struct {
GOPATH, GOROOT, GO111MODULE, GOPROXY, GOFLAGS, GOSUMDB string
WorkingDir string
// Logf is the default logger for the ProcessEnv.
// If Logf is non-nil, debug logging is enabled through this function.
Logf func(format string, args ...interface{})
resolver Resolver
@ -1238,7 +1237,7 @@ func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error
case <-r.scanSema:
}
defer func() { r.scanSema <- struct{}{} }()
gopathwalk.Walk(roots, add, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: false})
gopathwalk.Walk(roots, add, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: false})
close(scanDone)
}()
select {
@ -1342,7 +1341,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl
}
}
if env.Debug {
if env.Logf != nil {
sortedExports := append([]string(nil), exports...)
sort.Strings(sortedExports)
env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, strings.Join(sortedExports, ", "))
@ -1358,7 +1357,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
// ones. Note that this sorts by the de-vendored name, so
// there's no "penalty" for vendoring.
sort.Sort(byDistanceOrImportPathShortLength(candidates))
if pass.env.Debug {
if pass.env.Logf != nil {
for i, c := range candidates {
pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir)
}
@ -1396,14 +1395,14 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
wg.Done()
}()
if pass.env.Debug {
if pass.env.Logf != nil {
pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName)
}
// If we're an x_test, load the package under test's test variant.
includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir
_, exports, err := pass.env.GetResolver().loadExports(ctx, c.pkg, includeTest)
if err != nil {
if pass.env.Debug {
if pass.env.Logf != nil {
pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err)
}
resc <- nil

View file

@ -21,7 +21,6 @@ import (
"go/token"
"io"
"io/ioutil"
"log"
"os"
"regexp"
"strconv"
@ -155,12 +154,6 @@ func initialize(filename string, src []byte, opt *Options) ([]byte, *Options, er
GOSUMDB: os.Getenv("GOSUMDB"),
}
}
// Set the logger if the user has not provided it.
if opt.Env.Logf == nil {
opt.Env.Logf = log.Printf
}
if src == nil {
b, err := ioutil.ReadFile(filename)
if err != nil {

View file

@ -156,7 +156,7 @@ func (r *ModuleResolver) initAllMods() error {
return err
}
if mod.Dir == "" {
if r.env.Debug {
if r.env.Logf != nil {
r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path)
}
// Can't do anything with a module that's not downloaded.
@ -470,7 +470,7 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
if r.scannedRoots[root] {
continue
}
gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true})
gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: true})
r.scannedRoots[root] = true
}
close(scanDone)
@ -583,7 +583,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir
}
modPath, err := module.UnescapePath(filepath.ToSlash(matches[1]))
if err != nil {
if r.env.Debug {
if r.env.Logf != nil {
r.env.Logf("decoding module cache path %q: %v", subdir, err)
}
return directoryPackageInfo{

138
vendor/golang.org/x/xerrors/fmt.go generated vendored
View file

@ -7,10 +7,14 @@ package xerrors
import (
"fmt"
"strings"
"unicode"
"unicode/utf8"
"golang.org/x/xerrors/internal"
)
const percentBangString = "%!"
// Errorf formats according to a format specifier and returns the string as a
// value that satisfies error.
//
@ -18,29 +22,71 @@ import (
// formatted with additional detail enabled. If the last argument is an error
// the returned error's Format method will return it if the format string ends
// with ": %s", ": %v", or ": %w". If the last argument is an error and the
// format string ends with ": %w", the returned error implements Wrapper
// with an Unwrap method returning it.
// format string ends with ": %w", the returned error implements an Unwrap
// method returning it.
//
// If the format specifier includes a %w verb with an error operand in a
// position other than at the end, the returned error will still implement an
// Unwrap method returning the operand, but the error's Format method will not
// return the wrapped error.
//
// It is invalid to include more than one %w verb or to supply it with an
// operand that does not implement the error interface. The %w verb is otherwise
// a synonym for %v.
func Errorf(format string, a ...interface{}) error {
err, wrap := lastError(format, a)
format = formatPlusW(format)
if err == nil {
return &noWrapError{fmt.Sprintf(format, a...), nil, Caller(1)}
// Support a ": %[wsv]" suffix, which works well with xerrors.Formatter.
wrap := strings.HasSuffix(format, ": %w")
idx, format2, ok := parsePercentW(format)
percentWElsewhere := !wrap && idx >= 0
if !percentWElsewhere && (wrap || strings.HasSuffix(format, ": %s") || strings.HasSuffix(format, ": %v")) {
err := errorAt(a, len(a)-1)
if err == nil {
return &noWrapError{fmt.Sprintf(format, a...), nil, Caller(1)}
}
// TODO: this is not entirely correct. The error value could be
// printed elsewhere in format if it mixes numbered with unnumbered
// substitutions. With relatively small changes to doPrintf we can
// have it optionally ignore extra arguments and pass the argument
// list in its entirety.
msg := fmt.Sprintf(format[:len(format)-len(": %s")], a[:len(a)-1]...)
frame := Frame{}
if internal.EnableTrace {
frame = Caller(1)
}
if wrap {
return &wrapError{msg, err, frame}
}
return &noWrapError{msg, err, frame}
}
// Support %w anywhere.
// TODO: don't repeat the wrapped error's message when %w occurs in the middle.
msg := fmt.Sprintf(format2, a...)
if idx < 0 {
return &noWrapError{msg, nil, Caller(1)}
}
err := errorAt(a, idx)
if !ok || err == nil {
// Too many %ws or argument of %w is not an error. Approximate the Go
// 1.13 fmt.Errorf message.
return &noWrapError{fmt.Sprintf("%sw(%s)", percentBangString, msg), nil, Caller(1)}
}
// TODO: this is not entirely correct. The error value could be
// printed elsewhere in format if it mixes numbered with unnumbered
// substitutions. With relatively small changes to doPrintf we can
// have it optionally ignore extra arguments and pass the argument
// list in its entirety.
msg := fmt.Sprintf(format[:len(format)-len(": %s")], a[:len(a)-1]...)
frame := Frame{}
if internal.EnableTrace {
frame = Caller(1)
}
if wrap {
return &wrapError{msg, err, frame}
return &wrapError{msg, err, frame}
}
func errorAt(args []interface{}, i int) error {
if i < 0 || i >= len(args) {
return nil
}
return &noWrapError{msg, err, frame}
err, ok := args[i].(error)
if !ok {
return nil
}
return err
}
// formatPlusW is used to avoid the vet check that will barf at %w.
@ -48,24 +94,56 @@ func formatPlusW(s string) string {
return s
}
func lastError(format string, a []interface{}) (err error, wrap bool) {
wrap = strings.HasSuffix(format, ": %w")
if !wrap &&
!strings.HasSuffix(format, ": %s") &&
!strings.HasSuffix(format, ": %v") {
return nil, false
// Return the index of the only %w in format, or -1 if none.
// Also return a rewritten format string with %w replaced by %v, and
// false if there is more than one %w.
// TODO: handle "%[N]w".
func parsePercentW(format string) (idx int, newFormat string, ok bool) {
// Loosely copied from golang.org/x/tools/go/analysis/passes/printf/printf.go.
idx = -1
ok = true
n := 0
sz := 0
var isW bool
for i := 0; i < len(format); i += sz {
if format[i] != '%' {
sz = 1
continue
}
// "%%" is not a format directive.
if i+1 < len(format) && format[i+1] == '%' {
sz = 2
continue
}
sz, isW = parsePrintfVerb(format[i:])
if isW {
if idx >= 0 {
ok = false
} else {
idx = n
}
// "Replace" the last character, the 'w', with a 'v'.
p := i + sz - 1
format = format[:p] + "v" + format[p+1:]
}
n++
}
return idx, format, ok
}
if len(a) == 0 {
return nil, false
// Parse the printf verb starting with a % at s[0].
// Return how many bytes it occupies and whether the verb is 'w'.
func parsePrintfVerb(s string) (int, bool) {
// Assume only that the directive is a sequence of non-letters followed by a single letter.
sz := 0
var r rune
for i := 1; i < len(s); i += sz {
r, sz = utf8.DecodeRuneInString(s[i:])
if unicode.IsLetter(r) {
return i + sz, r == 'w'
}
}
err, ok := a[len(a)-1].(error)
if !ok {
return nil, false
}
return err, wrap
return len(s), false
}
type noWrapError struct {

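The rewritten Errorf accepts %w anywhere in the format string rather than only as a ": %w" suffix, but wrapping looks the same from the caller's side. A small sketch:

package main

import (
	"errors"
	"fmt"

	"golang.org/x/xerrors"
)

func main() {
	base := errors.New("disk full")
	err := xerrors.Errorf("saving config: %w", base)
	fmt.Println(err)                   // saving config: disk full
	fmt.Println(xerrors.Is(err, base)) // true: the returned error has an Unwrap method
}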
15
vendor/modules.txt vendored
View file

@ -1,6 +1,10 @@
# cloud.google.com/go v0.45.0
## explicit
cloud.google.com/go/compute/metadata
# gitea.com/jolheiser/gitea-vet v0.1.0
## explicit
gitea.com/jolheiser/gitea-vet
gitea.com/jolheiser/gitea-vet/checks
# gitea.com/lunny/levelqueue v0.2.0
## explicit
gitea.com/lunny/levelqueue
@ -681,7 +685,7 @@ golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/knownhosts
# golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee
# golang.org/x/mod v0.2.0
golang.org/x/mod/module
golang.org/x/mod/semver
# golang.org/x/net v0.0.0-20200301022130-244492dfa37a
@ -731,9 +735,13 @@ golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
golang.org/x/text/width
# golang.org/x/tools v0.0.0-20200225230052-807dcd883420
# golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224
## explicit
golang.org/x/tools/cover
golang.org/x/tools/go/analysis
golang.org/x/tools/go/analysis/internal/analysisflags
golang.org/x/tools/go/analysis/internal/facts
golang.org/x/tools/go/analysis/unitchecker
golang.org/x/tools/go/ast/astutil
golang.org/x/tools/go/buildutil
golang.org/x/tools/go/gcexportdata
@ -742,13 +750,14 @@ golang.org/x/tools/go/internal/gcimporter
golang.org/x/tools/go/internal/packagesdriver
golang.org/x/tools/go/loader
golang.org/x/tools/go/packages
golang.org/x/tools/go/types/objectpath
golang.org/x/tools/imports
golang.org/x/tools/internal/fastwalk
golang.org/x/tools/internal/gocommand
golang.org/x/tools/internal/gopathwalk
golang.org/x/tools/internal/imports
golang.org/x/tools/internal/packagesinternal
# golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898
# golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
golang.org/x/xerrors
golang.org/x/xerrors/internal
# google.golang.org/appengine v1.6.4