Commit 545a9b6e authored by Jacob Vosmaer

Merge branch '212-replace-megacheck-with-staticcheck' into 'master'

Replace megacheck with staticcheck

Closes #212

See merge request gitlab-org/gitlab-workhorse!376
parents 570d5d87 61cb169a
-image: golang:1.10
+image: golang:1.11

 verify:
   script:

@@ -17,10 +17,10 @@ verify:
   - make test

 test using go 1.10:
+  image: golang:1.10
   <<: *test_definition

 test using go 1.11:
-  image: golang:1.11
   <<: *test_definition

 test:release:
...
@@ -106,7 +106,7 @@ testdata/data/group/test.git:
 	git clone --quiet --bare https://gitlab.com/gitlab-org/gitlab-test.git $@

 .PHONY: verify
-verify: lint vet detect-context check-formatting megacheck
+verify: lint vet detect-context check-formatting staticcheck

 .PHONY: lint
 lint: $(TARGET_SETUP) govendor-sync

@@ -132,11 +132,11 @@ check-formatting: $(TARGET_SETUP) install-goimports
 # Megacheck will tailor some responses given a minimum Go version, so pass that through the CLI
 # Additionally, megacheck will not return failure exit codes unless explicitely told to via the
 # `-simple.exit-non-zero` `-unused.exit-non-zero` and `-staticcheck.exit-non-zero` flags
-.PHONY: megacheck
-megacheck: $(TARGET_SETUP) govendor-sync
+.PHONY: staticcheck
+staticcheck: $(TARGET_SETUP) govendor-sync
 	$(call message,Verify: $@)
-	@command -v megacheck || go get -v honnef.co/go/tools/cmd/megacheck
-	@megacheck -go $(MINIMUM_SUPPORTED_GO_VERSION) -simple.exit-non-zero -unused.exit-non-zero -staticcheck.exit-non-zero $(LOCAL_PACKAGES)
+	@command -v staticcheck || go get -v honnef.co/go/tools/cmd/staticcheck
+	@staticcheck -go $(MINIMUM_SUPPORTED_GO_VERSION) $(LOCAL_PACKAGES)

 # Some vendor components, used for testing are GPL, so we don't distribute them
 # and need to go a sync before using them
...
@@ -83,7 +83,7 @@ func TestPreAuthorizeContentTypeFailure(t *testing.T) {

 func TestPreAuthorizeRedirect(t *testing.T) {
 	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		http.Redirect(w, r, "/", 301)
+		http.Redirect(w, r, "/", http.StatusMovedPermanently)
 	}))
 	defer ts.Close()
...
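The hunk above swaps the bare 301 literal for the named constant from net/http. A minimal, self-contained sketch of the same pattern (the server and client below are illustrative only, not code from this repository):

    package main

    import (
    	"fmt"
    	"net/http"
    	"net/http/httptest"
    )

    func main() {
    	// Redirect using the named constant instead of the bare 301 literal.
    	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    		http.Redirect(w, r, "/", http.StatusMovedPermanently)
    	}))
    	defer ts.Close()

    	// Do not follow redirects, so the 301 response itself is observable.
    	client := &http.Client{
    		CheckRedirect: func(req *http.Request, via []*http.Request) error {
    			return http.ErrUseLastResponse
    		},
    	}

    	resp, err := client.Get(ts.URL)
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()

    	fmt.Println(resp.StatusCode == http.StatusMovedPermanently) // true
    }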
@@ -63,20 +63,20 @@ func (t *TerminalSettings) Dial() (*websocket.Conn, *http.Response, error) {

 func (t *TerminalSettings) Validate() error {
 	if t == nil {
-		return fmt.Errorf("Terminal details not specified")
+		return fmt.Errorf("terminal details not specified")
 	}

 	if len(t.Subprotocols) == 0 {
-		return fmt.Errorf("No subprotocol specified")
+		return fmt.Errorf("no subprotocol specified")
 	}

 	parsedURL, err := t.URL()
 	if err != nil {
-		return fmt.Errorf("Invalid URL")
+		return fmt.Errorf("invalid URL")
 	}

 	if parsedURL.Scheme != "ws" && parsedURL.Scheme != "wss" {
-		return fmt.Errorf("Invalid websocket scheme: %q", parsedURL.Scheme)
+		return fmt.Errorf("invalid websocket scheme: %q", parsedURL.Scheme)
 	}

 	return nil
...
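The changes in this file, and in several files below, lowercase the first word of error strings. This follows the common Go convention that error messages are not capitalized and carry no trailing punctuation, because they are usually embedded in other messages. A small hypothetical sketch (validate is a stand-in, not code from this repository) of how such an error reads once wrapped:

    package main

    import "fmt"

    // validate is a hypothetical stand-in for TerminalSettings.Validate, shown only
    // to illustrate the lowercase, unpunctuated error-string style adopted above.
    func validate(subprotocols []string) error {
    	if len(subprotocols) == 0 {
    		return fmt.Errorf("no subprotocol specified")
    	}
    	return nil
    }

    func main() {
    	if err := validate(nil); err != nil {
    		// A lowercase message reads naturally once a caller wraps it:
    		// "checking terminal settings: no subprotocol specified"
    		fmt.Println(fmt.Errorf("checking terminal settings: %v", err))
    	}
    }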
@@ -77,10 +77,10 @@ func (a *artifactsUploadProcessor) ProcessFile(ctx context.Context, formName str
 	// ProcessFile for artifacts requires file form-data field name to eq `file`
 	if formName != "file" {
-		return fmt.Errorf("Invalid form field: %q", formName)
+		return fmt.Errorf("invalid form field: %q", formName)
 	}

 	if a.stored {
-		return fmt.Errorf("Artifacts request contains more than one file")
+		return fmt.Errorf("artifacts request contains more than one file")
 	}

 	a.stored = true
...
@@ -150,7 +150,7 @@ func (a *alwaysLocalPreparer) Prepare(_ *api.Response) (*filestore.SaveFileOpts,
 type alwaysFailsVerifier struct{}

-func (_ alwaysFailsVerifier) Verify(handler *filestore.FileHandler) error {
+func (alwaysFailsVerifier) Verify(handler *filestore.FileHandler) error {
 	return fmt.Errorf("Verification failed")
 }
...
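The hunk above drops the `_` receiver name, leaving the receiver unnamed since it is never used inside the method. A tiny sketch with hypothetical names (failingVerifier, Verify) showing that the method behaves the same either way:

    package main

    import "fmt"

    type failingVerifier struct{}

    // The receiver is never used, so it is left unnamed rather than bound to `_`;
    // the method set of failingVerifier is identical either way.
    func (failingVerifier) Verify(path string) error {
    	return fmt.Errorf("verification failed for %q", path)
    }

    func main() {
    	var v failingVerifier
    	fmt.Println(v.Verify("/tmp/example"))
    }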
@@ -15,7 +15,7 @@ import (
 type SizeError error

 // ErrEntityTooLarge means that the uploaded content is bigger then maximum allowed size
-var ErrEntityTooLarge = errors.New("Entity is too large")
+var ErrEntityTooLarge = errors.New("entity is too large")

 // FileHandler represent a file that has been processed for upload
 // it may be either uploaded to an ObjectStore and/or saved on local path.

@@ -125,7 +125,7 @@ func SaveFileFromReader(ctx context.Context, reader io.Reader, size int64, opts
 	}

 	if len(writers) == 1 {
-		return nil, errors.New("Missing upload destination")
+		return nil, errors.New("missing upload destination")
 	}

 	multiWriter := io.MultiWriter(writers...)

@@ -135,7 +135,7 @@ func SaveFileFromReader(ctx context.Context, reader io.Reader, size int64, opts
 	}

 	if size != -1 && size != fh.Size {
-		return nil, SizeError(fmt.Errorf("Expected %d bytes but got only %d", size, fh.Size))
+		return nil, SizeError(fmt.Errorf("expected %d bytes but got only %d", size, fh.Size))
 	}

 	fh.hashes = hashes.finish()
...
@@ -18,7 +18,7 @@ import (
 )

 // ErrNotEnoughParts will be used when writing more than size * len(partURLs)
-var ErrNotEnoughParts = errors.New("Not enough Parts")
+var ErrNotEnoughParts = errors.New("not enough Parts")

 // Multipart represents a MultipartUpload on a S3 compatible Object Store service.
 // It can be used as io.WriteCloser for uploading an object

@@ -76,7 +76,7 @@ func NewMultipart(ctx context.Context, partURLs []string, completeURL, abortURL,
 		n, err := io.Copy(ioutil.Discard, pr)
 		if err != nil {
-			m.uploadError = fmt.Errorf("Cannot drain pipe: %v", err)
+			m.uploadError = fmt.Errorf("cannot drain pipe: %v", err)
 			return
 		}

 		if n > 0 {

@@ -120,12 +120,12 @@ func (m *Multipart) cleanup(ctx context.Context) {
 func (m *Multipart) complete(cmu *CompleteMultipartUpload) error {
 	body, err := xml.Marshal(cmu)
 	if err != nil {
-		return fmt.Errorf("Cannot marshal CompleteMultipartUpload request: %v", err)
+		return fmt.Errorf("cannot marshal CompleteMultipartUpload request: %v", err)
 	}

 	req, err := http.NewRequest("POST", m.CompleteURL, bytes.NewReader(body))
 	if err != nil {
-		return fmt.Errorf("Cannot create CompleteMultipartUpload request: %v", err)
+		return fmt.Errorf("cannot create CompleteMultipartUpload request: %v", err)
 	}
 	req.ContentLength = int64(len(body))
 	req.Header.Set("Content-Type", "application/xml")

@@ -144,7 +144,7 @@ func (m *Multipart) complete(cmu *CompleteMultipartUpload) error {
 	result := &compoundCompleteMultipartUploadResult{}
 	decoder := xml.NewDecoder(resp.Body)
 	if err := decoder.Decode(&result); err != nil {
-		return fmt.Errorf("Cannot decode CompleteMultipartUpload answer: %v", err)
+		return fmt.Errorf("cannot decode CompleteMultipartUpload answer: %v", err)
 	}

 	if result.isError() {

@@ -152,7 +152,7 @@ func (m *Multipart) complete(cmu *CompleteMultipartUpload) error {
 	}

 	if result.CompleteMultipartUploadResult == nil {
-		return fmt.Errorf("Cannot read CompleteMultipartUpload answer")
+		return fmt.Errorf("cannot read CompleteMultipartUpload answer")
 	}

 	m.extractETag(result.ETag)

@@ -178,7 +178,7 @@ func (m *Multipart) verifyETag(cmu *CompleteMultipartUpload) error {
 func (m *Multipart) readAndUploadOnePart(partURL string, putHeaders map[string]string, src io.Reader, partNumber int) (*completeMultipartUploadPart, error) {
 	file, err := ioutil.TempFile("", "part-buffer")
 	if err != nil {
-		return nil, fmt.Errorf("Unable to create a temporary file for buffering: %v", err)
+		return nil, fmt.Errorf("unable to create a temporary file for buffering: %v", err)
 	}
 	defer func(path string) {
 		if err := os.Remove(path); err != nil {

@@ -188,19 +188,19 @@ func (m *Multipart) readAndUploadOnePart(partURL string, putHeaders map[string]s
 	n, err := io.Copy(file, src)
 	if err != nil {
-		return nil, fmt.Errorf("Cannot write part %d to disk: %v", partNumber, err)
+		return nil, fmt.Errorf("cannot write part %d to disk: %v", partNumber, err)
 	}
 	if n == 0 {
 		return nil, nil
 	}

 	if _, err = file.Seek(0, io.SeekStart); err != nil {
-		return nil, fmt.Errorf("Cannot rewind part %d temporary dump : %v", partNumber, err)
+		return nil, fmt.Errorf("cannot rewind part %d temporary dump : %v", partNumber, err)
 	}

 	etag, err := m.uploadPart(partURL, putHeaders, file, n)
 	if err != nil {
-		return nil, fmt.Errorf("Cannot upload part %d: %v", partNumber, err)
+		return nil, fmt.Errorf("cannot upload part %d: %v", partNumber, err)
 	}

 	return &completeMultipartUploadPart{PartNumber: partNumber, ETag: etag}, nil
 }

@@ -208,7 +208,7 @@ func (m *Multipart) readAndUploadOnePart(partURL string, putHeaders map[string]s
 func (m *Multipart) uploadPart(url string, headers map[string]string, body io.Reader, size int64) (string, error) {
 	deadline, ok := m.ctx.Deadline()
 	if !ok {
-		return "", fmt.Errorf("Missing deadline")
+		return "", fmt.Errorf("missing deadline")
 	}

 	part, err := newObject(m.ctx, url, "", headers, deadline, size, false)
...
@@ -274,7 +274,7 @@ func Configure(cfg *config.RedisConfig, dialFunc func(*config.RedisConfig, bool)
 	if sntnl != nil {
 		pool.TestOnBorrow = func(c redis.Conn, t time.Time) error {
 			if !sentinel.TestRole(c, "master") {
-				return errors.New("Role check failed")
+				return errors.New("role check failed")
 			}
 			return nil
 		}
...
@@ -132,7 +132,7 @@ func sendFileFromDisk(w http.ResponseWriter, r *http.Request, file string) {
 	if contentTypeHeaderPresent {
 		data, err := ioutil.ReadAll(io.LimitReader(content, headers.MaxDetectSize))
 		if err != nil {
-			helper.Fail500(w, r, fmt.Errorf("Error reading the file"))
+			helper.Fail500(w, r, fmt.Errorf("error reading the file"))
 			return
 		}
...
@@ -52,7 +52,7 @@ var (
 	kubeMsg = append([]byte{0}, msg...)
 	kubeMsgBase64 = append([]byte{'0'}, msgBase64...)

-	fakeErr = errors.New("fake error")
+	errFake = errors.New("fake error")

 	text = websocket.TextMessage
 	binary = websocket.BinaryMessage

@@ -81,25 +81,25 @@ func assertEqual(t *testing.T, expected, actual *fakeConn, msg string, args ...i
 func TestReadMessage(t *testing.T) {
 	testCases := map[string][]testcase{
 		"channel.k8s.io": {
-			{fake(binary, kubeMsg, fakeErr), fake(binary, kubeMsg, fakeErr)},
+			{fake(binary, kubeMsg, errFake), fake(binary, kubeMsg, errFake)},
 			{fake(binary, kubeMsg, nil), fake(binary, msg, nil)},
 			{fake(text, kubeMsg, nil), fake(binary, msg, nil)},
 			{fakeOther, fakeOther},
 		},
 		"base64.channel.k8s.io": {
-			{fake(text, kubeMsgBase64, fakeErr), fake(text, kubeMsgBase64, fakeErr)},
+			{fake(text, kubeMsgBase64, errFake), fake(text, kubeMsgBase64, errFake)},
 			{fake(text, kubeMsgBase64, nil), fake(binary, msg, nil)},
 			{fake(binary, kubeMsgBase64, nil), fake(binary, msg, nil)},
 			{fakeOther, fakeOther},
 		},
 		"terminal.gitlab.com": {
-			{fake(binary, msg, fakeErr), fake(binary, msg, fakeErr)},
+			{fake(binary, msg, errFake), fake(binary, msg, errFake)},
 			{fake(binary, msg, nil), fake(binary, msg, nil)},
 			{fake(text, msg, nil), fake(binary, msg, nil)},
 			{fakeOther, fakeOther},
 		},
 		"base64.terminal.gitlab.com": {
-			{fake(text, msgBase64, fakeErr), fake(text, msgBase64, fakeErr)},
+			{fake(text, msgBase64, errFake), fake(text, msgBase64, errFake)},
 			{fake(text, msgBase64, nil), fake(binary, msg, nil)},
 			{fake(binary, msgBase64, nil), fake(binary, msg, nil)},
 			{fakeOther, fakeOther},

@@ -119,25 +119,25 @@ func TestReadMessage(t *testing.T) {
 func TestWriteMessage(t *testing.T) {
 	testCases := map[string][]testcase{
 		"channel.k8s.io": {
-			{fake(binary, msg, fakeErr), fake(binary, kubeMsg, fakeErr)},
+			{fake(binary, msg, errFake), fake(binary, kubeMsg, errFake)},
 			{fake(binary, msg, nil), fake(binary, kubeMsg, nil)},
 			{fake(text, msg, nil), fake(binary, kubeMsg, nil)},
 			{fakeOther, fakeOther},
 		},
 		"base64.channel.k8s.io": {
-			{fake(binary, msg, fakeErr), fake(text, kubeMsgBase64, fakeErr)},
+			{fake(binary, msg, errFake), fake(text, kubeMsgBase64, errFake)},
 			{fake(binary, msg, nil), fake(text, kubeMsgBase64, nil)},
 			{fake(text, msg, nil), fake(text, kubeMsgBase64, nil)},
 			{fakeOther, fakeOther},
 		},
 		"terminal.gitlab.com": {
-			{fake(binary, msg, fakeErr), fake(binary, msg, fakeErr)},
+			{fake(binary, msg, errFake), fake(binary, msg, errFake)},
 			{fake(binary, msg, nil), fake(binary, msg, nil)},
 			{fake(text, msg, nil), fake(binary, msg, nil)},
 			{fakeOther, fakeOther},
 		},
 		"base64.terminal.gitlab.com": {
-			{fake(binary, msg, fakeErr), fake(text, msgBase64, fakeErr)},
+			{fake(binary, msg, errFake), fake(text, msgBase64, errFake)},
 			{fake(binary, msg, nil), fake(text, msgBase64, nil)},
 			{fake(text, msg, nil), fake(text, msgBase64, nil)},
 			{fakeOther, fakeOther},
...
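The rename from fakeErr to errFake in this test file follows the common Go convention of prefixing error values with err (or Err when exported). A self-contained sketch, with a hypothetical doWork helper, of how such a sentinel is declared and compared:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // Sentinel error values are conventionally named errXxx (ErrXxx when exported),
    // which is the shape the fakeErr -> errFake rename above follows.
    var errFake = errors.New("fake error")

    // doWork is a hypothetical helper used only to show the sentinel in action.
    func doWork(fail bool) error {
    	if fail {
    		return errFake
    	}
    	return nil
    }

    func main() {
    	// Callers can compare directly against the sentinel value.
    	fmt.Println(doWork(true) == errFake) // true
    }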
@@ -118,7 +118,7 @@ func (rew *rewriter) handleFilePart(ctx context.Context, name string, p *multipa
 		if err == filestore.ErrEntityTooLarge {
 			return err
 		}
-		return fmt.Errorf("Persisting multipart file: %v", err)
+		return fmt.Errorf("persisting multipart file: %v", err)
 	}

 	for key, value := range fh.GitLabFinalizeFields(name) {
...
@@ -59,7 +59,7 @@ func openHTTPArchive(ctx context.Context, archivePath string) (*zip.Reader, erro
 	scrubbedArchivePath := helper.ScrubURLParams(archivePath)
 	req, err := http.NewRequest(http.MethodGet, archivePath, nil)
 	if err != nil {
-		return nil, fmt.Errorf("Can't create HTTP GET %q: %v", scrubbedArchivePath, err)
+		return nil, fmt.Errorf("can't create HTTP GET %q: %v", scrubbedArchivePath, err)
 	}

 	req = req.WithContext(ctx)
...