Commit 78d67164 authored by Jay Conrod

cmd/go: integrate changes made in x/mod packages into internal packages

This change integrates changes made to x/mod packages into our internal
copies of those packages.

This is the first step of a bidirectional synchronization. A follow-up
change will copy changes made to the internal packages after x/mod was
forked. After that, we can vendor x/mod, update imports, and delete
the internal copies.

The following packages are affected:

* internal/module
* internal/semver (no change)
* internal/sumweb (renamed to internal/sumdb)
* internal/dirhash
* internal/note
* internal/tlog

Several integrated changes affect other packages:

* cmd/go/internal/module.MatchPathMajor now wraps a new function,
  CheckPathMajor, which returns an error; MatchPathMajor itself returns
  a bool. This avoids an incompatible change in the next step
  (see the sketch after this list).
* module.EncodePath renamed to EscapePath, EncodeVersion to
  EscapeVersion, DecodePath to UnescapePath, DecodeVersion to
  UnescapeVersion.
* cmd/go/internal/sumweb moved to cmd/go/internal/sumdb and package
  renamed to sumdb.
* sumdb.Client renamed to ClientOps, Conn to Client, Server to
  ServerOps, Paths to ServerPaths.
* sumdb/encode.go and encode_test.go are not present in x/mod since
  they are redundant with functionality in module. Both files are
  deleted.
* sumdb.TestServer doesn't implement sumdb.ServerOps after changes
  were made to golang.org/x/mod/sumdb.ServerOps during the fork.
  Local changes were made so tests will pass; these will be copied
  to x/mod in the next step.

Updates #34801

Change-Id: I7e820f10ae0cdbec238e59d039e978fd1cdc7201
Reviewed-on: https://go-review.googlesource.com/c/go/+/200138
Run-TryBot: Jay Conrod <jayconrod@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Bryan C. Mills <bcmills@google.com>
parent 2686e749
...@@ -3,6 +3,8 @@ ...@@ -3,6 +3,8 @@
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// Package dirhash defines hashes over directory trees. // Package dirhash defines hashes over directory trees.
// These hashes are recorded in go.sum files and in the Go checksum database,
// to allow verifying that a newly-downloaded module has the expected content.
package dirhash package dirhash
import ( import (
...@@ -18,17 +20,34 @@ import ( ...@@ -18,17 +20,34 @@ import (
"strings" "strings"
) )
var DefaultHash = Hash1 // DefaultHash is the default hash function used in new go.sum entries.
var DefaultHash Hash = Hash1
// A Hash is a directory hash function.
// It accepts a list of files along with a function that opens the content of each file.
// It opens, reads, hashes, and closes each file and returns the overall directory hash.
type Hash func(files []string, open func(string) (io.ReadCloser, error)) (string, error) type Hash func(files []string, open func(string) (io.ReadCloser, error)) (string, error)
// Hash1 is the "h1:" directory hash function, using SHA-256.
//
// Hash1 is "h1:" followed by the base64-encoded SHA-256 hash of a summary
// prepared as if by the Unix command:
//
// find . -type f | sort | sha256sum
//
// More precisely, the hashed summary contains a single line for each file in the list,
// ordered by sort.Strings applied to the file names, where each line consists of
// the hexadecimal SHA-256 hash of the file content,
// two spaces (U+0020), the file name, and a newline (U+000A).
//
// File names with newlines (U+000A) are disallowed.
func Hash1(files []string, open func(string) (io.ReadCloser, error)) (string, error) { func Hash1(files []string, open func(string) (io.ReadCloser, error)) (string, error) {
h := sha256.New() h := sha256.New()
files = append([]string(nil), files...) files = append([]string(nil), files...)
sort.Strings(files) sort.Strings(files)
for _, file := range files { for _, file := range files {
if strings.Contains(file, "\n") { if strings.Contains(file, "\n") {
return "", errors.New("filenames with newlines are not supported") return "", errors.New("dirhash: filenames with newlines are not supported")
} }
r, err := open(file) r, err := open(file)
if err != nil { if err != nil {
...@@ -45,6 +64,9 @@ func Hash1(files []string, open func(string) (io.ReadCloser, error)) (string, er ...@@ -45,6 +64,9 @@ func Hash1(files []string, open func(string) (io.ReadCloser, error)) (string, er
return "h1:" + base64.StdEncoding.EncodeToString(h.Sum(nil)), nil return "h1:" + base64.StdEncoding.EncodeToString(h.Sum(nil)), nil
} }
// HashDir returns the hash of the local file system directory dir,
// replacing the directory name itself with prefix in the file names
// used in the hash function.
func HashDir(dir, prefix string, hash Hash) (string, error) { func HashDir(dir, prefix string, hash Hash) (string, error) {
files, err := DirFiles(dir, prefix) files, err := DirFiles(dir, prefix)
if err != nil { if err != nil {
...@@ -56,6 +78,9 @@ func HashDir(dir, prefix string, hash Hash) (string, error) { ...@@ -56,6 +78,9 @@ func HashDir(dir, prefix string, hash Hash) (string, error) {
return hash(files, osOpen) return hash(files, osOpen)
} }
// DirFiles returns the list of files in the tree rooted at dir,
// replacing the directory name dir with prefix in each name.
// The resulting names always use forward slashes.
func DirFiles(dir, prefix string) ([]string, error) { func DirFiles(dir, prefix string) ([]string, error) {
var files []string var files []string
dir = filepath.Clean(dir) dir = filepath.Clean(dir)
...@@ -80,6 +105,10 @@ func DirFiles(dir, prefix string) ([]string, error) { ...@@ -80,6 +105,10 @@ func DirFiles(dir, prefix string) ([]string, error) {
return files, nil return files, nil
} }
// HashZip returns the hash of the file content in the named zip file.
// Only the file names and their contents are included in the hash:
// the exact zip file format encoding, compression method,
// per-file modification times, and other metadata are ignored.
func HashZip(zipfile string, hash Hash) (string, error) { func HashZip(zipfile string, hash Hash) (string, error) {
z, err := zip.OpenReader(zipfile) z, err := zip.OpenReader(zipfile)
if err != nil { if err != nil {
......
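As a side note on the dirhash documentation added above: the sketch
below is not part of the diff (the file names and contents are
invented, and it only compiles inside cmd/go since dirhash is an
internal package). It drives Hash1 directly with in-memory files,
which is the same shape of call that HashDir and HashZip make after
listing files on disk or in a zip archive:

    package main

    import (
        "fmt"
        "io"
        "io/ioutil"
        "strings"

        "cmd/go/internal/dirhash"
    )

    func main() {
        // Two made-up files, keyed by the names that will appear
        // in the hashed summary.
        files := map[string]string{
            "m@v1.0.0/a.go":   "package a\n",
            "m@v1.0.0/go.mod": "module m\n",
        }
        open := func(name string) (io.ReadCloser, error) {
            return ioutil.NopCloser(strings.NewReader(files[name])), nil
        }
        h, err := dirhash.Hash1([]string{"m@v1.0.0/go.mod", "m@v1.0.0/a.go"}, open)
        if err != nil {
            panic(err)
        }
        // Prints an "h1:"-prefixed base64 encoding of the SHA-256 of the
        // sorted per-file summary described in the Hash1 doc comment.
        fmt.Println(h)
    }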
...@@ -32,7 +32,7 @@ func cacheDir(path string) (string, error) { ...@@ -32,7 +32,7 @@ func cacheDir(path string) (string, error) {
if PkgMod == "" { if PkgMod == "" {
return "", fmt.Errorf("internal error: modfetch.PkgMod not set") return "", fmt.Errorf("internal error: modfetch.PkgMod not set")
} }
enc, err := module.EncodePath(path) enc, err := module.EscapePath(path)
if err != nil { if err != nil {
return "", err return "", err
} }
...@@ -50,7 +50,7 @@ func CachePath(m module.Version, suffix string) (string, error) { ...@@ -50,7 +50,7 @@ func CachePath(m module.Version, suffix string) (string, error) {
if module.CanonicalVersion(m.Version) != m.Version { if module.CanonicalVersion(m.Version) != m.Version {
return "", fmt.Errorf("non-canonical module version %q", m.Version) return "", fmt.Errorf("non-canonical module version %q", m.Version)
} }
encVer, err := module.EncodeVersion(m.Version) encVer, err := module.EscapeVersion(m.Version)
if err != nil { if err != nil {
return "", err return "", err
} }
...@@ -63,7 +63,7 @@ func DownloadDir(m module.Version) (string, error) { ...@@ -63,7 +63,7 @@ func DownloadDir(m module.Version) (string, error) {
if PkgMod == "" { if PkgMod == "" {
return "", fmt.Errorf("internal error: modfetch.PkgMod not set") return "", fmt.Errorf("internal error: modfetch.PkgMod not set")
} }
enc, err := module.EncodePath(m.Path) enc, err := module.EscapePath(m.Path)
if err != nil { if err != nil {
return "", err return "", err
} }
...@@ -73,7 +73,7 @@ func DownloadDir(m module.Version) (string, error) { ...@@ -73,7 +73,7 @@ func DownloadDir(m module.Version) (string, error) {
if module.CanonicalVersion(m.Version) != m.Version { if module.CanonicalVersion(m.Version) != m.Version {
return "", fmt.Errorf("non-canonical module version %q", m.Version) return "", fmt.Errorf("non-canonical module version %q", m.Version)
} }
encVer, err := module.EncodeVersion(m.Version) encVer, err := module.EscapeVersion(m.Version)
if err != nil { if err != nil {
return "", err return "", err
} }
......
...@@ -159,7 +159,7 @@ func (r *codeRepo) Versions(prefix string) ([]string, error) { ...@@ -159,7 +159,7 @@ func (r *codeRepo) Versions(prefix string) ([]string, error) {
if v == "" || v != module.CanonicalVersion(v) || IsPseudoVersion(v) { if v == "" || v != module.CanonicalVersion(v) || IsPseudoVersion(v) {
continue continue
} }
if err := module.MatchPathMajor(v, r.pathMajor); err != nil { if err := module.CheckPathMajor(v, r.pathMajor); err != nil {
if r.codeDir == "" && r.pathMajor == "" && semver.Major(v) > "v1" { if r.codeDir == "" && r.pathMajor == "" && semver.Major(v) > "v1" {
incompatible = append(incompatible, v) incompatible = append(incompatible, v)
} }
...@@ -293,7 +293,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e ...@@ -293,7 +293,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e
} }
} }
if err := module.MatchPathMajor(strings.TrimSuffix(info2.Version, "+incompatible"), r.pathMajor); err == nil { if err := module.CheckPathMajor(strings.TrimSuffix(info2.Version, "+incompatible"), r.pathMajor); err == nil {
return nil, invalidf("+incompatible suffix not allowed: major version %s is compatible", semver.Major(info2.Version)) return nil, invalidf("+incompatible suffix not allowed: major version %s is compatible", semver.Major(info2.Version))
} }
} }
...@@ -317,7 +317,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e ...@@ -317,7 +317,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e
return checkGoMod() return checkGoMod()
} }
if err := module.MatchPathMajor(info2.Version, r.pathMajor); err != nil { if err := module.CheckPathMajor(info2.Version, r.pathMajor); err != nil {
if canUseIncompatible() { if canUseIncompatible() {
info2.Version += "+incompatible" info2.Version += "+incompatible"
return checkGoMod() return checkGoMod()
...@@ -365,7 +365,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e ...@@ -365,7 +365,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e
tagIsCanonical = true tagIsCanonical = true
} }
if err := module.MatchPathMajor(v, r.pathMajor); err != nil { if err := module.CheckPathMajor(v, r.pathMajor); err != nil {
if canUseIncompatible() { if canUseIncompatible() {
return v + "+incompatible", tagIsCanonical return v + "+incompatible", tagIsCanonical
} }
...@@ -464,7 +464,7 @@ func (r *codeRepo) validatePseudoVersion(info *codehost.RevInfo, version string) ...@@ -464,7 +464,7 @@ func (r *codeRepo) validatePseudoVersion(info *codehost.RevInfo, version string)
} }
}() }()
if err := module.MatchPathMajor(version, r.pathMajor); err != nil { if err := module.CheckPathMajor(version, r.pathMajor); err != nil {
return err return err
} }
......
...@@ -212,7 +212,7 @@ func newProxyRepo(baseURL, path string) (Repo, error) { ...@@ -212,7 +212,7 @@ func newProxyRepo(baseURL, path string) (Repo, error) {
return nil, fmt.Errorf("invalid proxy URL scheme (must be https, http, file): %s", web.Redacted(base)) return nil, fmt.Errorf("invalid proxy URL scheme (must be https, http, file): %s", web.Redacted(base))
} }
enc, err := module.EncodePath(path) enc, err := module.EscapePath(path)
if err != nil { if err != nil {
return nil, err return nil, err
} }
...@@ -351,7 +351,7 @@ func (p *proxyRepo) latest() (*RevInfo, error) { ...@@ -351,7 +351,7 @@ func (p *proxyRepo) latest() (*RevInfo, error) {
} }
func (p *proxyRepo) Stat(rev string) (*RevInfo, error) { func (p *proxyRepo) Stat(rev string) (*RevInfo, error) {
encRev, err := module.EncodeVersion(rev) encRev, err := module.EscapeVersion(rev)
if err != nil { if err != nil {
return nil, p.versionError(rev, err) return nil, p.versionError(rev, err)
} }
...@@ -392,7 +392,7 @@ func (p *proxyRepo) GoMod(version string) ([]byte, error) { ...@@ -392,7 +392,7 @@ func (p *proxyRepo) GoMod(version string) ([]byte, error) {
return nil, p.versionError(version, fmt.Errorf("internal error: version passed to GoMod is not canonical")) return nil, p.versionError(version, fmt.Errorf("internal error: version passed to GoMod is not canonical"))
} }
encVer, err := module.EncodeVersion(version) encVer, err := module.EscapeVersion(version)
if err != nil { if err != nil {
return nil, p.versionError(version, err) return nil, p.versionError(version, err)
} }
...@@ -408,7 +408,7 @@ func (p *proxyRepo) Zip(dst io.Writer, version string) error { ...@@ -408,7 +408,7 @@ func (p *proxyRepo) Zip(dst io.Writer, version string) error {
return p.versionError(version, fmt.Errorf("internal error: version passed to Zip is not canonical")) return p.versionError(version, fmt.Errorf("internal error: version passed to Zip is not canonical"))
} }
encVer, err := module.EncodeVersion(version) encVer, err := module.EscapeVersion(version)
if err != nil { if err != nil {
return p.versionError(version, err) return p.versionError(version, err)
} }
......
...@@ -27,7 +27,7 @@ import ( ...@@ -27,7 +27,7 @@ import (
"cmd/go/internal/module" "cmd/go/internal/module"
"cmd/go/internal/note" "cmd/go/internal/note"
"cmd/go/internal/str" "cmd/go/internal/str"
"cmd/go/internal/sumweb" "cmd/go/internal/sumdb"
"cmd/go/internal/web" "cmd/go/internal/web"
) )
...@@ -52,11 +52,11 @@ func lookupSumDB(mod module.Version) (dbname string, lines []string, err error) ...@@ -52,11 +52,11 @@ func lookupSumDB(mod module.Version) (dbname string, lines []string, err error)
var ( var (
dbOnce sync.Once dbOnce sync.Once
dbName string dbName string
db *sumweb.Conn db *sumdb.Client
dbErr error dbErr error
) )
func dbDial() (dbName string, db *sumweb.Conn, err error) { func dbDial() (dbName string, db *sumdb.Client, err error) {
// $GOSUMDB can be "key" or "key url", // $GOSUMDB can be "key" or "key url",
// and the key can be a full verifier key // and the key can be a full verifier key
// or a host on our list of known keys. // or a host on our list of known keys.
...@@ -106,7 +106,7 @@ func dbDial() (dbName string, db *sumweb.Conn, err error) { ...@@ -106,7 +106,7 @@ func dbDial() (dbName string, db *sumweb.Conn, err error) {
base = u base = u
} }
return name, sumweb.NewConn(&dbClient{key: key[0], name: name, direct: direct, base: base}), nil return name, sumdb.NewClient(&dbClient{key: key[0], name: name, direct: direct, base: base}), nil
} }
type dbClient struct { type dbClient struct {
...@@ -227,7 +227,7 @@ func (*dbClient) WriteConfig(file string, old, new []byte) error { ...@@ -227,7 +227,7 @@ func (*dbClient) WriteConfig(file string, old, new []byte) error {
return err return err
} }
if len(data) > 0 && !bytes.Equal(data, old) { if len(data) > 0 && !bytes.Equal(data, old) {
return sumweb.ErrWriteConflict return sumdb.ErrWriteConflict
} }
if _, err := f.Seek(0, 0); err != nil { if _, err := f.Seek(0, 0); err != nil {
return err return err
......
...@@ -223,7 +223,7 @@ func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, f ...@@ -223,7 +223,7 @@ func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, f
fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err) fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
return return
} }
if err := module.MatchPathMajor(v, pathMajor); err != nil { if err := module.CheckPathMajor(v, pathMajor); err != nil {
fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, &Error{Verb: verb, ModPath: s, Err: err}) fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, &Error{Verb: verb, ModPath: s, Err: err})
return return
} }
...@@ -265,7 +265,7 @@ func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, f ...@@ -265,7 +265,7 @@ func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, f
fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err) fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
return return
} }
if err := module.MatchPathMajor(v, pathMajor); err != nil { if err := module.CheckPathMajor(v, pathMajor); err != nil {
fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, &Error{Verb: verb, ModPath: s, Err: err}) fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, &Error{Verb: verb, ModPath: s, Err: err})
return return
} }
......
...@@ -848,7 +848,7 @@ func fixVersion(path, vers string) (string, error) { ...@@ -848,7 +848,7 @@ func fixVersion(path, vers string) (string, error) {
} }
} }
if vers != "" && module.CanonicalVersion(vers) == vers { if vers != "" && module.CanonicalVersion(vers) == vers {
if err := module.MatchPathMajor(vers, pathMajor); err == nil { if err := module.CheckPathMajor(vers, pathMajor); err == nil {
return vers, nil return vers, nil
} }
} }
......
...@@ -238,43 +238,43 @@ func TestSplitPathVersion(t *testing.T) { ...@@ -238,43 +238,43 @@ func TestSplitPathVersion(t *testing.T) {
} }
} }
var encodeTests = []struct { var escapeTests = []struct {
path string path string
enc string // empty means same as path esc string // empty means same as path
}{ }{
{path: "ascii.com/abcdefghijklmnopqrstuvwxyz.-+/~_0123456789"}, {path: "ascii.com/abcdefghijklmnopqrstuvwxyz.-+/~_0123456789"},
{path: "github.com/GoogleCloudPlatform/omega", enc: "github.com/!google!cloud!platform/omega"}, {path: "github.com/GoogleCloudPlatform/omega", esc: "github.com/!google!cloud!platform/omega"},
} }
func TestEncodePath(t *testing.T) { func TestEscapePath(t *testing.T) {
// Check invalid paths. // Check invalid paths.
for _, tt := range checkPathTests { for _, tt := range checkPathTests {
if !tt.ok { if !tt.ok {
_, err := EncodePath(tt.path) _, err := EscapePath(tt.path)
if err == nil { if err == nil {
t.Errorf("EncodePath(%q): succeeded, want error (invalid path)", tt.path) t.Errorf("EscapePath(%q): succeeded, want error (invalid path)", tt.path)
} }
} }
} }
// Check encodings. // Check encodings.
for _, tt := range encodeTests { for _, tt := range escapeTests {
enc, err := EncodePath(tt.path) esc, err := EscapePath(tt.path)
if err != nil { if err != nil {
t.Errorf("EncodePath(%q): unexpected error: %v", tt.path, err) t.Errorf("EscapePath(%q): unexpected error: %v", tt.path, err)
continue continue
} }
want := tt.enc want := tt.esc
if want == "" { if want == "" {
want = tt.path want = tt.path
} }
if enc != want { if esc != want {
t.Errorf("EncodePath(%q) = %q, want %q", tt.path, enc, want) t.Errorf("EscapePath(%q) = %q, want %q", tt.path, esc, want)
} }
} }
} }
var badDecode = []string{ var badUnescape = []string{
"github.com/GoogleCloudPlatform/omega", "github.com/GoogleCloudPlatform/omega",
"github.com/!google!cloud!platform!/omega", "github.com/!google!cloud!platform!/omega",
"github.com/!0google!cloud!platform/omega", "github.com/!0google!cloud!platform/omega",
...@@ -283,38 +283,38 @@ var badDecode = []string{ ...@@ -283,38 +283,38 @@ var badDecode = []string{
"", "",
} }
func TestDecodePath(t *testing.T) { func TestUnescapePath(t *testing.T) {
// Check invalid decodings. // Check invalid decodings.
for _, bad := range badDecode { for _, bad := range badUnescape {
_, err := DecodePath(bad) _, err := UnescapePath(bad)
if err == nil { if err == nil {
t.Errorf("DecodePath(%q): succeeded, want error (invalid decoding)", bad) t.Errorf("UnescapePath(%q): succeeded, want error (invalid decoding)", bad)
} }
} }
// Check invalid paths (or maybe decodings). // Check invalid paths (or maybe decodings).
for _, tt := range checkPathTests { for _, tt := range checkPathTests {
if !tt.ok { if !tt.ok {
path, err := DecodePath(tt.path) path, err := UnescapePath(tt.path)
if err == nil { if err == nil {
t.Errorf("DecodePath(%q) = %q, want error (invalid path)", tt.path, path) t.Errorf("UnescapePath(%q) = %q, want error (invalid path)", tt.path, path)
} }
} }
} }
// Check encodings. // Check encodings.
for _, tt := range encodeTests { for _, tt := range escapeTests {
enc := tt.enc esc := tt.esc
if enc == "" { if esc == "" {
enc = tt.path esc = tt.path
} }
path, err := DecodePath(enc) path, err := UnescapePath(esc)
if err != nil { if err != nil {
t.Errorf("DecodePath(%q): unexpected error: %v", enc, err) t.Errorf("UnescapePath(%q): unexpected error: %v", esc, err)
continue continue
} }
if path != tt.path { if path != tt.path {
t.Errorf("DecodePath(%q) = %q, want %q", enc, path, tt.path) t.Errorf("UnescapePath(%q) = %q, want %q", esc, path, tt.path)
} }
} }
} }
...@@ -548,9 +548,6 @@ func Open(msg []byte, known Verifiers) (*Note, error) { ...@@ -548,9 +548,6 @@ func Open(msg []byte, known Verifiers) (*Note, error) {
Text: string(text), Text: string(text),
} }
var buf bytes.Buffer
buf.Write(text)
// Parse and verify signatures. // Parse and verify signatures.
// Ignore duplicate signatures. // Ignore duplicate signatures.
seen := make(map[nameHash]bool) seen := make(map[nameHash]bool)
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
// Parallel cache. // Parallel cache.
// This file is copied from cmd/go/internal/par. // This file is copied from cmd/go/internal/par.
package sumweb package sumdb
import ( import (
"sync" "sync"
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package sumweb package sumdb
import ( import (
"bytes" "bytes"
...@@ -21,7 +21,7 @@ const ( ...@@ -21,7 +21,7 @@ const (
testSignerKey = "PRIVATE+KEY+localhost.localdev/sumdb+00000c67+AXu6+oaVaOYuQOFrf1V59JK1owcFlJcHwwXHDfDGxSPk" testSignerKey = "PRIVATE+KEY+localhost.localdev/sumdb+00000c67+AXu6+oaVaOYuQOFrf1V59JK1owcFlJcHwwXHDfDGxSPk"
) )
func TestConnLookup(t *testing.T) { func TestClientLookup(t *testing.T) {
tc := newTestClient(t) tc := newTestClient(t)
tc.mustHaveLatest(1) tc.mustHaveLatest(1)
...@@ -49,7 +49,7 @@ func TestConnLookup(t *testing.T) { ...@@ -49,7 +49,7 @@ func TestConnLookup(t *testing.T) {
tc.mustHaveLatest(4) tc.mustHaveLatest(4)
} }
func TestConnBadTiles(t *testing.T) { func TestClientBadTiles(t *testing.T) {
tc := newTestClient(t) tc := newTestClient(t)
flipBits := func() { flipBits := func() {
...@@ -65,33 +65,33 @@ func TestConnBadTiles(t *testing.T) { ...@@ -65,33 +65,33 @@ func TestConnBadTiles(t *testing.T) {
// Bad tiles in initial download. // Bad tiles in initial download.
tc.mustHaveLatest(1) tc.mustHaveLatest(1)
flipBits() flipBits()
_, err := tc.conn.Lookup("rsc.io/sampler", "v1.3.0") _, err := tc.client.Lookup("rsc.io/sampler", "v1.3.0")
tc.mustError(err, "rsc.io/sampler@v1.3.0: initializing sumweb.Conn: checking tree#1: downloaded inconsistent tile") tc.mustError(err, "rsc.io/sampler@v1.3.0: initializing sumdb.Client: checking tree#1: downloaded inconsistent tile")
flipBits() flipBits()
tc.newConn() tc.newClient()
tc.mustLookup("rsc.io/sampler", "v1.3.0", "rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=") tc.mustLookup("rsc.io/sampler", "v1.3.0", "rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=")
// Bad tiles after initial download. // Bad tiles after initial download.
flipBits() flipBits()
_, err = tc.conn.Lookup("rsc.io/Quote", "v1.5.2") _, err = tc.client.Lookup("rsc.io/Quote", "v1.5.2")
tc.mustError(err, "rsc.io/Quote@v1.5.2: checking tree#3 against tree#4: downloaded inconsistent tile") tc.mustError(err, "rsc.io/Quote@v1.5.2: checking tree#3 against tree#4: downloaded inconsistent tile")
flipBits() flipBits()
tc.newConn() tc.newClient()
tc.mustLookup("rsc.io/Quote", "v1.5.2", "rsc.io/Quote v1.5.2 h1:uppercase!=") tc.mustLookup("rsc.io/Quote", "v1.5.2", "rsc.io/Quote v1.5.2 h1:uppercase!=")
// Bad starting tree hash looks like bad tiles. // Bad starting tree hash looks like bad tiles.
tc.newConn() tc.newClient()
text := tlog.FormatTree(tlog.Tree{N: 1, Hash: tlog.Hash{}}) text := tlog.FormatTree(tlog.Tree{N: 1, Hash: tlog.Hash{}})
data, err := note.Sign(&note.Note{Text: string(text)}, tc.signer) data, err := note.Sign(&note.Note{Text: string(text)}, tc.signer)
if err != nil { if err != nil {
tc.t.Fatal(err) tc.t.Fatal(err)
} }
tc.config[testName+"/latest"] = data tc.config[testName+"/latest"] = data
_, err = tc.conn.Lookup("rsc.io/sampler", "v1.3.0") _, err = tc.client.Lookup("rsc.io/sampler", "v1.3.0")
tc.mustError(err, "rsc.io/sampler@v1.3.0: initializing sumweb.Conn: checking tree#1: downloaded inconsistent tile") tc.mustError(err, "rsc.io/sampler@v1.3.0: initializing sumdb.Client: checking tree#1: downloaded inconsistent tile")
} }
func TestConnFork(t *testing.T) { func TestClientFork(t *testing.T) {
tc := newTestClient(t) tc := newTestClient(t)
tc2 := tc.fork() tc2 := tc.fork()
...@@ -109,7 +109,7 @@ func TestConnFork(t *testing.T) { ...@@ -109,7 +109,7 @@ func TestConnFork(t *testing.T) {
key := "/lookup/rsc.io/pkg1@v1.5.2" key := "/lookup/rsc.io/pkg1@v1.5.2"
tc2.remote[key] = tc.remote[key] tc2.remote[key] = tc.remote[key]
_, err := tc2.conn.Lookup("rsc.io/pkg1", "v1.5.2") _, err := tc2.client.Lookup("rsc.io/pkg1", "v1.5.2")
tc2.mustError(err, ErrSecurity.Error()) tc2.mustError(err, ErrSecurity.Error())
/* /*
...@@ -154,10 +154,10 @@ func TestConnFork(t *testing.T) { ...@@ -154,10 +154,10 @@ func TestConnFork(t *testing.T) {
} }
} }
func TestConnGONOSUMDB(t *testing.T) { func TestClientGONOSUMDB(t *testing.T) {
tc := newTestClient(t) tc := newTestClient(t)
tc.conn.SetGONOSUMDB("p,*/q") tc.client.SetGONOSUMDB("p,*/q")
tc.conn.Lookup("rsc.io/sampler", "v1.3.0") // initialize before we turn off network tc.client.Lookup("rsc.io/sampler", "v1.3.0") // initialize before we turn off network
tc.getOK = false tc.getOK = false
ok := []string{ ok := []string{
...@@ -175,13 +175,13 @@ func TestConnGONOSUMDB(t *testing.T) { ...@@ -175,13 +175,13 @@ func TestConnGONOSUMDB(t *testing.T) {
} }
for _, path := range ok { for _, path := range ok {
_, err := tc.conn.Lookup(path, "v1.0.0") _, err := tc.client.Lookup(path, "v1.0.0")
if err == ErrGONOSUMDB { if err == ErrGONOSUMDB {
t.Errorf("Lookup(%q): ErrGONOSUMDB, wanted failed actual lookup", path) t.Errorf("Lookup(%q): ErrGONOSUMDB, wanted failed actual lookup", path)
} }
} }
for _, path := range skip { for _, path := range skip {
_, err := tc.conn.Lookup(path, "v1.0.0") _, err := tc.client.Lookup(path, "v1.0.0")
if err != ErrGONOSUMDB { if err != ErrGONOSUMDB {
t.Errorf("Lookup(%q): %v, wanted ErrGONOSUMDB", path, err) t.Errorf("Lookup(%q): %v, wanted ErrGONOSUMDB", path, err)
} }
...@@ -191,7 +191,7 @@ func TestConnGONOSUMDB(t *testing.T) { ...@@ -191,7 +191,7 @@ func TestConnGONOSUMDB(t *testing.T) {
// A testClient is a self-contained client-side testing environment. // A testClient is a self-contained client-side testing environment.
type testClient struct { type testClient struct {
t *testing.T // active test t *testing.T // active test
conn *Conn // conn being tested client *Client // client being tested
tileHeight int // tile height to use (default 2) tileHeight int // tile height to use (default 2)
getOK bool // should tc.GetURL succeed? getOK bool // should tc.GetURL succeed?
getTileOK bool // should tc.GetURL of tiles succeed? getTileOK bool // should tc.GetURL of tiles succeed?
...@@ -202,12 +202,12 @@ type testClient struct { ...@@ -202,12 +202,12 @@ type testClient struct {
// mu protects config, cache, log, security // mu protects config, cache, log, security
// during concurrent use of the exported methods // during concurrent use of the exported methods
// by the conn itself (testClient is the Conn's Client, // by the client itself (testClient is the Client's ClientOps,
// and the Client methods can both read and write these fields). // and the Client methods can both read and write these fields).
// Unexported methods invoked directly by the test // Unexported methods invoked directly by the test
// (for example, addRecord) need not hold the mutex: // (for example, addRecord) need not hold the mutex:
// for proper test execution those methods should only // for proper test execution those methods should only
// be called when the Conn is idle and not using its Client. // be called when the Client is idle and not using its ClientOps.
// Not holding the mutex in those methods ensures // Not holding the mutex in those methods ensures
// that if a mistake is made, go test -race will report it. // that if a mistake is made, go test -race will report it.
// (Holding the mutex would eliminate the race report but // (Holding the mutex would eliminate the race report but
...@@ -240,7 +240,7 @@ func newTestClient(t *testing.T) *testClient { ...@@ -240,7 +240,7 @@ func newTestClient(t *testing.T) *testClient {
t.Fatal(err) t.Fatal(err)
} }
tc.newConn() tc.newClient()
tc.addRecord("rsc.io/quote@v1.5.2", `rsc.io/quote v1.5.2 h1:w5fcysjrx7yqtD/aO+QwRjYZOKnaM9Uh2b40tElTs3Y= tc.addRecord("rsc.io/quote@v1.5.2", `rsc.io/quote v1.5.2 h1:w5fcysjrx7yqtD/aO+QwRjYZOKnaM9Uh2b40tElTs3Y=
rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0= rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0=
...@@ -260,18 +260,18 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= ...@@ -260,18 +260,18 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
return tc return tc
} }
// newConn resets the Conn associated with tc. // newClient resets the Client associated with tc.
// This clears any in-memory cache from the Conn // This clears any in-memory cache from the Client
// but not tc's on-disk cache. // but not tc's on-disk cache.
func (tc *testClient) newConn() { func (tc *testClient) newClient() {
tc.conn = NewConn(tc) tc.client = NewClient(tc)
tc.conn.SetTileHeight(tc.tileHeight) tc.client.SetTileHeight(tc.tileHeight)
} }
// mustLookup does a lookup for path@vers and checks that the lines that come back match want. // mustLookup does a lookup for path@vers and checks that the lines that come back match want.
func (tc *testClient) mustLookup(path, vers, want string) { func (tc *testClient) mustLookup(path, vers, want string) {
tc.t.Helper() tc.t.Helper()
lines, err := tc.conn.Lookup(path, vers) lines, err := tc.client.Lookup(path, vers)
if err != nil { if err != nil {
tc.t.Fatal(err) tc.t.Fatal(err)
} }
...@@ -315,7 +315,7 @@ func (tc *testClient) fork() *testClient { ...@@ -315,7 +315,7 @@ func (tc *testClient) fork() *testClient {
cache: copyMap(tc.cache), cache: copyMap(tc.cache),
remote: copyMap(tc.remote), remote: copyMap(tc.remote),
} }
tc2.newConn() tc2.newClient()
return tc2 return tc2
} }
......
...@@ -2,8 +2,8 @@ ...@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// Package sumweb implements the HTTP protocols for serving or accessing a go.sum database. // Package sumdb implements the HTTP protocols for serving or accessing a module checksum database.
package sumweb package sumdb
import ( import (
"context" "context"
...@@ -12,48 +12,50 @@ import ( ...@@ -12,48 +12,50 @@ import (
"os" "os"
"strings" "strings"
"cmd/go/internal/module"
"cmd/go/internal/tlog" "cmd/go/internal/tlog"
) )
// A Server provides the external operations // A ServerOps provides the external operations
// (underlying database access and so on) // (underlying database access and so on) needed by the Server.
// needed to implement the HTTP server Handler. type ServerOps interface {
type Server interface {
// NewContext returns the context to use for the request r.
NewContext(r *http.Request) (context.Context, error)
// Signed returns the signed hash of the latest tree. // Signed returns the signed hash of the latest tree.
Signed(ctx context.Context) ([]byte, error) Signed(ctx context.Context) ([]byte, error)
// ReadRecords returns the content for the n records id through id+n-1. // ReadRecords returns the content for the n records id through id+n-1.
ReadRecords(ctx context.Context, id, n int64) ([][]byte, error) ReadRecords(ctx context.Context, id, n int64) ([][]byte, error)
// Lookup looks up a record by its associated key ("module@version"), // Lookup looks up a record for the given module,
// returning the record ID. // returning the record ID.
Lookup(ctx context.Context, key string) (int64, error) Lookup(ctx context.Context, m module.Version) (int64, error)
// ReadTileData reads the content of tile t. // ReadTileData reads the content of tile t.
// It is only invoked for hash tiles (t.L ≥ 0). // It is only invoked for hash tiles (t.L ≥ 0).
ReadTileData(ctx context.Context, t tlog.Tile) ([]byte, error) ReadTileData(ctx context.Context, t tlog.Tile) ([]byte, error)
} }
// A Handler is the go.sum database server handler, // A Server is the checksum database HTTP server,
// which should be invoked to serve the paths listed in Paths. // which implements http.Handler and should be invoked
// The calling code is responsible for initializing Server. // to serve the paths listed in ServerPaths.
type Handler struct { type Server struct {
Server Server ops ServerOps
}
// NewServer returns a new Server using the given operations.
func NewServer(ops ServerOps) *Server {
return &Server{ops: ops}
} }
// Paths are the URL paths for which Handler should be invoked. // ServerPaths are the URL paths the Server can (and should) serve.
// //
// Typically a server will do: // Typically a server will do:
// //
// handler := &sumweb.Handler{Server: srv} // srv := sumdb.NewServer(ops)
// for _, path := range sumweb.Paths { // for _, path := range sumdb.ServerPaths {
// http.HandleFunc(path, handler) // http.Handle(path, srv)
// } // }
// //
var Paths = []string{ var ServerPaths = []string{
"/lookup/", "/lookup/",
"/latest", "/latest",
"/tile/", "/tile/",
...@@ -61,12 +63,8 @@ var Paths = []string{ ...@@ -61,12 +63,8 @@ var Paths = []string{
var modVerRE = lazyregexp.New(`^[^@]+@v[0-9]+\.[0-9]+\.[0-9]+(-[^@]*)?(\+incompatible)?$`) var modVerRE = lazyregexp.New(`^[^@]+@v[0-9]+\.[0-9]+\.[0-9]+(-[^@]*)?(\+incompatible)?$`)
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx, err := h.Server.NewContext(r) ctx := r.Context()
if err != nil {
http.Error(w, err.Error(), 500)
return
}
switch { switch {
default: default:
...@@ -79,23 +77,23 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ...@@ -79,23 +77,23 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return return
} }
i := strings.Index(mod, "@") i := strings.Index(mod, "@")
encPath, encVers := mod[:i], mod[i+1:] escPath, escVers := mod[:i], mod[i+1:]
path, err := decodePath(encPath) path, err := module.UnescapePath(escPath)
if err != nil { if err != nil {
reportError(w, r, err) reportError(w, r, err)
return return
} }
vers, err := decodeVersion(encVers) vers, err := module.UnescapeVersion(escVers)
if err != nil { if err != nil {
reportError(w, r, err) reportError(w, r, err)
return return
} }
id, err := h.Server.Lookup(ctx, path+"@"+vers) id, err := s.ops.Lookup(ctx, module.Version{Path: path, Version: vers})
if err != nil { if err != nil {
reportError(w, r, err) reportError(w, r, err)
return return
} }
records, err := h.Server.ReadRecords(ctx, id, 1) records, err := s.ops.ReadRecords(ctx, id, 1)
if err != nil { if err != nil {
// This should never happen - the lookup says the record exists. // This should never happen - the lookup says the record exists.
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
...@@ -110,7 +108,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ...@@ -110,7 +108,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
return return
} }
signed, err := h.Server.Signed(ctx) signed, err := s.ops.Signed(ctx)
if err != nil { if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
return return
...@@ -120,7 +118,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ...@@ -120,7 +118,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Write(signed) w.Write(signed)
case r.URL.Path == "/latest": case r.URL.Path == "/latest":
data, err := h.Server.Signed(ctx) data, err := s.ops.Signed(ctx)
if err != nil { if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
return return
...@@ -137,7 +135,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ...@@ -137,7 +135,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if t.L == -1 { if t.L == -1 {
// Record data. // Record data.
start := t.N << uint(t.H) start := t.N << uint(t.H)
records, err := h.Server.ReadRecords(ctx, start, int64(t.W)) records, err := s.ops.ReadRecords(ctx, start, int64(t.W))
if err != nil { if err != nil {
reportError(w, r, err) reportError(w, r, err)
return return
...@@ -159,7 +157,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ...@@ -159,7 +157,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return return
} }
data, err := h.Server.ReadTileData(ctx, t) data, err := s.ops.ReadTileData(ctx, t)
if err != nil { if err != nil {
reportError(w, r, err) reportError(w, r, err)
return return
......
...@@ -2,22 +2,21 @@ ...@@ -2,22 +2,21 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package sumweb package sumdb
import ( import (
"context" "context"
"fmt" "fmt"
"net/http"
"strings"
"sync" "sync"
"cmd/go/internal/module"
"cmd/go/internal/note" "cmd/go/internal/note"
"cmd/go/internal/tlog" "cmd/go/internal/tlog"
) )
// NewTestServer constructs a new TestServer // NewTestServer constructs a new TestServer
// that will sign its tree with the given signer key // that will sign its tree with the given signer key
// (see cmd/go/internal/note) // (see golang.org/x/mod/sumdb/note)
// and fetch new records as needed by calling gosum. // and fetch new records as needed by calling gosum.
func NewTestServer(signer string, gosum func(path, vers string) ([]byte, error)) *TestServer { func NewTestServer(signer string, gosum func(path, vers string) ([]byte, error)) *TestServer {
return &TestServer{signer: signer, gosum: gosum} return &TestServer{signer: signer, gosum: gosum}
...@@ -45,10 +44,6 @@ func (h testHashes) ReadHashes(indexes []int64) ([]tlog.Hash, error) { ...@@ -45,10 +44,6 @@ func (h testHashes) ReadHashes(indexes []int64) ([]tlog.Hash, error) {
return list, nil return list, nil
} }
func (s *TestServer) NewContext(r *http.Request) (context.Context, error) {
return nil, nil
}
func (s *TestServer) Signed(ctx context.Context) ([]byte, error) { func (s *TestServer) Signed(ctx context.Context) ([]byte, error) {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
...@@ -80,7 +75,8 @@ func (s *TestServer) ReadRecords(ctx context.Context, id, n int64) ([][]byte, er ...@@ -80,7 +75,8 @@ func (s *TestServer) ReadRecords(ctx context.Context, id, n int64) ([][]byte, er
return list, nil return list, nil
} }
func (s *TestServer) Lookup(ctx context.Context, key string) (int64, error) { func (s *TestServer) Lookup(ctx context.Context, m module.Version) (int64, error) {
key := m.String()
s.mu.Lock() s.mu.Lock()
id, ok := s.lookup[key] id, ok := s.lookup[key]
s.mu.Unlock() s.mu.Unlock()
...@@ -89,12 +85,7 @@ func (s *TestServer) Lookup(ctx context.Context, key string) (int64, error) { ...@@ -89,12 +85,7 @@ func (s *TestServer) Lookup(ctx context.Context, key string) (int64, error) {
} }
// Look up module and compute go.sum lines. // Look up module and compute go.sum lines.
i := strings.Index(key, "@") data, err := s.gosum(m.Path, m.Version)
if i < 0 {
return 0, fmt.Errorf("invalid lookup key %q", key)
}
path, vers := key[:i], key[i+1:]
data, err := s.gosum(path, vers)
if err != nil { if err != nil {
return 0, err return 0, err
} }
......
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// FS-safe encoding of module paths and versions.
// Copied from cmd/go/internal/module and unexported.
package sumweb
import (
"fmt"
"unicode/utf8"
)
// Safe encodings
//
// Module paths appear as substrings of file system paths
// (in the download cache) and of web server URLs in the proxy protocol.
// In general we cannot rely on file systems to be case-sensitive,
// nor can we rely on web servers, since they read from file systems.
// That is, we cannot rely on the file system to keep rsc.io/QUOTE
// and rsc.io/quote separate. Windows and macOS don't.
// Instead, we must never require two different casings of a file path.
// Because we want the download cache to match the proxy protocol,
// and because we want the proxy protocol to be possible to serve
// from a tree of static files (which might be stored on a case-insensitive
// file system), the proxy protocol must never require two different casings
// of a URL path either.
//
// One possibility would be to make the safe encoding be the lowercase
// hexadecimal encoding of the actual path bytes. This would avoid ever
// needing different casings of a file path, but it would be fairly illegible
// to most programmers when those paths appeared in the file system
// (including in file paths in compiler errors and stack traces)
// in web server logs, and so on. Instead, we want a safe encoding that
// leaves most paths unaltered.
//
// The safe encoding is this:
// replace every uppercase letter with an exclamation mark
// followed by the letter's lowercase equivalent.
//
// For example,
// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
//
// Import paths that avoid upper-case letters are left unchanged.
// Note that because import paths are ASCII-only and avoid various
// problematic punctuation (like : < and >), the safe encoding is also ASCII-only
// and avoids the same problematic punctuation.
//
// Import paths have never allowed exclamation marks, so there is no
// need to define how to encode a literal !.
//
// Although paths are disallowed from using Unicode (see pathOK above),
// the eventual plan is to allow Unicode letters as well, to assume that
// file systems and URLs are Unicode-safe (storing UTF-8), and apply
// the !-for-uppercase convention. Note however that not all runes that
// are different but case-fold equivalent are an upper/lower pair.
// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
// are considered to case-fold to each other. When we do add Unicode
// letters, we must not assume that upper/lower are the only case-equivalent pairs.
// Perhaps the Kelvin symbol would be disallowed entirely, for example.
// Or perhaps it would encode as "!!k", or perhaps as "(212A)".
//
// Also, it would be nice to allow Unicode marks as well as letters,
// but marks include combining marks, and then we must deal not
// only with case folding but also normalization: both U+00E9 ('é')
// and U+0065 U+0301 ('e' followed by combining acute accent)
// look the same on the page and are treated by some file systems
// as the same path. If we do allow Unicode marks in paths, there
// must be some kind of normalization to allow only one canonical
// encoding of any character used in an import path.
// encodePath returns the safe encoding of the given module path.
// It fails if the module path is invalid.
func encodePath(path string) (encoding string, err error) {
return encodeString(path)
}
// encodeVersion returns the safe encoding of the given module version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func encodeVersion(v string) (encoding string, err error) {
return encodeString(v)
}
func encodeString(s string) (encoding string, err error) {
haveUpper := false
for _, r := range s {
if r == '!' || r >= utf8.RuneSelf {
// This should be disallowed by CheckPath, but diagnose anyway.
// The correctness of the encoding loop below depends on it.
return "", fmt.Errorf("internal error: inconsistency in EncodePath")
}
if 'A' <= r && r <= 'Z' {
haveUpper = true
}
}
if !haveUpper {
return s, nil
}
var buf []byte
for _, r := range s {
if 'A' <= r && r <= 'Z' {
buf = append(buf, '!', byte(r+'a'-'A'))
} else {
buf = append(buf, byte(r))
}
}
return string(buf), nil
}
// decodePath returns the module path of the given safe encoding.
// It fails if the encoding is invalid or encodes an invalid path.
func decodePath(encoding string) (path string, err error) {
path, ok := decodeString(encoding)
if !ok {
return "", fmt.Errorf("invalid module path encoding %q", encoding)
}
return path, nil
}
// decodeVersion returns the version string for the given safe encoding.
// It fails if the encoding is invalid or encodes an invalid version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func decodeVersion(encoding string) (v string, err error) {
v, ok := decodeString(encoding)
if !ok {
return "", fmt.Errorf("invalid version encoding %q", encoding)
}
return v, nil
}
func decodeString(encoding string) (string, bool) {
var buf []byte
bang := false
for _, r := range encoding {
if r >= utf8.RuneSelf {
return "", false
}
if bang {
bang = false
if r < 'a' || 'z' < r {
return "", false
}
buf = append(buf, byte(r+'A'-'a'))
continue
}
if r == '!' {
bang = true
continue
}
if 'A' <= r && r <= 'Z' {
return "", false
}
buf = append(buf, byte(r))
}
if bang {
return "", false
}
return string(buf), true
}
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sumweb
import "testing"
var encodeTests = []struct {
path string
enc string // empty means same as path
}{
{path: "ascii.com/abcdefghijklmnopqrstuvwxyz.-+/~_0123456789"},
{path: "github.com/GoogleCloudPlatform/omega", enc: "github.com/!google!cloud!platform/omega"},
}
func TestEncodePath(t *testing.T) {
// Check encodings.
for _, tt := range encodeTests {
enc, err := encodePath(tt.path)
if err != nil {
t.Errorf("encodePath(%q): unexpected error: %v", tt.path, err)
continue
}
want := tt.enc
if want == "" {
want = tt.path
}
if enc != want {
t.Errorf("encodePath(%q) = %q, want %q", tt.path, enc, want)
}
}
}
var badDecode = []string{
"github.com/GoogleCloudPlatform/omega",
"github.com/!google!cloud!platform!/omega",
"github.com/!0google!cloud!platform/omega",
"github.com/!_google!cloud!platform/omega",
"github.com/!!google!cloud!platform/omega",
}
func TestDecodePath(t *testing.T) {
// Check invalid decodings.
for _, bad := range badDecode {
_, err := decodePath(bad)
if err == nil {
t.Errorf("DecodePath(%q): succeeded, want error (invalid decoding)", bad)
}
}
// Check encodings.
for _, tt := range encodeTests {
enc := tt.enc
if enc == "" {
enc = tt.path
}
path, err := decodePath(enc)
if err != nil {
t.Errorf("decodePath(%q): unexpected error: %v", enc, err)
continue
}
if path != tt.path {
t.Errorf("decodePath(%q) = %q, want %q", enc, path, tt.path)
}
}
}
...@@ -41,7 +41,7 @@ func FormatTree(tree Tree) []byte { ...@@ -41,7 +41,7 @@ func FormatTree(tree Tree) []byte {
var errMalformedTree = errors.New("malformed tree note") var errMalformedTree = errors.New("malformed tree note")
var treePrefix = []byte("go.sum database tree\n") var treePrefix = []byte("go.sum database tree\n")
// ParseTree parses a tree root description. // ParseTree parses a formatted tree root description.
func ParseTree(text []byte) (tree Tree, err error) { func ParseTree(text []byte) (tree Tree, err error) {
// The message looks like: // The message looks like:
// //
......
...@@ -33,6 +33,9 @@ import ( ...@@ -33,6 +33,9 @@ import (
// The special level L=-1 holds raw record data instead of hashes. // The special level L=-1 holds raw record data instead of hashes.
// In this case, the level encodes into a tile path as the path element // In this case, the level encodes into a tile path as the path element
// "data" instead of "-1". // "data" instead of "-1".
//
// See also https://golang.org/design/25530-sumdb#checksum-database
// and https://research.swtch.com/tlog#tiling_a_log.
type Tile struct { type Tile struct {
H int // height of tile (1 ≤ H ≤ 30) H int // height of tile (1 ≤ H ≤ 30)
L int // level in tiling (-1 ≤ L ≤ 63) L int // level in tiling (-1 ≤ L ≤ 63)
...@@ -40,11 +43,13 @@ type Tile struct { ...@@ -40,11 +43,13 @@ type Tile struct {
W int // width of tile (1 ≤ W ≤ 2**H; 2**H is complete tile) W int // width of tile (1 ≤ W ≤ 2**H; 2**H is complete tile)
} }
// TileForIndex returns the tile of height h ≥ 1 // TileForIndex returns the tile of fixed height h ≥ 1
// and least width storing the given hash storage index. // and least width storing the given hash storage index.
//
// If h ≤ 0, TileForIndex panics.
func TileForIndex(h int, index int64) Tile { func TileForIndex(h int, index int64) Tile {
if h < 1 { if h <= 0 {
panic("TileForIndex: invalid height") panic(fmt.Sprintf("TileForIndex: invalid height %d", h))
} }
t, _, _ := tileForIndex(h, index) t, _, _ := tileForIndex(h, index)
return t return t
...@@ -99,8 +104,10 @@ func tileHash(data []byte) Hash { ...@@ -99,8 +104,10 @@ func tileHash(data []byte) Hash {
// that must be published when publishing from a tree of // that must be published when publishing from a tree of
// size newTreeSize to replace a tree of size oldTreeSize. // size newTreeSize to replace a tree of size oldTreeSize.
// (No tiles need to be published for a tree of size zero.) // (No tiles need to be published for a tree of size zero.)
//
// If h ≤ 0, NewTiles panics.
func NewTiles(h int, oldTreeSize, newTreeSize int64) []Tile { func NewTiles(h int, oldTreeSize, newTreeSize int64) []Tile {
if h < 1 { if h <= 0 {
panic(fmt.Sprintf("NewTiles: invalid height %d", h)) panic(fmt.Sprintf("NewTiles: invalid height %d", h))
} }
H := uint(h) H := uint(h)
...@@ -244,6 +251,16 @@ type TileReader interface { ...@@ -244,6 +251,16 @@ type TileReader interface {
// a data record for each tile (len(data) == len(tiles)) // a data record for each tile (len(data) == len(tiles))
// and each data record must be the correct length // and each data record must be the correct length
// (len(data[i]) == tiles[i].W*HashSize). // (len(data[i]) == tiles[i].W*HashSize).
//
// An implementation of ReadTiles typically reads
// them from an on-disk cache or else from a remote
// tile server. Tile data downloaded from a server should
// be considered suspect and not saved into a persistent
// on-disk cache before returning from ReadTiles.
// When the client confirms the validity of the tile data,
// it will call SaveTiles to signal that they can be safely
// written to persistent storage.
// See also https://research.swtch.com/tlog#authenticating_tiles.
ReadTiles(tiles []Tile) (data [][]byte, err error) ReadTiles(tiles []Tile) (data [][]byte, err error)
// SaveTiles informs the TileReader that the tile data // SaveTiles informs the TileReader that the tile data
......
...@@ -5,9 +5,6 @@ ...@@ -5,9 +5,6 @@
// Package tlog implements a tamper-evident log // Package tlog implements a tamper-evident log
// used in the Go module go.sum database server. // used in the Go module go.sum database server.
// //
// This package is part of a DRAFT of what the go.sum database server will look like.
// Do not assume the details here are final!
//
// This package follows the design of Certificate Transparency (RFC 6962) // This package follows the design of Certificate Transparency (RFC 6962)
// and its proofs are compatible with that system. // and its proofs are compatible with that system.
// See TestCertificateTransparency. // See TestCertificateTransparency.
......
...@@ -29,7 +29,7 @@ import ( ...@@ -29,7 +29,7 @@ import (
"cmd/go/internal/module" "cmd/go/internal/module"
"cmd/go/internal/par" "cmd/go/internal/par"
"cmd/go/internal/semver" "cmd/go/internal/semver"
"cmd/go/internal/sumweb" "cmd/go/internal/sumdb"
"cmd/go/internal/txtar" "cmd/go/internal/txtar"
) )
...@@ -65,7 +65,7 @@ func StartProxy() { ...@@ -65,7 +65,7 @@ func StartProxy() {
// Prepopulate main sumdb. // Prepopulate main sumdb.
for _, mod := range modList { for _, mod := range modList {
sumdbHandler.Server.Lookup(nil, mod.Path+"@"+mod.Version) sumdbOps.Lookup(nil, mod)
} }
}) })
} }
...@@ -88,7 +88,7 @@ func readModList() { ...@@ -88,7 +88,7 @@ func readModList() {
continue continue
} }
encPath := strings.ReplaceAll(name[:i], "_", "/") encPath := strings.ReplaceAll(name[:i], "_", "/")
path, err := module.DecodePath(encPath) path, err := module.UnescapePath(encPath)
if err != nil { if err != nil {
if encPath != "example.com/invalidpath/v1" { if encPath != "example.com/invalidpath/v1" {
fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err) fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err)
...@@ -96,7 +96,7 @@ func readModList() { ...@@ -96,7 +96,7 @@ func readModList() {
continue continue
} }
encVers := name[i+1:] encVers := name[i+1:]
vers, err := module.DecodeVersion(encVers) vers, err := module.UnescapeVersion(encVers)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err) fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err)
continue continue
...@@ -113,8 +113,13 @@ const ( ...@@ -113,8 +113,13 @@ const (
testSumDBSignerKey = "PRIVATE+KEY+localhost.localdev/sumdb+00000c67+AXu6+oaVaOYuQOFrf1V59JK1owcFlJcHwwXHDfDGxSPk" testSumDBSignerKey = "PRIVATE+KEY+localhost.localdev/sumdb+00000c67+AXu6+oaVaOYuQOFrf1V59JK1owcFlJcHwwXHDfDGxSPk"
) )
var sumdbHandler = &sumweb.Handler{Server: sumweb.NewTestServer(testSumDBSignerKey, proxyGoSum)} var (
var sumdbWrongHandler = &sumweb.Handler{Server: sumweb.NewTestServer(testSumDBSignerKey, proxyGoSumWrong)} sumdbOps = sumdb.NewTestServer(testSumDBSignerKey, proxyGoSum)
sumdbServer = sumdb.NewServer(sumdbOps)
sumdbWrongOps = sumdb.NewTestServer(testSumDBSignerKey, proxyGoSumWrong)
sumdbWrongServer = sumdb.NewServer(sumdbWrongOps)
)
// proxyHandler serves the Go module proxy protocol. // proxyHandler serves the Go module proxy protocol.
// See the proxy section of https://research.swtch.com/vgo-module. // See the proxy section of https://research.swtch.com/vgo-module.
...@@ -155,7 +160,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) { ...@@ -155,7 +160,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
// (Client thinks it is talking directly to a sumdb.) // (Client thinks it is talking directly to a sumdb.)
if strings.HasPrefix(path, "sumdb-direct/") { if strings.HasPrefix(path, "sumdb-direct/") {
r.URL.Path = path[len("sumdb-direct"):] r.URL.Path = path[len("sumdb-direct"):]
sumdbHandler.ServeHTTP(w, r) sumdbServer.ServeHTTP(w, r)
return return
} }
...@@ -164,7 +169,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) { ...@@ -164,7 +169,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
// (Client thinks it is talking directly to a sumdb.) // (Client thinks it is talking directly to a sumdb.)
if strings.HasPrefix(path, "sumdb-wrong/") { if strings.HasPrefix(path, "sumdb-wrong/") {
r.URL.Path = path[len("sumdb-wrong"):] r.URL.Path = path[len("sumdb-wrong"):]
sumdbWrongHandler.ServeHTTP(w, r) sumdbWrongServer.ServeHTTP(w, r)
return return
} }
...@@ -178,7 +183,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) { ...@@ -178,7 +183,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
// Request for $GOPROXY/sumdb/<name>/... goes to sumdb. // Request for $GOPROXY/sumdb/<name>/... goes to sumdb.
if sumdbPrefix := "sumdb/" + testSumDBName + "/"; strings.HasPrefix(path, sumdbPrefix) { if sumdbPrefix := "sumdb/" + testSumDBName + "/"; strings.HasPrefix(path, sumdbPrefix) {
r.URL.Path = path[len(sumdbPrefix)-1:] r.URL.Path = path[len(sumdbPrefix)-1:]
sumdbHandler.ServeHTTP(w, r) sumdbServer.ServeHTTP(w, r)
return return
} }
...@@ -187,7 +192,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) { ...@@ -187,7 +192,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
// latest version, including pseudo-versions. // latest version, including pseudo-versions.
if i := strings.LastIndex(path, "/@latest"); i >= 0 { if i := strings.LastIndex(path, "/@latest"); i >= 0 {
enc := path[:i] enc := path[:i]
modPath, err := module.DecodePath(enc) modPath, err := module.UnescapePath(enc)
if err != nil { if err != nil {
if !quiet { if !quiet {
fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err) fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err)
...@@ -225,7 +230,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) { ...@@ -225,7 +230,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
return return
} }
encVers, err := module.EncodeVersion(latest) encVers, err := module.EscapeVersion(latest)
if err != nil { if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
return return
...@@ -240,7 +245,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) { ...@@ -240,7 +245,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
return return
} }
enc, file := path[:i], path[i+len("/@v/"):] enc, file := path[:i], path[i+len("/@v/"):]
path, err := module.DecodePath(enc) path, err := module.UnescapePath(enc)
if err != nil { if err != nil {
if !quiet { if !quiet {
fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err) fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err)
...@@ -276,7 +281,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) { ...@@ -276,7 +281,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
return return
} }
encVers, ext := file[:i], file[i+1:] encVers, ext := file[:i], file[i+1:]
vers, err := module.DecodeVersion(encVers) vers, err := module.UnescapeVersion(encVers)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err) fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err)
http.NotFound(w, r) http.NotFound(w, r)
...@@ -397,11 +402,11 @@ var archiveCache par.Cache ...@@ -397,11 +402,11 @@ var archiveCache par.Cache
var cmdGoDir, _ = os.Getwd() var cmdGoDir, _ = os.Getwd()
func readArchive(path, vers string) (*txtar.Archive, error) { func readArchive(path, vers string) (*txtar.Archive, error) {
enc, err := module.EncodePath(path) enc, err := module.EscapePath(path)
if err != nil { if err != nil {
return nil, err return nil, err
} }
encVers, err := module.EncodeVersion(vers) encVers, err := module.EscapeVersion(vers)
if err != nil { if err != nil {
return nil, err return nil, err
} }
......