Commit 78d67164 authored by Jay Conrod

cmd/go: integrate changes made in x/mod packages into internal packages

This change integrates changes made to x/mod packages into our internal
copies of those packages.

This is the first step of a bidirectional synchronization. A follow-up
change will copy changes made to the internal packages after x/mod was
forked. After that, we can vendor x/mod, update imports, and delete
the internal copies.

The following packages are affected:

* internal/module
* internal/semver (no change)
* internal/sumweb (renamed to internal/sumdb)
* internal/dirhash
* internal/note
* internal/tlog

Several integrated changes affect other packages:

* cmd/go/internal/module.MatchPathMajor now wraps a new function,
  CheckPathMajor, which returns an error; MatchPathMajor itself now
  returns a bool. This avoids an incompatible API change in the next
  step (see the sketch after this list).
* module.EncodePath renamed to EscapePath, EncodeVersion to
  EscapeVersion, DecodePath to UnescapePath, DecodeVersion to
  UnescapeVersion.
* cmd/go/internal/sumweb moved to cmd/go/internal/sumdb and package
  renamed to sumdb.
* sumdb.Client renamed to ClientOps, Conn to Client, Server to
  ServerOps, Paths to ServerPaths.
* sumdb/encode.go and encode_test.go are not present in x/mod since
  they are redundant with functionality in module. Both files are
  deleted.
* sumdb.TestServer doesn't implement sumdb.ServerOps after changes
  were made to golang.org/x/mod/sumdb.ServerOps during the fork.
  Local changes were made so tests will pass. These will be copied to
  x/mod in the next step.
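
For illustration, a minimal sketch of that wrapper relationship (this
mirrors the intent of the change, not the exact internal source; see
the module.go hunks below for the real code):

    // CheckPathMajor reports, as an error, why the semantic version v does
    // not match the path major version pathMajor; nil means it matches.
    // MatchPathMajor keeps the old boolean signature by wrapping it.
    func MatchPathMajor(v, pathMajor string) bool {
        return CheckPathMajor(v, pathMajor) == nil
    }

Callers inside cmd/go that previously wrote
"if err := module.MatchPathMajor(v, pathMajor); err != nil" now call
module.CheckPathMajor directly, as the hunks below show.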

Updates #34801

Change-Id: I7e820f10ae0cdbec238e59d039e978fd1cdc7201
Reviewed-on: https://go-review.googlesource.com/c/go/+/200138
Run-TryBot: Jay Conrod <jayconrod@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Bryan C. Mills <bcmills@google.com>
parent 2686e749
...@@ -3,6 +3,8 @@ ...@@ -3,6 +3,8 @@
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// Package dirhash defines hashes over directory trees. // Package dirhash defines hashes over directory trees.
// These hashes are recorded in go.sum files and in the Go checksum database,
// to allow verifying that a newly-downloaded module has the expected content.
package dirhash package dirhash
import ( import (
...@@ -18,17 +20,34 @@ import ( ...@@ -18,17 +20,34 @@ import (
"strings" "strings"
) )
var DefaultHash = Hash1 // DefaultHash is the default hash function used in new go.sum entries.
var DefaultHash Hash = Hash1
// A Hash is a directory hash function.
// It accepts a list of files along with a function that opens the content of each file.
// It opens, reads, hashes, and closes each file and returns the overall directory hash.
type Hash func(files []string, open func(string) (io.ReadCloser, error)) (string, error) type Hash func(files []string, open func(string) (io.ReadCloser, error)) (string, error)
// Hash1 is the "h1:" directory hash function, using SHA-256.
//
// Hash1 is "h1:" followed by the base64-encoded SHA-256 hash of a summary
// prepared as if by the Unix command:
//
// find . -type f | sort | sha256sum
//
// More precisely, the hashed summary contains a single line for each file in the list,
// ordered by sort.Strings applied to the file names, where each line consists of
// the hexadecimal SHA-256 hash of the file content,
// two spaces (U+0020), the file name, and a newline (U+000A).
//
// File names with newlines (U+000A) are disallowed.
func Hash1(files []string, open func(string) (io.ReadCloser, error)) (string, error) { func Hash1(files []string, open func(string) (io.ReadCloser, error)) (string, error) {
h := sha256.New() h := sha256.New()
files = append([]string(nil), files...) files = append([]string(nil), files...)
sort.Strings(files) sort.Strings(files)
for _, file := range files { for _, file := range files {
if strings.Contains(file, "\n") { if strings.Contains(file, "\n") {
return "", errors.New("filenames with newlines are not supported") return "", errors.New("dirhash: filenames with newlines are not supported")
} }
r, err := open(file) r, err := open(file)
if err != nil { if err != nil {
...@@ -45,6 +64,9 @@ func Hash1(files []string, open func(string) (io.ReadCloser, error)) (string, er ...@@ -45,6 +64,9 @@ func Hash1(files []string, open func(string) (io.ReadCloser, error)) (string, er
return "h1:" + base64.StdEncoding.EncodeToString(h.Sum(nil)), nil return "h1:" + base64.StdEncoding.EncodeToString(h.Sum(nil)), nil
} }
// HashDir returns the hash of the local file system directory dir,
// replacing the directory name itself with prefix in the file names
// used in the hash function.
func HashDir(dir, prefix string, hash Hash) (string, error) { func HashDir(dir, prefix string, hash Hash) (string, error) {
files, err := DirFiles(dir, prefix) files, err := DirFiles(dir, prefix)
if err != nil { if err != nil {
...@@ -56,6 +78,9 @@ func HashDir(dir, prefix string, hash Hash) (string, error) { ...@@ -56,6 +78,9 @@ func HashDir(dir, prefix string, hash Hash) (string, error) {
return hash(files, osOpen) return hash(files, osOpen)
} }
// DirFiles returns the list of files in the tree rooted at dir,
// replacing the directory name dir with prefix in each name.
// The resulting names always use forward slashes.
func DirFiles(dir, prefix string) ([]string, error) { func DirFiles(dir, prefix string) ([]string, error) {
var files []string var files []string
dir = filepath.Clean(dir) dir = filepath.Clean(dir)
...@@ -80,6 +105,10 @@ func DirFiles(dir, prefix string) ([]string, error) { ...@@ -80,6 +105,10 @@ func DirFiles(dir, prefix string) ([]string, error) {
return files, nil return files, nil
} }
// HashZip returns the hash of the file content in the named zip file.
// Only the file names and their contents are included in the hash:
// the exact zip file format encoding, compression method,
// per-file modification times, and other metadata are ignored.
func HashZip(zipfile string, hash Hash) (string, error) { func HashZip(zipfile string, hash Hash) (string, error) {
z, err := zip.OpenReader(zipfile) z, err := zip.OpenReader(zipfile)
if err != nil { if err != nil {
......
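
For illustration of the Hash1 specification documented above, a minimal,
self-contained sketch that hashes two in-memory files. It uses the exported
golang.org/x/mod/sumdb/dirhash package (the eventual public home of this
code once x/mod is vendored); the file names and contents are made up.

    package main

    import (
        "fmt"
        "io"
        "strings"

        "golang.org/x/mod/sumdb/dirhash"
    )

    func main() {
        // Hypothetical module contents, keyed by the names that go into the hash.
        files := map[string]string{
            "example.com/m@v1.0.0/go.mod":  "module example.com/m\n",
            "example.com/m@v1.0.0/main.go": "package main\n",
        }
        names := []string{"example.com/m@v1.0.0/go.mod", "example.com/m@v1.0.0/main.go"}

        // open returns a reader for one of the in-memory files.
        open := func(name string) (io.ReadCloser, error) {
            return io.NopCloser(strings.NewReader(files[name])), nil
        }

        // Hash1 sorts the names, writes "SHA256(content)  name\n" per file,
        // and returns "h1:" plus the base64 SHA-256 of that summary.
        h, err := dirhash.Hash1(names, open)
        if err != nil {
            panic(err)
        }
        fmt.Println(h) // h1:...
    }
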
...@@ -32,7 +32,7 @@ func cacheDir(path string) (string, error) { ...@@ -32,7 +32,7 @@ func cacheDir(path string) (string, error) {
if PkgMod == "" { if PkgMod == "" {
return "", fmt.Errorf("internal error: modfetch.PkgMod not set") return "", fmt.Errorf("internal error: modfetch.PkgMod not set")
} }
enc, err := module.EncodePath(path) enc, err := module.EscapePath(path)
if err != nil { if err != nil {
return "", err return "", err
} }
...@@ -50,7 +50,7 @@ func CachePath(m module.Version, suffix string) (string, error) { ...@@ -50,7 +50,7 @@ func CachePath(m module.Version, suffix string) (string, error) {
if module.CanonicalVersion(m.Version) != m.Version { if module.CanonicalVersion(m.Version) != m.Version {
return "", fmt.Errorf("non-canonical module version %q", m.Version) return "", fmt.Errorf("non-canonical module version %q", m.Version)
} }
encVer, err := module.EncodeVersion(m.Version) encVer, err := module.EscapeVersion(m.Version)
if err != nil { if err != nil {
return "", err return "", err
} }
...@@ -63,7 +63,7 @@ func DownloadDir(m module.Version) (string, error) { ...@@ -63,7 +63,7 @@ func DownloadDir(m module.Version) (string, error) {
if PkgMod == "" { if PkgMod == "" {
return "", fmt.Errorf("internal error: modfetch.PkgMod not set") return "", fmt.Errorf("internal error: modfetch.PkgMod not set")
} }
enc, err := module.EncodePath(m.Path) enc, err := module.EscapePath(m.Path)
if err != nil { if err != nil {
return "", err return "", err
} }
...@@ -73,7 +73,7 @@ func DownloadDir(m module.Version) (string, error) { ...@@ -73,7 +73,7 @@ func DownloadDir(m module.Version) (string, error) {
if module.CanonicalVersion(m.Version) != m.Version { if module.CanonicalVersion(m.Version) != m.Version {
return "", fmt.Errorf("non-canonical module version %q", m.Version) return "", fmt.Errorf("non-canonical module version %q", m.Version)
} }
encVer, err := module.EncodeVersion(m.Version) encVer, err := module.EscapeVersion(m.Version)
if err != nil { if err != nil {
return "", err return "", err
} }
......
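
The rename from EncodePath/EncodeVersion to EscapePath/EscapeVersion does not
change behavior; the escaped forms are what make up the module cache layout
used above. A minimal sketch using the exported golang.org/x/mod/module
package (the module path and version are arbitrary examples):

    package main

    import (
        "fmt"
        "path/filepath"

        "golang.org/x/mod/module"
    )

    func main() {
        // Errors are ignored here for brevity; both inputs are valid.
        escPath, _ := module.EscapePath("github.com/Azure/azure-sdk-for-go")
        escVer, _ := module.EscapeVersion("v1.2.3")

        // Roughly how modfetch composes cache locations from the escaped forms;
        // "pkgmod" stands in for the real $GOPATH/pkg/mod directory.
        fmt.Println(filepath.Join("pkgmod", "cache/download", escPath, "@v", escVer+".zip"))
        // pkgmod/cache/download/github.com/!azure/azure-sdk-for-go/@v/v1.2.3.zip
    }
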
...@@ -159,7 +159,7 @@ func (r *codeRepo) Versions(prefix string) ([]string, error) { ...@@ -159,7 +159,7 @@ func (r *codeRepo) Versions(prefix string) ([]string, error) {
if v == "" || v != module.CanonicalVersion(v) || IsPseudoVersion(v) { if v == "" || v != module.CanonicalVersion(v) || IsPseudoVersion(v) {
continue continue
} }
if err := module.MatchPathMajor(v, r.pathMajor); err != nil { if err := module.CheckPathMajor(v, r.pathMajor); err != nil {
if r.codeDir == "" && r.pathMajor == "" && semver.Major(v) > "v1" { if r.codeDir == "" && r.pathMajor == "" && semver.Major(v) > "v1" {
incompatible = append(incompatible, v) incompatible = append(incompatible, v)
} }
...@@ -293,7 +293,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e ...@@ -293,7 +293,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e
} }
} }
if err := module.MatchPathMajor(strings.TrimSuffix(info2.Version, "+incompatible"), r.pathMajor); err == nil { if err := module.CheckPathMajor(strings.TrimSuffix(info2.Version, "+incompatible"), r.pathMajor); err == nil {
return nil, invalidf("+incompatible suffix not allowed: major version %s is compatible", semver.Major(info2.Version)) return nil, invalidf("+incompatible suffix not allowed: major version %s is compatible", semver.Major(info2.Version))
} }
} }
...@@ -317,7 +317,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e ...@@ -317,7 +317,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e
return checkGoMod() return checkGoMod()
} }
if err := module.MatchPathMajor(info2.Version, r.pathMajor); err != nil { if err := module.CheckPathMajor(info2.Version, r.pathMajor); err != nil {
if canUseIncompatible() { if canUseIncompatible() {
info2.Version += "+incompatible" info2.Version += "+incompatible"
return checkGoMod() return checkGoMod()
...@@ -365,7 +365,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e ...@@ -365,7 +365,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e
tagIsCanonical = true tagIsCanonical = true
} }
if err := module.MatchPathMajor(v, r.pathMajor); err != nil { if err := module.CheckPathMajor(v, r.pathMajor); err != nil {
if canUseIncompatible() { if canUseIncompatible() {
return v + "+incompatible", tagIsCanonical return v + "+incompatible", tagIsCanonical
} }
...@@ -464,7 +464,7 @@ func (r *codeRepo) validatePseudoVersion(info *codehost.RevInfo, version string) ...@@ -464,7 +464,7 @@ func (r *codeRepo) validatePseudoVersion(info *codehost.RevInfo, version string)
} }
}() }()
if err := module.MatchPathMajor(version, r.pathMajor); err != nil { if err := module.CheckPathMajor(version, r.pathMajor); err != nil {
return err return err
} }
......
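
For reference, a quick sketch of what the CheckPathMajor calls above accept
and reject, using the exported golang.org/x/mod/module package (the versions
and suffixes are illustrative):

    package main

    import (
        "fmt"

        "golang.org/x/mod/module"
    )

    func main() {
        fmt.Println(module.CheckPathMajor("v1.5.0", ""))    // <nil>: v0/v1 need no suffix
        fmt.Println(module.CheckPathMajor("v2.0.0", ""))    // non-nil: v2 needs a /v2 path suffix
        fmt.Println(module.CheckPathMajor("v2.0.0", "/v2")) // <nil>
        fmt.Println(module.CheckPathMajor("v2.3.1", ".v2")) // <nil>: gopkg.in-style ".v2" suffix
    }
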
...@@ -212,7 +212,7 @@ func newProxyRepo(baseURL, path string) (Repo, error) { ...@@ -212,7 +212,7 @@ func newProxyRepo(baseURL, path string) (Repo, error) {
return nil, fmt.Errorf("invalid proxy URL scheme (must be https, http, file): %s", web.Redacted(base)) return nil, fmt.Errorf("invalid proxy URL scheme (must be https, http, file): %s", web.Redacted(base))
} }
enc, err := module.EncodePath(path) enc, err := module.EscapePath(path)
if err != nil { if err != nil {
return nil, err return nil, err
} }
...@@ -351,7 +351,7 @@ func (p *proxyRepo) latest() (*RevInfo, error) { ...@@ -351,7 +351,7 @@ func (p *proxyRepo) latest() (*RevInfo, error) {
} }
func (p *proxyRepo) Stat(rev string) (*RevInfo, error) { func (p *proxyRepo) Stat(rev string) (*RevInfo, error) {
encRev, err := module.EncodeVersion(rev) encRev, err := module.EscapeVersion(rev)
if err != nil { if err != nil {
return nil, p.versionError(rev, err) return nil, p.versionError(rev, err)
} }
...@@ -392,7 +392,7 @@ func (p *proxyRepo) GoMod(version string) ([]byte, error) { ...@@ -392,7 +392,7 @@ func (p *proxyRepo) GoMod(version string) ([]byte, error) {
return nil, p.versionError(version, fmt.Errorf("internal error: version passed to GoMod is not canonical")) return nil, p.versionError(version, fmt.Errorf("internal error: version passed to GoMod is not canonical"))
} }
encVer, err := module.EncodeVersion(version) encVer, err := module.EscapeVersion(version)
if err != nil { if err != nil {
return nil, p.versionError(version, err) return nil, p.versionError(version, err)
} }
...@@ -408,7 +408,7 @@ func (p *proxyRepo) Zip(dst io.Writer, version string) error { ...@@ -408,7 +408,7 @@ func (p *proxyRepo) Zip(dst io.Writer, version string) error {
return p.versionError(version, fmt.Errorf("internal error: version passed to Zip is not canonical")) return p.versionError(version, fmt.Errorf("internal error: version passed to Zip is not canonical"))
} }
encVer, err := module.EncodeVersion(version) encVer, err := module.EscapeVersion(version)
if err != nil { if err != nil {
return p.versionError(version, err) return p.versionError(version, err)
} }
......
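
In the proxy hunks above, the escaped path and version are spliced directly
into GOPROXY URLs. A short sketch of the resulting URL shapes, assuming a
hypothetical proxy base URL:

    package main

    import (
        "fmt"

        "golang.org/x/mod/module"
    )

    func main() {
        base := "https://proxy.example.com" // hypothetical GOPROXY base URL

        escPath, err := module.EscapePath("github.com/Sirupsen/logrus")
        if err != nil {
            panic(err)
        }
        escVer, err := module.EscapeVersion("v1.4.2")
        if err != nil {
            panic(err)
        }

        // Endpoints fetched by proxyRepo.Stat, GoMod, and Zip respectively.
        fmt.Println(base + "/" + escPath + "/@v/" + escVer + ".info")
        fmt.Println(base + "/" + escPath + "/@v/" + escVer + ".mod")
        fmt.Println(base + "/" + escPath + "/@v/" + escVer + ".zip")
        // e.g. https://proxy.example.com/github.com/!sirupsen/logrus/@v/v1.4.2.info
    }
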
...@@ -27,7 +27,7 @@ import ( ...@@ -27,7 +27,7 @@ import (
"cmd/go/internal/module" "cmd/go/internal/module"
"cmd/go/internal/note" "cmd/go/internal/note"
"cmd/go/internal/str" "cmd/go/internal/str"
"cmd/go/internal/sumweb" "cmd/go/internal/sumdb"
"cmd/go/internal/web" "cmd/go/internal/web"
) )
...@@ -52,11 +52,11 @@ func lookupSumDB(mod module.Version) (dbname string, lines []string, err error) ...@@ -52,11 +52,11 @@ func lookupSumDB(mod module.Version) (dbname string, lines []string, err error)
var ( var (
dbOnce sync.Once dbOnce sync.Once
dbName string dbName string
db *sumweb.Conn db *sumdb.Client
dbErr error dbErr error
) )
func dbDial() (dbName string, db *sumweb.Conn, err error) { func dbDial() (dbName string, db *sumdb.Client, err error) {
// $GOSUMDB can be "key" or "key url", // $GOSUMDB can be "key" or "key url",
// and the key can be a full verifier key // and the key can be a full verifier key
// or a host on our list of known keys. // or a host on our list of known keys.
...@@ -106,7 +106,7 @@ func dbDial() (dbName string, db *sumweb.Conn, err error) { ...@@ -106,7 +106,7 @@ func dbDial() (dbName string, db *sumweb.Conn, err error) {
base = u base = u
} }
return name, sumweb.NewConn(&dbClient{key: key[0], name: name, direct: direct, base: base}), nil return name, sumdb.NewClient(&dbClient{key: key[0], name: name, direct: direct, base: base}), nil
} }
type dbClient struct { type dbClient struct {
...@@ -227,7 +227,7 @@ func (*dbClient) WriteConfig(file string, old, new []byte) error { ...@@ -227,7 +227,7 @@ func (*dbClient) WriteConfig(file string, old, new []byte) error {
return err return err
} }
if len(data) > 0 && !bytes.Equal(data, old) { if len(data) > 0 && !bytes.Equal(data, old) {
return sumweb.ErrWriteConflict return sumdb.ErrWriteConflict
} }
if _, err := f.Seek(0, 0); err != nil { if _, err := f.Seek(0, 0); err != nil {
return err return err
......
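
The dbClient edited above is cmd/go's implementation of the renamed ClientOps
interface. For orientation, a heavily trimmed, hypothetical implementation
against the exported golang.org/x/mod/sumdb API; it keeps configuration and
cache in memory and performs live HTTP fetches, which a real implementation
(like dbClient) would persist and proxy instead:

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "net/http"

        "golang.org/x/mod/sumdb"
    )

    // memOps is a toy ClientOps: config and cache entries live in maps.
    type memOps struct {
        config map[string][]byte
        cache  map[string][]byte
    }

    func (m *memOps) ReadRemote(path string) ([]byte, error) {
        resp, err := http.Get("https://sum.golang.org" + path)
        if err != nil {
            return nil, err
        }
        defer resp.Body.Close()
        if resp.StatusCode != 200 {
            return nil, fmt.Errorf("GET %s: %s", path, resp.Status)
        }
        return io.ReadAll(resp.Body)
    }

    func (m *memOps) ReadConfig(file string) ([]byte, error) { return m.config[file], nil }

    func (m *memOps) WriteConfig(file string, old, new []byte) error {
        if cur := m.config[file]; len(cur) > 0 && !bytes.Equal(cur, old) {
            return sumdb.ErrWriteConflict
        }
        m.config[file] = new
        return nil
    }

    func (m *memOps) ReadCache(file string) ([]byte, error) {
        data, ok := m.cache[file]
        if !ok {
            return nil, fmt.Errorf("cache miss: %s", file)
        }
        return data, nil
    }

    func (m *memOps) WriteCache(file string, data []byte) { m.cache[file] = data }
    func (m *memOps) Log(msg string)                      { fmt.Println(msg) }
    func (m *memOps) SecurityError(msg string)            { fmt.Println("SECURITY:", msg) }

    func main() {
        ops := &memOps{config: map[string][]byte{}, cache: map[string][]byte{}}
        // The "key" config entry must hold the database's verifier key
        // (this is the public sum.golang.org key).
        ops.config["key"] = []byte("sum.golang.org+033de0ae+Ac4zctda0e5eza+HJyk9SxEdUjla9muSbuWOGPcpQhst")

        client := sumdb.NewClient(ops)
        lines, err := client.Lookup("rsc.io/quote", "v1.5.2")
        fmt.Println(lines, err)
    }
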
...@@ -223,7 +223,7 @@ func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, f ...@@ -223,7 +223,7 @@ func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, f
fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err) fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
return return
} }
if err := module.MatchPathMajor(v, pathMajor); err != nil { if err := module.CheckPathMajor(v, pathMajor); err != nil {
fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, &Error{Verb: verb, ModPath: s, Err: err}) fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, &Error{Verb: verb, ModPath: s, Err: err})
return return
} }
...@@ -265,7 +265,7 @@ func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, f ...@@ -265,7 +265,7 @@ func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, f
fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err) fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err)
return return
} }
if err := module.MatchPathMajor(v, pathMajor); err != nil { if err := module.CheckPathMajor(v, pathMajor); err != nil {
fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, &Error{Verb: verb, ModPath: s, Err: err}) fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, &Error{Verb: verb, ModPath: s, Err: err})
return return
} }
......
...@@ -848,7 +848,7 @@ func fixVersion(path, vers string) (string, error) { ...@@ -848,7 +848,7 @@ func fixVersion(path, vers string) (string, error) {
} }
} }
if vers != "" && module.CanonicalVersion(vers) == vers { if vers != "" && module.CanonicalVersion(vers) == vers {
if err := module.MatchPathMajor(vers, pathMajor); err == nil { if err := module.CheckPathMajor(vers, pathMajor); err == nil {
return vers, nil return vers, nil
} }
} }
......
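
The modfile hunks above mean a require line whose version disagrees with the
module path's major version is rejected at parse time. A small sketch using
the exported golang.org/x/mod/modfile package (the go.mod contents are
invented):

    package main

    import (
        "fmt"

        "golang.org/x/mod/modfile"
    )

    func main() {
        // example.com/m carries no /v2 suffix, so a v2 requirement must not parse.
        gomod := []byte("module example.com/use\n\ngo 1.13\n\nrequire example.com/m v2.0.0\n")

        _, err := modfile.Parse("go.mod", gomod, nil)
        fmt.Println(err) // non-nil: v2.0.0 requires an example.com/m/v2 module path
    }
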
...@@ -2,8 +2,86 @@ ...@@ -2,8 +2,86 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// Package module defines the module.Version type // Package module defines the module.Version type along with support code.
// along with support code. //
// The module.Version type is a simple Path, Version pair:
//
// type Version struct {
// Path string
// Version string
// }
//
// There are no restrictions imposed directly by use of this structure,
// but additional checking functions, most notably Check, verify that
// a particular path, version pair is valid.
//
// Escaped Paths
//
// Module paths appear as substrings of file system paths
// (in the download cache) and of web server URLs in the proxy protocol.
// In general we cannot rely on file systems to be case-sensitive,
// nor can we rely on web servers, since they read from file systems.
// That is, we cannot rely on the file system to keep rsc.io/QUOTE
// and rsc.io/quote separate. Windows and macOS don't.
// Instead, we must never require two different casings of a file path.
// Because we want the download cache to match the proxy protocol,
// and because we want the proxy protocol to be possible to serve
// from a tree of static files (which might be stored on a case-insensitive
// file system), the proxy protocol must never require two different casings
// of a URL path either.
//
// One possibility would be to make the escaped form be the lowercase
// hexadecimal encoding of the actual path bytes. This would avoid ever
// needing different casings of a file path, but it would be fairly illegible
// to most programmers when those paths appeared in the file system
// (including in file paths in compiler errors and stack traces)
// in web server logs, and so on. Instead, we want a safe escaped form that
// leaves most paths unaltered.
//
// The safe escaped form is to replace every uppercase letter
// with an exclamation mark followed by the letter's lowercase equivalent.
//
// For example,
//
// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
//
// Import paths that avoid upper-case letters are left unchanged.
// Note that because import paths are ASCII-only and avoid various
// problematic punctuation (like : < and >), the escaped form is also ASCII-only
// and avoids the same problematic punctuation.
//
// Import paths have never allowed exclamation marks, so there is no
// need to define how to escape a literal !.
//
// Unicode Restrictions
//
// Today, paths are disallowed from using Unicode.
//
// Although paths are currently disallowed from using Unicode,
// we would like at some point to allow Unicode letters as well, to assume that
// file systems and URLs are Unicode-safe (storing UTF-8), and apply
// the !-for-uppercase convention for escaping them in the file system.
// But there are at least two subtle considerations.
//
// First, note that not all case-fold equivalent distinct runes
// form an upper/lower pair.
// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
// are three distinct runes that case-fold to each other.
// When we do add Unicode letters, we must not assume that upper/lower
// are the only case-equivalent pairs.
// Perhaps the Kelvin symbol would be disallowed entirely, for example.
// Or perhaps it would escape as "!!k", or perhaps as "(212A)".
//
// Second, it would be nice to allow Unicode marks as well as letters,
// but marks include combining marks, and then we must deal not
// only with case folding but also normalization: both U+00E9 ('é')
// and U+0065 U+0301 ('e' followed by combining acute accent)
// look the same on the page and are treated by some file systems
// as the same path. If we do allow Unicode marks in paths, there
// must be some kind of normalization to allow only one canonical
// encoding of any character used in an import path.
package module package module
// IMPORTANT NOTE // IMPORTANT NOTE
...@@ -28,8 +106,10 @@ import ( ...@@ -28,8 +106,10 @@ import (
"cmd/go/internal/semver" "cmd/go/internal/semver"
) )
// A Version is defined by a module path and version pair. // A Version (for clients, a module.Version) is defined by a module path and version pair.
// These are stored in their plain (unescaped) form.
type Version struct { type Version struct {
// Path is a module path, like "golang.org/x/text" or "rsc.io/quote/v2".
Path string Path string
// Version is usually a semantic version in canonical form. // Version is usually a semantic version in canonical form.
...@@ -43,6 +123,11 @@ type Version struct { ...@@ -43,6 +123,11 @@ type Version struct {
Version string `json:",omitempty"` Version string `json:",omitempty"`
} }
// String returns the module version syntax Path@Version.
func (m Version) String() string {
return m.Path + "@" + m.Version
}
// A ModuleError indicates an error specific to a module. // A ModuleError indicates an error specific to a module.
type ModuleError struct { type ModuleError struct {
Path string Path string
...@@ -119,7 +204,7 @@ func Check(path, version string) error { ...@@ -119,7 +204,7 @@ func Check(path, version string) error {
} }
} }
_, pathMajor, _ := SplitPathVersion(path) _, pathMajor, _ := SplitPathVersion(path)
if err := MatchPathMajor(version, pathMajor); err != nil { if err := CheckPathMajor(version, pathMajor); err != nil {
return &ModuleError{Path: path, Err: err} return &ModuleError{Path: path, Err: err}
} }
return nil return nil
...@@ -138,7 +223,7 @@ func firstPathOK(r rune) bool { ...@@ -138,7 +223,7 @@ func firstPathOK(r rune) bool {
// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~. // Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~.
// This matches what "go get" has historically recognized in import paths. // This matches what "go get" has historically recognized in import paths.
// TODO(rsc): We would like to allow Unicode letters, but that requires additional // TODO(rsc): We would like to allow Unicode letters, but that requires additional
// care in the safe encoding (see note below). // care in the safe encoding (see "escaped paths" above).
func pathOK(r rune) bool { func pathOK(r rune) bool {
if r < utf8.RuneSelf { if r < utf8.RuneSelf {
return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' || return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' ||
...@@ -153,7 +238,7 @@ func pathOK(r rune) bool { ...@@ -153,7 +238,7 @@ func pathOK(r rune) bool {
// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters. // For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters.
// If we expand the set of allowed characters here, we have to // If we expand the set of allowed characters here, we have to
// work harder at detecting potential case-folding and normalization collisions. // work harder at detecting potential case-folding and normalization collisions.
// See note about "safe encoding" below. // See note about "escaped paths" above.
func fileNameOK(r rune) bool { func fileNameOK(r rune) bool {
if r < utf8.RuneSelf { if r < utf8.RuneSelf {
// Entire set of ASCII punctuation, from which we remove characters: // Entire set of ASCII punctuation, from which we remove characters:
...@@ -179,6 +264,17 @@ func fileNameOK(r rune) bool { ...@@ -179,6 +264,17 @@ func fileNameOK(r rune) bool {
} }
// CheckPath checks that a module path is valid. // CheckPath checks that a module path is valid.
// A valid module path is a valid import path, as checked by CheckImportPath,
// with two additional constraints.
// First, the leading path element (up to the first slash, if any),
// by convention a domain name, must contain only lower-case ASCII letters,
// ASCII digits, dots (U+002E), and dashes (U+002D);
// it must contain at least one dot and cannot start with a dash.
// Second, for a final path element of the form /vN, where N looks numeric
// (ASCII digits and dots) must not begin with a leading zero, must not be /v1,
// and must not contain any dots. For paths beginning with "gopkg.in/",
// this second requirement is replaced by a requirement that the path
// follow the gopkg.in server's conventions.
func CheckPath(path string) error { func CheckPath(path string) error {
if err := checkPath(path, false); err != nil { if err := checkPath(path, false); err != nil {
return fmt.Errorf("malformed module path %q: %v", path, err) return fmt.Errorf("malformed module path %q: %v", path, err)
...@@ -208,6 +304,20 @@ func CheckPath(path string) error { ...@@ -208,6 +304,20 @@ func CheckPath(path string) error {
} }
// CheckImportPath checks that an import path is valid. // CheckImportPath checks that an import path is valid.
//
// A valid import path consists of one or more valid path elements
// separated by slashes (U+002F). (It must not begin with nor end in a slash.)
//
// A valid path element is a non-empty string made up of
// ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~.
// It must not begin or end with a dot (U+002E), nor contain two dots in a row.
//
// The element prefix up to the first dot must not be a reserved file name
// on Windows, regardless of case (CON, com1, NuL, and so on).
//
// CheckImportPath may be less restrictive in the future, but see the
// top-level package documentation for additional information about
// subtleties of Unicode.
func CheckImportPath(path string) error { func CheckImportPath(path string) error {
if err := checkPath(path, false); err != nil { if err := checkPath(path, false); err != nil {
return fmt.Errorf("malformed import path %q: %v", path, err) return fmt.Errorf("malformed import path %q: %v", path, err)
...@@ -291,7 +401,18 @@ func checkElem(elem string, fileName bool) error { ...@@ -291,7 +401,18 @@ func checkElem(elem string, fileName bool) error {
return nil return nil
} }
// CheckFilePath checks whether a slash-separated file path is valid. // CheckFilePath checks that a slash-separated file path is valid.
// The definition of a valid file path is the same as the definition
// of a valid import path except that the set of allowed characters is larger:
// all Unicode letters, ASCII digits, the ASCII space character (U+0020),
// and the ASCII punctuation characters
// “!#$%&()+,-.=@[]^_{}~”.
// (The excluded punctuation characters, " * < > ? ` ' | / \ and :,
// have special meanings in certain shells or operating systems.)
//
// CheckFilePath may be less restrictive in the future, but see the
// top-level package documentation for additional information about
// subtleties of Unicode.
func CheckFilePath(path string) error { func CheckFilePath(path string) error {
if err := checkPath(path, true); err != nil { if err := checkPath(path, true); err != nil {
return fmt.Errorf("malformed file path %q: %v", path, err) return fmt.Errorf("malformed file path %q: %v", path, err)
...@@ -330,6 +451,9 @@ var badWindowsNames = []string{ ...@@ -330,6 +451,9 @@ var badWindowsNames = []string{
// and version is either empty or "/vN" for N >= 2. // and version is either empty or "/vN" for N >= 2.
// As a special case, gopkg.in paths are recognized directly; // As a special case, gopkg.in paths are recognized directly;
// they require ".vN" instead of "/vN", and for all N, not just N >= 2. // they require ".vN" instead of "/vN", and for all N, not just N >= 2.
// SplitPathVersion returns with ok = false when presented with
// a path whose last path element does not satisfy the constraints
// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2".
func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
if strings.HasPrefix(path, "gopkg.in/") { if strings.HasPrefix(path, "gopkg.in/") {
return splitGopkgIn(path) return splitGopkgIn(path)
...@@ -376,9 +500,17 @@ func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) { ...@@ -376,9 +500,17 @@ func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
return prefix, pathMajor, true return prefix, pathMajor, true
} }
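
SplitPathVersion and splitGopkgIn above carve the major-version suffix off a
module path. A quick sketch of the outputs, using the exported
golang.org/x/mod/module package (the paths are arbitrary examples):

    package main

    import (
        "fmt"

        "golang.org/x/mod/module"
    )

    func main() {
        fmt.Println(module.SplitPathVersion("github.com/user/repo/v3"))
        // github.com/user/repo /v3 true

        fmt.Println(module.SplitPathVersion("gopkg.in/yaml.v2"))
        // gopkg.in/yaml .v2 true

        fmt.Println(module.SplitPathVersion("example.com/pkg/v1"))
        // "", "", false: /v1 suffixes are rejected, per the doc comment above
    }
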
// MatchPathMajor returns a non-nil error if the semantic version v // MatchPathMajor reports whether the semantic version v
// matches the path major version pathMajor.
//
// MatchPathMajor returns true if and only if CheckPathMajor returns nil.
func MatchPathMajor(v, pathMajor string) bool {
return CheckPathMajor(v, pathMajor) == nil
}
// CheckPathMajor returns a non-nil error if the semantic version v
// does not match the path major version pathMajor. // does not match the path major version pathMajor.
func MatchPathMajor(v, pathMajor string) error { func CheckPathMajor(v, pathMajor string) error {
if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
pathMajor = strings.TrimSuffix(pathMajor, "-unstable") pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
} }
...@@ -438,7 +570,10 @@ func CanonicalVersion(v string) string { ...@@ -438,7 +570,10 @@ func CanonicalVersion(v string) string {
return cv return cv
} }
// Sort sorts the list by Path, breaking ties by comparing Versions. // Sort sorts the list by Path, breaking ties by comparing Version fields.
// The Version fields are interpreted as semantic versions (using semver.Compare)
// optionally followed by a tie-breaking suffix introduced by a slash character,
// like in "v0.0.1/go.mod".
func Sort(list []Version) { func Sort(list []Version) {
sort.Slice(list, func(i, j int) bool { sort.Slice(list, func(i, j int) bool {
mi := list[i] mi := list[i]
...@@ -465,96 +600,36 @@ func Sort(list []Version) { ...@@ -465,96 +600,36 @@ func Sort(list []Version) {
}) })
} }
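
The Sort documentation above mentions the optional "/go.mod" tie-breaking
suffix that go.sum files rely on. A brief sketch of the resulting order,
using the exported golang.org/x/mod/module package with made-up paths:

    package main

    import (
        "fmt"

        "golang.org/x/mod/module"
    )

    func main() {
        list := []module.Version{
            {Path: "example.com/b", Version: "v1.0.0"},
            {Path: "example.com/a", Version: "v1.2.0"},
            {Path: "example.com/a", Version: "v1.2.0/go.mod"},
            {Path: "example.com/a", Version: "v1.10.0"},
        }
        module.Sort(list)
        for _, m := range list {
            fmt.Println(m)
        }
        // example.com/a@v1.2.0
        // example.com/a@v1.2.0/go.mod
        // example.com/a@v1.10.0
        // example.com/b@v1.0.0
    }
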
// Safe encodings // EscapePath returns the escaped form of the given module path.
//
// Module paths appear as substrings of file system paths
// (in the download cache) and of web server URLs in the proxy protocol.
// In general we cannot rely on file systems to be case-sensitive,
// nor can we rely on web servers, since they read from file systems.
// That is, we cannot rely on the file system to keep rsc.io/QUOTE
// and rsc.io/quote separate. Windows and macOS don't.
// Instead, we must never require two different casings of a file path.
// Because we want the download cache to match the proxy protocol,
// and because we want the proxy protocol to be possible to serve
// from a tree of static files (which might be stored on a case-insensitive
// file system), the proxy protocol must never require two different casings
// of a URL path either.
//
// One possibility would be to make the safe encoding be the lowercase
// hexadecimal encoding of the actual path bytes. This would avoid ever
// needing different casings of a file path, but it would be fairly illegible
// to most programmers when those paths appeared in the file system
// (including in file paths in compiler errors and stack traces)
// in web server logs, and so on. Instead, we want a safe encoding that
// leaves most paths unaltered.
//
// The safe encoding is this:
// replace every uppercase letter with an exclamation mark
// followed by the letter's lowercase equivalent.
//
// For example,
// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
//
// Import paths that avoid upper-case letters are left unchanged.
// Note that because import paths are ASCII-only and avoid various
// problematic punctuation (like : < and >), the safe encoding is also ASCII-only
// and avoids the same problematic punctuation.
//
// Import paths have never allowed exclamation marks, so there is no
// need to define how to encode a literal !.
//
// Although paths are disallowed from using Unicode (see pathOK above),
// the eventual plan is to allow Unicode letters as well, to assume that
// file systems and URLs are Unicode-safe (storing UTF-8), and apply
// the !-for-uppercase convention. Note however that not all runes that
// are different but case-fold equivalent are an upper/lower pair.
// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
// are considered to case-fold to each other. When we do add Unicode
// letters, we must not assume that upper/lower are the only case-equivalent pairs.
// Perhaps the Kelvin symbol would be disallowed entirely, for example.
// Or perhaps it would encode as "!!k", or perhaps as "(212A)".
//
// Also, it would be nice to allow Unicode marks as well as letters,
// but marks include combining marks, and then we must deal not
// only with case folding but also normalization: both U+00E9 ('é')
// and U+0065 U+0301 ('e' followed by combining acute accent)
// look the same on the page and are treated by some file systems
// as the same path. If we do allow Unicode marks in paths, there
// must be some kind of normalization to allow only one canonical
// encoding of any character used in an import path.
// EncodePath returns the safe encoding of the given module path.
// It fails if the module path is invalid. // It fails if the module path is invalid.
func EncodePath(path string) (encoding string, err error) { func EscapePath(path string) (escaped string, err error) {
if err := CheckPath(path); err != nil { if err := CheckPath(path); err != nil {
return "", err return "", err
} }
return encodeString(path) return escapeString(path)
} }
// EncodeVersion returns the safe encoding of the given module version. // EscapeVersion returns the escaped form of the given module version.
// Versions are allowed to be in non-semver form but must be valid file names // Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks. // and not contain exclamation marks.
func EncodeVersion(v string) (encoding string, err error) { func EscapeVersion(v string) (escaped string, err error) {
if err := checkElem(v, true); err != nil || strings.Contains(v, "!") { if err := checkElem(v, true); err != nil || strings.Contains(v, "!") {
return "", &InvalidVersionError{ return "", &InvalidVersionError{
Version: v, Version: v,
Err: fmt.Errorf("disallowed version string"), Err: fmt.Errorf("disallowed version string"),
} }
} }
return encodeString(v) return escapeString(v)
} }
func encodeString(s string) (encoding string, err error) { func escapeString(s string) (escaped string, err error) {
haveUpper := false haveUpper := false
for _, r := range s { for _, r := range s {
if r == '!' || r >= utf8.RuneSelf { if r == '!' || r >= utf8.RuneSelf {
// This should be disallowed by CheckPath, but diagnose anyway. // This should be disallowed by CheckPath, but diagnose anyway.
// The correctness of the encoding loop below depends on it. // The correctness of the escaping loop below depends on it.
return "", fmt.Errorf("internal error: inconsistency in EncodePath") return "", fmt.Errorf("internal error: inconsistency in EscapePath")
} }
if 'A' <= r && r <= 'Z' { if 'A' <= r && r <= 'Z' {
haveUpper = true haveUpper = true
...@@ -576,39 +651,39 @@ func encodeString(s string) (encoding string, err error) { ...@@ -576,39 +651,39 @@ func encodeString(s string) (encoding string, err error) {
return string(buf), nil return string(buf), nil
} }
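
The escapeString loop above implements the !-for-uppercase rule documented at
the top of the package. As a standalone illustration of just that rule (not
the real implementation, which also validates the path first), consider:

    // escapeASCIIPath prefixes each ASCII uppercase letter with '!' and
    // lowercases it, mirroring the convention in the package docs.
    // It assumes the input is already a valid, ASCII-only module path.
    func escapeASCIIPath(path string) string {
        var buf []byte
        for i := 0; i < len(path); i++ {
            c := path[i]
            if 'A' <= c && c <= 'Z' {
                buf = append(buf, '!', c+'a'-'A')
            } else {
                buf = append(buf, c)
            }
        }
        return string(buf)
    }

    // escapeASCIIPath("github.com/Azure/azure-sdk-for-go") ==
    //     "github.com/!azure/azure-sdk-for-go"
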
// DecodePath returns the module path of the given safe encoding. // UnescapePath returns the module path for the given escaped path.
// It fails if the encoding is invalid or encodes an invalid path. // It fails if the escaped path is invalid or describes an invalid path.
func DecodePath(encoding string) (path string, err error) { func UnescapePath(escaped string) (path string, err error) {
path, ok := decodeString(encoding) path, ok := unescapeString(escaped)
if !ok { if !ok {
return "", fmt.Errorf("invalid module path encoding %q", encoding) return "", fmt.Errorf("invalid escaped module path %q", escaped)
} }
if err := CheckPath(path); err != nil { if err := CheckPath(path); err != nil {
return "", fmt.Errorf("invalid module path encoding %q: %v", encoding, err) return "", fmt.Errorf("invalid escaped module path %q: %v", escaped, err)
} }
return path, nil return path, nil
} }
// DecodeVersion returns the version string for the given safe encoding. // UnescapeVersion returns the version string for the given escaped version.
// It fails if the encoding is invalid or encodes an invalid version. // It fails if the escaped form is invalid or describes an invalid version.
// Versions are allowed to be in non-semver form but must be valid file names // Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks. // and not contain exclamation marks.
func DecodeVersion(encoding string) (v string, err error) { func UnescapeVersion(escaped string) (v string, err error) {
v, ok := decodeString(encoding) v, ok := unescapeString(escaped)
if !ok { if !ok {
return "", fmt.Errorf("invalid version encoding %q", encoding) return "", fmt.Errorf("invalid escaped version %q", escaped)
} }
if err := checkElem(v, true); err != nil { if err := checkElem(v, true); err != nil {
return "", fmt.Errorf("disallowed version string %q", v) return "", fmt.Errorf("invalid escaped version %q: %v", v, err)
} }
return v, nil return v, nil
} }
func decodeString(encoding string) (string, bool) { func unescapeString(escaped string) (string, bool) {
var buf []byte var buf []byte
bang := false bang := false
for _, r := range encoding { for _, r := range escaped {
if r >= utf8.RuneSelf { if r >= utf8.RuneSelf {
return "", false return "", false
} }
......
...@@ -238,43 +238,43 @@ func TestSplitPathVersion(t *testing.T) { ...@@ -238,43 +238,43 @@ func TestSplitPathVersion(t *testing.T) {
} }
} }
var encodeTests = []struct { var escapeTests = []struct {
path string path string
enc string // empty means same as path esc string // empty means same as path
}{ }{
{path: "ascii.com/abcdefghijklmnopqrstuvwxyz.-+/~_0123456789"}, {path: "ascii.com/abcdefghijklmnopqrstuvwxyz.-+/~_0123456789"},
{path: "github.com/GoogleCloudPlatform/omega", enc: "github.com/!google!cloud!platform/omega"}, {path: "github.com/GoogleCloudPlatform/omega", esc: "github.com/!google!cloud!platform/omega"},
} }
func TestEncodePath(t *testing.T) { func TestEscapePath(t *testing.T) {
// Check invalid paths. // Check invalid paths.
for _, tt := range checkPathTests { for _, tt := range checkPathTests {
if !tt.ok { if !tt.ok {
_, err := EncodePath(tt.path) _, err := EscapePath(tt.path)
if err == nil { if err == nil {
t.Errorf("EncodePath(%q): succeeded, want error (invalid path)", tt.path) t.Errorf("EscapePath(%q): succeeded, want error (invalid path)", tt.path)
} }
} }
} }
// Check encodings. // Check encodings.
for _, tt := range encodeTests { for _, tt := range escapeTests {
enc, err := EncodePath(tt.path) esc, err := EscapePath(tt.path)
if err != nil { if err != nil {
t.Errorf("EncodePath(%q): unexpected error: %v", tt.path, err) t.Errorf("EscapePath(%q): unexpected error: %v", tt.path, err)
continue continue
} }
want := tt.enc want := tt.esc
if want == "" { if want == "" {
want = tt.path want = tt.path
} }
if enc != want { if esc != want {
t.Errorf("EncodePath(%q) = %q, want %q", tt.path, enc, want) t.Errorf("EscapePath(%q) = %q, want %q", tt.path, esc, want)
} }
} }
} }
var badDecode = []string{ var badUnescape = []string{
"github.com/GoogleCloudPlatform/omega", "github.com/GoogleCloudPlatform/omega",
"github.com/!google!cloud!platform!/omega", "github.com/!google!cloud!platform!/omega",
"github.com/!0google!cloud!platform/omega", "github.com/!0google!cloud!platform/omega",
...@@ -283,38 +283,38 @@ var badDecode = []string{ ...@@ -283,38 +283,38 @@ var badDecode = []string{
"", "",
} }
func TestDecodePath(t *testing.T) { func TestUnescapePath(t *testing.T) {
// Check invalid decodings. // Check invalid decodings.
for _, bad := range badDecode { for _, bad := range badUnescape {
_, err := DecodePath(bad) _, err := UnescapePath(bad)
if err == nil { if err == nil {
t.Errorf("DecodePath(%q): succeeded, want error (invalid decoding)", bad) t.Errorf("UnescapePath(%q): succeeded, want error (invalid decoding)", bad)
} }
} }
// Check invalid paths (or maybe decodings). // Check invalid paths (or maybe decodings).
for _, tt := range checkPathTests { for _, tt := range checkPathTests {
if !tt.ok { if !tt.ok {
path, err := DecodePath(tt.path) path, err := UnescapePath(tt.path)
if err == nil { if err == nil {
t.Errorf("DecodePath(%q) = %q, want error (invalid path)", tt.path, path) t.Errorf("UnescapePath(%q) = %q, want error (invalid path)", tt.path, path)
} }
} }
} }
// Check encodings. // Check encodings.
for _, tt := range encodeTests { for _, tt := range escapeTests {
enc := tt.enc esc := tt.esc
if enc == "" { if esc == "" {
enc = tt.path esc = tt.path
} }
path, err := DecodePath(enc) path, err := UnescapePath(esc)
if err != nil { if err != nil {
t.Errorf("DecodePath(%q): unexpected error: %v", enc, err) t.Errorf("UnescapePath(%q): unexpected error: %v", esc, err)
continue continue
} }
if path != tt.path { if path != tt.path {
t.Errorf("DecodePath(%q) = %q, want %q", enc, path, tt.path) t.Errorf("UnescapePath(%q) = %q, want %q", esc, path, tt.path)
} }
} }
} }
...@@ -548,9 +548,6 @@ func Open(msg []byte, known Verifiers) (*Note, error) { ...@@ -548,9 +548,6 @@ func Open(msg []byte, known Verifiers) (*Note, error) {
Text: string(text), Text: string(text),
} }
var buf bytes.Buffer
buf.Write(text)
// Parse and verify signatures. // Parse and verify signatures.
// Ignore duplicate signatures. // Ignore duplicate signatures.
seen := make(map[nameHash]bool) seen := make(map[nameHash]bool)
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
// Parallel cache. // Parallel cache.
// This file is copied from cmd/go/internal/par. // This file is copied from cmd/go/internal/par.
package sumweb package sumdb
import ( import (
"sync" "sync"
......
...@@ -2,28 +2,29 @@ ...@@ -2,28 +2,29 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package sumweb package sumdb
import ( import (
"bytes" "bytes"
"errors" "errors"
"fmt" "fmt"
"path"
"strings" "strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
"cmd/go/internal/module"
"cmd/go/internal/note" "cmd/go/internal/note"
"cmd/go/internal/str"
"cmd/go/internal/tlog" "cmd/go/internal/tlog"
) )
// A Client provides the external operations // A ClientOps provides the external operations
// (file caching, HTTP fetches, and so on) // (file caching, HTTP fetches, and so on) needed by the Client.
// needed to implement the HTTP client Conn.
// The methods must be safe for concurrent use by multiple goroutines. // The methods must be safe for concurrent use by multiple goroutines.
type Client interface { type ClientOps interface {
// ReadRemote reads and returns the content served at the given path // ReadRemote reads and returns the content served at the given path
// on the remote database server. The path begins with "/lookup" or "/tile/". // on the remote database server. The path begins with "/lookup" or "/tile/",
// and there is no need to parse the path in any way.
// It is the implementation's responsibility to turn that path into a full URL // It is the implementation's responsibility to turn that path into a full URL
// and make the HTTP request. ReadRemote should return an error for // and make the HTTP request. ReadRemote should return an error for
// any non-200 HTTP response status. // any non-200 HTTP response status.
...@@ -35,7 +36,7 @@ type Client interface { ...@@ -35,7 +36,7 @@ type Client interface {
// "key" returns a file containing the verifier key for the server. // "key" returns a file containing the verifier key for the server.
// //
// serverName + "/latest" returns a file containing the latest known // serverName + "/latest" returns a file containing the latest known
// signed tree from the server. It is read and written (using WriteConfig). // signed tree from the server.
// To signal that the client wishes to start with an "empty" signed tree, // To signal that the client wishes to start with an "empty" signed tree,
// ReadConfig can return a successful empty result (0 bytes of data). // ReadConfig can return a successful empty result (0 bytes of data).
ReadConfig(file string) ([]byte, error) ReadConfig(file string) ([]byte, error)
...@@ -45,6 +46,7 @@ type Client interface { ...@@ -45,6 +46,7 @@ type Client interface {
// If the old []byte does not match the stored configuration, // If the old []byte does not match the stored configuration,
// WriteConfig must return ErrWriteConflict. // WriteConfig must return ErrWriteConflict.
// Otherwise, WriteConfig should atomically replace old with new. // Otherwise, WriteConfig should atomically replace old with new.
// The "key" configuration file is never written using WriteConfig.
WriteConfig(file string, old, new []byte) error WriteConfig(file string, old, new []byte) error
// ReadCache reads and returns the content of the named cache file. // ReadCache reads and returns the content of the named cache file.
...@@ -61,7 +63,7 @@ type Client interface { ...@@ -61,7 +63,7 @@ type Client interface {
Log(msg string) Log(msg string)
// SecurityError prints the given security error log message. // SecurityError prints the given security error log message.
// The Conn returns ErrSecurity from any operation that invokes SecurityError, // The Client returns ErrSecurity from any operation that invokes SecurityError,
// but the return value is mainly for testing. In a real program, // but the return value is mainly for testing. In a real program,
// SecurityError should typically print the message and call log.Fatal or os.Exit. // SecurityError should typically print the message and call log.Fatal or os.Exit.
SecurityError(msg string) SecurityError(msg string)
...@@ -70,13 +72,13 @@ type Client interface { ...@@ -70,13 +72,13 @@ type Client interface {
// ErrWriteConflict signals a write conflict during Client.WriteConfig. // ErrWriteConflict signals a write conflict during Client.WriteConfig.
var ErrWriteConflict = errors.New("write conflict") var ErrWriteConflict = errors.New("write conflict")
// ErrSecurity is returned by Conn operations that invoke Client.SecurityError. // ErrSecurity is returned by Client operations that invoke Client.SecurityError.
var ErrSecurity = errors.New("security error: misbehaving server") var ErrSecurity = errors.New("security error: misbehaving server")
// A Conn is a client connection to a go.sum database. // A Client is a client connection to a checksum database.
// All the methods are safe for simultaneous use by multiple goroutines. // All the methods are safe for simultaneous use by multiple goroutines.
type Conn struct { type Client struct {
client Client // client-provided external world ops ClientOps // access to operations in the external world
didLookup uint32 didLookup uint32
...@@ -97,28 +99,28 @@ type Conn struct { ...@@ -97,28 +99,28 @@ type Conn struct {
latestMsg []byte // encoded signed note for latest latestMsg []byte // encoded signed note for latest
tileSavedMu sync.Mutex tileSavedMu sync.Mutex
tileSaved map[tlog.Tile]bool // which tiles have been saved using c.client.WriteCache already tileSaved map[tlog.Tile]bool // which tiles have been saved using c.ops.WriteCache already
} }
// NewConn returns a new Conn using the given Client. // NewClient returns a new Client using the given Client.
func NewConn(client Client) *Conn { func NewClient(ops ClientOps) *Client {
return &Conn{ return &Client{
client: client, ops: ops,
} }
} }
// init initializes the conn (if not already initialized) // init initializes the client (if not already initialized)
// and returns any initialization error. // and returns any initialization error.
func (c *Conn) init() error { func (c *Client) init() error {
c.initOnce.Do(c.initWork) c.initOnce.Do(c.initWork)
return c.initErr return c.initErr
} }
// initWork does the actual initialization work. // initWork does the actual initialization work.
func (c *Conn) initWork() { func (c *Client) initWork() {
defer func() { defer func() {
if c.initErr != nil { if c.initErr != nil {
c.initErr = fmt.Errorf("initializing sumweb.Conn: %v", c.initErr) c.initErr = fmt.Errorf("initializing sumdb.Client: %v", c.initErr)
} }
}() }()
...@@ -128,7 +130,7 @@ func (c *Conn) initWork() { ...@@ -128,7 +130,7 @@ func (c *Conn) initWork() {
} }
c.tileSaved = make(map[tlog.Tile]bool) c.tileSaved = make(map[tlog.Tile]bool)
vkey, err := c.client.ReadConfig("key") vkey, err := c.ops.ReadConfig("key")
if err != nil { if err != nil {
c.initErr = err c.initErr = err
return return
...@@ -141,7 +143,7 @@ func (c *Conn) initWork() { ...@@ -141,7 +143,7 @@ func (c *Conn) initWork() {
c.verifiers = note.VerifierList(verifier) c.verifiers = note.VerifierList(verifier)
c.name = verifier.Name() c.name = verifier.Name()
data, err := c.client.ReadConfig(c.name + "/latest") data, err := c.ops.ReadConfig(c.name + "/latest")
if err != nil { if err != nil {
c.initErr = err c.initErr = err
return return
...@@ -152,24 +154,30 @@ func (c *Conn) initWork() { ...@@ -152,24 +154,30 @@ func (c *Conn) initWork() {
} }
} }
// SetTileHeight sets the tile height for the Conn. // SetTileHeight sets the tile height for the Client.
// Any call to SetTileHeight must happen before the first call to Lookup. // Any call to SetTileHeight must happen before the first call to Lookup.
// If SetTileHeight is not called, the Conn defaults to tile height 8. // If SetTileHeight is not called, the Client defaults to tile height 8.
func (c *Conn) SetTileHeight(height int) { // SetTileHeight can be called at most once,
// and if so it must be called before the first call to Lookup.
func (c *Client) SetTileHeight(height int) {
if atomic.LoadUint32(&c.didLookup) != 0 { if atomic.LoadUint32(&c.didLookup) != 0 {
panic("SetTileHeight used after Lookup") panic("SetTileHeight used after Lookup")
} }
if height <= 0 {
panic("invalid call to SetTileHeight")
}
if c.tileHeight != 0 { if c.tileHeight != 0 {
panic("multiple calls to SetTileHeight") panic("multiple calls to SetTileHeight")
} }
c.tileHeight = height c.tileHeight = height
} }
// SetGONOSUMDB sets the list of comma-separated GONOSUMDB patterns for the Conn. // SetGONOSUMDB sets the list of comma-separated GONOSUMDB patterns for the Client.
// For any module path matching one of the patterns, // For any module path matching one of the patterns,
// Lookup will return ErrGONOSUMDB. // Lookup will return ErrGONOSUMDB.
// Any call to SetGONOSUMDB must happen before the first call to Lookup. // SetGONOSUMDB can be called at most once,
func (c *Conn) SetGONOSUMDB(list string) { // and if so it must be called before the first call to Lookup.
func (c *Client) SetGONOSUMDB(list string) {
if atomic.LoadUint32(&c.didLookup) != 0 { if atomic.LoadUint32(&c.didLookup) != 0 {
panic("SetGONOSUMDB used after Lookup") panic("SetGONOSUMDB used after Lookup")
} }
...@@ -184,14 +192,58 @@ func (c *Conn) SetGONOSUMDB(list string) { ...@@ -184,14 +192,58 @@ func (c *Conn) SetGONOSUMDB(list string) {
// usually from the environment variable). // usually from the environment variable).
var ErrGONOSUMDB = errors.New("skipped (listed in GONOSUMDB)") var ErrGONOSUMDB = errors.New("skipped (listed in GONOSUMDB)")
func (c *Conn) skip(target string) bool { func (c *Client) skip(target string) bool {
return str.GlobsMatchPath(c.nosumdb, target) return globsMatchPath(c.nosumdb, target)
}
// globsMatchPath reports whether any path prefix of target
// matches one of the glob patterns (as defined by path.Match)
// in the comma-separated globs list.
// It ignores any empty or malformed patterns in the list.
func globsMatchPath(globs, target string) bool {
for globs != "" {
// Extract next non-empty glob in comma-separated list.
var glob string
if i := strings.Index(globs, ","); i >= 0 {
glob, globs = globs[:i], globs[i+1:]
} else {
glob, globs = globs, ""
}
if glob == "" {
continue
}
// A glob with N+1 path elements (N slashes) needs to be matched
// against the first N+1 path elements of target,
// which end just before the N+1'th slash.
n := strings.Count(glob, "/")
prefix := target
// Walk target, counting slashes, truncating at the N+1'th slash.
for i := 0; i < len(target); i++ {
if target[i] == '/' {
if n == 0 {
prefix = target[:i]
break
}
n--
}
}
if n > 0 {
// Not enough prefix elements.
continue
}
matched, _ := path.Match(glob, prefix)
if matched {
return true
}
}
return false
} }
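
globsMatchPath above replaces the dependency on cmd/go/internal/str.GlobsMatchPath:
each comma-separated glob is matched against a path-element prefix of the
target using path.Match. A standalone illustration of that prefix rule, with
a hypothetical GONOSUMDB pattern:

    package main

    import (
        "fmt"
        "path"
    )

    func main() {
        // globsMatchPath("corp.example.com/*", "corp.example.com/secret/tool")
        // truncates the target to as many path elements as the glob has
        // (two here) and only then applies path.Match:
        prefix := "corp.example.com/secret" // first two elements of the target
        ok, _ := path.Match("corp.example.com/*", prefix)
        fmt.Println(ok) // true: the whole subtree is skipped for GONOSUMDB

        // Globs never span a slash, so without the truncation the same
        // pattern would not match the longer target.
        ok, _ = path.Match("corp.example.com/*", "corp.example.com/secret/tool")
        fmt.Println(ok) // false
    }
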
// Lookup returns the go.sum lines for the given module path and version. // Lookup returns the go.sum lines for the given module path and version.
// The version may end in a /go.mod suffix, in which case Lookup returns // The version may end in a /go.mod suffix, in which case Lookup returns
// the go.sum lines for the module's go.mod-only hash. // the go.sum lines for the module's go.mod-only hash.
func (c *Conn) Lookup(path, vers string) (lines []string, err error) { func (c *Client) Lookup(path, vers string) (lines []string, err error) {
atomic.StoreUint32(&c.didLookup, 1) atomic.StoreUint32(&c.didLookup, 1)
if c.skip(path) { if c.skip(path) {
...@@ -209,16 +261,16 @@ func (c *Conn) Lookup(path, vers string) (lines []string, err error) { ...@@ -209,16 +261,16 @@ func (c *Conn) Lookup(path, vers string) (lines []string, err error) {
} }
// Prepare encoded cache filename / URL. // Prepare encoded cache filename / URL.
epath, err := encodePath(path) epath, err := module.EscapePath(path)
if err != nil { if err != nil {
return nil, err return nil, err
} }
evers, err := encodeVersion(strings.TrimSuffix(vers, "/go.mod")) evers, err := module.EscapeVersion(strings.TrimSuffix(vers, "/go.mod"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
file := c.name + "/lookup/" + epath + "@" + evers
remotePath := "/lookup/" + epath + "@" + evers remotePath := "/lookup/" + epath + "@" + evers
file := c.name + remotePath
// Fetch the data. // Fetch the data.
// The lookupCache avoids redundant ReadCache/GetURL operations // The lookupCache avoids redundant ReadCache/GetURL operations
...@@ -232,9 +284,9 @@ func (c *Conn) Lookup(path, vers string) (lines []string, err error) { ...@@ -232,9 +284,9 @@ func (c *Conn) Lookup(path, vers string) (lines []string, err error) {
result := c.record.Do(file, func() interface{} { result := c.record.Do(file, func() interface{} {
// Try the on-disk cache, or else get from web. // Try the on-disk cache, or else get from web.
writeCache := false writeCache := false
data, err := c.client.ReadCache(file) data, err := c.ops.ReadCache(file)
if err != nil { if err != nil {
data, err = c.client.ReadRemote(remotePath) data, err = c.ops.ReadRemote(remotePath)
if err != nil { if err != nil {
return cached{nil, err} return cached{nil, err}
} }
...@@ -256,7 +308,7 @@ func (c *Conn) Lookup(path, vers string) (lines []string, err error) { ...@@ -256,7 +308,7 @@ func (c *Conn) Lookup(path, vers string) (lines []string, err error) {
// Now that we've validated the record, // Now that we've validated the record,
// save it to the on-disk cache (unless that's where it came from). // save it to the on-disk cache (unless that's where it came from).
if writeCache { if writeCache {
c.client.WriteCache(file, data) c.ops.WriteCache(file, data)
} }
return cached{data, nil} return cached{data, nil}
...@@ -278,15 +330,15 @@ func (c *Conn) Lookup(path, vers string) (lines []string, err error) { ...@@ -278,15 +330,15 @@ func (c *Conn) Lookup(path, vers string) (lines []string, err error) {
} }
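To make the escaped lookup paths concrete: with the !-for-uppercase escaping described later in this change, a lookup of rsc.io/Quote@v1.5.2 against a database named sum.golang.org (the name is an assumed example) produces

epath, _ := module.EscapePath("rsc.io/Quote")   // "rsc.io/!quote"
evers, _ := module.EscapeVersion("v1.5.2")      // "v1.5.2"
remotePath := "/lookup/" + epath + "@" + evers  // "/lookup/rsc.io/!quote@v1.5.2"
file := "sum.golang.org" + remotePath           // cache key "sum.golang.org/lookup/rsc.io/!quote@v1.5.2"

so the on-disk cache key is simply the remote path prefixed by the database name.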
// mergeLatest merges the tree head in msg // mergeLatest merges the tree head in msg
// with the Conn's current latest tree head, // with the Client's current latest tree head,
// ensuring the result is a consistent timeline. // ensuring the result is a consistent timeline.
// If the result is inconsistent, mergeLatest calls c.client.SecurityError // If the result is inconsistent, mergeLatest calls c.ops.SecurityError
// with a detailed security error message and then // with a detailed security error message and then
// (only if c.client.SecurityError does not exit the program) returns ErrSecurity. // (only if c.ops.SecurityError does not exit the program) returns ErrSecurity.
// If the Conn's current latest tree head moves forward, // If the Client's current latest tree head moves forward,
// mergeLatest updates the underlying configuration file as well, // mergeLatest updates the underlying configuration file as well,
// taking care to merge any independent updates to that configuration. // taking care to merge any independent updates to that configuration.
func (c *Conn) mergeLatest(msg []byte) error { func (c *Client) mergeLatest(msg []byte) error {
// Merge msg into our in-memory copy of the latest tree head. // Merge msg into our in-memory copy of the latest tree head.
when, err := c.mergeLatestMem(msg) when, err := c.mergeLatestMem(msg)
if err != nil { if err != nil {
...@@ -303,7 +355,7 @@ func (c *Conn) mergeLatest(msg []byte) error { ...@@ -303,7 +355,7 @@ func (c *Conn) mergeLatest(msg []byte) error {
// we need to merge any updates made there as well. // we need to merge any updates made there as well.
// Note that writeConfig is an atomic compare-and-swap. // Note that writeConfig is an atomic compare-and-swap.
for { for {
msg, err := c.client.ReadConfig(c.name + "/latest") msg, err := c.ops.ReadConfig(c.name + "/latest")
if err != nil { if err != nil {
return err return err
} }
...@@ -321,7 +373,7 @@ func (c *Conn) mergeLatest(msg []byte) error { ...@@ -321,7 +373,7 @@ func (c *Conn) mergeLatest(msg []byte) error {
c.latestMu.Lock() c.latestMu.Lock()
latestMsg := c.latestMsg latestMsg := c.latestMsg
c.latestMu.Unlock() c.latestMu.Unlock()
if err := c.client.WriteConfig(c.name+"/latest", msg, latestMsg); err != ErrWriteConflict { if err := c.ops.WriteConfig(c.name+"/latest", msg, latestMsg); err != ErrWriteConflict {
// Success or a non-write-conflict error. // Success or a non-write-conflict error.
return err return err
} }
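The compare-and-swap behavior that mergeLatest relies on comes from the caller's ClientOps.WriteConfig. As a minimal, single-process sketch (not part of this change; the fileOps type, its fields, and the file layout are assumptions made here for illustration), a directory-backed implementation could look like:

import (
	"bytes"
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"

	"cmd/go/internal/sumdb"
)

// fileOps is a hypothetical ClientOps implementation backed by a directory.
type fileOps struct {
	mu  sync.Mutex
	dir string
}

// WriteConfig replaces file's content with new only if it still equals old.
// Otherwise it returns sumdb.ErrWriteConflict, and mergeLatest re-reads the
// config and retries, merging any independent update it finds.
func (f *fileOps) WriteConfig(file string, old, new []byte) error {
	f.mu.Lock()
	defer f.mu.Unlock()
	name := filepath.Join(f.dir, file)
	cur, err := ioutil.ReadFile(name)
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	if !bytes.Equal(cur, old) {
		return sumdb.ErrWriteConflict
	}
	return ioutil.WriteFile(name, new, 0666)
}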
...@@ -342,7 +394,7 @@ const ( ...@@ -342,7 +394,7 @@ const (
// msgPast means msg was from before c.latest, // msgPast means msg was from before c.latest,
// msgNow means msg was exactly c.latest, and // msgNow means msg was exactly c.latest, and
// msgFuture means msg was from after c.latest, which has now been updated. // msgFuture means msg was from after c.latest, which has now been updated.
func (c *Conn) mergeLatestMem(msg []byte) (when int, err error) { func (c *Client) mergeLatestMem(msg []byte) (when int, err error) {
if len(msg) == 0 { if len(msg) == 0 {
// Accept empty msg as the unsigned, empty timeline. // Accept empty msg as the unsigned, empty timeline.
c.latestMu.Lock() c.latestMu.Lock()
...@@ -412,7 +464,7 @@ func (c *Conn) mergeLatestMem(msg []byte) (when int, err error) { ...@@ -412,7 +464,7 @@ func (c *Conn) mergeLatestMem(msg []byte) (when int, err error) {
// If an error occurs, such as malformed data or a network problem, checkTrees returns that error. // If an error occurs, such as malformed data or a network problem, checkTrees returns that error.
// If on the other hand checkTrees finds evidence of misbehavior, it prepares a detailed // If on the other hand checkTrees finds evidence of misbehavior, it prepares a detailed
// message and calls log.Fatal. // message and calls log.Fatal.
func (c *Conn) checkTrees(older tlog.Tree, olderNote []byte, newer tlog.Tree, newerNote []byte) error { func (c *Client) checkTrees(older tlog.Tree, olderNote []byte, newer tlog.Tree, newerNote []byte) error {
thr := tlog.TileHashReader(newer, &c.tileReader) thr := tlog.TileHashReader(newer, &c.tileReader)
h, err := tlog.TreeHash(older.N, thr) h, err := tlog.TreeHash(older.N, thr)
if err != nil { if err != nil {
...@@ -456,12 +508,12 @@ func (c *Conn) checkTrees(older tlog.Tree, olderNote []byte, newer tlog.Tree, ne ...@@ -456,12 +508,12 @@ func (c *Conn) checkTrees(older tlog.Tree, olderNote []byte, newer tlog.Tree, ne
fmt.Fprintf(&buf, "\n\t%v", h) fmt.Fprintf(&buf, "\n\t%v", h)
} }
} }
c.client.SecurityError(buf.String()) c.ops.SecurityError(buf.String())
return ErrSecurity return ErrSecurity
} }
// checkRecord checks that record #id's hash matches data. // checkRecord checks that record #id's hash matches data.
func (c *Conn) checkRecord(id int64, data []byte) error { func (c *Client) checkRecord(id int64, data []byte) error {
c.latestMu.Lock() c.latestMu.Lock()
latest := c.latest latest := c.latest
c.latestMu.Unlock() c.latestMu.Unlock()
...@@ -479,11 +531,11 @@ func (c *Conn) checkRecord(id int64, data []byte) error { ...@@ -479,11 +531,11 @@ func (c *Conn) checkRecord(id int64, data []byte) error {
return fmt.Errorf("cannot authenticate record data in server response") return fmt.Errorf("cannot authenticate record data in server response")
} }
// tileReader is a *Conn wrapper that implements tlog.TileReader. // tileReader is a *Client wrapper that implements tlog.TileReader.
// The separate type avoids exposing the ReadTiles and SaveTiles // The separate type avoids exposing the ReadTiles and SaveTiles
// methods on Conn itself. // methods on Client itself.
type tileReader struct { type tileReader struct {
c *Conn c *Client
} }
func (r *tileReader) Height() int { func (r *tileReader) Height() int {
...@@ -516,17 +568,17 @@ func (r *tileReader) ReadTiles(tiles []tlog.Tile) ([][]byte, error) { ...@@ -516,17 +568,17 @@ func (r *tileReader) ReadTiles(tiles []tlog.Tile) ([][]byte, error) {
} }
// tileCacheKey returns the cache key for the tile. // tileCacheKey returns the cache key for the tile.
func (c *Conn) tileCacheKey(tile tlog.Tile) string { func (c *Client) tileCacheKey(tile tlog.Tile) string {
return c.name + "/" + tile.Path() return c.name + "/" + tile.Path()
} }
// tileRemotePath returns the remote path for the tile. // tileRemotePath returns the remote path for the tile.
func (c *Conn) tileRemotePath(tile tlog.Tile) string { func (c *Client) tileRemotePath(tile tlog.Tile) string {
return "/" + tile.Path() return "/" + tile.Path()
} }
// readTile reads a single tile, either from the on-disk cache or the server. // readTile reads a single tile, either from the on-disk cache or the server.
func (c *Conn) readTile(tile tlog.Tile) ([]byte, error) { func (c *Client) readTile(tile tlog.Tile) ([]byte, error) {
type cached struct { type cached struct {
data []byte data []byte
err error err error
...@@ -534,7 +586,7 @@ func (c *Conn) readTile(tile tlog.Tile) ([]byte, error) { ...@@ -534,7 +586,7 @@ func (c *Conn) readTile(tile tlog.Tile) ([]byte, error) {
result := c.tileCache.Do(tile, func() interface{} { result := c.tileCache.Do(tile, func() interface{} {
// Try the requested tile in on-disk cache. // Try the requested tile in on-disk cache.
data, err := c.client.ReadCache(c.tileCacheKey(tile)) data, err := c.ops.ReadCache(c.tileCacheKey(tile))
if err == nil { if err == nil {
c.markTileSaved(tile) c.markTileSaved(tile)
return cached{data, nil} return cached{data, nil}
...@@ -544,9 +596,9 @@ func (c *Conn) readTile(tile tlog.Tile) ([]byte, error) { ...@@ -544,9 +596,9 @@ func (c *Conn) readTile(tile tlog.Tile) ([]byte, error) {
// We only save authenticated tiles to the on-disk cache, // We only save authenticated tiles to the on-disk cache,
// so the recreated prefix is equally authenticated. // so the recreated prefix is equally authenticated.
full := tile full := tile
full.W = 1 << tile.H full.W = 1 << uint(tile.H)
if tile != full { if tile != full {
data, err := c.client.ReadCache(c.tileCacheKey(full)) data, err := c.ops.ReadCache(c.tileCacheKey(full))
if err == nil { if err == nil {
c.markTileSaved(tile) // don't save tile later; we already have full c.markTileSaved(tile) // don't save tile later; we already have full
return cached{data[:len(data)/full.W*tile.W], nil} return cached{data[:len(data)/full.W*tile.W], nil}
...@@ -554,7 +606,7 @@ func (c *Conn) readTile(tile tlog.Tile) ([]byte, error) { ...@@ -554,7 +606,7 @@ func (c *Conn) readTile(tile tlog.Tile) ([]byte, error) {
} }
// Try requested tile from server. // Try requested tile from server.
data, err = c.client.ReadRemote(c.tileRemotePath(tile)) data, err = c.ops.ReadRemote(c.tileRemotePath(tile))
if err == nil { if err == nil {
return cached{data, nil} return cached{data, nil}
} }
...@@ -564,7 +616,7 @@ func (c *Conn) readTile(tile tlog.Tile) ([]byte, error) { ...@@ -564,7 +616,7 @@ func (c *Conn) readTile(tile tlog.Tile) ([]byte, error) {
// the tile has been completed and only the complete one // the tile has been completed and only the complete one
// is available. // is available.
if tile != full { if tile != full {
data, err := c.client.ReadRemote(c.tileRemotePath(full)) data, err := c.ops.ReadRemote(c.tileRemotePath(full))
if err == nil { if err == nil {
// Note: We could save the full tile in the on-disk cache here, // Note: We could save the full tile in the on-disk cache here,
// but we don't know if it is valid yet, and we will only find out // but we don't know if it is valid yet, and we will only find out
...@@ -585,7 +637,7 @@ func (c *Conn) readTile(tile tlog.Tile) ([]byte, error) { ...@@ -585,7 +637,7 @@ func (c *Conn) readTile(tile tlog.Tile) ([]byte, error) {
// markTileSaved records that tile is already present in the on-disk cache, // markTileSaved records that tile is already present in the on-disk cache,
// so that a future SaveTiles for that tile can be ignored. // so that a future SaveTiles for that tile can be ignored.
func (c *Conn) markTileSaved(tile tlog.Tile) { func (c *Client) markTileSaved(tile tlog.Tile) {
c.tileSavedMu.Lock() c.tileSavedMu.Lock()
c.tileSaved[tile] = true c.tileSaved[tile] = true
c.tileSavedMu.Unlock() c.tileSavedMu.Unlock()
...@@ -613,7 +665,7 @@ func (r *tileReader) SaveTiles(tiles []tlog.Tile, data [][]byte) { ...@@ -613,7 +665,7 @@ func (r *tileReader) SaveTiles(tiles []tlog.Tile, data [][]byte) {
// c.tileSaved[tile] is still true and we will not try to write it again. // c.tileSaved[tile] is still true and we will not try to write it again.
// Next time we run maybe we'll redownload it again and be // Next time we run maybe we'll redownload it again and be
// more successful. // more successful.
c.client.WriteCache(c.name+"/"+tile.Path(), data[i]) c.ops.WriteCache(c.name+"/"+tile.Path(), data[i])
} }
} }
} }
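As a worked example of the partial-tile slice in readTile above: hash tiles store one 32-byte hash per entry (the tlog package's HashSize), so a cached full tile at height H = 2 has full.W = 1<<2 = 4 entries and len(data) = 4*32 = 128 bytes; a requested partial tile of width tile.W = 3 is then recovered as the first len(data)/full.W*tile.W = 128/4*3 = 96 bytes of the cached full tile.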
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package sumweb package sumdb
import ( import (
"bytes" "bytes"
...@@ -21,7 +21,7 @@ const ( ...@@ -21,7 +21,7 @@ const (
testSignerKey = "PRIVATE+KEY+localhost.localdev/sumdb+00000c67+AXu6+oaVaOYuQOFrf1V59JK1owcFlJcHwwXHDfDGxSPk" testSignerKey = "PRIVATE+KEY+localhost.localdev/sumdb+00000c67+AXu6+oaVaOYuQOFrf1V59JK1owcFlJcHwwXHDfDGxSPk"
) )
func TestConnLookup(t *testing.T) { func TestClientLookup(t *testing.T) {
tc := newTestClient(t) tc := newTestClient(t)
tc.mustHaveLatest(1) tc.mustHaveLatest(1)
...@@ -49,7 +49,7 @@ func TestConnLookup(t *testing.T) { ...@@ -49,7 +49,7 @@ func TestConnLookup(t *testing.T) {
tc.mustHaveLatest(4) tc.mustHaveLatest(4)
} }
func TestConnBadTiles(t *testing.T) { func TestClientBadTiles(t *testing.T) {
tc := newTestClient(t) tc := newTestClient(t)
flipBits := func() { flipBits := func() {
...@@ -65,33 +65,33 @@ func TestConnBadTiles(t *testing.T) { ...@@ -65,33 +65,33 @@ func TestConnBadTiles(t *testing.T) {
// Bad tiles in initial download. // Bad tiles in initial download.
tc.mustHaveLatest(1) tc.mustHaveLatest(1)
flipBits() flipBits()
_, err := tc.conn.Lookup("rsc.io/sampler", "v1.3.0") _, err := tc.client.Lookup("rsc.io/sampler", "v1.3.0")
tc.mustError(err, "rsc.io/sampler@v1.3.0: initializing sumweb.Conn: checking tree#1: downloaded inconsistent tile") tc.mustError(err, "rsc.io/sampler@v1.3.0: initializing sumdb.Client: checking tree#1: downloaded inconsistent tile")
flipBits() flipBits()
tc.newConn() tc.newClient()
tc.mustLookup("rsc.io/sampler", "v1.3.0", "rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=") tc.mustLookup("rsc.io/sampler", "v1.3.0", "rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=")
// Bad tiles after initial download. // Bad tiles after initial download.
flipBits() flipBits()
_, err = tc.conn.Lookup("rsc.io/Quote", "v1.5.2") _, err = tc.client.Lookup("rsc.io/Quote", "v1.5.2")
tc.mustError(err, "rsc.io/Quote@v1.5.2: checking tree#3 against tree#4: downloaded inconsistent tile") tc.mustError(err, "rsc.io/Quote@v1.5.2: checking tree#3 against tree#4: downloaded inconsistent tile")
flipBits() flipBits()
tc.newConn() tc.newClient()
tc.mustLookup("rsc.io/Quote", "v1.5.2", "rsc.io/Quote v1.5.2 h1:uppercase!=") tc.mustLookup("rsc.io/Quote", "v1.5.2", "rsc.io/Quote v1.5.2 h1:uppercase!=")
// Bad starting tree hash looks like bad tiles. // Bad starting tree hash looks like bad tiles.
tc.newConn() tc.newClient()
text := tlog.FormatTree(tlog.Tree{N: 1, Hash: tlog.Hash{}}) text := tlog.FormatTree(tlog.Tree{N: 1, Hash: tlog.Hash{}})
data, err := note.Sign(&note.Note{Text: string(text)}, tc.signer) data, err := note.Sign(&note.Note{Text: string(text)}, tc.signer)
if err != nil { if err != nil {
tc.t.Fatal(err) tc.t.Fatal(err)
} }
tc.config[testName+"/latest"] = data tc.config[testName+"/latest"] = data
_, err = tc.conn.Lookup("rsc.io/sampler", "v1.3.0") _, err = tc.client.Lookup("rsc.io/sampler", "v1.3.0")
tc.mustError(err, "rsc.io/sampler@v1.3.0: initializing sumweb.Conn: checking tree#1: downloaded inconsistent tile") tc.mustError(err, "rsc.io/sampler@v1.3.0: initializing sumdb.Client: checking tree#1: downloaded inconsistent tile")
} }
func TestConnFork(t *testing.T) { func TestClientFork(t *testing.T) {
tc := newTestClient(t) tc := newTestClient(t)
tc2 := tc.fork() tc2 := tc.fork()
...@@ -109,7 +109,7 @@ func TestConnFork(t *testing.T) { ...@@ -109,7 +109,7 @@ func TestConnFork(t *testing.T) {
key := "/lookup/rsc.io/pkg1@v1.5.2" key := "/lookup/rsc.io/pkg1@v1.5.2"
tc2.remote[key] = tc.remote[key] tc2.remote[key] = tc.remote[key]
_, err := tc2.conn.Lookup("rsc.io/pkg1", "v1.5.2") _, err := tc2.client.Lookup("rsc.io/pkg1", "v1.5.2")
tc2.mustError(err, ErrSecurity.Error()) tc2.mustError(err, ErrSecurity.Error())
/* /*
...@@ -154,10 +154,10 @@ func TestConnFork(t *testing.T) { ...@@ -154,10 +154,10 @@ func TestConnFork(t *testing.T) {
} }
} }
func TestConnGONOSUMDB(t *testing.T) { func TestClientGONOSUMDB(t *testing.T) {
tc := newTestClient(t) tc := newTestClient(t)
tc.conn.SetGONOSUMDB("p,*/q") tc.client.SetGONOSUMDB("p,*/q")
tc.conn.Lookup("rsc.io/sampler", "v1.3.0") // initialize before we turn off network tc.client.Lookup("rsc.io/sampler", "v1.3.0") // initialize before we turn off network
tc.getOK = false tc.getOK = false
ok := []string{ ok := []string{
...@@ -175,13 +175,13 @@ func TestConnGONOSUMDB(t *testing.T) { ...@@ -175,13 +175,13 @@ func TestConnGONOSUMDB(t *testing.T) {
} }
for _, path := range ok { for _, path := range ok {
_, err := tc.conn.Lookup(path, "v1.0.0") _, err := tc.client.Lookup(path, "v1.0.0")
if err == ErrGONOSUMDB { if err == ErrGONOSUMDB {
t.Errorf("Lookup(%q): ErrGONOSUMDB, wanted failed actual lookup", path) t.Errorf("Lookup(%q): ErrGONOSUMDB, wanted failed actual lookup", path)
} }
} }
for _, path := range skip { for _, path := range skip {
_, err := tc.conn.Lookup(path, "v1.0.0") _, err := tc.client.Lookup(path, "v1.0.0")
if err != ErrGONOSUMDB { if err != ErrGONOSUMDB {
t.Errorf("Lookup(%q): %v, wanted ErrGONOSUMDB", path, err) t.Errorf("Lookup(%q): %v, wanted ErrGONOSUMDB", path, err)
} }
...@@ -191,7 +191,7 @@ func TestConnGONOSUMDB(t *testing.T) { ...@@ -191,7 +191,7 @@ func TestConnGONOSUMDB(t *testing.T) {
// A testClient is a self-contained client-side testing environment. // A testClient is a self-contained client-side testing environment.
type testClient struct { type testClient struct {
t *testing.T // active test t *testing.T // active test
conn *Conn // conn being tested client *Client // client being tested
tileHeight int // tile height to use (default 2) tileHeight int // tile height to use (default 2)
getOK bool // should tc.GetURL succeed? getOK bool // should tc.GetURL succeed?
getTileOK bool // should tc.GetURL of tiles succeed? getTileOK bool // should tc.GetURL of tiles succeed?
...@@ -202,12 +202,12 @@ type testClient struct { ...@@ -202,12 +202,12 @@ type testClient struct {
// mu protects config, cache, log, security // mu protects config, cache, log, security
// during concurrent use of the exported methods // during concurrent use of the exported methods
// by the conn itself (testClient is the Conn's Client, // by the client itself (testClient is the Client's ClientOps,
// and the Client methods can both read and write these fields). // and the Client methods can both read and write these fields).
// Unexported methods invoked directly by the test // Unexported methods invoked directly by the test
// (for example, addRecord) need not hold the mutex: // (for example, addRecord) need not hold the mutex:
// for proper test execution those methods should only // for proper test execution those methods should only
// be called when the Conn is idle and not using its Client. // be called when the Client is idle and not using its ClientOps.
// Not holding the mutex in those methods ensures // Not holding the mutex in those methods ensures
// that if a mistake is made, go test -race will report it. // that if a mistake is made, go test -race will report it.
// (Holding the mutex would eliminate the race report but // (Holding the mutex would eliminate the race report but
...@@ -240,7 +240,7 @@ func newTestClient(t *testing.T) *testClient { ...@@ -240,7 +240,7 @@ func newTestClient(t *testing.T) *testClient {
t.Fatal(err) t.Fatal(err)
} }
tc.newConn() tc.newClient()
tc.addRecord("rsc.io/quote@v1.5.2", `rsc.io/quote v1.5.2 h1:w5fcysjrx7yqtD/aO+QwRjYZOKnaM9Uh2b40tElTs3Y= tc.addRecord("rsc.io/quote@v1.5.2", `rsc.io/quote v1.5.2 h1:w5fcysjrx7yqtD/aO+QwRjYZOKnaM9Uh2b40tElTs3Y=
rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0= rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0=
...@@ -260,18 +260,18 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= ...@@ -260,18 +260,18 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
return tc return tc
} }
// newConn resets the Conn associated with tc. // newClient resets the Client associated with tc.
// This clears any in-memory cache from the Conn // This clears any in-memory cache from the Client
// but not tc's on-disk cache. // but not tc's on-disk cache.
func (tc *testClient) newConn() { func (tc *testClient) newClient() {
tc.conn = NewConn(tc) tc.client = NewClient(tc)
tc.conn.SetTileHeight(tc.tileHeight) tc.client.SetTileHeight(tc.tileHeight)
} }
// mustLookup does a lookup for path@vers and checks that the lines that come back match want. // mustLookup does a lookup for path@vers and checks that the lines that come back match want.
func (tc *testClient) mustLookup(path, vers, want string) { func (tc *testClient) mustLookup(path, vers, want string) {
tc.t.Helper() tc.t.Helper()
lines, err := tc.conn.Lookup(path, vers) lines, err := tc.client.Lookup(path, vers)
if err != nil { if err != nil {
tc.t.Fatal(err) tc.t.Fatal(err)
} }
...@@ -315,7 +315,7 @@ func (tc *testClient) fork() *testClient { ...@@ -315,7 +315,7 @@ func (tc *testClient) fork() *testClient {
cache: copyMap(tc.cache), cache: copyMap(tc.cache),
remote: copyMap(tc.remote), remote: copyMap(tc.remote),
} }
tc2.newConn() tc2.newClient()
return tc2 return tc2
} }
......
...@@ -2,8 +2,8 @@ ...@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// Package sumweb implements the HTTP protocols for serving or accessing a go.sum database. // Package sumdb implements the HTTP protocols for serving or accessing a module checksum database.
package sumweb package sumdb
import ( import (
"context" "context"
...@@ -12,48 +12,50 @@ import ( ...@@ -12,48 +12,50 @@ import (
"os" "os"
"strings" "strings"
"cmd/go/internal/module"
"cmd/go/internal/tlog" "cmd/go/internal/tlog"
) )
// A Server provides the external operations // A ServerOps provides the external operations
// (underlying database access and so on) // (underlying database access and so on) needed by the Server.
// needed to implement the HTTP server Handler. type ServerOps interface {
type Server interface {
// NewContext returns the context to use for the request r.
NewContext(r *http.Request) (context.Context, error)
// Signed returns the signed hash of the latest tree. // Signed returns the signed hash of the latest tree.
Signed(ctx context.Context) ([]byte, error) Signed(ctx context.Context) ([]byte, error)
// ReadRecords returns the content for the n records id through id+n-1. // ReadRecords returns the content for the n records id through id+n-1.
ReadRecords(ctx context.Context, id, n int64) ([][]byte, error) ReadRecords(ctx context.Context, id, n int64) ([][]byte, error)
// Lookup looks up a record by its associated key ("module@version"), // Lookup looks up a record for the given module,
// returning the record ID. // returning the record ID.
Lookup(ctx context.Context, key string) (int64, error) Lookup(ctx context.Context, m module.Version) (int64, error)
// ReadTileData reads the content of tile t. // ReadTileData reads the content of tile t.
// It is only invoked for hash tiles (t.L ≥ 0). // It is only invoked for hash tiles (t.L ≥ 0).
ReadTileData(ctx context.Context, t tlog.Tile) ([]byte, error) ReadTileData(ctx context.Context, t tlog.Tile) ([]byte, error)
} }
// A Handler is the go.sum database server handler, // A Server is the checksum database HTTP server,
// which should be invoked to serve the paths listed in Paths. // which implements http.Handler and should be invoked
// The calling code is responsible for initializing Server. // to serve the paths listed in ServerPaths.
type Handler struct { type Server struct {
Server Server ops ServerOps
}
// NewServer returns a new Server using the given operations.
func NewServer(ops ServerOps) *Server {
return &Server{ops: ops}
} }
// Paths are the URL paths for which Handler should be invoked. // ServerPaths are the URL paths the Server can (and should) serve.
// //
// Typically a server will do: // Typically a server will do:
// //
// handler := &sumweb.Handler{Server: srv} // srv := sumdb.NewServer(ops)
// for _, path := range sumweb.Paths { // for _, path := range sumdb.ServerPaths {
// http.HandleFunc(path, handler) // http.Handle(path, srv)
// } // }
// //
var Paths = []string{ var ServerPaths = []string{
"/lookup/", "/lookup/",
"/latest", "/latest",
"/tile/", "/tile/",
...@@ -61,12 +63,8 @@ var Paths = []string{ ...@@ -61,12 +63,8 @@ var Paths = []string{
var modVerRE = lazyregexp.New(`^[^@]+@v[0-9]+\.[0-9]+\.[0-9]+(-[^@]*)?(\+incompatible)?$`) var modVerRE = lazyregexp.New(`^[^@]+@v[0-9]+\.[0-9]+\.[0-9]+(-[^@]*)?(\+incompatible)?$`)
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx, err := h.Server.NewContext(r) ctx := r.Context()
if err != nil {
http.Error(w, err.Error(), 500)
return
}
switch { switch {
default: default:
...@@ -79,23 +77,23 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ...@@ -79,23 +77,23 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return return
} }
i := strings.Index(mod, "@") i := strings.Index(mod, "@")
encPath, encVers := mod[:i], mod[i+1:] escPath, escVers := mod[:i], mod[i+1:]
path, err := decodePath(encPath) path, err := module.UnescapePath(escPath)
if err != nil { if err != nil {
reportError(w, r, err) reportError(w, r, err)
return return
} }
vers, err := decodeVersion(encVers) vers, err := module.UnescapeVersion(escVers)
if err != nil { if err != nil {
reportError(w, r, err) reportError(w, r, err)
return return
} }
id, err := h.Server.Lookup(ctx, path+"@"+vers) id, err := s.ops.Lookup(ctx, module.Version{Path: path, Version: vers})
if err != nil { if err != nil {
reportError(w, r, err) reportError(w, r, err)
return return
} }
records, err := h.Server.ReadRecords(ctx, id, 1) records, err := s.ops.ReadRecords(ctx, id, 1)
if err != nil { if err != nil {
// This should never happen - the lookup says the record exists. // This should never happen - the lookup says the record exists.
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
...@@ -110,7 +108,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ...@@ -110,7 +108,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
return return
} }
signed, err := h.Server.Signed(ctx) signed, err := s.ops.Signed(ctx)
if err != nil { if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
return return
...@@ -120,7 +118,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ...@@ -120,7 +118,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Write(signed) w.Write(signed)
case r.URL.Path == "/latest": case r.URL.Path == "/latest":
data, err := h.Server.Signed(ctx) data, err := s.ops.Signed(ctx)
if err != nil { if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
return return
...@@ -137,7 +135,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ...@@ -137,7 +135,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if t.L == -1 { if t.L == -1 {
// Record data. // Record data.
start := t.N << uint(t.H) start := t.N << uint(t.H)
records, err := h.Server.ReadRecords(ctx, start, int64(t.W)) records, err := s.ops.ReadRecords(ctx, start, int64(t.W))
if err != nil { if err != nil {
reportError(w, r, err) reportError(w, r, err)
return return
...@@ -159,7 +157,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ...@@ -159,7 +157,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return return
} }
data, err := h.Server.ReadTileData(ctx, t) data, err := s.ops.ReadTileData(ctx, t)
if err != nil { if err != nil {
reportError(w, r, err) reportError(w, r, err)
return return
......
...@@ -2,22 +2,21 @@ ...@@ -2,22 +2,21 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
package sumweb package sumdb
import ( import (
"context" "context"
"fmt" "fmt"
"net/http"
"strings"
"sync" "sync"
"cmd/go/internal/module"
"cmd/go/internal/note" "cmd/go/internal/note"
"cmd/go/internal/tlog" "cmd/go/internal/tlog"
) )
// NewTestServer constructs a new TestServer // NewTestServer constructs a new TestServer
// that will sign its tree with the given signer key // that will sign its tree with the given signer key
// (see cmd/go/internal/note) // (see golang.org/x/mod/sumdb/note)
// and fetch new records as needed by calling gosum. // and fetch new records as needed by calling gosum.
func NewTestServer(signer string, gosum func(path, vers string) ([]byte, error)) *TestServer { func NewTestServer(signer string, gosum func(path, vers string) ([]byte, error)) *TestServer {
return &TestServer{signer: signer, gosum: gosum} return &TestServer{signer: signer, gosum: gosum}
...@@ -45,10 +44,6 @@ func (h testHashes) ReadHashes(indexes []int64) ([]tlog.Hash, error) { ...@@ -45,10 +44,6 @@ func (h testHashes) ReadHashes(indexes []int64) ([]tlog.Hash, error) {
return list, nil return list, nil
} }
func (s *TestServer) NewContext(r *http.Request) (context.Context, error) {
return nil, nil
}
func (s *TestServer) Signed(ctx context.Context) ([]byte, error) { func (s *TestServer) Signed(ctx context.Context) ([]byte, error) {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
...@@ -80,7 +75,8 @@ func (s *TestServer) ReadRecords(ctx context.Context, id, n int64) ([][]byte, er ...@@ -80,7 +75,8 @@ func (s *TestServer) ReadRecords(ctx context.Context, id, n int64) ([][]byte, er
return list, nil return list, nil
} }
func (s *TestServer) Lookup(ctx context.Context, key string) (int64, error) { func (s *TestServer) Lookup(ctx context.Context, m module.Version) (int64, error) {
key := m.String()
s.mu.Lock() s.mu.Lock()
id, ok := s.lookup[key] id, ok := s.lookup[key]
s.mu.Unlock() s.mu.Unlock()
...@@ -89,12 +85,7 @@ func (s *TestServer) Lookup(ctx context.Context, key string) (int64, error) { ...@@ -89,12 +85,7 @@ func (s *TestServer) Lookup(ctx context.Context, key string) (int64, error) {
} }
// Look up module and compute go.sum lines. // Look up module and compute go.sum lines.
i := strings.Index(key, "@") data, err := s.gosum(m.Path, m.Version)
if i < 0 {
return 0, fmt.Errorf("invalid lookup key %q", key)
}
path, vers := key[:i], key[i+1:]
data, err := s.gosum(path, vers)
if err != nil { if err != nil {
return 0, err return 0, err
} }
......
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// FS-safe encoding of module paths and versions.
// Copied from cmd/go/internal/module and unexported.
package sumweb
import (
"fmt"
"unicode/utf8"
)
// Safe encodings
//
// Module paths appear as substrings of file system paths
// (in the download cache) and of web server URLs in the proxy protocol.
// In general we cannot rely on file systems to be case-sensitive,
// nor can we rely on web servers, since they read from file systems.
// That is, we cannot rely on the file system to keep rsc.io/QUOTE
// and rsc.io/quote separate. Windows and macOS don't.
// Instead, we must never require two different casings of a file path.
// Because we want the download cache to match the proxy protocol,
// and because we want the proxy protocol to be possible to serve
// from a tree of static files (which might be stored on a case-insensitive
// file system), the proxy protocol must never require two different casings
// of a URL path either.
//
// One possibility would be to make the safe encoding be the lowercase
// hexadecimal encoding of the actual path bytes. This would avoid ever
// needing different casings of a file path, but it would be fairly illegible
// to most programmers when those paths appeared in the file system
// (including in file paths in compiler errors and stack traces),
// in web server logs, and so on. Instead, we want a safe encoding that
// leaves most paths unaltered.
//
// The safe encoding is this:
// replace every uppercase letter with an exclamation mark
// followed by the letter's lowercase equivalent.
//
// For example,
// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
//
// Import paths that avoid upper-case letters are left unchanged.
// Note that because import paths are ASCII-only and avoid various
// problematic punctuation (like : < and >), the safe encoding is also ASCII-only
// and avoids the same problematic punctuation.
//
// Import paths have never allowed exclamation marks, so there is no
// need to define how to encode a literal !.
//
// Although paths are disallowed from using Unicode (see pathOK above),
// the eventual plan is to allow Unicode letters as well, to assume that
// file systems and URLs are Unicode-safe (storing UTF-8), and apply
// the !-for-uppercase convention. Note however that not all runes that
// are different but case-fold equivalent are an upper/lower pair.
// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
// are considered to case-fold to each other. When we do add Unicode
// letters, we must not assume that upper/lower are the only case-equivalent pairs.
// Perhaps the Kelvin symbol would be disallowed entirely, for example.
// Or perhaps it would encode as "!!k", or perhaps as "(212A)".
//
// Also, it would be nice to allow Unicode marks as well as letters,
// but marks include combining marks, and then we must deal not
// only with case folding but also normalization: both U+00E9 ('é')
// and U+0065 U+0301 ('e' followed by combining acute accent)
// look the same on the page and are treated by some file systems
// as the same path. If we do allow Unicode marks in paths, there
// must be some kind of normalization to allow only one canonical
// encoding of any character used in an import path.
// encodePath returns the safe encoding of the given module path.
// It fails if the module path is invalid.
func encodePath(path string) (encoding string, err error) {
return encodeString(path)
}
// encodeVersion returns the safe encoding of the given module version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func encodeVersion(v string) (encoding string, err error) {
return encodeString(v)
}
func encodeString(s string) (encoding string, err error) {
haveUpper := false
for _, r := range s {
if r == '!' || r >= utf8.RuneSelf {
// This should be disallowed by CheckPath, but diagnose anyway.
// The correctness of the encoding loop below depends on it.
return "", fmt.Errorf("internal error: inconsistency in EncodePath")
}
if 'A' <= r && r <= 'Z' {
haveUpper = true
}
}
if !haveUpper {
return s, nil
}
var buf []byte
for _, r := range s {
if 'A' <= r && r <= 'Z' {
buf = append(buf, '!', byte(r+'a'-'A'))
} else {
buf = append(buf, byte(r))
}
}
return string(buf), nil
}
// decodePath returns the module path of the given safe encoding.
// It fails if the encoding is invalid or encodes an invalid path.
func decodePath(encoding string) (path string, err error) {
path, ok := decodeString(encoding)
if !ok {
return "", fmt.Errorf("invalid module path encoding %q", encoding)
}
return path, nil
}
// decodeVersion returns the version string for the given safe encoding.
// It fails if the encoding is invalid or encodes an invalid version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func decodeVersion(encoding string) (v string, err error) {
v, ok := decodeString(encoding)
if !ok {
return "", fmt.Errorf("invalid version encoding %q", encoding)
}
return v, nil
}
func decodeString(encoding string) (string, bool) {
var buf []byte
bang := false
for _, r := range encoding {
if r >= utf8.RuneSelf {
return "", false
}
if bang {
bang = false
if r < 'a' || 'z' < r {
return "", false
}
buf = append(buf, byte(r+'A'-'a'))
continue
}
if r == '!' {
bang = true
continue
}
if 'A' <= r && r <= 'Z' {
return "", false
}
buf = append(buf, byte(r))
}
if bang {
return "", false
}
return string(buf), true
}
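With this change the escaping helpers live in cmd/go/internal/module as exported functions, so an illustrative round trip with the replacements (following the !-for-uppercase rule described above) should look like:

esc, err := module.EscapePath("github.com/Azure/azure-sdk-for-go")
// esc == "github.com/!azure/azure-sdk-for-go", err == nil
path, err := module.UnescapePath(esc)
// path == "github.com/Azure/azure-sdk-for-go", err == nil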
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sumweb
import "testing"
var encodeTests = []struct {
path string
enc string // empty means same as path
}{
{path: "ascii.com/abcdefghijklmnopqrstuvwxyz.-+/~_0123456789"},
{path: "github.com/GoogleCloudPlatform/omega", enc: "github.com/!google!cloud!platform/omega"},
}
func TestEncodePath(t *testing.T) {
// Check encodings.
for _, tt := range encodeTests {
enc, err := encodePath(tt.path)
if err != nil {
t.Errorf("encodePath(%q): unexpected error: %v", tt.path, err)
continue
}
want := tt.enc
if want == "" {
want = tt.path
}
if enc != want {
t.Errorf("encodePath(%q) = %q, want %q", tt.path, enc, want)
}
}
}
var badDecode = []string{
"github.com/GoogleCloudPlatform/omega",
"github.com/!google!cloud!platform!/omega",
"github.com/!0google!cloud!platform/omega",
"github.com/!_google!cloud!platform/omega",
"github.com/!!google!cloud!platform/omega",
}
func TestDecodePath(t *testing.T) {
// Check invalid decodings.
for _, bad := range badDecode {
_, err := decodePath(bad)
if err == nil {
t.Errorf("DecodePath(%q): succeeded, want error (invalid decoding)", bad)
}
}
// Check encodings.
for _, tt := range encodeTests {
enc := tt.enc
if enc == "" {
enc = tt.path
}
path, err := decodePath(enc)
if err != nil {
t.Errorf("decodePath(%q): unexpected error: %v", enc, err)
continue
}
if path != tt.path {
t.Errorf("decodePath(%q) = %q, want %q", enc, path, tt.path)
}
}
}
...@@ -41,7 +41,7 @@ func FormatTree(tree Tree) []byte { ...@@ -41,7 +41,7 @@ func FormatTree(tree Tree) []byte {
var errMalformedTree = errors.New("malformed tree note") var errMalformedTree = errors.New("malformed tree note")
var treePrefix = []byte("go.sum database tree\n") var treePrefix = []byte("go.sum database tree\n")
// ParseTree parses a tree root description. // ParseTree parses a formatted tree root description.
func ParseTree(text []byte) (tree Tree, err error) { func ParseTree(text []byte) (tree Tree, err error) {
// The message looks like: // The message looks like:
// //
......
...@@ -33,6 +33,9 @@ import ( ...@@ -33,6 +33,9 @@ import (
// The special level L=-1 holds raw record data instead of hashes. // The special level L=-1 holds raw record data instead of hashes.
// In this case, the level encodes into a tile path as the path element // In this case, the level encodes into a tile path as the path element
// "data" instead of "-1". // "data" instead of "-1".
//
// See also https://golang.org/design/25530-sumdb#checksum-database
// and https://research.swtch.com/tlog#tiling_a_log.
type Tile struct { type Tile struct {
H int // height of tile (1 ≤ H ≤ 30) H int // height of tile (1 ≤ H ≤ 30)
L int // level in tiling (-1 ≤ L ≤ 63) L int // level in tiling (-1 ≤ L ≤ 63)
...@@ -40,11 +43,13 @@ type Tile struct { ...@@ -40,11 +43,13 @@ type Tile struct {
W int // width of tile (1 ≤ W ≤ 2**H; 2**H is complete tile) W int // width of tile (1 ≤ W ≤ 2**H; 2**H is complete tile)
} }
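For orientation, here is an illustrative sketch of how a Tile maps to the path used in the client's tile cache keys and remote URLs above (assuming fmt and the tlog package are imported). The exact output of Tile.Path is recalled here for illustration rather than restated by this diff, so treat the strings as approximate:

t := tlog.Tile{H: 2, L: 0, N: 15, W: 4}
fmt.Println(t.Path()) // "tile/2/0/015"      complete tile (W == 1<<H)
t.W = 3
fmt.Println(t.Path()) // "tile/2/0/015.p/3"  partial tile of width 3
t.L = -1
t.W = 4
fmt.Println(t.Path()) // "tile/2/data/015"   record data, level encoded as "data"
// Large indexes are split into 3-digit groups, e.g. N = 1234067 -> ".../x001/x234/067".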
// TileForIndex returns the tile of height h ≥ 1 // TileForIndex returns the tile of fixed height h ≥ 1
// and least width storing the given hash storage index. // and least width storing the given hash storage index.
//
// If h ≤ 0, TileForIndex panics.
func TileForIndex(h int, index int64) Tile { func TileForIndex(h int, index int64) Tile {
if h < 1 { if h <= 0 {
panic("TileForIndex: invalid height") panic(fmt.Sprintf("TileForIndex: invalid height %d", h))
} }
t, _, _ := tileForIndex(h, index) t, _, _ := tileForIndex(h, index)
return t return t
...@@ -99,8 +104,10 @@ func tileHash(data []byte) Hash { ...@@ -99,8 +104,10 @@ func tileHash(data []byte) Hash {
// that must be published when publishing from a tree of // that must be published when publishing from a tree of
// size newTreeSize to replace a tree of size oldTreeSize. // size newTreeSize to replace a tree of size oldTreeSize.
// (No tiles need to be published for a tree of size zero.) // (No tiles need to be published for a tree of size zero.)
//
// If h ≤ 0, TileForIndex panics.
func NewTiles(h int, oldTreeSize, newTreeSize int64) []Tile { func NewTiles(h int, oldTreeSize, newTreeSize int64) []Tile {
if h < 1 { if h <= 0 {
panic(fmt.Sprintf("NewTiles: invalid height %d", h)) panic(fmt.Sprintf("NewTiles: invalid height %d", h))
} }
H := uint(h) H := uint(h)
...@@ -244,6 +251,16 @@ type TileReader interface { ...@@ -244,6 +251,16 @@ type TileReader interface {
// a data record for each tile (len(data) == len(tiles)) // a data record for each tile (len(data) == len(tiles))
// and each data record must be the correct length // and each data record must be the correct length
// (len(data[i]) == tiles[i].W*HashSize). // (len(data[i]) == tiles[i].W*HashSize).
//
// An implementation of ReadTiles typically reads
// them from an on-disk cache or else from a remote
// tile server. Tile data downloaded from a server should
// be considered suspect and not saved into a persistent
// on-disk cache before returning from ReadTiles.
// When the client confirms the validity of the tile data,
// it will call SaveTiles to signal that they can be safely
// written to persistent storage.
// See also https://research.swtch.com/tlog#authenticating_tiles.
ReadTiles(tiles []Tile) (data [][]byte, err error) ReadTiles(tiles []Tile) (data [][]byte, err error)
// SaveTiles informs the TileReader that the tile data // SaveTiles informs the TileReader that the tile data
......
...@@ -5,9 +5,6 @@ ...@@ -5,9 +5,6 @@
// Package tlog implements a tamper-evident log // Package tlog implements a tamper-evident log
// used in the Go module go.sum database server. // used in the Go module go.sum database server.
// //
// This package is part of a DRAFT of what the go.sum database server will look like.
// Do not assume the details here are final!
//
// This package follows the design of Certificate Transparency (RFC 6962) // This package follows the design of Certificate Transparency (RFC 6962)
// and its proofs are compatible with that system. // and its proofs are compatible with that system.
// See TestCertificateTransparency. // See TestCertificateTransparency.
......
...@@ -29,7 +29,7 @@ import ( ...@@ -29,7 +29,7 @@ import (
"cmd/go/internal/module" "cmd/go/internal/module"
"cmd/go/internal/par" "cmd/go/internal/par"
"cmd/go/internal/semver" "cmd/go/internal/semver"
"cmd/go/internal/sumweb" "cmd/go/internal/sumdb"
"cmd/go/internal/txtar" "cmd/go/internal/txtar"
) )
...@@ -65,7 +65,7 @@ func StartProxy() { ...@@ -65,7 +65,7 @@ func StartProxy() {
// Prepopulate main sumdb. // Prepopulate main sumdb.
for _, mod := range modList { for _, mod := range modList {
sumdbHandler.Server.Lookup(nil, mod.Path+"@"+mod.Version) sumdbOps.Lookup(nil, mod)
} }
}) })
} }
...@@ -88,7 +88,7 @@ func readModList() { ...@@ -88,7 +88,7 @@ func readModList() {
continue continue
} }
encPath := strings.ReplaceAll(name[:i], "_", "/") encPath := strings.ReplaceAll(name[:i], "_", "/")
path, err := module.DecodePath(encPath) path, err := module.UnescapePath(encPath)
if err != nil { if err != nil {
if encPath != "example.com/invalidpath/v1" { if encPath != "example.com/invalidpath/v1" {
fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err) fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err)
...@@ -96,7 +96,7 @@ func readModList() { ...@@ -96,7 +96,7 @@ func readModList() {
continue continue
} }
encVers := name[i+1:] encVers := name[i+1:]
vers, err := module.DecodeVersion(encVers) vers, err := module.UnescapeVersion(encVers)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err) fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err)
continue continue
...@@ -113,8 +113,13 @@ const ( ...@@ -113,8 +113,13 @@ const (
testSumDBSignerKey = "PRIVATE+KEY+localhost.localdev/sumdb+00000c67+AXu6+oaVaOYuQOFrf1V59JK1owcFlJcHwwXHDfDGxSPk" testSumDBSignerKey = "PRIVATE+KEY+localhost.localdev/sumdb+00000c67+AXu6+oaVaOYuQOFrf1V59JK1owcFlJcHwwXHDfDGxSPk"
) )
var sumdbHandler = &sumweb.Handler{Server: sumweb.NewTestServer(testSumDBSignerKey, proxyGoSum)} var (
var sumdbWrongHandler = &sumweb.Handler{Server: sumweb.NewTestServer(testSumDBSignerKey, proxyGoSumWrong)} sumdbOps = sumdb.NewTestServer(testSumDBSignerKey, proxyGoSum)
sumdbServer = sumdb.NewServer(sumdbOps)
sumdbWrongOps = sumdb.NewTestServer(testSumDBSignerKey, proxyGoSumWrong)
sumdbWrongServer = sumdb.NewServer(sumdbWrongOps)
)
// proxyHandler serves the Go module proxy protocol. // proxyHandler serves the Go module proxy protocol.
// See the proxy section of https://research.swtch.com/vgo-module. // See the proxy section of https://research.swtch.com/vgo-module.
...@@ -155,7 +160,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) { ...@@ -155,7 +160,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
// (Client thinks it is talking directly to a sumdb.) // (Client thinks it is talking directly to a sumdb.)
if strings.HasPrefix(path, "sumdb-direct/") { if strings.HasPrefix(path, "sumdb-direct/") {
r.URL.Path = path[len("sumdb-direct"):] r.URL.Path = path[len("sumdb-direct"):]
sumdbHandler.ServeHTTP(w, r) sumdbServer.ServeHTTP(w, r)
return return
} }
...@@ -164,7 +169,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) { ...@@ -164,7 +169,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
// (Client thinks it is talking directly to a sumdb.) // (Client thinks it is talking directly to a sumdb.)
if strings.HasPrefix(path, "sumdb-wrong/") { if strings.HasPrefix(path, "sumdb-wrong/") {
r.URL.Path = path[len("sumdb-wrong"):] r.URL.Path = path[len("sumdb-wrong"):]
sumdbWrongHandler.ServeHTTP(w, r) sumdbWrongServer.ServeHTTP(w, r)
return return
} }
...@@ -178,7 +183,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) { ...@@ -178,7 +183,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
// Request for $GOPROXY/sumdb/<name>/... goes to sumdb. // Request for $GOPROXY/sumdb/<name>/... goes to sumdb.
if sumdbPrefix := "sumdb/" + testSumDBName + "/"; strings.HasPrefix(path, sumdbPrefix) { if sumdbPrefix := "sumdb/" + testSumDBName + "/"; strings.HasPrefix(path, sumdbPrefix) {
r.URL.Path = path[len(sumdbPrefix)-1:] r.URL.Path = path[len(sumdbPrefix)-1:]
sumdbHandler.ServeHTTP(w, r) sumdbServer.ServeHTTP(w, r)
return return
} }
...@@ -187,7 +192,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) { ...@@ -187,7 +192,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
// latest version, including pseudo-versions. // latest version, including pseudo-versions.
if i := strings.LastIndex(path, "/@latest"); i >= 0 { if i := strings.LastIndex(path, "/@latest"); i >= 0 {
enc := path[:i] enc := path[:i]
modPath, err := module.DecodePath(enc) modPath, err := module.UnescapePath(enc)
if err != nil { if err != nil {
if !quiet { if !quiet {
fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err) fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err)
...@@ -225,7 +230,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) { ...@@ -225,7 +230,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
return return
} }
encVers, err := module.EncodeVersion(latest) encVers, err := module.EscapeVersion(latest)
if err != nil { if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
return return
...@@ -240,7 +245,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) { ...@@ -240,7 +245,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
return return
} }
enc, file := path[:i], path[i+len("/@v/"):] enc, file := path[:i], path[i+len("/@v/"):]
path, err := module.DecodePath(enc) path, err := module.UnescapePath(enc)
if err != nil { if err != nil {
if !quiet { if !quiet {
fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err) fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err)
...@@ -276,7 +281,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) { ...@@ -276,7 +281,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
return return
} }
encVers, ext := file[:i], file[i+1:] encVers, ext := file[:i], file[i+1:]
vers, err := module.DecodeVersion(encVers) vers, err := module.UnescapeVersion(encVers)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err) fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err)
http.NotFound(w, r) http.NotFound(w, r)
...@@ -397,11 +402,11 @@ var archiveCache par.Cache ...@@ -397,11 +402,11 @@ var archiveCache par.Cache
var cmdGoDir, _ = os.Getwd() var cmdGoDir, _ = os.Getwd()
func readArchive(path, vers string) (*txtar.Archive, error) { func readArchive(path, vers string) (*txtar.Archive, error) {
enc, err := module.EncodePath(path) enc, err := module.EscapePath(path)
if err != nil { if err != nil {
return nil, err return nil, err
} }
encVers, err := module.EncodeVersion(vers) encVers, err := module.EscapeVersion(vers)
if err != nil { if err != nil {
return nil, err return nil, err
} }
......