Commit c481d4a9 authored by Nick Thomas

Merge branch '85-redis-aware' into 'master'

Redis-Awareness

Closes #85

See merge request !112
parents 55e48636 b4eb6fc2
...@@ -62,6 +62,8 @@ Options:
How long to wait for response headers when proxying the request (default 5m0s)
-secretPath string
File with secret key to authenticate with authBackend (default "./.gitlab_workhorse_secret")
-config string
File that holds the configuration. Currently only for Redis. File is in TOML format (default "")
-version
Print version and exit
```
...@@ -74,6 +76,37 @@ Gitlab-workhorse can listen on either a TCP or a Unix domain socket. It
can also open a second TCP listening socket with the Go
[net/http/pprof profiler server](http://golang.org/pkg/net/http/pprof/).
Gitlab-workhorse can also listen for Redis events (currently only builds/register
for runners). This requires you to pass a valid TOML config file via the
`-config` flag.
For regular setups it only requires the following (replacing the string
with the actual socket):
```
[redis]
URL = "unix:///var/run/gitlab/redis.sock"
Password = "my_awesome_password"
Sentinel = [ "tcp://sentinel1:23456", "tcp://sentinel2:23456" ]
SentinelMaster = "mymaster"
```
- `URL` takes a string in the format `unix://path/to/redis.sock` or
`tcp://host:port`.
- `Password` is only required if your Redis instance is password-protected.
- `Sentinel` is used if you are using Sentinel.

*NOTE* that if both `Sentinel` and `URL` are given, only `Sentinel` will be used.
Optional fields are as follows:
```
[redis]
ReadTimeout = 1000
MaxIdle = 1
MaxActive = 1
```
- `ReadTimeout` is the maximum time, in milliseconds, that a Redis read command may take. Defaults to `1000`.
- `MaxIdle` is how many idle connections can be kept in the Redis pool at once. Defaults to `1`.
- `MaxActive` is the maximum number of connections the pool can allocate at a given time. Defaults to `1`.
### Relative URL support
If you are mounting GitLab at a relative URL, e.g.
...
...@@ -3,16 +3,49 @@ package config
import (
"net/url"
"time"
"github.com/BurntSushi/toml"
)
type TomlURL struct {
url.URL
}
func (u *TomlURL) UnmarshalText(text []byte) error {
temp, err := url.Parse(string(text))
if err != nil {
return err
}
u.URL = *temp
return nil
}
type RedisConfig struct {
URL TomlURL
Sentinel []TomlURL
SentinelMaster string
Password string
ReadTimeout *int
MaxIdle *int
MaxActive *int
}
type Config struct {
Redis *RedisConfig `toml:"redis"`
Backend *url.URL `toml:"-"`
Version string `toml:"-"`
DocumentRoot string `toml:"-"`
DevelopmentMode bool `toml:"-"`
Socket string `toml:"-"`
ProxyHeadersTimeout time.Duration `toml:"-"`
APILimit uint `toml:"-"`
APIQueueLimit uint `toml:"-"`
APIQueueTimeout time.Duration `toml:"-"`
}
// LoadConfig from a file
func LoadConfig(filename string) (*Config, error) {
cfg := &Config{}
if _, err := toml.DecodeFile(filename, cfg); err != nil {
return nil, err
}
return cfg, nil
}
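
For illustration, here is a minimal sketch of exercising `LoadConfig` and the `TomlURL` text unmarshalling above. It assumes it sits next to `config.go` inside the workhorse tree (for example in a test file); the temporary file handling and the field values are illustrative assumptions, not part of this merge request.

```go
package config

import (
	"fmt"
	"io/ioutil"
	"os"
)

func ExampleLoadConfig() {
	// Hypothetical TOML content matching the RedisConfig fields above.
	data := []byte("[redis]\nURL = \"tcp://localhost:6379\"\nMaxIdle = 4\n")

	f, err := ioutil.TempFile("", "workhorse-config")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	if _, err := f.Write(data); err != nil {
		panic(err)
	}
	f.Close()

	cfg, err := LoadConfig(f.Name())
	if err != nil {
		panic(err)
	}
	// TomlURL.UnmarshalText has turned the URL string into a parsed url.URL.
	fmt.Println(cfg.Redis.URL.Scheme, cfg.Redis.URL.Host, *cfg.Redis.MaxIdle)
	// Output: tcp localhost:6379 4
}
```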
package redis
import (
"errors"
"fmt"
"log"
"strings"
"sync"
"time"
"gitlab.com/gitlab-org/gitlab-workhorse/internal/helper"
"github.com/garyburd/redigo/redis"
"github.com/jpillora/backoff"
"github.com/prometheus/client_golang/prometheus"
)
var (
keyWatcher = make(map[string][]chan string)
keyWatcherMutex sync.Mutex
redisReconnectTimeout = backoff.Backoff{
//These are the defaults
Min: 100 * time.Millisecond,
Max: 60 * time.Second,
Factor: 2,
Jitter: true,
}
keyWatchers = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "gitlab_workhorse_keywatcher_keywatchers",
Help: "The number of keys that are being watched by gitlab-workhorse",
},
)
totalMessages = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "gitlab_workhorse_keywatcher_total_messages",
Help: "How many messages gitlab-workhorse has received in total on pubsub.",
},
)
)
func init() {
prometheus.MustRegister(
keyWatchers,
totalMessages,
)
}
const (
keySubChannel = "workhorse:notifications"
promStatusMiss = "miss"
promStatusHit = "hit"
)
// KeyChan holds a key and a channel
type KeyChan struct {
Key string
Chan chan string
}
func processInner(conn redis.Conn) {
redisReconnectTimeout.Reset()
defer conn.Close()
psc := redis.PubSubConn{Conn: conn}
if err := psc.Subscribe(keySubChannel); err != nil {
return
}
defer psc.Unsubscribe(keySubChannel)
for {
switch v := psc.Receive().(type) {
case redis.Message:
totalMessages.Inc()
msg := strings.SplitN(string(v.Data), "=", 2)
if len(msg) != 2 {
helper.LogError(nil, errors.New("Redis subscribe error: got an invalid notification"))
continue
}
key, value := msg[0], msg[1]
notifyChanWatchers(key, value)
case error:
helper.LogError(nil, fmt.Errorf("Redis subscribe error: %s", v))
return
}
}
}
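
For context, a hedged sketch of the producer side of this wire format: the subscriber above splits each pub/sub payload on the first `=` into a key and its new value. In practice the publisher is the GitLab Rails application; the snippet below simulates it directly with redigo, and the Redis address and key name are assumptions.

```go
package main

import (
	"log"

	"github.com/garyburd/redigo/redis"
)

func main() {
	// Assumed local Redis address; adjust for your setup.
	conn, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// processInner expects "<key>=<new value>" on the workhorse:notifications channel.
	if _, err := conn.Do("PUBLISH", "workhorse:notifications", "runner:build_queue:10=finished"); err != nil {
		log.Fatal(err)
	}
}
```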
// Process redis subscriptions
//
// NOTE: There Can Only Be One!
// Reconnects if reconnect is true
func Process(reconnect bool) {
log.Print("Processing redis queue")
loop := true
for loop {
loop = reconnect
log.Println("Connecting to redis")
conn, err := redisDialFunc()
if err != nil {
helper.LogError(nil, fmt.Errorf("Failed to connect to redis: %s", err))
time.Sleep(redisReconnectTimeout.Duration())
continue
}
processInner(conn)
}
}
func notifyChanWatchers(key, value string) {
keyWatcherMutex.Lock()
defer keyWatcherMutex.Unlock()
if chanList, ok := keyWatcher[key]; ok {
for _, c := range chanList {
c <- value
keyWatchers.Dec()
}
delete(keyWatcher, key)
}
}
func addKeyChan(kc *KeyChan) {
keyWatcherMutex.Lock()
defer keyWatcherMutex.Unlock()
keyWatcher[kc.Key] = append(keyWatcher[kc.Key], kc.Chan)
keyWatchers.Inc()
}
func delKeyChan(kc *KeyChan) {
keyWatcherMutex.Lock()
defer keyWatcherMutex.Unlock()
if chans, ok := keyWatcher[kc.Key]; ok {
for i, c := range chans {
if kc.Chan == c {
keyWatcher[kc.Key] = append(chans[:i], chans[i+1:]...)
keyWatchers.Dec()
break
}
}
if len(keyWatcher[kc.Key]) == 0 {
delete(keyWatcher, kc.Key)
}
}
}
// WatchKeyStatus is used to tell how WatchKey returned
type WatchKeyStatus int
const (
// WatchKeyStatusTimeout is returned when the watch timeout provided by the caller was exceeded
WatchKeyStatusTimeout WatchKeyStatus = iota
// WatchKeyStatusAlreadyChanged is returned when the value passed by the caller was never observed
WatchKeyStatusAlreadyChanged
// WatchKeyStatusSeenChange is returned when we have seen the value passed by the caller get changed
WatchKeyStatusSeenChange
// WatchKeyStatusNoChange is returned when the function had to return before observing a change.
// Also returned on errors.
WatchKeyStatusNoChange
)
// WatchKey waits for a key to be updated or expired
func WatchKey(key, value string, timeout time.Duration) (WatchKeyStatus, error) {
kw := &KeyChan{
Key: key,
Chan: make(chan string, 1),
}
addKeyChan(kw)
defer delKeyChan(kw)
currentValue, err := GetString(key)
if err != nil {
return WatchKeyStatusNoChange, fmt.Errorf("Failed to get value from Redis: %#v", err)
}
if currentValue != value {
return WatchKeyStatusAlreadyChanged, nil
}
select {
case currentValue := <-kw.Chan:
if currentValue == "" {
return WatchKeyStatusNoChange, fmt.Errorf("Failed to get value from Redis")
}
if currentValue == value {
return WatchKeyStatusNoChange, nil
}
return WatchKeyStatusSeenChange, nil
case <-time.After(timeout):
return WatchKeyStatusTimeout, nil
}
}
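
A hedged usage sketch of `WatchKey` follows. The key, the last-seen value, and the surrounding handler are illustrative assumptions, and it presupposes that `Configure` and `Process` have already been called, as `main.go` does further below; it would also need to live inside the workhorse tree because the package is internal.

```go
package main

import (
	"fmt"
	"time"

	"gitlab.com/gitlab-org/gitlab-workhorse/internal/redis"
)

// pollBuildQueue shows how a long-polling caller might interpret the
// WatchKeyStatus values; the key and value are illustrative.
func pollBuildQueue() {
	status, err := redis.WatchKey("runner:build_queue:10", "pending", 30*time.Second)
	if err != nil {
		fmt.Println("Redis unavailable, falling back:", err)
		return
	}

	switch status {
	case redis.WatchKeyStatusSeenChange, redis.WatchKeyStatusAlreadyChanged:
		fmt.Println("queue changed; the client should retry immediately")
	case redis.WatchKeyStatusTimeout, redis.WatchKeyStatusNoChange:
		fmt.Println("no change observed; the client should back off")
	}
}

func main() {
	pollBuildQueue()
}
```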
package redis
import (
"sync"
"testing"
"time"
"github.com/rafaeljusto/redigomock"
"github.com/stretchr/testify/assert"
)
const (
runnerKey = "runner:build_queue:10"
)
func createSubscriptionMessage(key, data string) []interface{} {
return []interface{}{
[]byte("message"),
[]byte(key),
[]byte(data),
}
}
func createSubscribeMessage(key string) []interface{} {
return []interface{}{
[]byte("subscribe"),
[]byte(key),
[]byte("1"),
}
}
func createUnsubscribeMessage(key string) []interface{} {
return []interface{}{
[]byte("unsubscribe"),
[]byte(key),
[]byte("1"),
}
}
func TestWatchKeySeenChange(t *testing.T) {
mconn, td := setupMockPool()
defer td()
go Process(false)
// Setup the initial subscription message
mconn.Command("SUBSCRIBE", keySubChannel).
Expect(createSubscribeMessage(keySubChannel))
mconn.Command("UNSUBSCRIBE", keySubChannel).
Expect(createUnsubscribeMessage(keySubChannel))
mconn.Command("GET", runnerKey).
Expect("something").
Expect("somethingelse")
mconn.ReceiveWait = true
mconn.AddSubscriptionMessage(createSubscriptionMessage(keySubChannel, runnerKey+"=somethingelse"))
// ACTUALLY Fill the buffers
go func(mconn *redigomock.Conn) {
mconn.ReceiveNow <- true
mconn.ReceiveNow <- true
mconn.ReceiveNow <- true
}(mconn)
val, err := WatchKey(runnerKey, "something", time.Duration(1*time.Second))
assert.NoError(t, err, "Expected no error")
assert.Equal(t, WatchKeyStatusSeenChange, val, "Expected value to change")
}
func TestWatchKeyNoChange(t *testing.T) {
mconn, td := setupMockPool()
defer td()
go Process(false)
// Setup the initial subscription message
mconn.Command("SUBSCRIBE", keySubChannel).
Expect(createSubscribeMessage(keySubChannel))
mconn.Command("UNSUBSCRIBE", keySubChannel).
Expect(createUnsubscribeMessage(keySubChannel))
mconn.Command("GET", runnerKey).
Expect("something").
Expect("something")
mconn.ReceiveWait = true
mconn.AddSubscriptionMessage(createSubscriptionMessage(keySubChannel, runnerKey+"=something"))
// ACTUALLY Fill the buffers
go func(mconn *redigomock.Conn) {
mconn.ReceiveNow <- true
mconn.ReceiveNow <- true
mconn.ReceiveNow <- true
}(mconn)
val, err := WatchKey(runnerKey, "something", time.Duration(1*time.Second))
assert.NoError(t, err, "Expected no error")
assert.Equal(t, WatchKeyStatusNoChange, val, "Expected notification without change to value")
}
func TestWatchKeyTimeout(t *testing.T) {
mconn, td := setupMockPool()
defer td()
go Process(false)
// Setup the initial subscription message
mconn.Command("SUBSCRIBE", keySubChannel).
Expect(createSubscribeMessage(keySubChannel))
mconn.Command("UNSUBSCRIBE", keySubChannel).
Expect(createUnsubscribeMessage(keySubChannel))
mconn.Command("GET", runnerKey).
Expect("something").
Expect("something")
mconn.ReceiveWait = true
// ACTUALLY Fill the buffers
go func(mconn *redigomock.Conn) {
mconn.ReceiveNow <- true
mconn.ReceiveNow <- true
mconn.ReceiveNow <- true
}(mconn)
val, err := WatchKey(runnerKey, "something", time.Duration(1*time.Second))
assert.NoError(t, err, "Expected no error")
assert.Equal(t, WatchKeyStatusTimeout, val, "Expected value to not change")
}
func TestWatchKeyAlreadyChanged(t *testing.T) {
mconn, td := setupMockPool()
defer td()
go Process(false)
// Setup the initial subscription message
mconn.Command("SUBSCRIBE", keySubChannel).
Expect(createSubscribeMessage(keySubChannel))
mconn.Command("UNSUBSCRIBE", keySubChannel).
Expect(createUnsubscribeMessage(keySubChannel))
mconn.Command("GET", runnerKey).
Expect("somethingelse").
Expect("somethingelse")
mconn.ReceiveWait = true
// ACTUALLY Fill the buffers
go func(mconn *redigomock.Conn) {
mconn.ReceiveNow <- true
mconn.ReceiveNow <- true
mconn.ReceiveNow <- true
}(mconn)
val, err := WatchKey(runnerKey, "something", time.Duration(1*time.Second))
assert.NoError(t, err, "Expected no error")
assert.Equal(t, WatchKeyStatusAlreadyChanged, val, "Expected value to have already changed")
}
func TestWatchKeyMassiveParallel(t *testing.T) {
mconn, td := setupMockPool()
defer td()
go Process(false)
// Setup the initial subscription message
mconn.Command("SUBSCRIBE", keySubChannel).
Expect(createSubscribeMessage(keySubChannel))
mconn.Command("UNSUBSCRIBE", keySubChannel).
Expect(createUnsubscribeMessage(keySubChannel))
getCmd := mconn.Command("GET", runnerKey)
mconn.ReceiveWait = true
const runTimes = 100
for i := 0; i < runTimes; i++ {
mconn.AddSubscriptionMessage(createSubscriptionMessage(keySubChannel, runnerKey+"=somethingelse"))
getCmd = getCmd.Expect("something")
}
wg := &sync.WaitGroup{}
// Race-conditions /o/ \o\
for i := 0; i < runTimes; i++ {
wg.Add(1)
go func(mconn *redigomock.Conn) {
defer wg.Done()
// ACTUALLY Fill the buffers
go func(mconn *redigomock.Conn) {
mconn.ReceiveNow <- true
}(mconn)
val, err := WatchKey(runnerKey, "something", time.Duration(1*time.Second))
assert.NoError(t, err, "Expected no error")
assert.Equal(t, WatchKeyStatusSeenChange, val, "Expected value to change")
}(mconn)
}
wg.Wait()
}
package redis
import (
"errors"
"fmt"
"time"
"gitlab.com/gitlab-org/gitlab-workhorse/internal/config"
sentinel "github.com/FZambia/go-sentinel"
"github.com/garyburd/redigo/redis"
"github.com/prometheus/client_golang/prometheus"
)
var (
pool *redis.Pool
sntnl *sentinel.Sentinel
)
const (
defaultMaxIdle = 1
defaultMaxActive = 1
defaultReadTimeout = 1 * time.Second
defaultIdleTimeout = 3 * time.Minute
)
var (
totalConnections = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "gitlab_workhorse_redis_total_connections",
Help: "How many connections gitlab-workhorse has opened in total. Can be used to track Redis connection rate for this process",
},
)
)
func init() {
prometheus.MustRegister(
totalConnections,
)
}
func sentinelConn(master string, urls []config.TomlURL) *sentinel.Sentinel {
if len(urls) == 0 {
return nil
}
var addrs []string
for _, url := range urls {
addrs = append(addrs, url.URL.String())
}
return &sentinel.Sentinel{
Addrs: addrs,
MasterName: master,
Dial: func(addr string) (redis.Conn, error) {
// This timeout is recommended for Sentinel-support according to the guidelines.
// https://redis.io/topics/sentinel-clients#redis-service-discovery-via-sentinel
// For every address it should try to connect to the Sentinel,
// using a short timeout (in the order of a few hundreds of milliseconds).
timeout := 500 * time.Millisecond
c, err := redis.DialTimeout("tcp", addr, timeout, timeout, timeout)
if err != nil {
return nil, err
}
return c, nil
},
}
}
var redisDialFunc func() (redis.Conn, error)
func dialOptionsBuilder(cfg *config.RedisConfig) []redis.DialOption {
readTimeout := defaultReadTimeout
if cfg.ReadTimeout != nil {
readTimeout = time.Millisecond * time.Duration(*cfg.ReadTimeout)
}
dopts := []redis.DialOption{redis.DialReadTimeout(readTimeout)}
if cfg.Password != "" {
dopts = append(dopts, redis.DialPassword(cfg.Password))
}
return dopts
}
// DefaultDialFunc should always be used. The only exception is for unit tests.
func DefaultDialFunc(cfg *config.RedisConfig) func() (redis.Conn, error) {
dopts := dialOptionsBuilder(cfg)
innerDial := func() (redis.Conn, error) {
return redis.Dial(cfg.URL.Scheme, cfg.URL.Host, dopts...)
}
if sntnl != nil {
innerDial = func() (redis.Conn, error) {
address, err := sntnl.MasterAddr()
if err != nil {
return nil, err
}
return redis.Dial("tcp", address, dopts...)
}
}
return func() (redis.Conn, error) {
c, err := innerDial()
if err == nil {
totalConnections.Inc()
}
return c, err
}
}
// Configure redis-connection
func Configure(cfg *config.RedisConfig, dialFunc func() (redis.Conn, error)) {
if cfg == nil {
return
}
maxIdle := defaultMaxIdle
if cfg.MaxIdle != nil {
maxIdle = *cfg.MaxIdle
}
maxActive := defaultMaxActive
if cfg.MaxActive != nil {
maxActive = *cfg.MaxActive
}
sntnl = sentinelConn(cfg.SentinelMaster, cfg.Sentinel)
redisDialFunc = dialFunc
pool = &redis.Pool{
MaxIdle: maxIdle, // Keep at most X hot connections
MaxActive: maxActive, // Keep at most X live connections, 0 means unlimited
IdleTimeout: defaultIdleTimeout, // X time until an unused connection is closed
Dial: redisDialFunc,
Wait: true,
}
if sntnl != nil {
pool.TestOnBorrow = func(c redis.Conn, t time.Time) error {
if !sentinel.TestRole(c, "master") {
return errors.New("Role check failed")
}
return nil
}
}
}
// Get a connection from the Redis pool
func Get() redis.Conn {
if pool != nil {
return pool.Get()
}
return nil
}
// GetString fetches the value of a key in Redis as a string
func GetString(key string) (string, error) {
conn := Get()
if conn == nil {
return "", fmt.Errorf("Not connected to redis")
}
defer func() {
conn.Close()
}()
return redis.String(conn.Do("GET", key))
}
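
Putting these pieces together, here is a minimal sketch of wiring the pool and reading a key, mirroring what `main.go` does further below. The config path and the key name are assumptions for illustration only.

```go
package main

import (
	"fmt"
	"log"

	"gitlab.com/gitlab-org/gitlab-workhorse/internal/config"
	"gitlab.com/gitlab-org/gitlab-workhorse/internal/redis"
)

func main() {
	// Hypothetical path to a TOML file with a [redis] section.
	cfg, err := config.LoadConfig("/etc/gitlab/workhorse.toml")
	if err != nil {
		log.Fatalf("Cannot load config: %v", err)
	}

	// Build the connection pool; Sentinel is used automatically when configured.
	redis.Configure(cfg.Redis, redis.DefaultDialFunc(cfg.Redis))

	// Fetch a value through the pool; the key is illustrative.
	value, err := redis.GetString("runner:build_queue:10")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(value)
}
```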
package redis
import (
"testing"
"time"
"gitlab.com/gitlab-org/gitlab-workhorse/internal/config"
"github.com/garyburd/redigo/redis"
"github.com/rafaeljusto/redigomock"
"github.com/stretchr/testify/assert"
)
// Setup a MockPool for Redis
//
// Returns the mock connection and a teardown function
func setupMockPool() (*redigomock.Conn, func()) {
conn := redigomock.NewConn()
cfg := &config.RedisConfig{URL: config.TomlURL{}}
Configure(cfg, func() (redis.Conn, error) {
return conn, nil
})
return conn, func() {
pool = nil
}
}
func TestConfigureNoConfig(t *testing.T) {
pool = nil
Configure(nil, nil)
assert.Nil(t, pool, "Pool should be nil")
}
func TestConfigureMinimalConfig(t *testing.T) {
cfg := &config.RedisConfig{URL: config.TomlURL{}, Password: ""}
Configure(cfg, DefaultDialFunc(cfg))
if assert.NotNil(t, pool, "Pool should not be nil") {
assert.Equal(t, 1, pool.MaxIdle)
assert.Equal(t, 1, pool.MaxActive)
assert.Equal(t, 3*time.Minute, pool.IdleTimeout)
}
pool = nil
}
func TestConfigureFullConfig(t *testing.T) {
i, a, r := 4, 10, 3
cfg := &config.RedisConfig{
URL: config.TomlURL{},
Password: "",
MaxIdle: &i,
MaxActive: &a,
ReadTimeout: &r,
}
Configure(cfg, DefaultDialFunc(cfg))
if assert.NotNil(t, pool, "Pool should not be nil") {
assert.Equal(t, i, pool.MaxIdle)
assert.Equal(t, a, pool.MaxActive)
assert.Equal(t, 3*time.Minute, pool.IdleTimeout)
}
pool = nil
}
func TestGetConnFail(t *testing.T) {
conn := Get()
assert.Nil(t, conn, "Expected `conn` to be nil")
}
func TestGetConnPass(t *testing.T) {
_, teardown := setupMockPool()
defer teardown()
conn := Get()
assert.NotNil(t, conn, "Expected `conn` to be non-nil")
}
func TestGetStringPass(t *testing.T) {
conn, teardown := setupMockPool()
defer teardown()
conn.Command("GET", "foobar").Expect("baz")
str, err := GetString("foobar")
if assert.NoError(t, err, "Expected `err` to be nil") {
var value string
assert.IsType(t, value, str, "Expected value to be a string")
assert.Equal(t, "baz", str, "Expected it to be equal")
}
}
func TestGetStringFail(t *testing.T) {
_, err := GetString("foobar")
assert.Error(t, err, "Expected error when not connected to redis")
}
...@@ -26,6 +26,7 @@ import (
"gitlab.com/gitlab-org/gitlab-workhorse/internal/config"
"gitlab.com/gitlab-org/gitlab-workhorse/internal/queueing"
"gitlab.com/gitlab-org/gitlab-workhorse/internal/redis"
"gitlab.com/gitlab-org/gitlab-workhorse/internal/secret"
"gitlab.com/gitlab-org/gitlab-workhorse/internal/upstream"
...@@ -36,6 +37,7 @@ import (
var Version = "(unknown version)" // Set at build time in the Makefile
var printVersion = flag.Bool("version", false, "Print version and exit")
var configFile = flag.String("config", "", "TOML file to load config from")
var listenAddr = flag.String("listenAddr", "localhost:8181", "Listen address for HTTP server")
var listenNetwork = flag.String("listenNetwork", "tcp", "Listen 'network' (tcp, tcp4, tcp6, unix)")
var listenUmask = flag.Int("listenUmask", 0, "Umask for Unix socket")
...@@ -121,6 +123,18 @@ func main() {
APIQueueTimeout: *apiQueueTimeout,
}
if *configFile != "" {
cfgFromFile, err := config.LoadConfig(*configFile)
if err != nil {
log.Fatalf("Can not load config file %q: %v", *configFile, err)
}
cfg.Redis = cfgFromFile.Redis
redis.Configure(cfg.Redis, redis.DefaultDialFunc(cfg.Redis))
go redis.Process(true)
}
up := wrapRaven(upstream.NewUpstream(cfg))
log.Fatal(http.Serve(listener, up))
...
TAGS
tags
.*.swp
tomlcheck/tomlcheck
toml.test
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- 1.5
- 1.6
- tip
install:
- go install ./...
- go get github.com/BurntSushi/toml-test
script:
- export PATH="$PATH:$HOME/gopath/bin"
- make test
Compatible with TOML version
[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.
install:
go install ./...
test: install
go test -v
toml-test toml-test-decoder
toml-test -encoder toml-test-encoder
fmt:
gofmt -w *.go */*.go
colcheck *.go */*.go
tags:
find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
push:
git push origin master
git push github master
## TOML parser and encoder for Go with reflection
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)
Spec: https://github.com/mojombo/toml
Compatible with TOML version
[v0.2.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.2.0.md)
Documentation: http://godoc.org/github.com/BurntSushi/toml
Installation:
```bash
go get github.com/BurntSushi/toml
```
Try the toml validator:
```bash
go get github.com/BurntSushi/toml/cmd/tomlv
tomlv some-toml-file.toml
```
[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml)
### Testing
This package passes all tests in
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
and the encoder.
### Examples
This package works similarly to how the Go standard library handles `XML`
and `JSON`. Namely, data is loaded into Go values via reflection.
For the simplest example, consider some TOML file as just a list of keys
and values:
```toml
Age = 25
Cats = [ "Cauchy", "Plato" ]
Pi = 3.14
Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
```
Which could be defined in Go as:
```go
type Config struct {
Age int
Cats []string
Pi float64
Perfection []int
DOB time.Time // requires `import time`
}
```
And then decoded with:
```go
var conf Config
if _, err := toml.Decode(tomlData, &conf); err != nil {
// handle error
}
```
You can also use struct tags if your struct field name doesn't map to a TOML
key value directly:
```toml
some_key_NAME = "wat"
```
```go
type TOML struct {
ObscureKey string `toml:"some_key_NAME"`
}
```
### Using the `encoding.TextUnmarshaler` interface
Here's an example that automatically parses duration strings into
`time.Duration` values:
```toml
[[song]]
name = "Thunder Road"
duration = "4m49s"
[[song]]
name = "Stairway to Heaven"
duration = "8m03s"
```
Which can be decoded with:
```go
type song struct {
Name string
Duration duration
}
type songs struct {
Song []song
}
var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil {
log.Fatal(err)
}
for _, s := range favorites.Song {
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
```
And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface:
```go
type duration struct {
time.Duration
}
func (d *duration) UnmarshalText(text []byte) error {
var err error
d.Duration, err = time.ParseDuration(string(text))
return err
}
```
### More complex usage
Here's an example of how to load the example from the official spec page:
```toml
# This is a TOML document. Boom.
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]
```
And the corresponding Go types are:
```go
type tomlConfig struct {
Title string
Owner ownerInfo
DB database `toml:"database"`
Servers map[string]server
Clients clients
}
type ownerInfo struct {
Name string
Org string `toml:"organization"`
Bio string
DOB time.Time
}
type database struct {
Server string
Ports []int
ConnMax int `toml:"connection_max"`
Enabled bool
}
type server struct {
IP string
DC string
}
type clients struct {
Data [][]interface{}
Hosts []string
}
```
Note that a case insensitive match will be tried if an exact match can't be
found.
A working example of the above can be found in `_examples/example.{go,toml}`.
package toml
import "strings"
// MetaData allows access to meta information about TOML data that may not
// be inferrable via reflection. In particular, whether a key has been defined
// and the TOML type of a key.
type MetaData struct {
mapping map[string]interface{}
types map[string]tomlType
keys []Key
decoded map[string]bool
context Key // Used only during decoding.
}
// IsDefined returns true if the key given exists in the TOML data. The key
// should be specified hierarchically. e.g.,
//
// // access the TOML key 'a.b.c'
// IsDefined("a", "b", "c")
//
// IsDefined will return false if an empty key is given. Keys are case sensitive.
func (md *MetaData) IsDefined(key ...string) bool {
if len(key) == 0 {
return false
}
var hash map[string]interface{}
var ok bool
var hashOrVal interface{} = md.mapping
for _, k := range key {
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
return false
}
if hashOrVal, ok = hash[k]; !ok {
return false
}
}
return true
}
// Type returns a string representation of the type of the key specified.
//
// Type will return the empty string if given an empty key or a key that
// does not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
fullkey := strings.Join(key, ".")
if typ, ok := md.types[fullkey]; ok {
return typ.typeString()
}
return ""
}
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
// to get values of this type.
type Key []string
func (k Key) String() string {
return strings.Join(k, ".")
}
func (k Key) maybeQuotedAll() string {
var ss []string
for i := range k {
ss = append(ss, k.maybeQuoted(i))
}
return strings.Join(ss, ".")
}
func (k Key) maybeQuoted(i int) string {
quote := false
for _, c := range k[i] {
if !isBareKeyChar(c) {
quote = true
break
}
}
if quote {
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
}
return k[i]
}
func (k Key) add(piece string) Key {
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
return newKey
}
// Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
//
// The list will have the same order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
return md.keys
}
// Undecoded returns all keys that have not been decoded in the order in which
// they appear in the original TOML document.
//
// This includes keys that haven't been decoded because of a Primitive value.
// Once the Primitive value is decoded, the keys will be considered decoded.
//
// Also note that decoding into an empty interface will result in no decoding,
// and so no keys will be considered decoded.
//
// In this sense, the Undecoded keys correspond to keys in the TOML document
// that do not have a concrete type in your representation.
func (md *MetaData) Undecoded() []Key {
undecoded := make([]Key, 0, len(md.keys))
for _, key := range md.keys {
if !md.decoded[key.String()] {
undecoded = append(undecoded, key)
}
}
return undecoded
}
/*
Package toml provides facilities for decoding and encoding TOML configuration
files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.
The specification implemented: https://github.com/mojombo/toml
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the
type of each key in a TOML document.
Testing
There are two important types of tests used for this package. The first is
contained inside '*_test.go' files and uses the standard Go unit testing
framework. These tests are primarily devoted to holistically testing the
decoder and encoder.
The second type of testing is used to verify the implementation's adherence
to the TOML specification. These tests have been factored into their own
project: https://github.com/BurntSushi/toml-test
The reason the tests are in a separate project is so that they can be used by
any implementation of TOML. Namely, it is language agnostic.
*/
package toml
// +build go1.2
package toml
// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.
import (
"encoding"
)
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler
// +build !go1.2
package toml
// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
MarshalText() (text []byte, err error)
}
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
UnmarshalText(text []byte) error
}
au BufWritePost *.go silent!make tags > /dev/null 2>&1
package toml
// tomlType represents any Go type that corresponds to a TOML type.
// While the first draft of the TOML spec has a simplistic type system that
// probably doesn't need this level of sophistication, we seem to be militating
// toward adding real composite types.
type tomlType interface {
typeString() string
}
// typeEqual accepts any two types and returns true if they are equal.
func typeEqual(t1, t2 tomlType) bool {
if t1 == nil || t2 == nil {
return false
}
return t1.typeString() == t2.typeString()
}
func typeIsHash(t tomlType) bool {
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}
type tomlBaseType string
func (btype tomlBaseType) typeString() string {
return string(btype)
}
func (btype tomlBaseType) String() string {
return btype.typeString()
}
var (
tomlInteger tomlBaseType = "Integer"
tomlFloat tomlBaseType = "Float"
tomlDatetime tomlBaseType = "Datetime"
tomlString tomlBaseType = "String"
tomlBool tomlBaseType = "Bool"
tomlArray tomlBaseType = "Array"
tomlHash tomlBaseType = "Hash"
tomlArrayHash tomlBaseType = "ArrayHash"
)
// typeOfPrimitive returns a tomlType of any primitive value in TOML.
// Primitive values are: Integer, Float, Datetime, String and Bool.
//
// Passing a lexer item other than the following will cause a BUG message
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
switch lexItem.typ {
case itemInteger:
return tomlInteger
case itemFloat:
return tomlFloat
case itemDatetime:
return tomlDatetime
case itemString:
return tomlString
case itemMultilineString:
return tomlString
case itemRawString:
return tomlString
case itemRawMultilineString:
return tomlString
case itemBool:
return tomlBool
}
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
panic("unreachable")
}
// typeOfArray returns a tomlType for an array given a list of types of its
// values.
//
// In the current spec, if an array is homogeneous, then its type is always
// "Array". If the array is not homogeneous, an error is generated.
func (p *parser) typeOfArray(types []tomlType) tomlType {
// Empty arrays are cool.
if len(types) == 0 {
return tomlArray
}
theType := types[0]
for _, t := range types[1:] {
if !typeEqual(theType, t) {
p.panicf("Array contains values of type '%s' and '%s', but "+
"arrays must be homogeneous.", theType, t)
}
}
return tomlArray
}
package toml
// Struct field handling is adapted from code in encoding/json:
//
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the Go distribution.
import (
"reflect"
"sort"
"sync"
)
// A field represents a single field found in a struct.
type field struct {
name string // the name of the field (`toml` tag included)
tag bool // whether field has a `toml` tag
index []int // represents the depth of an anonymous field
typ reflect.Type // the type of the field
}
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from toml tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include - the top struct and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" && !sf.Anonymous { // unexported
continue
}
opts := getOptions(sf.Tag)
if opts.skip {
continue
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := opts.name != ""
name := opts.name
if name == "" {
name = sf.Name
}
fields = append(fields, field{name, tagged, index, ft})
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
f := field{name: ft.Name(), index: index, typ: ft}
next = append(next, f)
}
}
}
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with TOML tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// TOML tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
go-sentinel
===========
Redis Sentinel support for [redigo](https://github.com/garyburd/redigo) library.
**API is unstable and can change at any moment** – use with tools like Glide, Godep etc.
Documentation
-------------
- [API Reference](http://godoc.org/github.com/FZambia/go-sentinel)
License
-------
Library is available under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.html).
ISC License
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build !js,!appengine,!safe,!disableunsafe
package spew
import (
"reflect"
"unsafe"
)
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = false
// ptrSize is the size of a pointer on the current arch.
ptrSize = unsafe.Sizeof((*byte)(nil))
)
var (
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
// internal reflect.Value fields. These values are valid before golang
// commit ecccf07e7f9d which changed the format. They are also valid
// after commit 82f48826c6c7 which changed the format again to mirror
// the original format. Code in the init function updates these offsets
// as necessary.
offsetPtr = uintptr(ptrSize)
offsetScalar = uintptr(0)
offsetFlag = uintptr(ptrSize * 2)
// flagKindWidth and flagKindShift indicate various bits that the
// reflect package uses internally to track kind information.
//
// flagRO indicates whether or not the value field of a reflect.Value is
// read-only.
//
// flagIndir indicates whether the value field of a reflect.Value is
// the actual data or a pointer to the data.
//
// These values are valid before golang commit 90a7c3c86944 which
// changed their positions. Code in the init function updates these
// flags as necessary.
flagKindWidth = uintptr(5)
flagKindShift = uintptr(flagKindWidth - 1)
flagRO = uintptr(1 << 0)
flagIndir = uintptr(1 << 1)
)
func init() {
// Older versions of reflect.Value stored small integers directly in the
// ptr field (which is named val in the older versions). Versions
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
// scalar for this purpose which unfortunately came before the flag
// field, so the offset of the flag field is different for those
// versions.
//
// This code constructs a new reflect.Value from a known small integer
// and checks if the size of the reflect.Value struct indicates it has
// the scalar field. When it does, the offsets are updated accordingly.
vv := reflect.ValueOf(0xf00)
if unsafe.Sizeof(vv) == (ptrSize * 4) {
offsetScalar = ptrSize * 2
offsetFlag = ptrSize * 3
}
// Commit 90a7c3c86944 changed the flag positions such that the low
// order bits are the kind. This code extracts the kind from the flags
// field and ensures it's the correct type. When it's not, the flag
// order has been changed to the newer format, so the flags are updated
// accordingly.
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
upfv := *(*uintptr)(upf)
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
flagKindShift = 0
flagRO = 1 << 5
flagIndir = 1 << 6
// Commit adf9b30e5594 modified the flags to separate the
// flagRO flag into two bits which specifies whether or not the
// field is embedded. This causes flagIndir to move over a bit
// and means that flagRO is the combination of either of the
// original flagRO bit and the new bit.
//
// This code detects the change by extracting what used to be
// the indirect bit to ensure it's set. When it's not, the flag
// order has been changed to the newer format, so the flags are
// updated accordingly.
if upfv&flagIndir == 0 {
flagRO = 3 << 5
flagIndir = 1 << 7
}
}
}
// unsafeReflectValue converts the passed reflect.Value into one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data. It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
indirects := 1
vt := v.Type()
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
if rvf&flagIndir != 0 {
vt = reflect.PtrTo(v.Type())
indirects++
} else if offsetScalar != 0 {
// The value is in the scalar field when it's not one of the
// reference types.
switch vt.Kind() {
case reflect.Uintptr:
case reflect.Chan:
case reflect.Func:
case reflect.Map:
case reflect.Ptr:
case reflect.UnsafePointer:
default:
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
offsetScalar)
}
}
pv := reflect.NewAt(vt, upv)
rv = pv
for i := 0; i < indirects; i++ {
rv = rv.Elem()
}
return rv
}
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe
package spew
import "reflect"
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = true
)
// unsafeReflectValue typically converts the passed reflect.Value into one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data. However, doing this relies on access to
// the unsafe package. This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available.
func unsafeReflectValue(v reflect.Value) reflect.Value {
return v
}
// Copyright 2014 Gary Burd
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package internal
import (
"strings"
)
const (
WatchState = 1 << iota
MultiState
SubscribeState
MonitorState
)
type CommandInfo struct {
Set, Clear int
}
var commandInfos = map[string]CommandInfo{
"WATCH": {Set: WatchState},
"UNWATCH": {Clear: WatchState},
"MULTI": {Set: MultiState},
"EXEC": {Clear: WatchState | MultiState},
"DISCARD": {Clear: WatchState | MultiState},
"PSUBSCRIBE": {Set: SubscribeState},
"SUBSCRIBE": {Set: SubscribeState},
"MONITOR": {Set: MonitorState},
}
func init() {
for n, ci := range commandInfos {
commandInfos[strings.ToLower(n)] = ci
}
}
func LookupCommandInfo(commandName string) CommandInfo {
if ci, ok := commandInfos[commandName]; ok {
return ci
}
return commandInfos[strings.ToUpper(commandName)]
}
// +build go1.7
package redis
import "crypto/tls"
// similar to cloneTLSClientConfig in the stdlib, but also honors skipVerify for the nil case
func cloneTLSClientConfig(cfg *tls.Config, skipVerify bool) *tls.Config {
if cfg == nil {
return &tls.Config{InsecureSkipVerify: skipVerify}
}
return &tls.Config{
Rand: cfg.Rand,
Time: cfg.Time,
Certificates: cfg.Certificates,
NameToCertificate: cfg.NameToCertificate,
GetCertificate: cfg.GetCertificate,
RootCAs: cfg.RootCAs,
NextProtos: cfg.NextProtos,
ServerName: cfg.ServerName,
ClientAuth: cfg.ClientAuth,
ClientCAs: cfg.ClientCAs,
InsecureSkipVerify: cfg.InsecureSkipVerify,
CipherSuites: cfg.CipherSuites,
PreferServerCipherSuites: cfg.PreferServerCipherSuites,
ClientSessionCache: cfg.ClientSessionCache,
MinVersion: cfg.MinVersion,
MaxVersion: cfg.MaxVersion,
CurvePreferences: cfg.CurvePreferences,
DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled,
Renegotiation: cfg.Renegotiation,
}
}
// +build !go1.7
package redis
import "crypto/tls"
// similar to cloneTLSClientConfig in the stdlib, but also honors skipVerify for the nil case
func cloneTLSClientConfig(cfg *tls.Config, skipVerify bool) *tls.Config {
if cfg == nil {
return &tls.Config{InsecureSkipVerify: skipVerify}
}
return &tls.Config{
Rand: cfg.Rand,
Time: cfg.Time,
Certificates: cfg.Certificates,
NameToCertificate: cfg.NameToCertificate,
GetCertificate: cfg.GetCertificate,
RootCAs: cfg.RootCAs,
NextProtos: cfg.NextProtos,
ServerName: cfg.ServerName,
ClientAuth: cfg.ClientAuth,
ClientCAs: cfg.ClientCAs,
InsecureSkipVerify: cfg.InsecureSkipVerify,
CipherSuites: cfg.CipherSuites,
PreferServerCipherSuites: cfg.PreferServerCipherSuites,
ClientSessionCache: cfg.ClientSessionCache,
MinVersion: cfg.MinVersion,
MaxVersion: cfg.MaxVersion,
CurvePreferences: cfg.CurvePreferences,
}
}
Rafael Dantas Justo - @rafaeljusto
Charles Law - @clawconduce
Maciej Galkowski - @szank
Zachery Moneypenny - @whazzmaster
{{.CommentWithoutT "a"}}
func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool {
return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
}