Commit 8b5221a5 authored by Russ Cox's avatar Russ Cox

cmd/pprof: add Go implementation

Update #8798

This is a new implementation of pprof,
written in Go instead of in Perl.
It was written primarily by Raul Silvera and
is in use for profiling programs of all languages
inside Google.

The internal structure is a bit package-heavy,
but it matches the copy used inside Google, and
since it is in an internal directory, we can make
changes to it later if we need to.

The only "new" file here is src/cmd/pprof/pprof.go,
which stitches together the Google pprof and the
Go command libraries for object file access.

I am explicitly NOT interested in style or review
comments on the rest of the files
(that is, src/cmd/pprof/internal/...).
Those are intended to stay as close to the Google
copies as possible, like we did with the pprof Perl script.

Still to do:

- Basic tests.
- Real command documentation.
- Hook up disassemblers.

LGTM=r
R=r, bradfitz, alex.brainman, dave
CC=golang-codereviews
https://golang.org/cl/153750043
parent 454d1b0e
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package commands defines and manages the basic pprof commands.
package commands
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"strings"
"cmd/pprof/internal/plugin"
"cmd/pprof/internal/report"
"cmd/pprof/internal/svg"
"cmd/pprof/internal/tempfile"
)
// Commands describes the commands accepted by pprof.
type Commands map[string]*Command
// Command describes the actions for a pprof command. Includes a
// function for command-line completion, the report format to use
// during report generation, any postprocessing functions, and whether
// the command expects a regexp parameter (typically a function name).
type Command struct {
Complete Completer // autocomplete for interactive mode
Format int // report format to generate
PostProcess PostProcessor // postprocessing to run on report
HasParam bool // collect a parameter from the CLI (typically a regexp)
Usage string // one-line help text shown in usage messages
}
// Completer is a function for command-line autocompletion: it receives
// the partial input and returns the completed text.
type Completer func(prefix string) string
// PostProcessor is a function that applies post-processing to the report output.
// It reads the generated report from input and writes the final form to output.
type PostProcessor func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error
// PProf returns the basic pprof report-generation commands
func PProf(c Completer, interactive **bool, svgpan **string) Commands {
	return Commands{
		// Commands that require no post-processing.
		"tags": {Complete: nil, Format: report.Tags, Usage: "Outputs all tags in the profile"},
		"raw":  {Complete: c, Format: report.Raw, Usage: "Outputs a text representation of the raw profile"},
		"dot":  {Complete: c, Format: report.Dot, Usage: "Outputs a graph in DOT format"},
		"top":  {Complete: c, Format: report.Text, Usage: "Outputs top entries in text form"},
		"tree": {Complete: c, Format: report.Tree, Usage: "Outputs a text rendering of call graph"},
		"text": {Complete: c, Format: report.Text, Usage: "Outputs top entries in text form"},
		"disasm": {Complete: c, Format: report.Dis, HasParam: true,
			Usage: "Output annotated assembly for functions matching regexp or address"},
		"list": {Complete: c, Format: report.List, HasParam: true,
			Usage: "Output annotated source for functions matching regexp"},
		"peek": {Complete: c, Format: report.Tree, HasParam: true,
			Usage: "Output callers/callees of functions matching regexp"},
		// Save binary formats to a file
		"callgrind": {Complete: c, Format: report.Callgrind, PostProcess: awayFromTTY("callgraph.out"),
			Usage: "Outputs a graph in callgrind format"},
		"proto": {Complete: c, Format: report.Proto, PostProcess: awayFromTTY("pb.gz"),
			Usage: "Outputs the profile in compressed protobuf format"},
		// Generate report in DOT format and postprocess with dot
		"gif": {Complete: c, Format: report.Dot, PostProcess: invokeDot("gif"),
			Usage: "Outputs a graph image in GIF format"},
		"pdf": {Complete: c, Format: report.Dot, PostProcess: invokeDot("pdf"),
			Usage: "Outputs a graph in PDF format"},
		"png": {Complete: c, Format: report.Dot, PostProcess: invokeDot("png"),
			Usage: "Outputs a graph image in PNG format"},
		"ps": {Complete: c, Format: report.Dot, PostProcess: invokeDot("ps"),
			Usage: "Outputs a graph in PS format"},
		// Save SVG output into a file after including svgpan library
		"svg": {Complete: c, Format: report.Dot, PostProcess: saveSVGToFile(svgpan),
			Usage: "Outputs a graph in SVG format"},
		// Visualize postprocessed dot output
		"eog": {Complete: c, Format: report.Dot,
			PostProcess: invokeVisualizer(interactive, invokeDot("svg"), "svg", []string{"eog"}),
			Usage:       "Visualize graph through eog"},
		"evince": {Complete: c, Format: report.Dot,
			PostProcess: invokeVisualizer(interactive, invokeDot("pdf"), "pdf", []string{"evince"}),
			Usage:       "Visualize graph through evince"},
		"gv": {Complete: c, Format: report.Dot,
			PostProcess: invokeVisualizer(interactive, invokeDot("ps"), "ps", []string{"gv --noantialias"}),
			Usage:       "Visualize graph through gv"},
		"web": {Complete: c, Format: report.Dot,
			PostProcess: invokeVisualizer(interactive, saveSVGToFile(svgpan), "svg", browsers),
			Usage:       "Visualize graph through web browser"},
		// Visualize HTML directly generated by report.
		"weblist": {Complete: c, Format: report.WebList,
			PostProcess: invokeVisualizer(interactive, awayFromTTY("html"), "html", browsers),
			HasParam:    true,
			Usage:       "Output annotated source in HTML for functions matching regexp or address"},
	}
}
// List of web browsers to attempt for web visualization.
// They are tried in order until one starts successfully.
var browsers = []string{"chrome", "google-chrome", "firefox", "/usr/bin/open"}
// NewCompleter creates an autocompletion function for a set of commands.
// With a single token it completes the command name when the prefix is
// unambiguous; with multiple tokens it delegates the last token to the
// matched command's own completer.
func NewCompleter(cs Commands) Completer {
	return func(line string) string {
		tokens := strings.Fields(line)
		switch len(tokens) {
		case 0:
			// Nothing typed yet; nothing to complete.
			return line
		case 1:
			// Single token -- complete command name if unambiguous.
			var match string
			for name := range cs {
				if !strings.HasPrefix(name, tokens[0]) {
					continue
				}
				if match != "" {
					// More than one command matches the prefix.
					return line
				}
				match = name
			}
			if match == "" {
				return line
			}
			return match
		}
		// Multiple tokens -- complete using command completer.
		cmd, ok := cs[tokens[0]]
		if !ok || cmd.Complete == nil {
			return line
		}
		last := len(tokens) - 1
		token := tokens[last]
		if strings.HasPrefix(token, "-") {
			// Preserve the flag marker while completing the body.
			token = "-" + cmd.Complete(token[1:])
		} else {
			token = cmd.Complete(token)
		}
		return strings.Join(append(tokens[:last], token), " ")
	}
}
// awayFromTTY saves the output in a file if it would otherwise go to
// the terminal screen. This is used to avoid dumping binary data on
// the screen.
func awayFromTTY(format string) PostProcessor {
	return func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error {
		dst := output
		if output == os.Stdout && ui.IsTerminal() {
			// Terminal-bound output: divert to a fresh temp file instead.
			tempFile, err := tempfile.New("", "profile", "."+format)
			if err != nil {
				return err
			}
			ui.PrintErr("Generating report in ", tempFile.Name())
			dst = tempFile
		}
		_, err := fmt.Fprint(dst, input)
		return err
	}
}
// invokeDot returns a PostProcessor that renders DOT-format input into
// the requested image format by running the graphviz "dot" tool, then
// diverts the result away from the terminal.
func invokeDot(format string) PostProcessor {
	divert := awayFromTTY(format)
	return func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error {
		var rendered bytes.Buffer
		dot := exec.Command("dot", "-T"+format)
		dot.Stdin = input
		dot.Stdout = &rendered
		dot.Stderr = os.Stderr
		if err := dot.Run(); err != nil {
			return err
		}
		return divert(&rendered, output, ui)
	}
}
// saveSVGToFile returns a PostProcessor that renders DOT input to SVG,
// embeds the SVGPan library (fetched from *svgpan) for pan/zoom
// support, and diverts the result away from the terminal.
func saveSVGToFile(svgpan **string) PostProcessor {
	generateSVG := invokeDot("svg")
	divert := awayFromTTY("svg")
	return func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error {
		baseSVG := &bytes.Buffer{}
		// Fail early if dot could not produce the SVG; the original
		// code ignored this error and massaged an empty buffer.
		if err := generateSVG(input, baseSVG, ui); err != nil {
			return err
		}
		// Inject the SVGPan library into the generated SVG.
		massaged := &bytes.Buffer{}
		fmt.Fprint(massaged, svg.Massage(*baseSVG, **svgpan))
		return divert(massaged, output, ui)
	}
}
// invokeVisualizer returns a PostProcessor that writes the formatted
// report to a temporary file and launches the first visualizer in
// visualizers that starts successfully. In non-interactive mode it
// waits for the viewer to exit before returning.
func invokeVisualizer(interactive **bool, format PostProcessor, suffix string, visualizers []string) PostProcessor {
return func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error {
// Write the formatted report to a temp file for the viewer to open.
tempFile, err := tempfile.New(os.Getenv("PPROF_TMPDIR"), "pprof", "."+suffix)
if err != nil {
return err
}
// Schedule the temp file for deletion when pprof exits.
tempfile.DeferDelete(tempFile.Name())
if err = format(input, tempFile, ui); err != nil {
return err
}
// Try visualizers until one is successful
for _, v := range visualizers {
// Separate command and arguments for exec.Command.
args := strings.Split(v, " ")
if len(args) == 0 {
continue
}
viewer := exec.Command(args[0], append(args[1:], tempFile.Name())...)
viewer.Stderr = os.Stderr
if err = viewer.Start(); err == nil {
if !**interactive {
// In command-line mode, wait for the viewer to be closed
// before proceeding
return viewer.Wait()
}
// In interactive mode, leave the viewer running in the
// background and return to the prompt.
return nil
}
}
// err holds the failure from the last visualizer attempted.
return err
}
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package driver implements the core pprof functionality. It can be
// parameterized with a flag implementation, and with fetch and
// symbolize mechanisms.
package driver
import (
"bytes"
"fmt"
"io"
"net/url"
"os"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
"cmd/pprof/internal/commands"
"cmd/pprof/internal/plugin"
"cmd/pprof/internal/profile"
"cmd/pprof/internal/report"
"cmd/pprof/internal/tempfile"
)
// PProf acquires a profile, and symbolizes it using a profile
// manager. Then it generates a report formatted according to the
// options selected through the flags package.
func PProf(flagset plugin.FlagSet, fetch plugin.Fetcher, sym plugin.Symbolizer, obj plugin.ObjTool, ui plugin.UI, overrides commands.Commands) error {
// Remove any temporary files created during pprof processing.
defer tempfile.Cleanup()
f, err := getFlags(flagset, overrides, ui)
if err != nil {
return err
}
obj.SetConfig(*f.flagTools)
sources := f.profileSource
if len(sources) > 1 {
source := sources[0]
// If the first argument is a supported object file, treat as executable.
if file, err := obj.Open(source, 0); err == nil {
file.Close()
f.profileExecName = source
sources = sources[1:]
} else if *f.flagBuildID == "" && isBuildID(source) {
// Otherwise, a hex string is interpreted as a build ID override.
f.flagBuildID = &source
sources = sources[1:]
}
}
// errMu protects concurrent accesses to errset and err. errset is set if an
// error is encountered by one of the goroutines grabbing a profile.
errMu, errset := sync.Mutex{}, false
// Fetch profiles.
// Each remaining source is fetched in its own goroutine; results are
// written to distinct slice slots, so only the error needs locking.
wg := sync.WaitGroup{}
profs := make([]*profile.Profile, len(sources))
for i, source := range sources {
wg.Add(1)
go func(i int, src string) {
defer wg.Done()
p, grabErr := grabProfile(src, f.profileExecName, *f.flagBuildID, fetch, sym, obj, ui, f)
if grabErr != nil {
errMu.Lock()
defer errMu.Unlock()
errset, err = true, grabErr
return
}
profs[i] = p
}(i, source)
}
wg.Wait()
if errset {
return err
}
// Merge profiles.
prof := profs[0]
for _, p := range profs[1:] {
if err = prof.Merge(p, 1); err != nil {
return err
}
}
if *f.flagBase != "" {
// Fetch base profile and subtract from current profile.
base, err := grabProfile(*f.flagBase, f.profileExecName, *f.flagBuildID, fetch, sym, obj, ui, f)
if err != nil {
return err
}
if err = prof.Merge(base, -1); err != nil {
return err
}
}
if err := processFlags(prof, ui, f); err != nil {
return err
}
prof.RemoveUninteresting()
if *f.flagInteractive {
return interactive(prof, obj, ui, f)
}
return generate(false, prof, obj, ui, f)
}
// isBuildID determines if the profile may contain a build ID, by
// checking that it is a non-empty string of hex digits. The original
// implementation returned true for the empty string, which is not a
// valid build ID.
func isBuildID(id string) bool {
	return id != "" && strings.Trim(id, "0123456789abcdefABCDEF") == ""
}
// adjustURL updates the profile source URL based on heuristics. It
// will append ?seconds=sec for CPU profiles if not already
// specified. Returns the hostname if the profile is remote.
func adjustURL(source string, sec int, ui plugin.UI) (adjusted, host string, duration time.Duration) {
	// If there is a local file with this name, just use it.
	if _, err := os.Stat(source); err == nil {
		return source, "", 0
	}
	u, err := url.Parse(source)
	// Automatically add http:// to URLs of the form hostname:port/path.
	// url.Parse treats "hostname" as the Scheme.
	if err != nil || (u.Host == "" && u.Scheme != "" && u.Scheme != "file") {
		// Use the package-level url.Parse here: the original called
		// the method on u, which is nil when the first parse failed
		// and would panic.
		u, err = url.Parse("http://" + source)
		if err != nil {
			// Unparseable even with a scheme; report no host rather
			// than dereferencing the nil URL.
			return source, "", 30 * time.Second
		}
	}
	if scheme := strings.ToLower(u.Scheme); scheme == "" || scheme == "file" {
		u.Scheme = ""
		return u.String(), "", 0
	}
	values := u.Query()
	if urlSeconds := values.Get("seconds"); urlSeconds != "" {
		if us, err := strconv.ParseInt(urlSeconds, 10, 32); err == nil {
			if sec >= 0 {
				ui.PrintErr("Overriding -seconds for URL ", source)
			}
			sec = int(us)
		}
	}
	switch strings.ToLower(u.Path) {
	case "", "/":
		// Apply default /profilez.
		u.Path = "/profilez"
	case "/protoz":
		// Rewrite to /profilez?type=proto
		u.Path = "/profilez"
		values.Set("type", "proto")
	}
	if hasDuration(u.Path) {
		if sec > 0 {
			duration = time.Duration(sec) * time.Second
			values.Set("seconds", fmt.Sprintf("%d", sec))
		} else {
			// Assume default duration: 30 seconds
			duration = 30 * time.Second
		}
	}
	u.RawQuery = values.Encode()
	return u.String(), u.Host, duration
}
// hasDuration reports whether the URL path identifies a profile that
// is collected over a period of time (and so takes a seconds option).
func hasDuration(path string) bool {
	for _, marker := range []string{"profilez", "wallz", "/profile"} {
		if strings.Contains(path, marker) {
			return true
		}
	}
	return false
}
// preprocess does filtering and aggregation of a profile based on the
// requested options.
func preprocess(prof *profile.Profile, ui plugin.UI, f *flags) error {
// Apply name-based sample filters, warning when a filter matched nothing.
if *f.flagFocus != "" || *f.flagIgnore != "" || *f.flagHide != "" {
focus, ignore, hide, err := compileFocusIgnore(*f.flagFocus, *f.flagIgnore, *f.flagHide)
if err != nil {
return err
}
fm, im, hm := prof.FilterSamplesByName(focus, ignore, hide)
warnNoMatches(fm, *f.flagFocus, "Focus", ui)
warnNoMatches(im, *f.flagIgnore, "Ignore", ui)
warnNoMatches(hm, *f.flagHide, "Hide", ui)
}
// Apply tag-based sample filters (numeric ranges or regexps).
if *f.flagTagFocus != "" || *f.flagTagIgnore != "" {
focus, err := compileTagFilter(*f.flagTagFocus, ui)
if err != nil {
return err
}
ignore, err := compileTagFilter(*f.flagTagIgnore, ui)
if err != nil {
return err
}
fm, im := prof.FilterSamplesByTag(focus, ignore)
warnNoMatches(fm, *f.flagTagFocus, "TagFocus", ui)
warnNoMatches(im, *f.flagTagIgnore, "TagIgnore", ui)
}
return aggregate(prof, f)
}
// compileFocusIgnore compiles the focus, ignore, and hide expressions
// into regexps. An empty expression yields a nil regexp.
func compileFocusIgnore(focus, ignore, hide string) (f, i, h *regexp.Regexp, err error) {
	if focus != "" {
		f, err = regexp.Compile(focus)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("parsing focus regexp: %v", err)
		}
	}
	if ignore != "" {
		i, err = regexp.Compile(ignore)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("parsing ignore regexp: %v", err)
		}
	}
	if hide != "" {
		h, err = regexp.Compile(hide)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("parsing hide regexp: %v", err)
		}
	}
	return f, i, h, nil
}
// compileTagFilter builds a tag matcher for filter. A filter that
// parses as a numeric range matches numeric tags; otherwise the filter
// is compiled as a regexp over "key:value" strings.
func compileTagFilter(filter string, ui plugin.UI) (f func(string, string, int64) bool, err error) {
	if filter == "" {
		return nil, nil
	}
	// Numeric ranges take precedence over regexps.
	if numFilter := parseTagFilterRange(filter); numFilter != nil {
		ui.PrintErr("Interpreted '", filter, "' as range, not regexp")
		matchNum := func(key, val string, num int64) bool {
			// Numeric tags carry an empty string value.
			if val != "" {
				return false
			}
			return numFilter(num, key)
		}
		return matchNum, nil
	}
	rx, err := regexp.Compile(filter)
	if err != nil {
		return nil, err
	}
	matchStr := func(key, val string, num int64) bool {
		// String tags carry a non-empty value.
		if val == "" {
			return false
		}
		return rx.MatchString(key + ":" + val)
	}
	return matchStr, nil
}
// tagFilterRangeRx matches a number followed by a unit, e.g. "32kb".
var tagFilterRangeRx = regexp.MustCompile("([[:digit:]]+)([[:alpha:]]+)")
// parseTagFilterRange returns a function that checks whether a value is
// contained in the range described by a string. It can recognize
// strings of the form:
// "32kb" -- matches values == 32kb
// ":64kb" -- matches values <= 64kb
// "4mb:" -- matches values >= 4mb
// "12kb:64mb" -- matches values between 12kb and 64mb (both included).
// It returns nil if the string does not describe a recognizable range.
func parseTagFilterRange(filter string) func(int64, string) bool {
ranges := tagFilterRangeRx.FindAllStringSubmatch(filter, 2)
if len(ranges) == 0 {
return nil // No ranges were identified
}
v, err := strconv.ParseInt(ranges[0][1], 10, 64)
if err != nil {
// The regexp guarantees digits, so this should be unreachable.
panic(fmt.Errorf("Failed to parse int %s: %v", ranges[0][1], err))
}
value, unit := report.ScaleValue(v, ranges[0][2], ranges[0][2])
if len(ranges) == 1 {
// A single number+unit: decide by its position relative to ":".
switch match := ranges[0][0]; filter {
case match:
return func(v int64, u string) bool {
sv, su := report.ScaleValue(v, u, unit)
return su == unit && sv == value
}
case match + ":":
return func(v int64, u string) bool {
sv, su := report.ScaleValue(v, u, unit)
return su == unit && sv >= value
}
case ":" + match:
return func(v int64, u string) bool {
sv, su := report.ScaleValue(v, u, unit)
return su == unit && sv <= value
}
}
return nil
}
// Two numbers: the filter must be exactly "<lo>:<hi>".
if filter != ranges[0][0]+":"+ranges[1][0] {
return nil
}
if v, err = strconv.ParseInt(ranges[1][1], 10, 64); err != nil {
panic(fmt.Errorf("Failed to parse int %s: %v", ranges[1][1], err))
}
// Scale the upper bound to the same unit as the lower bound.
value2, unit2 := report.ScaleValue(v, ranges[1][2], unit)
if unit != unit2 {
return nil
}
return func(v int64, u string) bool {
sv, su := report.ScaleValue(v, u, unit)
return su == unit && sv >= value && sv <= value2
}
}
// warnNoMatches prints a warning when the expression rx for option was
// supplied but matched no samples. "." is exempt since it matches all.
func warnNoMatches(match bool, rx, option string, ui plugin.UI) {
	if match || rx == "" || rx == "." {
		return
	}
	ui.PrintErr(option + " expression matched no samples: " + rx)
}
// grabProfile fetches and symbolizes a profile.
func grabProfile(source, exec, buildid string, fetch plugin.Fetcher, sym plugin.Symbolizer, obj plugin.ObjTool, ui plugin.UI, f *flags) (*profile.Profile, error) {
source, host, duration := adjustURL(source, *f.flagSeconds, ui)
remote := host != ""
if remote {
ui.Print("Fetching profile from ", source)
if duration != 0 {
ui.Print("Please wait... (" + duration.String() + ")")
}
}
now := time.Now()
// Fetch profile from source.
// Give 50% slack on the timeout.
p, err := fetch(source, duration+duration/2, ui)
if err != nil {
return nil, err
}
// Update the time/duration if the profile source doesn't include it.
// TODO(rsilvera): Remove this when we remove support for legacy profiles.
if remote {
if p.TimeNanos == 0 {
p.TimeNanos = now.UnixNano()
}
if duration != 0 && p.DurationNanos == 0 {
p.DurationNanos = int64(duration)
}
}
// Replace executable/buildID with the options provided in the
// command line. Assume the executable is the first Mapping entry.
if exec != "" || buildid != "" {
if len(p.Mapping) == 0 {
// Create a fake mapping to hold the user option, and associate
// all samples to it.
m := &profile.Mapping{
ID: 1,
}
for _, l := range p.Location {
l.Mapping = m
}
p.Mapping = []*profile.Mapping{m}
}
if exec != "" {
p.Mapping[0].File = exec
}
if buildid != "" {
p.Mapping[0].BuildID = buildid
}
}
if err := sym(*f.flagSymbolize, source, p, obj, ui); err != nil {
return nil, err
}
// Save a copy of any remote profiles, unless the user is explicitly
// saving it.
if remote && !f.isFormat("proto") {
// Build a descriptive file prefix from the binary, host, and
// sample types.
prefix := "pprof."
if len(p.Mapping) > 0 && p.Mapping[0].File != "" {
prefix = prefix + filepath.Base(p.Mapping[0].File) + "."
}
if !strings.ContainsRune(host, os.PathSeparator) {
prefix = prefix + host + "."
}
for _, s := range p.SampleType {
prefix = prefix + s.Type + "."
}
dir := os.Getenv("PPROF_TMPDIR")
tempFile, err := tempfile.New(dir, prefix, ".pb.gz")
if err == nil {
if err = p.Write(tempFile); err == nil {
ui.PrintErr("Saved profile in ", tempFile.Name())
}
}
// Saving is best-effort: warn but continue with the profile.
if err != nil {
ui.PrintErr("Could not save profile: ", err)
}
}
// Demangling failures are non-fatal as well.
if err := p.Demangle(obj.Demangle); err != nil {
ui.PrintErr("Failed to demangle profile: ", err)
}
if err := p.CheckValid(); err != nil {
return nil, fmt.Errorf("Grab %s: %v", source, err)
}
return p, nil
}
// flags holds pointers to all parsed command-line options plus the
// derived command table and profile sources.
type flags struct {
flagInteractive *bool // Accept commands interactively
flagCommands map[string]*bool // pprof commands without parameters
flagParamCommands map[string]*string // pprof commands with parameters
flagSVGPan *string // URL to fetch the SVG Pan library
flagOutput *string // Output file name
flagCum *bool // Sort by cumulative data
flagCallTree *bool // generate a context-sensitive call tree
flagAddresses *bool // Report at address level
flagLines *bool // Report at source line level
flagFiles *bool // Report at file level
flagFunctions *bool // Report at function level [default]
flagSymbolize *string // Symbolization options (=none to disable)
flagBuildID *string // Override build id for first mapping
flagNodeCount *int // Max number of nodes to show
flagNodeFraction *float64 // Hide nodes below <f>*total
flagEdgeFraction *float64 // Hide edges below <f>*total
flagTrim *bool // Set to false to ignore NodeCount/*Fraction
flagFocus *string // Restricts to paths going through a node matching regexp
flagIgnore *string // Skips paths going through any nodes matching regexp
flagHide *string // Skips sample locations matching regexp
flagTagFocus *string // Restrict to samples tagged with key:value matching regexp
flagTagIgnore *string // Discard samples tagged with key:value matching regexp
flagDropNegative *bool // Skip negative values
flagBase *string // Source for base profile to use for comparison
flagSeconds *int // Length of time for dynamic profiles
flagTotalDelay *bool // Display total delay at each region
flagContentions *bool // Display number of delays at each region
flagMeanDelay *bool // Display mean delay at each region
flagInUseSpace *bool // Display in-use memory size
flagInUseObjects *bool // Display in-use object counts
flagAllocSpace *bool // Display allocated memory size
flagAllocObjects *bool // Display allocated object counts
flagDisplayUnit *string // Measurement unit to use on reports
flagDivideBy *float64 // Ratio to divide sample values
flagSampleIndex *int // Sample value to use in reports.
flagMean *bool // Use mean of sample_index over count
flagTools *string // Path for object tool pathnames (from -tools / PPROF_TOOLS)
profileSource []string // Remaining command-line args: profile source(s)
profileExecName string // Executable name, if given as first argument
extraUsage string // Extra usage text supplied by the flag package
commands commands.Commands // Command table, including overrides
}
// isFormat reports whether the output format named format was selected
// on the command line, either as a boolean flag or as a flag with a
// regexp parameter.
func (f *flags) isFormat(format string) bool {
	if b, ok := f.flagCommands[format]; ok && b != nil {
		return *b
	}
	if s, ok := f.flagParamCommands[format]; ok && s != nil {
		return *s != ""
	}
	return false
}
// String provides a printable representation for the current set of flags.
func (f *flags) String(p *profile.Profile) string {
var ret string
// Only flags that differ from their defaults are printed.
if ix := *f.flagSampleIndex; ix != -1 {
ret += fmt.Sprintf(" %-25s : %d (%s)\n", "sample_index", ix, p.SampleType[ix].Type)
}
if ix := *f.flagMean; ix {
ret += boolFlagString("mean")
}
if *f.flagDisplayUnit != "minimum" {
ret += stringFlagString("unit", *f.flagDisplayUnit)
}
switch {
case *f.flagInteractive:
ret += boolFlagString("interactive")
}
for name, fl := range f.flagCommands {
if *fl {
ret += boolFlagString(name)
}
}
if *f.flagCum {
ret += boolFlagString("cum")
}
if *f.flagCallTree {
ret += boolFlagString("call_tree")
}
// Granularity options are mutually exclusive; print the active one.
switch {
case *f.flagAddresses:
ret += boolFlagString("addresses")
case *f.flagLines:
ret += boolFlagString("lines")
case *f.flagFiles:
ret += boolFlagString("files")
case *f.flagFunctions:
ret += boolFlagString("functions")
}
if *f.flagNodeCount != -1 {
ret += intFlagString("nodecount", *f.flagNodeCount)
}
ret += floatFlagString("nodefraction", *f.flagNodeFraction)
ret += floatFlagString("edgefraction", *f.flagEdgeFraction)
if *f.flagFocus != "" {
ret += stringFlagString("focus", *f.flagFocus)
}
if *f.flagIgnore != "" {
ret += stringFlagString("ignore", *f.flagIgnore)
}
if *f.flagHide != "" {
ret += stringFlagString("hide", *f.flagHide)
}
if *f.flagTagFocus != "" {
ret += stringFlagString("tagfocus", *f.flagTagFocus)
}
if *f.flagTagIgnore != "" {
ret += stringFlagString("tagignore", *f.flagTagIgnore)
}
return ret
}
// boolFlagString formats a set boolean flag for the flag summary.
func boolFlagString(label string) string {
	return stringFlagString(label, "true")
}
// stringFlagString formats a string flag for the flag summary.
func stringFlagString(label, value string) string {
	return fmt.Sprintf(" %-25s : %s\n", label, value)
}
// intFlagString formats an integer flag for the flag summary.
func intFlagString(label string, value int) string {
	return stringFlagString(label, strconv.Itoa(value))
}
// floatFlagString formats a float flag for the flag summary.
func floatFlagString(label string, value float64) string {
	return fmt.Sprintf(" %-25s : %f\n", label, value)
}
// Utility routines to allocate and initialize flag values.
// newBool returns a pointer to a bool with the given value.
func newBool(b bool) *bool {
	p := new(bool)
	*p = b
	return p
}
// newString returns a pointer to a string with the given value.
func newString(s string) *string {
	p := new(string)
	*p = s
	return p
}
// newFloat64 returns a pointer to a float64 with the given value.
func newFloat64(fl float64) *float64 {
	p := new(float64)
	*p = fl
	return p
}
// newInt returns a pointer to an int with the given value.
func newInt(i int) *int {
	p := new(int)
	*p = i
	return p
}
// usage prints the full pprof usage message to the UI, including one
// line per output-format flag derived from the command table.
func (f *flags) usage(ui plugin.UI) {
	var commandMsg []string
	for name, cmd := range f.commands {
		flagName := name
		if cmd.HasParam {
			flagName = flagName + "=p"
		}
		commandMsg = append(commandMsg, fmt.Sprintf(" -%-16s %s", flagName, cmd.Usage))
	}
	// Sort so the per-command lines appear in a stable order.
	sort.Strings(commandMsg)
	parts := []string{usageMsgHdr + strings.Join(commandMsg, "\n"), usageMsg}
	if f.extraUsage != "" {
		parts = append(parts, f.extraUsage)
	}
	ui.Print(strings.Join(parts, "\n") + "\n" + usageMsgVars)
}
// getFlags defines and parses all pprof command-line flags, builds the
// command table (applying overrides), and validates the remaining
// arguments as profile sources. It also prepares PPROF_TMPDIR.
func getFlags(flag plugin.FlagSet, overrides commands.Commands, ui plugin.UI) (*flags, error) {
f := &flags{
flagInteractive: flag.Bool("interactive", false, "Accepts commands interactively"),
flagCommands: make(map[string]*bool),
flagParamCommands: make(map[string]*string),
// Filename for file-based output formats, stdout by default.
flagOutput: flag.String("output", "", "Output filename for file-based outputs "),
// Comparisons.
flagBase: flag.String("base", "", "Source for base profile for comparison"),
flagDropNegative: flag.Bool("drop_negative", false, "Ignore negative differences"),
flagSVGPan: flag.String("svgpan", "https://www.cyberz.org/projects/SVGPan/SVGPan.js", "URL for SVGPan Library"),
// Data sorting criteria.
flagCum: flag.Bool("cum", false, "Sort by cumulative data"),
// Graph handling options.
flagCallTree: flag.Bool("call_tree", false, "Create a context-sensitive call tree"),
// Granularity of output resolution.
flagAddresses: flag.Bool("addresses", false, "Report at address level"),
flagLines: flag.Bool("lines", false, "Report at source line level"),
flagFiles: flag.Bool("files", false, "Report at source file level"),
flagFunctions: flag.Bool("functions", false, "Report at function level [default]"),
// Internal options.
flagSymbolize: flag.String("symbolize", "", "Options for profile symbolization"),
flagBuildID: flag.String("buildid", "", "Override build id for first mapping"),
// Filtering options
flagNodeCount: flag.Int("nodecount", -1, "Max number of nodes to show"),
flagNodeFraction: flag.Float64("nodefraction", 0.005, "Hide nodes below <f>*total"),
flagEdgeFraction: flag.Float64("edgefraction", 0.001, "Hide edges below <f>*total"),
flagTrim: flag.Bool("trim", true, "Honor nodefraction/edgefraction/nodecount defaults"),
flagFocus: flag.String("focus", "", "Restricts to paths going through a node matching regexp"),
flagIgnore: flag.String("ignore", "", "Skips paths going through any nodes matching regexp"),
flagHide: flag.String("hide", "", "Skips nodes matching regexp"),
flagTagFocus: flag.String("tagfocus", "", "Restrict to samples with tags in range or matched by regexp"),
flagTagIgnore: flag.String("tagignore", "", "Discard samples with tags in range or matched by regexp"),
// CPU profile options
flagSeconds: flag.Int("seconds", -1, "Length of time for dynamic profiles"),
// Heap profile options
flagInUseSpace: flag.Bool("inuse_space", false, "Display in-use memory size"),
flagInUseObjects: flag.Bool("inuse_objects", false, "Display in-use object counts"),
flagAllocSpace: flag.Bool("alloc_space", false, "Display allocated memory size"),
flagAllocObjects: flag.Bool("alloc_objects", false, "Display allocated object counts"),
flagDisplayUnit: flag.String("unit", "minimum", "Measurement units to display"),
flagDivideBy: flag.Float64("divide_by", 1.0, "Ratio to divide all samples before visualization"),
flagSampleIndex: flag.Int("sample_index", -1, "Index of sample value to report"),
flagMean: flag.Bool("mean", false, "Average sample value over first value (count)"),
// Contention profile options
flagTotalDelay: flag.Bool("total_delay", false, "Display total delay at each region"),
flagContentions: flag.Bool("contentions", false, "Display number of delays at each region"),
flagMeanDelay: flag.Bool("mean_delay", false, "Display mean delay at each region"),
flagTools: flag.String("tools", os.Getenv("PPROF_TOOLS"), "Path for object tool pathnames"),
extraUsage: flag.ExtraUsage(),
}
// Flags used during command processing
interactive := &f.flagInteractive
svgpan := &f.flagSVGPan
f.commands = commands.PProf(functionCompleter, interactive, svgpan)
// Override commands
for name, cmd := range overrides {
f.commands[name] = cmd
}
// Register one flag per command: boolean for plain commands, string
// (the regexp parameter) for commands that take a parameter.
for name, cmd := range f.commands {
if cmd.HasParam {
f.flagParamCommands[name] = flag.String(name, "", "Generate a report in "+name+" format, matching regexp")
} else {
f.flagCommands[name] = flag.Bool(name, false, "Generate a report in "+name+" format")
}
}
args := flag.Parse(func() { f.usage(ui) })
if len(args) == 0 {
return nil, fmt.Errorf("no profile source specified")
}
f.profileSource = args
// Instruct legacy heapz parsers to grab historical allocation data,
// instead of the default in-use data. Not available with tcmalloc.
if *f.flagAllocSpace || *f.flagAllocObjects {
profile.LegacyHeapAllocated = true
}
// Default the temp directory to $HOME/pprof and create it if needed.
if profileDir := os.Getenv("PPROF_TMPDIR"); profileDir == "" {
profileDir = os.Getenv("HOME") + "/pprof"
os.Setenv("PPROF_TMPDIR", profileDir)
if err := os.MkdirAll(profileDir, 0755); err != nil {
return nil, fmt.Errorf("failed to access temp dir %s: %v", profileDir, err)
}
}
return f, nil
}
// processFlags validates the parsed flags against the profile and each
// other, applies per-format granularity defaults, and resolves the
// sample value selection options into a single sample index.
func processFlags(p *profile.Profile, ui plugin.UI, f *flags) error {
flagDis := f.isFormat("disasm")
flagPeek := f.isFormat("peek")
flagWebList := f.isFormat("weblist")
flagList := f.isFormat("list")
if flagDis || flagWebList {
// Collect all samples at address granularity for assembly
// listing.
f.flagNodeCount = newInt(0)
f.flagAddresses = newBool(true)
f.flagLines = newBool(false)
f.flagFiles = newBool(false)
f.flagFunctions = newBool(false)
}
if flagPeek {
// Collect all samples at function granularity for peek command
f.flagNodeCount = newInt(0)
f.flagAddresses = newBool(false)
f.flagLines = newBool(false)
f.flagFiles = newBool(false)
f.flagFunctions = newBool(true)
}
if flagList {
// Collect all samples at fileline granularity for source
// listing.
f.flagNodeCount = newInt(0)
f.flagAddresses = newBool(false)
f.flagLines = newBool(true)
f.flagFiles = newBool(false)
f.flagFunctions = newBool(false)
}
// -trim=false disables all trimming heuristics.
if !*f.flagTrim {
f.flagNodeCount = newInt(0)
f.flagNodeFraction = newFloat64(0)
f.flagEdgeFraction = newFloat64(0)
}
// With no output format, drop into interactive mode; with more than
// one, reject the combination.
if oc := countFlagMap(f.flagCommands, f.flagParamCommands); oc == 0 {
f.flagInteractive = newBool(true)
} else if oc > 1 {
f.usage(ui)
return fmt.Errorf("must set at most one output format")
}
// Apply nodecount defaults for non-interactive mode. The
// interactive shell will apply defaults for the interactive mode.
if *f.flagNodeCount < 0 && !*f.flagInteractive {
switch {
default:
f.flagNodeCount = newInt(80)
case f.isFormat("text"):
f.flagNodeCount = newInt(0)
}
}
// Apply legacy options and diagnose conflicts.
if rc := countFlags([]*bool{f.flagAddresses, f.flagLines, f.flagFiles, f.flagFunctions}); rc == 0 {
f.flagFunctions = newBool(true)
} else if rc > 1 {
f.usage(ui)
return fmt.Errorf("must set at most one granularity option")
}
// Resolve the sample-selection shorthands (-inuse_space etc.) to a
// single sample index; errors accumulate through the err parameter.
var err error
si, sm := *f.flagSampleIndex, *f.flagMean || *f.flagMeanDelay
si, err = sampleIndex(p, &f.flagTotalDelay, si, 1, "delay", "-total_delay", err)
si, err = sampleIndex(p, &f.flagMeanDelay, si, 1, "delay", "-mean_delay", err)
si, err = sampleIndex(p, &f.flagContentions, si, 0, "contentions", "-contentions", err)
si, err = sampleIndex(p, &f.flagInUseSpace, si, 1, "inuse_space", "-inuse_space", err)
si, err = sampleIndex(p, &f.flagInUseObjects, si, 0, "inuse_objects", "-inuse_objects", err)
si, err = sampleIndex(p, &f.flagAllocSpace, si, 1, "alloc_space", "-alloc_space", err)
si, err = sampleIndex(p, &f.flagAllocObjects, si, 0, "alloc_objects", "-alloc_objects", err)
if si == -1 {
// Use last value if none is requested.
si = len(p.SampleType) - 1
} else if si < 0 || si >= len(p.SampleType) {
err = fmt.Errorf("sample_index value %d out of range [0..%d]", si, len(p.SampleType)-1)
}
if err != nil {
f.usage(ui)
return err
}
f.flagSampleIndex, f.flagMean = newInt(si), newBool(sm)
return nil
}
// sampleIndex processes one legacy sample-selection flag (e.g.
// -inuse_space). If the flag is set it is cleared and the matching
// sample index is returned, after checking that the profile has the
// expected sample type at that index and that no other selection
// option was already applied. A prior error is passed through.
func sampleIndex(p *profile.Profile, flag **bool,
	sampleIndex int,
	newSampleIndex int,
	sampleType, option string,
	err error) (int, error) {
	if err != nil {
		return sampleIndex, err
	}
	if !**flag {
		// Flag not set; keep the current selection.
		return sampleIndex, nil
	}
	// Consume the flag so later checks do not see it again.
	*flag = newBool(false)
	if sampleIndex != -1 {
		return 0, fmt.Errorf("set at most one sample value selection option")
	}
	if newSampleIndex >= len(p.SampleType) ||
		p.SampleType[newSampleIndex].Type != sampleType {
		return 0, fmt.Errorf("option %s not valid for this profile", option)
	}
	return newSampleIndex, nil
}
// countFlags returns how many of the boolean flags in bs are set.
func countFlags(bs []*bool) int {
	n := 0
	for _, flag := range bs {
		if *flag {
			n++
		}
	}
	return n
}
// countFlagMap returns the number of selected output formats: boolean
// command flags that are true plus parameterized command flags that
// received a non-empty value.
func countFlagMap(bms map[string]*bool, bmrxs map[string]*string) int {
	var set int
	for _, enabled := range bms {
		if *enabled {
			set++
		}
	}
	for _, param := range bmrxs {
		if len(*param) > 0 {
			set++
		}
	}
	return set
}
// usageMsgHdr is the first portion of the usage text; the list of
// output-format flags is printed between usageMsgHdr and usageMsg.
var usageMsgHdr = "usage: pprof [options] [binary] <profile source> ...\n" +
	"Output format (only set one):\n"

// usageMsg documents the remaining command-line options, grouped the
// same way the flags are defined.
var usageMsg = "Output file parameters (for file-based output formats):\n" +
	"  -output=f         Generate output on file f (stdout by default)\n" +
	"Output granularity (only set one):\n" +
	"  -functions        Report at function level [default]\n" +
	"  -files            Report at source file level\n" +
	"  -lines            Report at source line level\n" +
	"  -addresses        Report at address level\n" +
	"Comparison options:\n" +
	"  -base <profile>   Show delta from this profile\n" +
	"  -drop_negative    Ignore negative differences\n" +
	"Sorting options:\n" +
	"  -cum              Sort by cumulative data\n\n" +
	"Dynamic profile options:\n" +
	"  -seconds=N        Length of time for dynamic profiles\n" +
	"Profile trimming options:\n" +
	"  -nodecount=N      Max number of nodes to show\n" +
	"  -nodefraction=f   Hide nodes below <f>*total\n" +
	"  -edgefraction=f   Hide edges below <f>*total\n" +
	"Sample value selection option (by index):\n" +
	"  -sample_index      Index of sample value to display\n" +
	"  -mean              Average sample value over first value\n" +
	"Sample value selection option (for heap profiles):\n" +
	"  -inuse_space      Display in-use memory size\n" +
	"  -inuse_objects    Display in-use object counts\n" +
	"  -alloc_space      Display allocated memory size\n" +
	"  -alloc_objects    Display allocated object counts\n" +
	"Sample value selection option (for contention profiles):\n" +
	"  -total_delay      Display total delay at each region\n" +
	"  -contentions      Display number of delays at each region\n" +
	"  -mean_delay       Display mean delay at each region\n" +
	"Filtering options:\n" +
	"  -focus=r          Restricts to paths going through a node matching regexp\n" +
	"  -ignore=r         Skips paths going through any nodes matching regexp\n" +
	"  -tagfocus=r       Restrict to samples tagged with key:value matching regexp\n" +
	"                    Restrict to samples with numeric tags in range (eg \"32kb:1mb\")\n" +
	"  -tagignore=r      Discard samples tagged with key:value matching regexp\n" +
	"                    Avoid samples with numeric tags in range (eg \"1mb:\")\n" +
	"Miscellaneous:\n" +
	"  -call_tree        Generate a context-sensitive call tree\n" +
	"  -unit=u           Convert all samples to unit u for display\n" +
	"  -show_bytes       Display all space in bytes\n" +
	"  -divide_by=f      Scale all samples by dividing them by f\n" +
	"  -buildid=id       Override build id for main binary in profile\n" +
	"  -tools=path       Search path for object-level tools\n" +
	"  -help             This message"

// usageMsgVars documents the environment variables pprof consults.
var usageMsgVars = "Environment Variables:\n" +
	"   PPROF_TMPDIR       Location for temporary files (default $HOME/pprof)\n" +
	"   PPROF_TOOLS        Search path for object-level tools\n" +
	"   PPROF_BINARY_PATH  Search path for local binary files\n" +
	"                      default: $HOME/pprof/binaries\n" +
	"                      finds binaries by $name and $buildid/$name"
// aggregate merges profile samples to the granularity needed by the
// selected output format, or the granularity requested by the
// -functions/-files/-lines flags.
func aggregate(prof *profile.Profile, f *flags) error {
	if f.isFormat("proto") || f.isFormat("raw") {
		// No aggregation for raw profiles.
		return nil
	}
	switch {
	case f.isFormat("callgrind"), *f.flagLines:
		// Callgrind needs file/line aggregation; -lines asks for the same.
		return prof.Aggregate(true, true, true, true, false)
	case *f.flagFiles:
		return prof.Aggregate(true, false, true, false, false)
	case *f.flagFunctions:
		return prof.Aggregate(true, true, false, false, false)
	case f.isFormat("weblist"), f.isFormat("disasm"):
		return prof.Aggregate(false, true, true, true, true)
	}
	return nil
}
// parseOptions parses the options into report.Options
// Returns a function to postprocess the report after generation.
func parseOptions(f *flags) (o *report.Options, p commands.PostProcessor, err error) {
	// -divide_by is used as a divisor below; reject zero up front.
	if *f.flagDivideBy == 0 {
		return nil, nil, fmt.Errorf("zero divisor specified")
	}

	o = &report.Options{
		CumSort:        *f.flagCum,
		CallTree:       *f.flagCallTree,
		PrintAddresses: *f.flagAddresses,
		DropNegative:   *f.flagDropNegative,
		Ratio:          1 / *f.flagDivideBy,
		NodeCount:      *f.flagNodeCount,
		NodeFraction:   *f.flagNodeFraction,
		EdgeFraction:   *f.flagEdgeFraction,
		OutputUnit:     *f.flagDisplayUnit,
	}

	// Boolean report commands (text, dot, ...): the first set flag
	// selects the output format. Map iteration order is random, but
	// at most one of these is expected to be set at a time.
	for cmd, b := range f.flagCommands {
		if *b {
			pcmd := f.commands[cmd]
			o.OutputFormat = pcmd.Format
			return o, pcmd.PostProcess, nil
		}
	}

	// Commands that take a regexp parameter: compile the parameter
	// into o.Symbol before selecting the format.
	for cmd, rx := range f.flagParamCommands {
		if *rx != "" {
			pcmd := f.commands[cmd]
			if o.Symbol, err = regexp.Compile(*rx); err != nil {
				return nil, nil, fmt.Errorf("parsing -%s regexp: %v", cmd, err)
			}
			o.OutputFormat = pcmd.Format
			return o, pcmd.PostProcess, nil
		}
	}

	return nil, nil, fmt.Errorf("no output format selected")
}
// sampleValueFunc extracts a single int64 value from a profile sample.
type sampleValueFunc func(*profile.Sample) int64

// sampleFormat returns a function to extract values out of a profile.Sample,
// and the type/units of those values.
func sampleFormat(p *profile.Profile, f *flags) (sampleValueFunc, string, string) {
	// -sample_index selects which of the sample's values is reported.
	valueIndex := *f.flagSampleIndex

	if *f.flagMean {
		// Report the mean (value divided by the first value); the
		// reported type is prefixed accordingly.
		return meanExtractor(valueIndex), "mean_" + p.SampleType[valueIndex].Type, p.SampleType[valueIndex].Unit
	}

	return valueExtractor(valueIndex), p.SampleType[valueIndex].Type, p.SampleType[valueIndex].Unit
}
// valueExtractor returns a sampleValueFunc that reads the sample
// value stored at index ix.
func valueExtractor(ix int) sampleValueFunc {
	return func(s *profile.Sample) int64 { return s.Value[ix] }
}
// meanExtractor returns a sampleValueFunc that divides the value at
// index ix by the count stored in the sample's first value, guarding
// against division by zero.
func meanExtractor(ix int) sampleValueFunc {
	return func(s *profile.Sample) int64 {
		count := s.Value[0]
		if count == 0 {
			return 0
		}
		return s.Value[ix] / count
	}
}
// generate produces the report selected by the flags, writing it to
// -output (or stdout) and running any post-processing step on the
// generated report. The interactive parameter is currently unused.
func generate(interactive bool, prof *profile.Profile, obj plugin.ObjTool, ui plugin.UI, f *flags) error {
	o, postProcess, err := parseOptions(f)
	if err != nil {
		return err
	}

	// Choose the output destination: stdout by default, or the file
	// named by -output.
	var w io.Writer
	if *f.flagOutput == "" {
		w = os.Stdout
	} else {
		ui.PrintErr("Generating report in ", *f.flagOutput)
		outputFile, err := os.Create(*f.flagOutput)
		if err != nil {
			return err
		}
		defer outputFile.Close()
		w = outputFile
	}

	value, stype, unit := sampleFormat(prof, f)
	o.SampleType = stype
	rpt := report.New(prof, *o, value, unit)

	// Do not apply filters if we're just generating a proto, so we
	// still have all the data.
	if o.OutputFormat != report.Proto {
		// Delay applying focus/ignore until after creating the report so
		// the report reflects the total number of samples.
		if err := preprocess(prof, ui, f); err != nil {
			return err
		}
	}

	if postProcess == nil {
		return report.Generate(w, rpt, obj)
	}

	// Render the report into a buffer and hand it to the
	// post-processor, which writes the final output to w.
	var dot bytes.Buffer
	if err = report.Generate(&dot, rpt, obj); err != nil {
		return err
	}

	return postProcess(&dot, w, ui)
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package driver
import (
"fmt"
"io"
"regexp"
"sort"
"strconv"
"strings"
"cmd/pprof/internal/commands"
"cmd/pprof/internal/plugin"
"cmd/pprof/internal/profile"
)
var profileFunctionNames = []string{}
// functionCompleter replaces the provided substring with a function
// name retrieved from the profile when exactly one known name
// contains the substring. With zero or multiple matches (or no
// profile loaded) the substring is returned unchanged.
func functionCompleter(substring string) string {
	var match string
	for _, name := range profileFunctionNames {
		if !strings.Contains(name, substring) {
			continue
		}
		if match != "" {
			// Ambiguous: a second candidate matches.
			return substring
		}
		match = name
	}
	if match == "" {
		return substring
	}
	return match
}
// updateAutoComplete refreshes the function-name list used for
// autocompletion from the given profile, discarding any names
// retrieved from a previously loaded profile.
func updateAutoComplete(p *profile.Profile) {
	names := make([]string, 0, len(p.Function))
	for _, fn := range p.Function {
		names = append(names, fn.Name)
	}
	profileFunctionNames = names
}
// splitCommand splits the command line input into tokens separated by
// spaces. Takes care to separate commands of the form 'top10' into
// two tokens: 'top' and '10'. An empty or all-whitespace input yields
// an empty slice; the previous version indexed fields[0]
// unconditionally and panicked on such input.
func splitCommand(input string) []string {
	fields := strings.Fields(input)
	if len(fields) == 0 {
		return fields
	}
	if num := strings.IndexAny(fields[0], "0123456789"); num != -1 {
		// Split the trailing number off the command name.
		inputNumber := fields[0][num:]
		fields[0] = fields[0][:num]
		fields = append([]string{fields[0], inputNumber}, fields[1:]...)
	}
	return fields
}
// interactive displays a prompt and reads commands for profile
// manipulation/visualization.
func interactive(p *profile.Profile, obj plugin.ObjTool, ui plugin.UI, f *flags) error {
	updateAutoComplete(p)

	// Enter command processing loop.
	ui.Print("Entering interactive mode (type \"help\" for commands)")
	ui.SetAutoComplete(commands.NewCompleter(f.commands))

	for {
		input, err := readCommand(p, ui, f)
		if err != nil {
			// On EOF with pending input, fall through and process
			// that input before exiting; a bare EOF ends the session.
			if err != io.EOF {
				return err
			}
			if input == "" {
				return nil
			}
		}
		// Process simple commands.
		switch input {
		case "":
			continue
		case ":":
			// ":" clears all sample filters.
			f.flagFocus = newString("")
			f.flagIgnore = newString("")
			f.flagTagFocus = newString("")
			f.flagTagIgnore = newString("")
			f.flagHide = newString("")
			continue
		}

		fields := splitCommand(input)
		// Process report generation commands.
		if _, ok := f.commands[fields[0]]; ok {
			if err := generateReport(p, fields, obj, ui, f); err != nil {
				if err == io.EOF {
					return nil
				}
				ui.PrintErr(err)
			}
			continue
		}

		switch cmd := fields[0]; cmd {
		case "help":
			commandHelp(fields, ui, f)
			continue
		case "exit", "quit":
			return nil
		}

		// Process option settings.
		if of, err := optFlags(p, input, f); err == nil {
			// The updated flags take effect for subsequent commands.
			f = of
		} else {
			ui.PrintErr("Error: ", err.Error())
		}
	}
}
// generateReport runs a single interactive report command against a
// copy of the profile, so per-report filtering does not mutate the
// profile shared by the rest of the session.
func generateReport(p *profile.Profile, cmd []string, obj plugin.ObjTool, ui plugin.UI, f *flags) error {
	prof := p.Copy()
	cf, flagErr := cmdFlags(prof, cmd, ui, f)
	if flagErr != nil {
		return flagErr
	}
	return generate(true, prof, obj, ui, cf)
}
// validateRegex reports whether v is a valid regular expression,
// returning the compilation error when it is not.
func validateRegex(v string) error {
	if _, err := regexp.Compile(v); err != nil {
		return err
	}
	return nil
}
// readCommand prompts for and reads the next command, returning it
// with surrounding whitespace stripped. The profile and flags
// parameters are unused except by the commented-out options dump.
func readCommand(p *profile.Profile, ui plugin.UI, f *flags) (string, error) {
	//ui.Print("Options:\n", f.String(p))
	s, err := ui.ReadLine()
	return strings.TrimSpace(s), err
}
// commandHelp prints the interactive-mode help text. The list of
// report commands is assembled from f.commands and alphabetized;
// everything else is static text.
func commandHelp(_ []string, ui plugin.UI, f *flags) error {
	help := `
 Commands:
   cmd [n] [--cum] [focus_regex]* [-ignore_regex]*
       Produce a text report with the top n entries.
       Include samples matching focus_regex, and exclude ignore_regex.
       Add --cum to sort using cumulative data.
       Available commands:
`
	// Collect and sort the per-command usage lines for stable output.
	var commands []string
	for name, cmd := range f.commands {
		commands = append(commands, fmt.Sprintf(" %-12s %s", name, cmd.Usage))
	}
	sort.Strings(commands)

	help = help + strings.Join(commands, "\n") + `

   peek func_regex
       Display callers and callees of functions matching func_regex.

   dot [n] [focus_regex]* [-ignore_regex]* [>file]
       Produce an annotated callgraph with the top n entries.
       Include samples matching focus_regex, and exclude ignore_regex.
       For other outputs, replace dot with:
       - Graphic formats: dot, svg, pdf, ps, gif, png (use > to name output file)
       - Graph viewer: gv, web, evince, eog

   callgrind [n] [focus_regex]* [-ignore_regex]* [>file]
       Produce a file in callgrind-compatible format.
       Include samples matching focus_regex, and exclude ignore_regex.

   weblist func_regex [-ignore_regex]*
       Show annotated source with interspersed assembly in a web browser.

   list func_regex [-ignore_regex]*
       Print source for routines matching func_regex, and exclude ignore_regex.

   disasm func_regex [-ignore_regex]*
       Disassemble routines matching func_regex, and exclude ignore_regex.

   tags tag_regex [-ignore_regex]*
       List tags with key:value matching tag_regex and exclude ignore_regex.

   quit/exit/^D
       Exit pprof.

   option=value
       The following options can be set individually:
           cum/flat:           Sort entries based on cumulative or flat data
           call_tree:          Build context-sensitive call trees
           nodecount:          Max number of entries to display
           nodefraction:       Min frequency ratio of nodes to display
           edgefraction:       Min frequency ratio of edges to display
           focus/ignore:       Regexp to include/exclude samples by name/file
           tagfocus/tagignore: Regexp or value range to filter samples by tag
                               eg "1mb", "1mb:2mb", ":64kb"

           functions:          Level of aggregation for sample data
           files:
           lines:
           addresses:

           unit:               Measurement unit to use on reports

           Sample value selection by index:
            sample_index:      Index of sample value to display
            mean:              Average sample value over first value

           Sample value selection by name:
            alloc_space        for heap profiles
            alloc_objects
            inuse_space
            inuse_objects
            total_delay        for contention profiles
            mean_delay
            contentions

   :   Clear focus/ignore/hide/tagfocus/tagignore`

	ui.Print(help)
	return nil
}
// cmdFlags parses the options of an interactive command and returns
// an updated flags object.
func cmdFlags(prof *profile.Profile, input []string, ui plugin.UI, f *flags) (*flags, error) {
	// Work on a copy; the caller's flags stay untouched on error.
	cf := *f

	var focus, ignore string
	output := *cf.flagOutput
	nodeCount := *cf.flagNodeCount
	cmd := input[0]

	// Update output flags based on parameters.
	tokens := input[1:]
	for p := 0; p < len(tokens); p++ {
		t := tokens[p]
		if t == "" {
			continue
		}
		// A bare number selects the node count (e.g. "top 10").
		if c, err := strconv.ParseInt(t, 10, 32); err == nil {
			nodeCount = int(c)
			continue
		}
		switch t[0] {
		case '>':
			// ">file" (or "> file") redirects output to a file.
			if len(t) > 1 {
				output = t[1:]
				continue
			}
			// find next token
			for p++; p < len(tokens); p++ {
				if tokens[p] != "" {
					output = tokens[p]
					break
				}
			}
		case '-':
			if t == "--cum" || t == "-cum" {
				cf.flagCum = newBool(true)
				continue
			}
			// Any other "-x" token is an ignore regexp.
			ignore = catRegex(ignore, t[1:])
		default:
			// Remaining tokens are focus regexps.
			focus = catRegex(focus, t)
		}
	}

	pcmd, ok := f.commands[cmd]
	if !ok {
		return nil, fmt.Errorf("Unexpected parse failure: %v", input)
	}
	// Reset flags
	cf.flagCommands = make(map[string]*bool)
	cf.flagParamCommands = make(map[string]*string)

	if !pcmd.HasParam {
		cf.flagCommands[cmd] = newBool(true)

		switch cmd {
		case "tags":
			// tags filters on tag values rather than function names.
			cf.flagTagFocus = newString(focus)
			cf.flagTagIgnore = newString(ignore)
		default:
			cf.flagFocus = newString(catRegex(*cf.flagFocus, focus))
			cf.flagIgnore = newString(catRegex(*cf.flagIgnore, ignore))
		}
	} else {
		// Commands with a parameter (e.g. a function regexp) default
		// to matching everything when no regexp was given.
		if focus == "" {
			focus = "."
		}
		cf.flagParamCommands[cmd] = newString(focus)
		cf.flagIgnore = newString(catRegex(*cf.flagIgnore, ignore))
	}

	if nodeCount < 0 {
		switch cmd {
		case "text", "top":
			// Default text/top to 10 nodes on interactive mode
			nodeCount = 10
		default:
			nodeCount = 80
		}
	}

	cf.flagNodeCount = newInt(nodeCount)
	cf.flagOutput = newString(output)

	// Do regular flags processing
	if err := processFlags(prof, ui, &cf); err != nil {
		cf.usage(ui)
		return nil, err
	}

	return &cf, nil
}
// catRegex joins two regexps into one matching either, treating an
// empty string as "no expression".
func catRegex(a, b string) string {
	switch {
	case a == "":
		return b
	case b == "":
		return a
	default:
		return a + "|" + b
	}
}
// optFlags parses an interactive option setting and returns
// an updated flags object.
func optFlags(p *profile.Profile, input string, f *flags) (*flags, error) {
	// Input has the form "option" or "option=value".
	inputs := strings.SplitN(input, "=", 2)
	option := strings.ToLower(strings.TrimSpace(inputs[0]))
	var value string
	if len(inputs) == 2 {
		value = strings.TrimSpace(inputs[1])
	}

	// Work on a copy so a failed update leaves the caller's flags intact.
	of := *f

	var err error
	var bv bool
	var uv uint64
	var fv float64

	switch option {
	case "cum":
		if bv, err = parseBool(value); err != nil {
			return nil, err
		}
		of.flagCum = newBool(bv)
	case "flat":
		// "flat" is the inverse of "cum".
		if bv, err = parseBool(value); err != nil {
			return nil, err
		}
		of.flagCum = newBool(!bv)
	case "call_tree":
		if bv, err = parseBool(value); err != nil {
			return nil, err
		}
		of.flagCallTree = newBool(bv)
	case "unit":
		of.flagDisplayUnit = newString(value)
	case "sample_index":
		if uv, err = strconv.ParseUint(value, 10, 32); err != nil {
			return nil, err
		}
		if ix := int(uv); ix < 0 || ix >= len(p.SampleType) {
			return nil, fmt.Errorf("sample_index out of range [0..%d]", len(p.SampleType)-1)
		}
		of.flagSampleIndex = newInt(int(uv))
	case "mean":
		if bv, err = parseBool(value); err != nil {
			return nil, err
		}
		of.flagMean = newBool(bv)
	case "nodecount":
		if uv, err = strconv.ParseUint(value, 10, 32); err != nil {
			return nil, err
		}
		of.flagNodeCount = newInt(int(uv))
	case "nodefraction":
		if fv, err = strconv.ParseFloat(value, 64); err != nil {
			return nil, err
		}
		of.flagNodeFraction = newFloat64(fv)
	case "edgefraction":
		if fv, err = strconv.ParseFloat(value, 64); err != nil {
			return nil, err
		}
		of.flagEdgeFraction = newFloat64(fv)
	case "focus":
		if err = validateRegex(value); err != nil {
			return nil, err
		}
		of.flagFocus = newString(value)
	case "ignore":
		if err = validateRegex(value); err != nil {
			return nil, err
		}
		of.flagIgnore = newString(value)
	case "tagfocus":
		if err = validateRegex(value); err != nil {
			return nil, err
		}
		of.flagTagFocus = newString(value)
	case "tagignore":
		if err = validateRegex(value); err != nil {
			return nil, err
		}
		of.flagTagIgnore = newString(value)
	case "hide":
		if err = validateRegex(value); err != nil {
			return nil, err
		}
		of.flagHide = newString(value)
	case "addresses", "files", "lines", "functions":
		if bv, err = parseBool(value); err != nil {
			return nil, err
		}
		if !bv {
			// Granularity options can only be turned on; enabling one
			// disables the others (see setGranularityToggle).
			return nil, fmt.Errorf("select one of addresses/files/lines/functions")
		}
		setGranularityToggle(option, &of)
	default:
		// Finally, try to interpret the option as a sample type name,
		// optionally prefixed with "total_" or "mean_".
		if ix := findSampleIndex(p, "", option); ix >= 0 {
			of.flagSampleIndex = newInt(ix)
		} else if ix := findSampleIndex(p, "total_", option); ix >= 0 {
			of.flagSampleIndex = newInt(ix)
			of.flagMean = newBool(false)
		} else if ix := findSampleIndex(p, "mean_", option); ix >= 1 {
			// Index 0 holds the count means are taken over (see
			// meanExtractor), so mean_ requires ix >= 1.
			of.flagSampleIndex = newInt(ix)
			of.flagMean = newBool(true)
		} else {
			return nil, fmt.Errorf("unrecognized command: %s", input)
		}
	}
	return &of, nil
}
// parseBool parses a string as a boolean value. The comparison is
// case-insensitive; the empty string and the usual affirmative
// spellings mean true, the usual negative spellings mean false, and
// anything else is an error.
func parseBool(v string) (bool, error) {
	switch strings.ToLower(v) {
	case "false", "f", "no", "n", "0":
		return false, nil
	case "true", "t", "yes", "y", "1", "":
		return true, nil
	default:
		return false, fmt.Errorf(`illegal input "%s" for bool value`, v)
	}
}
// findSampleIndex returns the index of the profile sample type named
// by sampleType with prefix stripped, or -1 if sampleType does not
// start with prefix or no such sample type exists.
func findSampleIndex(p *profile.Profile, prefix, sampleType string) int {
	if !strings.HasPrefix(sampleType, prefix) {
		return -1
	}
	want := sampleType[len(prefix):]
	for i, st := range p.SampleType {
		if st.Type == want {
			return i
		}
	}
	return -1
}
// setGranularityToggle manages the set of granularity options. These
// operate as a toggle; turning one on turns the others off.
// It panics if o is not one of the four known granularity names.
func setGranularityToggle(o string, fl *flags) {
	on, off := newBool(true), newBool(false)
	fl.flagFunctions, fl.flagFiles, fl.flagLines, fl.flagAddresses = off, off, off, off
	switch o {
	case "functions":
		fl.flagFunctions = on
	case "files":
		fl.flagFiles = on
	case "lines":
		fl.flagLines = on
	case "addresses":
		fl.flagAddresses = on
	default:
		panic(fmt.Errorf("unexpected option %s", o))
	}
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package fetch provides an extensible mechanism to fetch a profile
// from a data source.
package fetch
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"strings"
"time"
"cmd/pprof/internal/plugin"
"cmd/pprof/internal/profile"
)
// FetchProfile reads from a data source (network, file) and generates a
// profile. It is Fetcher with a standard (stdin/stdout/stderr) UI.
func FetchProfile(source string, timeout time.Duration) (*profile.Profile, error) {
	ui := plugin.StandardUI()
	return Fetcher(source, timeout, ui)
}
// Fetcher is the plugin.Fetcher version of FetchProfile.
// Sources that parse as a URL with a host component are fetched over
// HTTP; anything else is treated as a local file name.
func Fetcher(source string, timeout time.Duration, ui plugin.UI) (*profile.Profile, error) {
	var r io.ReadCloser
	var err error
	// Note: the local is named u (not url) to avoid shadowing the
	// net/url package.
	if u, parseErr := url.Parse(source); parseErr == nil && u.Host != "" {
		r, err = FetchURL(source, timeout)
	} else {
		r, err = os.Open(source)
	}
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return profile.Parse(r)
}
// FetchURL fetches a profile from a URL using HTTP.
// On success the caller is responsible for closing the returned body.
func FetchURL(source string, timeout time.Duration) (io.ReadCloser, error) {
	resp, err := httpGet(source, timeout)
	if err != nil {
		return nil, fmt.Errorf("http fetch %s: %v", source, err)
	}
	if resp.StatusCode != http.StatusOK {
		// Close the body before returning; the previous version
		// leaked the response body (and its connection) on every
		// non-200 response.
		resp.Body.Close()
		return nil, fmt.Errorf("server response: %s", resp.Status)
	}

	return resp.Body, nil
}
// PostURL issues a POST to a URL over HTTP and returns the response body.
func PostURL(source, post string) ([]byte, error) {
	resp, err := http.Post(source, "application/octet-stream", strings.NewReader(post))
	if err != nil {
		return nil, fmt.Errorf("http post %s: %v", source, err)
	}
	// Close the body on every path. The previous version deferred the
	// Close after the status-code check, so non-200 responses leaked
	// the body and its underlying connection.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("server response: %s", resp.Status)
	}
	return ioutil.ReadAll(resp.Body)
}
// httpGet is a wrapper around http.Get; it is defined as a variable
// so it can be redefined for testing.
var httpGet = func(url string, timeout time.Duration) (*http.Response, error) {
	client := &http.Client{
		Transport: &http.Transport{
			// Allow the server slightly longer than the requested
			// timeout to start responding.
			ResponseHeaderTimeout: timeout + 5*time.Second,
		},
	}
	return client.Get(url)
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package plugin defines the plugin implementations that the main pprof driver requires.
package plugin
import (
"bufio"
"fmt"
"os"
"regexp"
"strings"
"time"
"cmd/pprof/internal/profile"
)
// A FlagSet creates and parses command-line flags.
// It is similar to the standard flag.FlagSet.
type FlagSet interface {
	// Bool, Int, Float64, and String define new flags,
	// like the functions of the same name in package flag.
	// Each returns a pointer to the flag's value.
	Bool(name string, def bool, usage string) *bool
	Int(name string, def int, usage string) *int
	Float64(name string, def float64, usage string) *float64
	String(name string, def string, usage string) *string

	// ExtraUsage returns any additional text that should be
	// printed after the standard usage message.
	// The typical use of ExtraUsage is to show any custom flags
	// defined by the specific pprof plugins being used.
	ExtraUsage() string

	// Parse initializes the flags with their values for this run
	// and returns the non-flag command line arguments.
	// If an unknown flag is encountered or there are no arguments,
	// Parse should call usage and return nil.
	Parse(usage func()) []string
}
// An ObjTool inspects shared libraries and executable files.
// Implementations may return errors for unsupported operations
// (see NoObjTool).
type ObjTool interface {
	// Open opens the named object file.
	// If the object is a shared library, start is the address where
	// it is mapped into memory in the address space being inspected.
	Open(file string, start uint64) (ObjFile, error)

	// Demangle translates a batch of symbol names from mangled
	// form to human-readable form.
	Demangle(names []string) (map[string]string, error)

	// Disasm disassembles the named object file, starting at
	// the start address and stopping at (before) the end address.
	Disasm(file string, start, end uint64) ([]Inst, error)

	// SetConfig configures the tool.
	// The implementation defines the meaning of the string
	// and can ignore it entirely.
	SetConfig(config string)
}
// NoObjTool returns a trivial implementation of the ObjTool interface.
// Open returns an error indicating that the requested file does not exist.
// Demangle returns an empty map and a nil error.
// Disasm returns an error.
// SetConfig is a no-op.
func NoObjTool() ObjTool {
	return noObjTool{}
}

// noObjTool is the stateless implementation behind NoObjTool.
type noObjTool struct{}

func (noObjTool) Open(file string, start uint64) (ObjFile, error) {
	return nil, &os.PathError{Op: "open", Path: file, Err: os.ErrNotExist}
}

func (noObjTool) Demangle(name []string) (map[string]string, error) {
	return make(map[string]string), nil
}

func (noObjTool) Disasm(file string, start, end uint64) ([]Inst, error) {
	return nil, fmt.Errorf("disassembly not supported")
}

func (noObjTool) SetConfig(config string) {
}
// An ObjFile is a single object file: a shared library or executable.
type ObjFile interface {
	// Name returns the underlying file name, if available.
	Name() string

	// Base returns the base address to use when looking up symbols in the file.
	Base() uint64

	// BuildID returns the GNU build ID of the file, or an empty string.
	BuildID() string

	// SourceLine reports the source line information for a given
	// address in the file. Due to inlining, the source line information
	// is in general a list of positions representing a call stack,
	// with the leaf function first.
	SourceLine(addr uint64) ([]Frame, error)

	// Symbols returns a list of symbols in the object file.
	// If r is not nil, Symbols restricts the list to symbols
	// with names matching the regular expression.
	// If addr is not zero, Symbols restricts the list to symbols
	// containing that address.
	Symbols(r *regexp.Regexp, addr uint64) ([]*Sym, error)

	// Close closes the file, releasing associated resources.
	Close() error
}
// A Frame describes a single line in a source file.
type Frame struct {
	Func string // name of function
	File string // source file name
	Line int    // line in file
}

// A Sym describes a single symbol in an object file.
type Sym struct {
	Name  []string // names of symbol (many if symbol was dedup'ed)
	File  string   // object file containing symbol
	Start uint64   // start virtual address
	End   uint64   // virtual address of last byte in sym (Start+size-1)
}

// An Inst is a single instruction in an assembly listing.
type Inst struct {
	Addr uint64 // virtual address of instruction
	Text string // instruction text
	File string // source file
	Line int    // source line
}
// A UI manages user interactions.
type UI interface {
	// ReadLine returns a line of text (a command) read from the user.
	ReadLine() (string, error)

	// Print shows a message to the user.
	// It formats the text as fmt.Print would and adds a final \n if not already present.
	// For line-based UI, Print writes to standard error.
	// (Standard output is reserved for report data.)
	Print(...interface{})

	// PrintErr shows an error message to the user.
	// It formats the text as fmt.Print would and adds a final \n if not already present.
	// For line-based UI, PrintErr writes to standard error.
	PrintErr(...interface{})

	// IsTerminal returns whether the UI is known to be tied to an
	// interactive terminal (as opposed to being redirected to a file).
	IsTerminal() bool

	// SetAutoComplete instructs the UI to call complete(cmd) to obtain
	// the auto-completion of cmd, if the UI supports auto-completion at all.
	SetAutoComplete(complete func(string) string)
}
// StandardUI returns a UI that reads from standard input,
// prints messages and errors to standard error (standard output is
// reserved for report data; only the prompt goes to stdout),
// and doesn't use auto-completion.
func StandardUI() UI {
	return &stdUI{r: bufio.NewReader(os.Stdin)}
}

// stdUI implements UI on top of the process's standard streams.
type stdUI struct {
	r *bufio.Reader // buffered reader over os.Stdin
}

// ReadLine writes the "(pprof) " prompt to stdout and reads one line
// (including the trailing newline) from stdin.
func (ui *stdUI) ReadLine() (string, error) {
	os.Stdout.WriteString("(pprof) ")
	return ui.r.ReadString('\n')
}

// Print writes the message to standard error; stdout is reserved for
// report output.
func (ui *stdUI) Print(args ...interface{}) {
	ui.fprint(os.Stderr, args)
}

func (ui *stdUI) PrintErr(args ...interface{}) {
	ui.fprint(os.Stderr, args)
}

// IsTerminal conservatively reports false; this UI does not detect ttys.
func (ui *stdUI) IsTerminal() bool {
	return false
}

// SetAutoComplete is a no-op: this UI has no auto-completion support.
func (ui *stdUI) SetAutoComplete(func(string) string) {
}

// fprint formats args like fmt.Print and writes them to f, ensuring a
// trailing newline.
func (ui *stdUI) fprint(f *os.File, args []interface{}) {
	text := fmt.Sprint(args...)
	if !strings.HasSuffix(text, "\n") {
		text += "\n"
	}
	f.WriteString(text)
}
// A Fetcher reads and returns the profile named by src.
// It gives up after the given timeout, unless src contains a timeout override
// (as defined by the implementation).
// It can print messages to ui.
type Fetcher func(src string, timeout time.Duration, ui UI) (*profile.Profile, error)

// A Symbolizer annotates a profile with symbol information.
// The profile was fetched from src.
// The meaning of mode is defined by the implementation.
type Symbolizer func(mode, src string, prof *profile.Profile, obj ObjTool, ui UI) error
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package profile
import (
"errors"
"fmt"
"sort"
)
// decoder returns the table of per-field decoders used when parsing
// an encoded Profile message (see profileDecoder).
func (p *Profile) decoder() []decoder {
	return profileDecoder
}
// preEncode populates the unexported fields to be used by encode
// (with suffix X) from the corresponding exported fields. The
// exported fields are cleared up to facilitate testing.
func (p *Profile) preEncode() {
	// strings interns every string used by the profile; the empty
	// string is interned first so it gets index 0.
	strings := make(map[string]int)
	addString(strings, "")

	for _, st := range p.SampleType {
		st.typeX = addString(strings, st.Type)
		st.unitX = addString(strings, st.Unit)
	}

	for _, s := range p.Sample {
		s.labelX = nil
		// Encode string-valued labels in sorted key order for
		// deterministic output.
		var keys []string
		for k := range s.Label {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		for _, k := range keys {
			vs := s.Label[k]
			for _, v := range vs {
				s.labelX = append(s.labelX,
					Label{
						keyX: addString(strings, k),
						strX: addString(strings, v),
					},
				)
			}
		}
		// Numeric labels, also in sorted key order.
		var numKeys []string
		for k := range s.NumLabel {
			numKeys = append(numKeys, k)
		}
		sort.Strings(numKeys)
		for _, k := range numKeys {
			vs := s.NumLabel[k]
			for _, v := range vs {
				s.labelX = append(s.labelX,
					Label{
						keyX: addString(strings, k),
						numX: v,
					},
				)
			}
		}
		// Locations are encoded by ID.
		s.locationIDX = nil
		for _, l := range s.Location {
			s.locationIDX = append(s.locationIDX, l.ID)
		}
	}

	for _, m := range p.Mapping {
		m.fileX = addString(strings, m.File)
		m.buildIDX = addString(strings, m.BuildID)
	}

	for _, l := range p.Location {
		for i, ln := range l.Line {
			if ln.Function != nil {
				l.Line[i].functionIDX = ln.Function.ID
			} else {
				// 0 marks "no function".
				l.Line[i].functionIDX = 0
			}
		}
		if l.Mapping != nil {
			l.mappingIDX = l.Mapping.ID
		} else {
			l.mappingIDX = 0
		}
	}
	for _, f := range p.Function {
		f.nameX = addString(strings, f.Name)
		f.systemNameX = addString(strings, f.SystemName)
		f.filenameX = addString(strings, f.Filename)
	}

	p.dropFramesX = addString(strings, p.DropFrames)
	p.keepFramesX = addString(strings, p.KeepFrames)

	if pt := p.PeriodType; pt != nil {
		pt.typeX = addString(strings, pt.Type)
		pt.unitX = addString(strings, pt.Unit)
	}

	// Materialize the interned map as the indexed string table.
	p.stringTable = make([]string, len(strings))
	for s, i := range strings {
		p.stringTable[i] = s
	}
}
// encode serializes the profile into b. The field numbers match the
// ones listed in the profileDecoder table below.
func (p *Profile) encode(b *buffer) {
	for _, x := range p.SampleType {
		encodeMessage(b, 1, x)
	}
	for _, x := range p.Sample {
		encodeMessage(b, 2, x)
	}
	for _, x := range p.Mapping {
		encodeMessage(b, 3, x)
	}
	for _, x := range p.Location {
		encodeMessage(b, 4, x)
	}
	for _, x := range p.Function {
		encodeMessage(b, 5, x)
	}
	encodeStrings(b, 6, p.stringTable)
	encodeInt64Opt(b, 7, p.dropFramesX)
	encodeInt64Opt(b, 8, p.keepFramesX)
	encodeInt64Opt(b, 9, p.TimeNanos)
	encodeInt64Opt(b, 10, p.DurationNanos)
	// Emit the period type only when it carries information.
	if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) {
		encodeMessage(b, 11, p.PeriodType)
	}
	encodeInt64Opt(b, 12, p.Period)
}
// profileDecoder maps profile field numbers to the functions that
// decode them into a Profile. Index 0 is unused; field numbers start
// at 1.
var profileDecoder = []decoder{
	nil, // 0
	// repeated ValueType sample_type = 1
	func(b *buffer, m message) error {
		x := new(ValueType)
		pp := m.(*Profile)
		pp.SampleType = append(pp.SampleType, x)
		return decodeMessage(b, x)
	},
	// repeated Sample sample = 2
	func(b *buffer, m message) error {
		x := new(Sample)
		pp := m.(*Profile)
		pp.Sample = append(pp.Sample, x)
		return decodeMessage(b, x)
	},
	// repeated Mapping mapping = 3
	func(b *buffer, m message) error {
		x := new(Mapping)
		pp := m.(*Profile)
		pp.Mapping = append(pp.Mapping, x)
		return decodeMessage(b, x)
	},
	// repeated Location location = 4
	func(b *buffer, m message) error {
		x := new(Location)
		pp := m.(*Profile)
		pp.Location = append(pp.Location, x)
		return decodeMessage(b, x)
	},
	// repeated Function function = 5
	func(b *buffer, m message) error {
		x := new(Function)
		pp := m.(*Profile)
		pp.Function = append(pp.Function, x)
		return decodeMessage(b, x)
	},
	// repeated string string_table = 6
	func(b *buffer, m message) error {
		pp := m.(*Profile)
		if err := decodeStrings(b, &pp.stringTable); err != nil {
			return err
		}
		// The first string-table entry must be the empty string.
		// (The previous version wrote "*&pp.stringTable[0]", a no-op
		// dereference-of-address that obscured this plain access.)
		if pp.stringTable[0] != "" {
			return errors.New("string_table[0] must be ''")
		}
		return nil
	},
	// repeated int64 drop_frames = 7
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) },
	// repeated int64 keep_frames = 8
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) },
	// repeated int64 time_nanos = 9
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).TimeNanos) },
	// repeated int64 duration_nanos = 10
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) },
	// optional string period_type = 11
	func(b *buffer, m message) error {
		x := new(ValueType)
		pp := m.(*Profile)
		pp.PeriodType = x
		return decodeMessage(b, x)
	},
	// repeated int64 period = 12
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) },
}
// postDecode takes the unexported fields populated by decode (with
// suffix X) and populates the corresponding exported fields.
// The unexported fields are cleared up to facilitate testing.
//
// It returns the first error accumulated while resolving string-table
// indices; the previous version threaded err through every getString
// call but then returned nil, silently discarding decode errors.
func (p *Profile) postDecode() error {
	var err error

	// Resolve mappings and index them by ID for location references.
	mappings := make(map[uint64]*Mapping)
	for _, m := range p.Mapping {
		m.File, err = getString(p.stringTable, &m.fileX, err)
		m.BuildID, err = getString(p.stringTable, &m.buildIDX, err)
		mappings[m.ID] = m
	}

	functions := make(map[uint64]*Function)
	for _, f := range p.Function {
		f.Name, err = getString(p.stringTable, &f.nameX, err)
		f.SystemName, err = getString(p.stringTable, &f.systemNameX, err)
		f.Filename, err = getString(p.stringTable, &f.filenameX, err)
		functions[f.ID] = f
	}

	locations := make(map[uint64]*Location)
	for _, l := range p.Location {
		l.Mapping = mappings[l.mappingIDX]
		l.mappingIDX = 0
		for i, ln := range l.Line {
			if id := ln.functionIDX; id != 0 {
				l.Line[i].Function = functions[id]
				if l.Line[i].Function == nil {
					return fmt.Errorf("Function ID %d not found", id)
				}
				l.Line[i].functionIDX = 0
			}
		}
		locations[l.ID] = l
	}

	for _, st := range p.SampleType {
		st.Type, err = getString(p.stringTable, &st.typeX, err)
		st.Unit, err = getString(p.stringTable, &st.unitX, err)
	}

	for _, s := range p.Sample {
		labels := make(map[string][]string)
		numLabels := make(map[string][]int64)
		for _, l := range s.labelX {
			var key, value string
			key, err = getString(p.stringTable, &l.keyX, err)
			if l.strX != 0 {
				// String-valued label.
				value, err = getString(p.stringTable, &l.strX, err)
				labels[key] = append(labels[key], value)
			} else {
				// Numeric label.
				numLabels[key] = append(numLabels[key], l.numX)
			}
		}
		if len(labels) > 0 {
			s.Label = labels
		}
		if len(numLabels) > 0 {
			s.NumLabel = numLabels
		}
		// Resolve location IDs into location pointers.
		s.Location = nil
		for _, lid := range s.locationIDX {
			s.Location = append(s.Location, locations[lid])
		}
		s.locationIDX = nil
	}

	p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err)
	p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err)

	// Guarantee a non-nil PeriodType, then resolve its strings.
	// (The previous version performed this nil check twice.)
	if p.PeriodType == nil {
		p.PeriodType = &ValueType{}
	}
	pt := p.PeriodType
	pt.Type, err = getString(p.stringTable, &pt.typeX, err)
	pt.Unit, err = getString(p.stringTable, &pt.unitX, err)

	p.stringTable = nil
	return err
}
// decoder returns the per-field decoding table for ValueType messages.
func (p *ValueType) decoder() []decoder {
	return valueTypeDecoder
}

// encode serializes a ValueType, writing the string-table indices for
// its type (field 1) and unit (field 2).
func (p *ValueType) encode(b *buffer) {
	encodeInt64Opt(b, 1, p.typeX)
	encodeInt64Opt(b, 2, p.unitX)
}

// valueTypeDecoder maps proto field numbers to decoding functions for
// ValueType. Index 0 is unused: proto field numbers start at 1.
var valueTypeDecoder = []decoder{
	nil, // 0
	// optional int64 type = 1
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) },
	// optional int64 unit = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) },
}
// decoder returns the per-field decoding table for Sample messages.
func (p *Sample) decoder() []decoder {
	return sampleDecoder
}
// encode serializes a Sample into the proto buffer: location IDs as
// field 1, values as field 2, and labels as field 3.
func (p *Sample) encode(b *buffer) {
	encodeUint64s(b, 1, p.locationIDX)
	for i := range p.Value {
		encodeInt64(b, 2, p.Value[i])
	}
	for i := range p.labelX {
		encodeMessage(b, 3, p.labelX[i])
	}
}
// sampleDecoder maps proto field numbers to decoding functions for
// Sample. Index 0 is unused: proto field numbers start at 1.
var sampleDecoder = []decoder{
	nil, // 0
	// repeated uint64 location = 1
	func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) },
	// repeated int64 value = 2
	func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) },
	// repeated Label label = 3
	func(b *buffer, m message) error {
		s := m.(*Sample)
		// Append an empty Label and decode into it in place.
		n := len(s.labelX)
		s.labelX = append(s.labelX, Label{})
		return decodeMessage(b, &s.labelX[n])
	},
}
// decoder returns the per-field decoding table for Label messages.
func (p Label) decoder() []decoder {
	return labelDecoder
}

// encode serializes a Label: string-table indices for the key
// (field 1) and string value (field 2), and the numeric value
// (field 3).
func (p Label) encode(b *buffer) {
	encodeInt64Opt(b, 1, p.keyX)
	encodeInt64Opt(b, 2, p.strX)
	encodeInt64Opt(b, 3, p.numX)
}

// labelDecoder maps proto field numbers to decoding functions for
// Label. Index 0 is unused: proto field numbers start at 1.
var labelDecoder = []decoder{
	nil, // 0
	// optional int64 key = 1
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Label).keyX) },
	// optional int64 str = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Label).strX) },
	// optional int64 num = 3
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Label).numX) },
}
// decoder returns the per-field decoding table for Mapping messages.
func (p *Mapping) decoder() []decoder {
	return mappingDecoder
}

// encode serializes a Mapping: ID, address range and file offset,
// string-table indices for the file name and build ID, and the flags
// describing what symbol information the mapping provides.
func (p *Mapping) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.ID)
	encodeUint64Opt(b, 2, p.Start)
	encodeUint64Opt(b, 3, p.Limit)
	encodeUint64Opt(b, 4, p.Offset)
	encodeInt64Opt(b, 5, p.fileX)
	encodeInt64Opt(b, 6, p.buildIDX)
	encodeBoolOpt(b, 7, p.HasFunctions)
	encodeBoolOpt(b, 8, p.HasFilenames)
	encodeBoolOpt(b, 9, p.HasLineNumbers)
	encodeBoolOpt(b, 10, p.HasInlineFrames)
}

// mappingDecoder maps proto field numbers to decoding functions for
// Mapping. Index 0 is unused: proto field numbers start at 1.
var mappingDecoder = []decoder{
	nil, // 0
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) },            // optional uint64 id = 1
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) },         // optional uint64 memory_offset = 2
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) },         // optional uint64 memory_limit = 3
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) },        // optional uint64 file_offset = 4
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) },          // optional int64 filename = 5
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) },       // optional int64 build_id = 6
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) },    // optional bool has_functions = 7
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) },    // optional bool has_filenames = 8
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) },  // optional bool has_line_numbers = 9
	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10
}
// decoder returns the per-field decoding table for Location messages.
func (p *Location) decoder() []decoder {
	return locationDecoder
}

// encode serializes a Location: ID (field 1), enclosing mapping ID
// (field 2), instruction address (field 3), and source lines
// (field 4).
func (p *Location) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.ID)
	encodeUint64Opt(b, 2, p.mappingIDX)
	encodeUint64Opt(b, 3, p.Address)
	for i := range p.Line {
		encodeMessage(b, 4, &p.Line[i])
	}
}

// locationDecoder maps proto field numbers to decoding functions for
// Location. Index 0 is unused: proto field numbers start at 1.
var locationDecoder = []decoder{
	nil, // 0
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) },         // optional uint64 id = 1;
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2;
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) },    // optional uint64 address = 3;
	func(b *buffer, m message) error { // repeated Line line = 4
		pp := m.(*Location)
		// Append an empty Line and decode into it in place.
		n := len(pp.Line)
		pp.Line = append(pp.Line, Line{})
		return decodeMessage(b, &pp.Line[n])
	},
}
// decoder returns the per-field decoding table for Line messages.
func (p *Line) decoder() []decoder {
	return lineDecoder
}

// encode serializes a Line: function ID (field 1) and line number
// (field 2).
func (p *Line) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.functionIDX)
	encodeInt64Opt(b, 2, p.Line)
}

// lineDecoder maps proto field numbers to decoding functions for Line.
// Index 0 is unused: proto field numbers start at 1.
var lineDecoder = []decoder{
	nil, // 0
	// optional uint64 function_id = 1
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
	// optional int64 line = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
}
// decoder returns the per-field decoding table for Function messages.
func (p *Function) decoder() []decoder {
	return functionDecoder
}

// encode serializes a Function: ID (field 1), string-table indices for
// the name, system name, and file name (fields 2-4), and the starting
// line (field 5).
func (p *Function) encode(b *buffer) {
	encodeUint64Opt(b, 1, p.ID)
	encodeInt64Opt(b, 2, p.nameX)
	encodeInt64Opt(b, 3, p.systemNameX)
	encodeInt64Opt(b, 4, p.filenameX)
	encodeInt64Opt(b, 5, p.StartLine)
}

// functionDecoder maps proto field numbers to decoding functions for
// Function. Index 0 is unused: proto field numbers start at 1.
var functionDecoder = []decoder{
	nil, // 0
	// optional uint64 id = 1
	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) },
	// optional int64 function_name = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) },
	// optional int64 function_system_name = 3
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) },
	// optional int64 filename = 4
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) },
	// optional int64 start_line = 5
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) },
}
// addString returns the string-table index of s, inserting it with the
// next free index if it is not already present. The result is an int64
// so it can be stored directly in the proto's string-index fields.
func addString(strings map[string]int, s string) int64 {
	if idx, ok := strings[s]; ok {
		return int64(idx)
	}
	idx := len(strings)
	strings[s] = idx
	return int64(idx)
}
// getString fetches the string at index *strng from the string table,
// resetting *strng to zero on success (clearing the unexported index
// for postDecode). If err is already non-nil it is passed through
// unchanged, which lets calls be chained and checked once at the end.
// An out-of-range index yields errMalformed.
func getString(strings []string, strng *int64, err error) (string, error) {
	if err != nil {
		return "", err
	}
	if idx := int(*strng); 0 <= idx && idx < len(strings) {
		*strng = 0
		return strings[idx], nil
	}
	return "", errMalformed
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Implements methods to filter samples from profiles.
package profile
import "regexp"
// FilterSamplesByName filters the samples in a profile and only keeps
// samples where at least one frame matches focus but none match ignore.
// Returns true if the corresponding regexp matched at least one sample.
func (p *Profile) FilterSamplesByName(focus, ignore, hide *regexp.Regexp) (fm, im, hm bool) {
	// focusOrIgnore records, per location ID, whether the location is
	// focused (true) or ignored (false); unmatched locations are absent.
	focusOrIgnore := make(map[uint64]bool)
	hidden := make(map[uint64]bool)
	for _, l := range p.Location {
		if ignore != nil && l.matchesName(ignore) {
			im = true
			focusOrIgnore[l.ID] = false
		} else if focus == nil || l.matchesName(focus) {
			fm = true
			focusOrIgnore[l.ID] = true
		}
		if hide != nil && l.matchesName(hide) {
			hm = true
			// Drop the matching lines; if none remain, the whole
			// location is removed from samples below.
			l.Line = l.unmatchedLines(hide)
			if len(l.Line) == 0 {
				hidden[l.ID] = true
			}
		}
	}
	s := make([]*Sample, 0, len(p.Sample))
	for _, sample := range p.Sample {
		if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
			if len(hidden) > 0 {
				var locs []*Location
				for _, loc := range sample.Location {
					if !hidden[loc.ID] {
						locs = append(locs, loc)
					}
				}
				if len(locs) == 0 {
					// Remove sample with no locations (by not adding it to s).
					continue
				}
				sample.Location = locs
			}
			s = append(s, sample)
		}
	}
	p.Sample = s
	return
}
// matchesName reports whether any line in the location has a function
// whose name or file name matches the regular expression.
func (loc *Location) matchesName(re *regexp.Regexp) bool {
	for _, line := range loc.Line {
		fn := line.Function
		if fn == nil {
			continue
		}
		if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
			return true
		}
	}
	return false
}
// unmatchedLines returns the lines in the location whose function name
// and file name both fail to match the regular expression. Lines with
// no associated function are always kept.
func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line {
	var kept []Line
	for _, line := range loc.Line {
		if fn := line.Function; fn != nil && (re.MatchString(fn.Name) || re.MatchString(fn.Filename)) {
			continue
		}
		kept = append(kept, line)
	}
	return kept
}
// focusedAndNotIgnored looks up a slice of location IDs against a map
// of focused (true) / ignored (false) locations; locations matching
// neither are absent from the map. It reports whether the stack has at
// least one focused location and no ignored ones.
func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool {
	found := false
	for _, loc := range locs {
		focus, present := m[loc.ID]
		if !present {
			continue
		}
		if !focus {
			// An ignored location disqualifies the sample outright.
			return false
		}
		// Focused location found; keep scanning for a possible ignore.
		found = true
	}
	return found
}
// TagMatch selects tags for filtering. It is called with a tag's key
// and either its string value (val) or numeric value (nval), and
// reports whether the tag matches.
type TagMatch func(key, val string, nval int64) bool
// FilterSamplesByTag removes all samples from the profile except those
// whose tags match focus and do not match ignore. fm and im report
// whether focus and ignore, respectively, matched at least one sample.
func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) {
	kept := make([]*Sample, 0, len(p.Sample))
	for _, sample := range p.Sample {
		focused, ignored := focusedSample(sample, focus, ignore)
		if focused {
			fm = true
		}
		if ignored {
			im = true
		}
		if focused && !ignored {
			kept = append(kept, sample)
		}
	}
	p.Sample = kept
	return
}
// focusedSample checks a sample against focus and ignore tag matchers.
// Returns whether the focus/ignore matchers match any of the sample's
// tags. A nil focus matches every sample.
func focusedSample(s *Sample, focus, ignore TagMatch) (fm, im bool) {
	fm = focus == nil
	// String-valued labels are matched with nval == 0.
	for key, vals := range s.Label {
		for _, val := range vals {
			if ignore != nil && ignore(key, val, 0) {
				im = true
			}
			if !fm && focus(key, val, 0) {
				fm = true
			}
		}
	}
	// Numeric labels are matched with an empty string value.
	for key, vals := range s.NumLabel {
		for _, val := range vals {
			if ignore != nil && ignore(key, "", val) {
				im = true
			}
			if !fm && focus(key, "", val) {
				fm = true
			}
		}
	}
	return fm, im
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements parsers to convert legacy profiles into the
// profile.proto format.
package profile
import (
"bufio"
"bytes"
"fmt"
"io"
"math"
"regexp"
"strconv"
"strings"
)
var (
	// Regular expressions recognizing the headers and sample lines of
	// the various legacy (pre-proto) text profile formats.
	countStartRE          = regexp.MustCompile(`\A(\w+) profile: total \d+\n\z`)
	countRE               = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\n\z`)
	heapHeaderRE          = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`)
	heapSampleRE          = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`)
	contentionSampleRE    = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`)
	hexNumberRE           = regexp.MustCompile(`0x[0-9a-f]+`)
	growthHeaderRE        = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz`)
	fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz`)
	threadzStartRE        = regexp.MustCompile(`--- threadz \d+ ---`)
	threadStartRE         = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`)
	procMapsRE            = regexp.MustCompile(`([[:xdigit:]]+)-([[:xdigit:]]+)\s+([-rwxp]+)\s+([[:xdigit:]]+)\s+([[:xdigit:]]+):([[:xdigit:]]+)\s+([[:digit:]]+)\s*(\S+)?`)
	briefMapsRE           = regexp.MustCompile(`\s*([[:xdigit:]]+)-([[:xdigit:]]+):\s*(\S+)(\s.*@)?([[:xdigit:]]+)?`)

	// LegacyHeapAllocated instructs the heapz parsers to use the
	// allocated memory stats instead of the default in-use memory. Note
	// that tcmalloc doesn't provide all allocated memory, only in-use
	// stats.
	LegacyHeapAllocated bool
)
// isSpaceOrComment reports whether line is blank (whitespace only) or
// is a comment line beginning with '#' after any leading whitespace.
func isSpaceOrComment(line string) bool {
	trimmed := strings.TrimSpace(line)
	return trimmed == "" || strings.HasPrefix(trimmed, "#")
}
// parseGoCount parses a Go count profile (e.g., threadcreate or
// goroutine) and returns a new Profile.
func parseGoCount(b []byte) (*Profile, error) {
	r := bytes.NewBuffer(b)
	var line string
	var err error
	for {
		// Skip past comments and empty lines seeking a real header.
		line, err = r.ReadString('\n')
		if err != nil {
			return nil, err
		}
		if !isSpaceOrComment(line) {
			break
		}
	}
	// The header names the profile type, e.g. "goroutine profile: total 7".
	m := countStartRE.FindStringSubmatch(line)
	if m == nil {
		return nil, errUnrecognized
	}
	profileType := string(m[1])
	p := &Profile{
		PeriodType: &ValueType{Type: profileType, Unit: "count"},
		Period:     1,
		SampleType: []*ValueType{{Type: profileType, Unit: "count"}},
	}
	locations := make(map[uint64]*Location)
	for {
		line, err = r.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				break
			}
			return nil, err
		}
		if isSpaceOrComment(line) {
			continue
		}
		// A "---" line introduces a trailing section (e.g. memory map).
		if strings.HasPrefix(line, "---") {
			break
		}
		// Each sample line has the form "<count> @ 0xaddr 0xaddr ...".
		m := countRE.FindStringSubmatch(line)
		if m == nil {
			return nil, errMalformed
		}
		n, err := strconv.ParseInt(string(m[1]), 0, 64)
		if err != nil {
			return nil, errMalformed
		}
		fields := strings.Fields(string(m[2]))
		locs := make([]*Location, 0, len(fields))
		for _, stk := range fields {
			addr, err := strconv.ParseUint(stk, 0, 64)
			if err != nil {
				return nil, errMalformed
			}
			// Adjust all frames by -1 (except the leaf) to land on top of
			// the call instruction.
			if len(locs) > 0 {
				addr--
			}
			// Locations are deduplicated by address across samples.
			loc := locations[addr]
			if loc == nil {
				loc = &Location{
					Address: addr,
				}
				locations[addr] = loc
				p.Location = append(p.Location, loc)
			}
			locs = append(locs, loc)
		}
		p.Sample = append(p.Sample, &Sample{
			Location: locs,
			Value:    []int64{n},
		})
	}
	// Parse any trailing sections, such as the process memory map.
	if err = parseAdditionalSections(strings.TrimSpace(line), r, p); err != nil {
		return nil, err
	}
	return p, nil
}
// remapLocationIDs rebuilds p.Location from the locations actually
// referenced by samples, assigning fresh consecutive IDs (starting at
// 1) in first-use order and remapping samples to the new IDs.
func (p *Profile) remapLocationIDs() {
	var inUse []*Location
	seen := make(map[*Location]bool, len(p.Location))
	for _, sample := range p.Sample {
		for _, loc := range sample.Location {
			if seen[loc] {
				continue
			}
			seen[loc] = true
			loc.ID = uint64(len(inUse) + 1)
			inUse = append(inUse, loc)
		}
	}
	p.Location = inUse
}
// remapFunctionIDs rebuilds p.Function from the functions actually
// referenced by location lines, assigning fresh consecutive IDs
// (starting at 1) in first-use order.
func (p *Profile) remapFunctionIDs() {
	var inUse []*Function
	seen := make(map[*Function]bool, len(p.Function))
	for _, loc := range p.Location {
		for _, line := range loc.Line {
			fn := line.Function
			if fn == nil || seen[fn] {
				continue
			}
			seen[fn] = true
			fn.ID = uint64(len(inUse) + 1)
			inUse = append(inUse, fn)
		}
	}
	p.Function = inUse
}
// remapMappingIDs matches location addresses with existing mappings
// and updates them appropriately. This is O(N*M), if this ever shows
// up as a bottleneck, evaluate sorting the mappings and doing a
// binary search, which would make it O(N*log(M)).
func (p *Profile) remapMappingIDs() {
	if len(p.Mapping) == 0 {
		return
	}
	// Some profile handlers will incorrectly set regions for the main
	// executable if its section is remapped. Fix them through heuristics.
	// Remove the initial mapping if named '/anon_hugepage' and has a
	// consecutive adjacent mapping.
	if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") {
		if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start {
			p.Mapping = p.Mapping[1:]
		}
	}
	// Subtract the offset from the start of the main mapping if it
	// ends up at a recognizable start address.
	// NOTE(review): 0x400000 is presumably the conventional x86-64 ELF
	// text base; confirm before extending to other platforms.
	const expectedStart = 0x400000
	if m := p.Mapping[0]; m.Start-m.Offset == expectedStart {
		m.Start = expectedStart
		m.Offset = 0
	}
	// Associate each location with the mapping containing its address;
	// locations outside every mapping keep a nil Mapping.
	for _, l := range p.Location {
		if a := l.Address; a != 0 {
			for _, m := range p.Mapping {
				if m.Start <= a && a < m.Limit {
					l.Mapping = m
					break
				}
			}
		}
	}
	// Reset all mapping IDs.
	for i, m := range p.Mapping {
		m.ID = uint64(i + 1)
	}
}
// cpuInts lists the candidate word decoders (32/64-bit, little/big
// endian) tried in turn when sniffing the word size and endianness of
// a binary profilez CPU profile header.
var cpuInts = []func([]byte) (uint64, []byte){
	get32l,
	get32b,
	get64l,
	get64b,
}
// get32l decodes a little-endian 32-bit word from the front of b,
// returning the value and the remaining bytes. It returns (0, nil)
// when fewer than 4 bytes are available.
func get32l(b []byte) (uint64, []byte) {
	if len(b) < 4 {
		return 0, nil
	}
	var v uint64
	for i := 3; i >= 0; i-- {
		v = v<<8 | uint64(b[i])
	}
	return v, b[4:]
}
// get32b decodes a big-endian 32-bit word from the front of b,
// returning the value and the remaining bytes. It returns (0, nil)
// when fewer than 4 bytes are available.
func get32b(b []byte) (uint64, []byte) {
	if len(b) < 4 {
		return 0, nil
	}
	var v uint64
	for _, c := range b[:4] {
		v = v<<8 | uint64(c)
	}
	return v, b[4:]
}
// get64l decodes a little-endian 64-bit word from the front of b,
// returning the value and the remaining bytes. It returns (0, nil)
// when fewer than 8 bytes are available.
func get64l(b []byte) (uint64, []byte) {
	if len(b) < 8 {
		return 0, nil
	}
	var v uint64
	for i := 7; i >= 0; i-- {
		v = v<<8 | uint64(b[i])
	}
	return v, b[8:]
}
// get64b decodes a big-endian 64-bit word from the front of b,
// returning the value and the remaining bytes. It returns (0, nil)
// when fewer than 8 bytes are available.
func get64b(b []byte) (uint64, []byte) {
	if len(b) < 8 {
		return 0, nil
	}
	var v uint64
	for _, c := range b[:8] {
		v = v<<8 | uint64(c)
	}
	return v, b[8:]
}
// ParseTracebacks parses a set of tracebacks and returns a newly
// populated profile. It will accept any text file and generate a
// Profile out of it with any hex addresses it can identify, including
// a process map if it can recognize one. Each sample will include a
// tag "source" with the addresses recognized in string format.
func ParseTracebacks(b []byte) (*Profile, error) {
	r := bytes.NewBuffer(b)
	p := &Profile{
		PeriodType: &ValueType{Type: "trace", Unit: "count"},
		Period:     1,
		SampleType: []*ValueType{
			{Type: "trace", Unit: "count"},
		},
	}
	// sources/sloc accumulate the current traceback; locs deduplicates
	// locations by address across the whole profile.
	var sources []string
	var sloc []*Location
	locs := make(map[uint64]*Location)
	for {
		l, err := r.ReadString('\n')
		if err != nil {
			if err != io.EOF {
				return nil, err
			}
			// On EOF, still process a final unterminated line; an
			// empty one means there is nothing left.
			if l == "" {
				break
			}
		}
		if sectionTrigger(l) == memoryMapSection {
			break
		}
		if s, addrs := extractHexAddresses(l); len(s) > 0 {
			for _, addr := range addrs {
				// Addresses from stack traces point to the next instruction after
				// each call. Adjust by -1 to land somewhere on the actual call
				// (except for the leaf, which is not a call).
				if len(sloc) > 0 {
					addr--
				}
				loc := locs[addr]
				if locs[addr] == nil {
					loc = &Location{
						Address: addr,
					}
					p.Location = append(p.Location, loc)
					locs[addr] = loc
				}
				sloc = append(sloc, loc)
			}
			sources = append(sources, s...)
		} else {
			// A line without addresses terminates the current traceback.
			if len(sources) > 0 || len(sloc) > 0 {
				addTracebackSample(sloc, sources, p)
				sloc, sources = nil, nil
			}
		}
	}
	// Add final sample to save any leftover data.
	if len(sources) > 0 || len(sloc) > 0 {
		addTracebackSample(sloc, sources, p)
	}
	if err := p.ParseMemoryMap(r); err != nil {
		return nil, err
	}
	return p, nil
}
// addTracebackSample appends a unit-count sample to p with the stack l
// and the recognized address strings s attached as a "source" label.
func addTracebackSample(l []*Location, s []string, p *Profile) {
	sample := &Sample{
		Value:    []int64{1},
		Location: l,
		Label:    map[string][]string{"source": s},
	}
	p.Sample = append(p.Sample, sample)
}
// parseCPU parses a profilez legacy profile and returns a newly
// populated Profile.
//
// The general format for profilez samples is a sequence of words in
// binary format. The first words are a header with the following data:
// 1st word -- 0
// 2nd word -- 3
// 3rd word -- 0 if a c++ application, 1 if a java application.
// 4th word -- Sampling period (in microseconds).
// 5th word -- Padding.
func parseCPU(b []byte) (*Profile, error) {
	var parse func([]byte) (uint64, []byte)
	var n1, n2, n3, n4, n5 uint64
	// Try each word size/endianness until the five header words come
	// out with the expected values; that decoder is then used for the
	// rest of the profile.
	for _, parse = range cpuInts {
		var tmp []byte
		n1, tmp = parse(b)
		n2, tmp = parse(tmp)
		n3, tmp = parse(tmp)
		n4, tmp = parse(tmp)
		n5, tmp = parse(tmp)
		if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 {
			b = tmp
			return cpuProfile(b, int64(n4), parse)
		}
	}
	return nil, errUnrecognized
}
// cpuProfile returns a new Profile from C++ profilez data.
// b is the profile bytes after the header, period is the profiling
// period, and parse is a function to parse 8-byte chunks from the
// profile in its native endianness.
func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) {
	p := &Profile{
		// The header period is in microseconds; store nanoseconds.
		Period:     period * 1000,
		PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"},
		SampleType: []*ValueType{
			{Type: "samples", Unit: "count"},
			{Type: "cpu", Unit: "nanoseconds"},
		},
	}
	var err error
	if b, _, err = parseCPUSamples(b, parse, true, p); err != nil {
		return nil, err
	}
	// If all samples have the same second-to-the-bottom frame, it
	// strongly suggests that it is an uninteresting artifact of
	// measurement -- a stack frame pushed by the signal handler. The
	// bottom frame is always correct as it is picked up from the signal
	// structure, not the stack. Check if this is the case and if so,
	// remove.
	if len(p.Sample) > 1 && len(p.Sample[0].Location) > 1 {
		allSame := true
		id1 := p.Sample[0].Location[1].Address
		for _, s := range p.Sample {
			if len(s.Location) < 2 || id1 != s.Location[1].Address {
				allSame = false
				break
			}
		}
		if allSame {
			for _, s := range p.Sample {
				s.Location = append(s.Location[:1], s.Location[2:]...)
			}
		}
	}
	// Whatever follows the samples is the process memory map.
	if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil {
		return nil, err
	}
	return p, nil
}
// parseCPUSamples parses a collection of profilez samples from a
// profile.
//
// profilez samples are a repeated sequence of stack frames of the
// form:
// 1st word -- The number of times this stack was encountered.
// 2nd word -- The size of the stack (StackSize).
// 3rd word -- The first address on the stack.
// ...
// StackSize + 2 -- The last address on the stack
// The last stack trace is of the form:
// 1st word -- 0
// 2nd word -- 1
// 3rd word -- 0
//
// Addresses from stack traces may point to the next instruction after
// each call. Optionally adjust by -1 to land somewhere on the actual
// call (except for the leaf, which is not a call).
func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) {
	locs := make(map[uint64]*Location)
	for len(b) > 0 {
		var count, nstk uint64
		count, b = parse(b)
		nstk, b = parse(b)
		// Sanity-check the claimed stack size against the remaining
		// bytes. NOTE(review): the divisor 4 assumes 32-bit words even
		// when parse reads 64-bit words; the check is then only an
		// upper bound. parse returns (0, nil) when b is exhausted, so
		// the loop below stays safe either way.
		if b == nil || nstk > uint64(len(b)/4) {
			return nil, nil, errUnrecognized
		}
		var sloc []*Location
		addrs := make([]uint64, nstk)
		for i := 0; i < int(nstk); i++ {
			addrs[i], b = parse(b)
		}
		if count == 0 && nstk == 1 && addrs[0] == 0 {
			// End of data marker
			break
		}
		for i, addr := range addrs {
			if adjust && i > 0 {
				addr--
			}
			// Locations are deduplicated by (possibly adjusted) address.
			loc := locs[addr]
			if loc == nil {
				loc = &Location{
					Address: addr,
				}
				locs[addr] = loc
				p.Location = append(p.Location, loc)
			}
			sloc = append(sloc, loc)
		}
		// Two values per sample: the raw count and the count scaled to
		// nanoseconds by the profile period.
		p.Sample = append(p.Sample,
			&Sample{
				Value:    []int64{int64(count), int64(count) * int64(p.Period)},
				Location: sloc,
			})
	}
	// Reached the end without finding the EOD marker.
	return b, locs, nil
}
// parseHeap parses a heapz legacy or a growthz profile and
// returns a newly populated Profile.
func parseHeap(b []byte) (p *Profile, err error) {
	r := bytes.NewBuffer(b)
	l, err := r.ReadString('\n')
	if err != nil {
		return nil, errUnrecognized
	}
	// The header line identifies the profile flavor: heapz (with a
	// sampling variant and period), growthz, or fragmentationz.
	// sampling records the heapz variant ("" or "v2") for use when
	// scaling samples later.
	sampling := ""
	if header := heapHeaderRE.FindStringSubmatch(l); header != nil {
		p = &Profile{
			SampleType: []*ValueType{
				{Type: "objects", Unit: "count"},
				{Type: "space", Unit: "bytes"},
			},
			PeriodType: &ValueType{Type: "objects", Unit: "bytes"},
		}
		var period int64
		if len(header[6]) > 0 {
			if period, err = strconv.ParseInt(string(header[6]), 10, 64); err != nil {
				return nil, errUnrecognized
			}
		}
		switch header[5] {
		case "heapz_v2", "heap_v2":
			sampling, p.Period = "v2", period
		case "heapprofile":
			sampling, p.Period = "", 1
		case "heap":
			// NOTE(review): the period is halved for plain "heap"
			// headers — presumably those report twice the sampling
			// rate; confirm against the emitting runtime.
			sampling, p.Period = "v2", period/2
		default:
			return nil, errUnrecognized
		}
	} else if header = growthHeaderRE.FindStringSubmatch(l); header != nil {
		p = &Profile{
			SampleType: []*ValueType{
				{Type: "objects", Unit: "count"},
				{Type: "space", Unit: "bytes"},
			},
			PeriodType: &ValueType{Type: "heapgrowth", Unit: "count"},
			Period:     1,
		}
	} else if header = fragmentationHeaderRE.FindStringSubmatch(l); header != nil {
		p = &Profile{
			SampleType: []*ValueType{
				{Type: "objects", Unit: "count"},
				{Type: "space", Unit: "bytes"},
			},
			PeriodType: &ValueType{Type: "allocations", Unit: "count"},
			Period:     1,
		}
	} else {
		return nil, errUnrecognized
	}
	// Prefix the sample types to reflect which memory statistic is
	// reported (see LegacyHeapAllocated).
	if LegacyHeapAllocated {
		for _, st := range p.SampleType {
			st.Type = "alloc_" + st.Type
		}
	} else {
		for _, st := range p.SampleType {
			st.Type = "inuse_" + st.Type
		}
	}
	locs := make(map[uint64]*Location)
	for {
		l, err = r.ReadString('\n')
		if err != nil {
			if err != io.EOF {
				return nil, err
			}
			// On EOF, still process a final unterminated line.
			if l == "" {
				break
			}
		}
		if l = strings.TrimSpace(l); l == "" {
			continue
		}
		// A recognized section header ends the sample list.
		if sectionTrigger(l) != unrecognizedSection {
			break
		}
		value, blocksize, addrs, err := parseHeapSample(l, p.Period, sampling)
		if err != nil {
			return nil, err
		}
		var sloc []*Location
		for i, addr := range addrs {
			// Addresses from stack traces point to the next instruction after
			// each call. Adjust by -1 to land somewhere on the actual call
			// (except for the leaf, which is not a call).
			if i > 0 {
				addr--
			}
			loc := locs[addr]
			if locs[addr] == nil {
				loc = &Location{
					Address: addr,
				}
				p.Location = append(p.Location, loc)
				locs[addr] = loc
			}
			sloc = append(sloc, loc)
		}
		p.Sample = append(p.Sample, &Sample{
			Value:    value,
			Location: sloc,
			NumLabel: map[string][]int64{"bytes": []int64{blocksize}},
		})
	}
	if err = parseAdditionalSections(l, r, p); err != nil {
		return nil, err
	}
	return p, nil
}
// parseHeapSample parses a single row from a heap profile into a new
// Sample, returning the (count, bytes) values, the average block size,
// and the stack addresses.
func parseHeapSample(line string, rate int64, sampling string) (value []int64, blocksize int64, addrs []uint64, err error) {
	sampleData := heapSampleRE.FindStringSubmatch(line)
	if len(sampleData) != 6 {
		return value, blocksize, addrs, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData))
	}
	// Use first two values by default; tcmalloc sampling generates the
	// same value for both, only the older heap-profile collect separate
	// stats for in-use and allocated objects.
	valueIndex := 1
	if LegacyHeapAllocated {
		valueIndex = 3
	}
	// v1 is the object count, v2 the total bytes.
	var v1, v2 int64
	if v1, err = strconv.ParseInt(sampleData[valueIndex], 10, 64); err != nil {
		return value, blocksize, addrs, fmt.Errorf("malformed sample: %s: %v", line, err)
	}
	if v2, err = strconv.ParseInt(sampleData[valueIndex+1], 10, 64); err != nil {
		return value, blocksize, addrs, fmt.Errorf("malformed sample: %s: %v", line, err)
	}
	if v1 == 0 {
		if v2 != 0 {
			return value, blocksize, addrs, fmt.Errorf("allocation count was 0 but allocation bytes was %d", v2)
		}
	} else {
		// Average allocation size; v2-sampled profiles are then scaled
		// up to estimate the true totals.
		blocksize = v2 / v1
		if sampling == "v2" {
			v1, v2 = scaleHeapSample(v1, v2, rate)
		}
	}
	value = []int64{v1, v2}
	addrs = parseHexAddresses(sampleData[5])
	return value, blocksize, addrs, nil
}
// extractHexAddresses finds all hex numbers (0x...) in s and returns
// them both as the matched strings and as parsed uint64 values, in
// order of appearance.
func extractHexAddresses(s string) ([]string, []uint64) {
	matches := hexNumberRE.FindAllString(s, -1)
	var values []uint64
	for _, hex := range matches {
		v, err := strconv.ParseUint(hex, 0, 64)
		if err != nil {
			// Do not expect any parsing failures due to the regexp matching.
			panic("failed to parse hex value:" + hex)
		}
		values = append(values, v)
	}
	return matches, values
}
// parseHexAddresses parses hex numbers from a string and returns them
// in a slice. It is a convenience wrapper around extractHexAddresses
// that discards the matched strings.
func parseHexAddresses(s string) []uint64 {
	_, ids := extractHexAddresses(s)
	return ids
}
// scaleHeapSample adjusts the data from a heapz Sample to account for
// its probability of appearing in the collected data. heapz profiles
// are a sampling of the memory allocation requests in a program; the
// unsampled value is estimated by dividing each collected sample by
// its probability of appearing in the profile. heapz v2 profiles rely
// on a poisson process to decide which samples to collect, based on
// the desired average collection rate R: a sample of size S appears
// with probability 1-exp(-S/R). A rate of 1 means every sample was
// collected; rates below 1 are treated as unknown. In both of those
// cases the values are returned unscaled.
func scaleHeapSample(count, size, rate int64) (int64, int64) {
	if count == 0 || size == 0 {
		return 0, 0
	}
	if rate <= 1 {
		// if rate==1 all samples were collected so no adjustment is needed.
		// if rate<1 treat as unknown and skip scaling.
		return count, size
	}
	meanSize := float64(size) / float64(count)
	scale := 1 / (1 - math.Exp(-meanSize/float64(rate)))
	return int64(float64(count) * scale), int64(float64(size) * scale)
}
// parseContention parses a contentionz profile and returns a newly
// populated Profile.
func parseContention(b []byte) (p *Profile, err error) {
	r := bytes.NewBuffer(b)
	l, err := r.ReadString('\n')
	if err != nil {
		return nil, errUnrecognized
	}
	if !strings.HasPrefix(l, "--- contention") {
		return nil, errUnrecognized
	}
	p = &Profile{
		PeriodType: &ValueType{Type: "contentions", Unit: "count"},
		Period:     1,
		SampleType: []*ValueType{
			{Type: "contentions", Unit: "count"},
			{Type: "delay", Unit: "nanoseconds"},
		},
	}
	var cpuHz int64
	// Parse text of the form "attribute = value" before the samples.
	const delimiter = "="
	for {
		l, err = r.ReadString('\n')
		if err != nil {
			if err != io.EOF {
				return nil, err
			}
			// On EOF, still process a final unterminated line.
			if l == "" {
				break
			}
		}
		if l = strings.TrimSpace(l); l == "" {
			continue
		}
		if strings.HasPrefix(l, "---") {
			break
		}
		attr := strings.SplitN(l, delimiter, 2)
		if len(attr) != 2 {
			break
		}
		key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1])
		var err error
		switch key {
		case "cycles/second":
			if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil {
				return nil, errUnrecognized
			}
		case "sampling period":
			if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil {
				return nil, errUnrecognized
			}
		case "ms since reset":
			ms, err := strconv.ParseInt(val, 0, 64)
			if err != nil {
				return nil, errUnrecognized
			}
			p.DurationNanos = ms * 1000 * 1000
		case "format":
			// CPP contentionz profiles don't have format.
			return nil, errUnrecognized
		case "resolution":
			// CPP contentionz profiles don't have resolution.
			return nil, errUnrecognized
		case "discarded samples":
			// Recognized but ignored.
		default:
			return nil, errUnrecognized
		}
	}
	// Parse the sample lines; a "---" line introduces a trailing
	// section handled by parseAdditionalSections below.
	locs := make(map[uint64]*Location)
	for {
		if l = strings.TrimSpace(l); strings.HasPrefix(l, "---") {
			break
		}
		value, addrs, err := parseContentionSample(l, p.Period, cpuHz)
		if err != nil {
			return nil, err
		}
		var sloc []*Location
		for i, addr := range addrs {
			// Addresses from stack traces point to the next instruction after
			// each call. Adjust by -1 to land somewhere on the actual call
			// (except for the leaf, which is not a call).
			if i > 0 {
				addr--
			}
			loc := locs[addr]
			if locs[addr] == nil {
				loc = &Location{
					Address: addr,
				}
				p.Location = append(p.Location, loc)
				locs[addr] = loc
			}
			sloc = append(sloc, loc)
		}
		p.Sample = append(p.Sample, &Sample{
			Value:    value,
			Location: sloc,
		})
		if l, err = r.ReadString('\n'); err != nil {
			if err != io.EOF {
				return nil, err
			}
			if l == "" {
				break
			}
		}
	}
	if err = parseAdditionalSections(l, r, p); err != nil {
		return nil, err
	}
	return p, nil
}
// parseContentionSample parses a single row from a contention profile
// into a new Sample.
func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) {
	sampleData := contentionSampleRE.FindStringSubmatch(line)
	if sampleData == nil {
		return value, addrs, errUnrecognized
	}
	// v1 is the delay, v2 the contention count; the ordering follows
	// from the value slice built below, which matches the profile's
	// SampleType order (contentions, delay).
	v1, err := strconv.ParseInt(sampleData[1], 10, 64)
	if err != nil {
		return value, addrs, fmt.Errorf("malformed sample: %s: %v", line, err)
	}
	v2, err := strconv.ParseInt(sampleData[2], 10, 64)
	if err != nil {
		return value, addrs, fmt.Errorf("malformed sample: %s: %v", line, err)
	}
	// Unsample values if period and cpuHz are available.
	// - Delays are scaled to cycles and then to nanoseconds.
	// - Contentions are scaled to cycles.
	if period > 0 {
		if cpuHz > 0 {
			cpuGHz := float64(cpuHz) / 1e9
			v1 = int64(float64(v1) * float64(period) / cpuGHz)
		}
		v2 = v2 * period
	}
	value = []int64{v2, v1}
	addrs = parseHexAddresses(sampleData[3])
	return value, addrs, nil
}
// parseThread parses a Threadz profile and returns a new Profile.
func parseThread(b []byte) (*Profile, error) {
	r := bytes.NewBuffer(b)

	var line string
	var err error
	for {
		// Skip past comments and empty lines seeking a real header.
		line, err = r.ReadString('\n')
		if err != nil {
			return nil, err
		}
		if !isSpaceOrComment(line) {
			break
		}
	}

	if m := threadzStartRE.FindStringSubmatch(line); m != nil {
		// Advance over initial comments until first stack trace.
		for {
			line, err = r.ReadString('\n')
			if err != nil {
				if err != io.EOF {
					return nil, err
				}
				// At EOF a non-empty final line (no trailing newline)
				// is still examined below.
				if line == "" {
					break
				}
			}
			if sectionTrigger(line) != unrecognizedSection || line[0] == '-' {
				break
			}
		}
	} else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
		// Neither a threadz header nor a single-thread header: not ours.
		return nil, errUnrecognized
	}

	// Thread profiles count threads; one unit per sampled thread.
	p := &Profile{
		SampleType: []*ValueType{{Type: "thread", Unit: "count"}},
		PeriodType: &ValueType{Type: "thread", Unit: "count"},
		Period:     1,
	}

	// locs de-duplicates Location records by address.
	locs := make(map[uint64]*Location)
	// Recognize each thread and populate profile samples.
	for sectionTrigger(line) == unrecognizedSection {
		if strings.HasPrefix(line, "---- no stack trace for") {
			line = ""
			break
		}
		if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
			return nil, errUnrecognized
		}

		var addrs []uint64
		line, addrs, err = parseThreadSample(r)
		if err != nil {
			return nil, errUnrecognized
		}
		if len(addrs) == 0 {
			// We got a --same as previous threads--. Bump counters.
			if len(p.Sample) > 0 {
				s := p.Sample[len(p.Sample)-1]
				s.Value[0]++
			}
			continue
		}

		var sloc []*Location
		for i, addr := range addrs {
			// Addresses from stack traces point to the next instruction after
			// each call. Adjust by -1 to land somewhere on the actual call
			// (except for the leaf, which is not a call).
			if i > 0 {
				addr--
			}
			loc := locs[addr]
			if locs[addr] == nil {
				loc = &Location{
					Address: addr,
				}
				p.Location = append(p.Location, loc)
				locs[addr] = loc
			}
			sloc = append(sloc, loc)
		}

		p.Sample = append(p.Sample, &Sample{
			Value:    []int64{1},
			Location: sloc,
		})
	}

	// Handle any trailing sections (e.g. the memory map).
	if err = parseAdditionalSections(line, r, p); err != nil {
		return nil, err
	}

	return p, nil
}
// parseThreadSample parses a symbolized or unsymbolized stack trace.
// Returns the first line after the traceback, the sample (or nil if
// it hits a 'same-as-previous' marker) and an error.
func parseThreadSample(b *bytes.Buffer) (nextl string, addrs []uint64, err error) {
	var l string
	sameAsPrevious := false
	for {
		if l, err = b.ReadString('\n'); err != nil {
			if err != io.EOF {
				return "", nil, err
			}
			// At EOF an empty read means the input is exhausted; a
			// non-empty final line (no trailing newline) is still parsed.
			if l == "" {
				break
			}
		}

		if l = strings.TrimSpace(l); l == "" {
			continue
		}

		// A "---" line marks the start of the next thread or section.
		if strings.HasPrefix(l, "---") {
			break
		}
		if strings.Contains(l, "same as previous thread") {
			sameAsPrevious = true
			continue
		}
		addrs = append(addrs, parseHexAddresses(l)...)
	}

	if sameAsPrevious {
		// Signal the caller with a nil address list.
		return l, nil, nil
	}
	return l, addrs, nil
}
// parseAdditionalSections parses any additional sections in the
// profile, ignoring any unrecognized sections, until it finds a
// memory map section, then delegates to ParseMemoryMap.
//
// l is the current (already read) line and b holds the rest of the
// input.
func parseAdditionalSections(l string, b *bytes.Buffer, p *Profile) (err error) {
	// BUG FIX: the previous version declared fresh l/err with ":=" inside
	// the loop, shadowing the parameter, so sectionTrigger never saw newly
	// read lines and the memory map section was never detected. Assign to
	// the outer variables instead.
	for sectionTrigger(l) != memoryMapSection {
		// Ignore any unrecognized sections.
		if l, err = b.ReadString('\n'); err != nil {
			if err != io.EOF {
				return err
			}
			// Exhausted input without finding a memory map section.
			if l == "" {
				break
			}
		}
	}
	return p.ParseMemoryMap(b)
}
// ParseMemoryMap parses a memory map in the format of
// /proc/self/maps, and overrides the mappings in the current profile.
// It renumbers the samples and locations in the profile correspondingly.
func (p *Profile) ParseMemoryMap(rd io.Reader) error {
	b := bufio.NewReader(rd)

	// attrs accumulates "$attr"->value replacement pairs discovered from
	// attr=value lines; r applies them to subsequent mapping lines.
	var attrs []string
	var r *strings.Replacer
	const delimiter = "="
	for {
		l, err := b.ReadString('\n')
		if err != nil {
			if err != io.EOF {
				return err
			}
			// A non-empty final line (no trailing newline) is still parsed.
			if l == "" {
				break
			}
		}
		if l = strings.TrimSpace(l); l == "" {
			continue
		}

		if r != nil {
			l = r.Replace(l)
		}
		m, err := parseMappingEntry(l)
		if err != nil {
			if err == errUnrecognized {
				// Recognize assignments of the form: attr=value, and replace
				// $attr with value on subsequent mappings.
				if attr := strings.SplitN(l, delimiter, 2); len(attr) == 2 {
					attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]))
					r = strings.NewReplacer(attrs...)
				}
				// Ignore any unrecognized entries
				continue
			}
			return err
		}
		if m == nil || (m.File == "" && len(p.Mapping) != 0) {
			// In some cases the first entry may include the address range
			// but not the name of the file. It should be followed by
			// another entry with the name.
			continue
		}
		if len(p.Mapping) == 1 && p.Mapping[0].File == "" {
			// Update the name if this is the entry following that empty one.
			p.Mapping[0].File = m.File
			continue
		}
		p.Mapping = append(p.Mapping, m)
	}

	// Renumber IDs to match the updated tables (helpers defined
	// elsewhere in this package).
	p.remapLocationIDs()
	p.remapFunctionIDs()
	p.remapMappingIDs()
	return nil
}
// parseMappingEntry parses one line of a memory map. It accepts the
// full /proc/self/maps format and a briefer legacy format, returning
// (nil, nil) for entries that should be skipped and errUnrecognized
// for lines in neither format.
func parseMappingEntry(l string) (*Mapping, error) {
	// parseHex converts a hex field, reporting success as a bool.
	parseHex := func(s string) (uint64, bool) {
		v, err := strconv.ParseUint(s, 16, 64)
		return v, err == nil
	}

	m := &Mapping{}
	if me := procMapsRE.FindStringSubmatch(l); len(me) == 9 {
		if !strings.Contains(me[3], "x") {
			// Skip non-executable entries.
			return nil, nil
		}
		var ok bool
		if m.Start, ok = parseHex(me[1]); !ok {
			return nil, errUnrecognized
		}
		if m.Limit, ok = parseHex(me[2]); !ok {
			return nil, errUnrecognized
		}
		if me[4] != "" {
			if m.Offset, ok = parseHex(me[4]); !ok {
				return nil, errUnrecognized
			}
		}
		m.File = me[8]
		return m, nil
	}

	if me := briefMapsRE.FindStringSubmatch(l); len(me) == 6 {
		var ok bool
		if m.Start, ok = parseHex(me[1]); !ok {
			return nil, errUnrecognized
		}
		if m.Limit, ok = parseHex(me[2]); !ok {
			return nil, errUnrecognized
		}
		m.File = me[3]
		if me[5] != "" {
			if m.Offset, ok = parseHex(me[5]); !ok {
				return nil, errUnrecognized
			}
		}
		return m, nil
	}

	return nil, errUnrecognized
}
// sectionType classifies a line that may begin a new profile section.
type sectionType int

const (
	unrecognizedSection sectionType = iota
	memoryMapSection
)

// memoryMapTriggers are the header lines that introduce a memory map
// section in the legacy profile formats.
var memoryMapTriggers = []string{
	"--- Memory map: ---",
	"MAPPED_LIBRARIES:",
}
// sectionTrigger reports which profile section, if any, the given
// line introduces.
func sectionTrigger(line string) sectionType {
	for _, marker := range memoryMapTriggers {
		if strings.Contains(line, marker) {
			return memoryMapSection
		}
	}
	return unrecognizedSection
}
// addLegacyFrameInfo installs DropFrames/KeepFrames regexps suited to
// the profile's sample types, so profiler-internal frames can be pruned.
func (p *Profile) addLegacyFrameInfo() {
	switch {
	case isProfileType(p, heapzSampleTypes) ||
		isProfileType(p, heapzInUseSampleTypes) ||
		isProfileType(p, heapzAllocSampleTypes):
		p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr
	case isProfileType(p, contentionzSampleTypes):
		p.DropFrames, p.KeepFrames = lockRxStr, ""
	default:
		p.DropFrames, p.KeepFrames = cpuProfilerRxStr, ""
	}
}
// Sample-type signatures used to recognize the legacy profile variants.
var heapzSampleTypes = []string{"allocations", "size"} // early Go pprof profiles
var heapzInUseSampleTypes = []string{"inuse_objects", "inuse_space"}
var heapzAllocSampleTypes = []string{"alloc_objects", "alloc_space"}
var contentionzSampleTypes = []string{"contentions", "delay"}
// isProfileType reports whether p's sample types exactly match the
// type names in t, in order.
func isProfileType(p *Profile, t []string) bool {
	if len(p.SampleType) != len(t) {
		return false
	}
	for i, st := range p.SampleType {
		if st.Type != t[i] {
			return false
		}
	}
	return true
}
// allocRxStr matches allocator frames to drop from heap profiles.
var allocRxStr = strings.Join([]string{
	// POSIX entry points.
	`calloc`,
	`cfree`,
	`malloc`,
	`free`,
	`memalign`,
	`do_memalign`,
	`(__)?posix_memalign`,
	`pvalloc`,
	`valloc`,
	`realloc`,

	// TC malloc.
	`tcmalloc::.*`,
	`tc_calloc`,
	`tc_cfree`,
	`tc_malloc`,
	`tc_free`,
	`tc_memalign`,
	`tc_posix_memalign`,
	`tc_pvalloc`,
	`tc_valloc`,
	`tc_realloc`,
	`tc_new`,
	`tc_delete`,
	`tc_newarray`,
	`tc_deletearray`,
	`tc_new_nothrow`,
	`tc_newarray_nothrow`,

	// Memory-allocation routines on OS X.
	`malloc_zone_malloc`,
	`malloc_zone_calloc`,
	`malloc_zone_valloc`,
	`malloc_zone_realloc`,
	`malloc_zone_memalign`,
	`malloc_zone_free`,

	// Go runtime
	`runtime\..*`,

	// Other misc. memory allocation routines
	`BaseArena::.*`,
	`(::)?do_malloc_no_errno`,
	`(::)?do_malloc_pages`,
	`(::)?do_malloc`,
	`DoSampledAllocation`,
	`MallocedMemBlock::MallocedMemBlock`,
	`_M_allocate`,
	`__builtin_(vec_)?delete`,
	`__builtin_(vec_)?new`,
	`__gnu_cxx::new_allocator::allocate`,
	`__libc_malloc`,
	`__malloc_alloc_template::allocate`,
	`allocate`,
	`cpp_alloc`,
	`operator new(\[\])?`,
	`simple_alloc::allocate`,
}, `|`)

// allocSkipRxStr matches frames that must be kept even when allocRxStr
// would drop them.
var allocSkipRxStr = strings.Join([]string{
	// Preserve Go runtime frames that appear in the middle/bottom of
	// the stack.
	`runtime\.panic`,
}, `|`)

// cpuProfilerRxStr matches CPU-profiler internal frames.
var cpuProfilerRxStr = strings.Join([]string{
	`ProfileData::Add`,
	`ProfileData::prof_handler`,
	`CpuProfiler::prof_handler`,
	`__pthread_sighandler`,
	`__restore`,
}, `|`)

// lockRxStr matches lock/contention bookkeeping frames to drop from
// contention profiles.
var lockRxStr = strings.Join([]string{
	`RecordLockProfileData`,
	`(base::)?RecordLockProfileData.*`,
	`(base::)?SubmitMutexProfileData.*`,
	`(base::)?SubmitSpinLockProfileData.*`,
	`(Mutex::)?AwaitCommon.*`,
	`(Mutex::)?Unlock.*`,
	`(Mutex::)?UnlockSlow.*`,
	`(Mutex::)?ReaderUnlock.*`,
	`(MutexLock::)?~MutexLock.*`,
	`(SpinLock::)?Unlock.*`,
	`(SpinLock::)?SlowUnlock.*`,
	`(SpinLockHolder::)?~SpinLockHolder.*`,
}, `|`)
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package profile provides a representation of profile.proto and
// methods to encode/decode profiles in this format.
package profile
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"regexp"
"strings"
"time"
)
// Profile is an in-memory representation of profile.proto.
// The unexported fields suffixed with X hold string-table indices and
// ID lists used only during encoding/decoding.
type Profile struct {
	SampleType []*ValueType
	Sample     []*Sample
	Mapping    []*Mapping
	Location   []*Location
	Function   []*Function

	DropFrames string // regexp of frames to drop
	KeepFrames string // regexp of frames to keep even if matched by DropFrames

	TimeNanos     int64
	DurationNanos int64
	PeriodType    *ValueType
	Period        int64

	dropFramesX int64 // string-table index of DropFrames
	keepFramesX int64 // string-table index of KeepFrames
	stringTable []string
}

// ValueType corresponds to Profile.ValueType
type ValueType struct {
	Type string // cpu, wall, inuse_space, etc
	Unit string // seconds, nanoseconds, bytes, etc

	typeX int64
	unitX int64
}

// Sample corresponds to Profile.Sample
type Sample struct {
	Location []*Location
	Value    []int64 // one value per SampleType entry
	Label    map[string][]string
	NumLabel map[string][]int64

	locationIDX []uint64
	labelX      []Label
}

// Label corresponds to Profile.Label
type Label struct {
	keyX int64
	// Exactly one of the two following values must be set
	strX int64
	numX int64 // Integer value for this label
}

// Mapping corresponds to Profile.Mapping
type Mapping struct {
	ID              uint64
	Start           uint64 // first address covered by the mapping
	Limit           uint64 // first address past the mapping
	Offset          uint64 // file offset of the mapped segment
	File            string
	BuildID         string
	HasFunctions    bool
	HasFilenames    bool
	HasLineNumbers  bool
	HasInlineFrames bool

	fileX    int64
	buildIDX int64
}

// Location corresponds to Profile.Location
type Location struct {
	ID      uint64
	Mapping *Mapping
	Address uint64
	Line    []Line // inlined call chain; may be empty if unsymbolized

	mappingIDX uint64
}

// Line corresponds to Profile.Line
type Line struct {
	Function *Function
	Line     int64

	functionIDX uint64
}

// Function corresponds to Profile.Function
type Function struct {
	ID         uint64
	Name       string
	SystemName string // mangled name, as it appears in the binary
	Filename   string
	StartLine  int64

	nameX       int64
	systemNameX int64
	filenameX   int64
}
// Parse parses a profile and checks for its validity. The input
// may be a gzip-compressed encoded protobuf or one of many legacy
// profile formats which may be unsupported in the future.
func Parse(r io.Reader) (*Profile, error) {
	orig, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}

	var p *Profile
	if len(orig) >= 2 && orig[0] == 0x1f && orig[1] == 0x8b {
		// Gzip magic number detected: decompress before parsing.
		//
		// BUG FIX: the previous version declared gz/err in an if-statement
		// initializer, shadowing err so that the "decompressing profile"
		// check below always saw nil and decompression failures were
		// silently ignored. Declare them at block scope instead.
		var data []byte
		gz, err := gzip.NewReader(bytes.NewBuffer(orig))
		if err == nil {
			data, err = ioutil.ReadAll(gz)
		}
		if err != nil {
			return nil, fmt.Errorf("decompressing profile: %v", err)
		}
		orig = data
	}

	// Try the protobuf encoding first, then fall back to the legacy
	// text formats.
	if p, err = parseUncompressed(orig); err != nil {
		if p, err = parseLegacy(orig); err != nil {
			return nil, fmt.Errorf("parsing profile: %v", err)
		}
	}
	if err := p.CheckValid(); err != nil {
		return nil, fmt.Errorf("malformed profile: %v", err)
	}
	return p, nil
}
// Sentinel errors shared by the format parsers. errUnrecognized means
// "not this format, try the next parser"; errMalformed means the format
// was recognized but the data is invalid.
var errUnrecognized = fmt.Errorf("unrecognized profile format")
var errMalformed = fmt.Errorf("malformed profile format")
// parseLegacy tries each legacy-format parser in turn, returning the
// first successful parse (after legacy fixups) or errUnrecognized if
// no parser accepts the data.
func parseLegacy(data []byte) (*Profile, error) {
	for _, parse := range []func([]byte) (*Profile, error){
		parseCPU,
		parseHeap,
		parseGoCount, // goroutine, threadcreate
		parseThread,
		parseContention,
	} {
		p, err := parse(data)
		if err == errUnrecognized {
			continue // not this format; try the next parser
		}
		if err != nil {
			return nil, err
		}
		p.setMain()
		p.addLegacyFrameInfo()
		return p, nil
	}
	return nil, errUnrecognized
}
// parseUncompressed decodes data as an uncompressed profile.proto
// message and resolves its string-table references.
func parseUncompressed(data []byte) (*Profile, error) {
	p := new(Profile)
	err := unmarshal(data, p)
	if err == nil {
		err = p.postDecode()
	}
	if err != nil {
		return nil, err
	}
	return p, nil
}
// libRx matches shared-library file names (.so, .so.1, ...).
var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`)

// setMain scans Mapping entries and guesses which entry is main
// because legacy profiles don't obey the convention of putting main
// first.
func (p *Profile) setMain() {
	for i, m := range p.Mapping {
		file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1))
		// Skip nameless entries, shared libraries, and pseudo-mappings
		// such as "[vdso]".
		if file == "" || libRx.MatchString(file) || strings.HasPrefix(file, "[") {
			continue
		}
		// Swap what we guess is main to position 0.
		p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0]
		break
	}
}
// Write writes the profile as a gzip-compressed marshaled protobuf.
func (p *Profile) Write(w io.Writer) error {
	// preEncode prepares the string table and index fields that marshal
	// relies on (defined elsewhere in this package).
	p.preEncode()
	b := marshal(p)
	zw := gzip.NewWriter(w)
	defer zw.Close()
	_, err := zw.Write(b)
	return err
}
// CheckValid tests whether the profile is valid. Checks include, but are
// not limited to:
//   - len(Profile.Sample[n].value) == len(Profile.value_unit)
//   - Sample.id has a corresponding Profile.Location
func (p *Profile) CheckValid() error {
	// Check that sample values are consistent
	sampleLen := len(p.SampleType)
	if sampleLen == 0 && len(p.Sample) != 0 {
		return fmt.Errorf("missing sample type information")
	}
	for _, s := range p.Sample {
		if len(s.Value) != sampleLen {
			return fmt.Errorf("mismatch: sample has: %d values vs. %d types", len(s.Value), len(p.SampleType))
		}
	}

	// Check that all mappings/locations/functions are in the tables
	// Check that there are no duplicate ids
	mappings := make(map[uint64]*Mapping, len(p.Mapping))
	for _, m := range p.Mapping {
		if m.ID == 0 {
			return fmt.Errorf("found mapping with reserved ID=0")
		}
		if mappings[m.ID] != nil {
			return fmt.Errorf("multiple mappings with same id: %d", m.ID)
		}
		mappings[m.ID] = m
	}
	functions := make(map[uint64]*Function, len(p.Function))
	for _, f := range p.Function {
		if f.ID == 0 {
			return fmt.Errorf("found function with reserved ID=0")
		}
		if functions[f.ID] != nil {
			return fmt.Errorf("multiple functions with same id: %d", f.ID)
		}
		functions[f.ID] = f
	}
	locations := make(map[uint64]*Location, len(p.Location))
	for _, l := range p.Location {
		if l.ID == 0 {
			return fmt.Errorf("found location with reserved id=0")
		}
		if locations[l.ID] != nil {
			return fmt.Errorf("multiple locations with same id: %d", l.ID)
		}
		locations[l.ID] = l
		// Pointer fields must reference the exact object registered in
		// the tables, not merely one with the same ID.
		if m := l.Mapping; m != nil {
			if m.ID == 0 || mappings[m.ID] != m {
				return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID)
			}
		}
		for _, ln := range l.Line {
			if f := ln.Function; f != nil {
				if f.ID == 0 || functions[f.ID] != f {
					return fmt.Errorf("inconsistent function %p: %d", f, f.ID)
				}
			}
		}
	}
	return nil
}
// Aggregate merges the locations in the profile into equivalence
// classes preserving the request attributes. It also updates the
// samples to point to the merged locations.
//
// Each boolean selects an attribute to PRESERVE; attributes whose flag
// is false are cleared so locations that differ only in that attribute
// become equivalent.
func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error {
	for _, m := range p.Mapping {
		m.HasInlineFrames = m.HasInlineFrames && inlineFrame
		m.HasFunctions = m.HasFunctions && function
		m.HasFilenames = m.HasFilenames && filename
		m.HasLineNumbers = m.HasLineNumbers && linenumber
	}

	// Aggregate functions
	if !function || !filename {
		for _, f := range p.Function {
			if !function {
				f.Name = ""
				f.SystemName = ""
			}
			if !filename {
				f.Filename = ""
			}
		}
	}

	// Aggregate locations
	if !inlineFrame || !address || !linenumber {
		for _, l := range p.Location {
			// Without inline frames, keep only the outermost (last) line.
			if !inlineFrame && len(l.Line) > 1 {
				l.Line = l.Line[len(l.Line)-1:]
			}
			if !linenumber {
				for i := range l.Line {
					l.Line[i].Line = 0
				}
			}
			if !address {
				l.Address = 0
			}
		}
	}

	return p.CheckValid()
}
// String returns a text representation of a profile. Intended mainly
// for debugging purposes.
func (p *Profile) String() string {
	ss := make([]string, 0, len(p.Sample)+len(p.Mapping)+len(p.Location))
	if pt := p.PeriodType; pt != nil {
		ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
	}
	ss = append(ss, fmt.Sprintf("Period: %d", p.Period))
	if p.TimeNanos != 0 {
		ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos)))
	}
	if p.DurationNanos != 0 {
		ss = append(ss, fmt.Sprintf("Duration: %v", time.Duration(p.DurationNanos)))
	}

	// Samples section: a header of type/unit pairs, then one line per
	// sample with its values and location IDs.
	ss = append(ss, "Samples:")
	var sh1 string
	for _, s := range p.SampleType {
		sh1 = sh1 + fmt.Sprintf("%s/%s ", s.Type, s.Unit)
	}
	ss = append(ss, strings.TrimSpace(sh1))
	for _, s := range p.Sample {
		var sv string
		for _, v := range s.Value {
			sv = fmt.Sprintf("%s %10d", sv, v)
		}
		sv = sv + ": "
		for _, l := range s.Location {
			sv = sv + fmt.Sprintf("%d ", l.ID)
		}
		ss = append(ss, sv)
		const labelHeader = "                "
		if len(s.Label) > 0 {
			ls := labelHeader
			for k, v := range s.Label {
				ls = ls + fmt.Sprintf("%s:%v ", k, v)
			}
			ss = append(ss, ls)
		}
		if len(s.NumLabel) > 0 {
			ls := labelHeader
			for k, v := range s.NumLabel {
				ls = ls + fmt.Sprintf("%s:%v ", k, v)
			}
			ss = append(ss, ls)
		}
	}

	ss = append(ss, "Locations")
	for _, l := range p.Location {
		locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address)
		if m := l.Mapping; m != nil {
			locStr = locStr + fmt.Sprintf("M=%d ", m.ID)
		}
		if len(l.Line) == 0 {
			ss = append(ss, locStr)
		}
		for li := range l.Line {
			lnStr := "??"
			if fn := l.Line[li].Function; fn != nil {
				lnStr = fmt.Sprintf("%s %s:%d s=%d",
					fn.Name,
					fn.Filename,
					l.Line[li].Line,
					fn.StartLine)
				if fn.Name != fn.SystemName {
					lnStr = lnStr + "(" + fn.SystemName + ")"
				}
			}
			ss = append(ss, locStr+lnStr)
			// Do not print location details past the first line
			locStr = "             "
		}
	}

	ss = append(ss, "Mappings")
	for _, m := range p.Mapping {
		// bits summarizes which symbolization attributes the mapping has.
		bits := ""
		if m.HasFunctions {
			bits = bits + "[FN]"
		}
		if m.HasFilenames {
			bits = bits + "[FL]"
		}
		if m.HasLineNumbers {
			bits = bits + "[LN]"
		}
		if m.HasInlineFrames {
			bits = bits + "[IN]"
		}
		ss = append(ss, fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s",
			m.ID,
			m.Start, m.Limit, m.Offset,
			m.File,
			m.BuildID,
			bits))
	}

	return strings.Join(ss, "\n") + "\n"
}
// Merge adds profile pb adjusted by ratio r into profile p. Profiles
// must be compatible (same Type and SampleType).
// TODO(rsilvera): consider normalizing the profiles based on the
// total samples collected.
func (p *Profile) Merge(pb *Profile, r float64) error {
	if err := p.Compatible(pb); err != nil {
		return err
	}

	// Work on a copy so the caller's profile is not mutated by the
	// renumbering and scaling below.
	pb = pb.Copy()

	// Keep the largest of the two periods.
	if pb.Period > p.Period {
		p.Period = pb.Period
	}

	p.DurationNanos += pb.DurationNanos

	// Concatenate the tables and renumber IDs densely.
	p.Mapping = append(p.Mapping, pb.Mapping...)
	for i, m := range p.Mapping {
		m.ID = uint64(i + 1)
	}
	p.Location = append(p.Location, pb.Location...)
	for i, l := range p.Location {
		l.ID = uint64(i + 1)
	}
	p.Function = append(p.Function, pb.Function...)
	for i, f := range p.Function {
		f.ID = uint64(i + 1)
	}

	// Scale the incoming sample values by r before merging them in.
	if r != 1.0 {
		for _, s := range pb.Sample {
			for i, v := range s.Value {
				s.Value[i] = int64((float64(v) * r))
			}
		}
	}
	p.Sample = append(p.Sample, pb.Sample...)
	return p.CheckValid()
}
// Compatible determines if two profiles can be compared/merged.
// Returns nil if the profiles are compatible; otherwise an error with
// details on the incompatibility.
func (p *Profile) Compatible(pb *Profile) error {
	if !compatibleValueTypes(p.PeriodType, pb.PeriodType) {
		return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
	}

	a, b := p.SampleType, pb.SampleType
	if len(a) != len(b) {
		return fmt.Errorf("incompatible sample types %v and %v", a, b)
	}
	for i := range a {
		if !compatibleValueTypes(a[i], b[i]) {
			return fmt.Errorf("incompatible sample types %v and %v", a, b)
		}
	}
	return nil
}
// HasFunctions determines if all locations in this profile have
// symbolized function information.
func (p *Profile) HasFunctions() bool {
	for _, loc := range p.Location {
		m := loc.Mapping
		if m == nil || !m.HasFunctions {
			return false
		}
	}
	return true
}
// HasFileLines determines if all locations in this profile have
// symbolized file and line number information.
func (p *Profile) HasFileLines() bool {
	for _, loc := range p.Location {
		m := loc.Mapping
		if m == nil || !m.HasFilenames || !m.HasLineNumbers {
			return false
		}
	}
	return true
}
// compatibleValueTypes reports whether two value types may be compared.
// A nil on either side is treated as compatible.
func compatibleValueTypes(v1, v2 *ValueType) bool {
	if v1 == nil || v2 == nil {
		return true // No grounds to disqualify.
	}
	if v1.Type != v2.Type {
		return false
	}
	return v1.Unit == v2.Unit
}
// Copy makes a fully independent copy of a profile.
func (p *Profile) Copy() *Profile {
	// A marshal/unmarshal round trip produces a deep copy. Failures here
	// indicate an internal inconsistency in the profile, hence panic.
	p.preEncode()
	b := marshal(p)

	pp := &Profile{}
	if err := unmarshal(b, pp); err != nil {
		panic(err)
	}
	if err := pp.postDecode(); err != nil {
		panic(err)
	}

	return pp
}
// Demangler maps symbol names to a human-readable form. This may
// include C++ demangling and additional simplification. Names that
// are not demangled may be missing from the resulting map.
type Demangler func(name []string) (map[string]string, error)
// Demangle attempts to demangle and optionally simplify any function
// names referenced in the profile. It works on a best-effort basis:
// it will silently preserve the original names in case of any errors.
func (p *Profile) Demangle(d Demangler) error {
	// Collect names to demangle. Pre-size the slice: we append exactly
	// one name per function.
	names := make([]string, 0, len(p.Function))
	for _, fn := range p.Function {
		names = append(names, fn.SystemName)
	}

	// Update profile with demangled names. Names missing from the map
	// keep their original Name.
	demangled, err := d(names)
	if err != nil {
		return err
	}
	for _, fn := range p.Function {
		if dd, ok := demangled[fn.SystemName]; ok {
			fn.Name = dd
		}
	}
	return nil
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file is a simple protocol buffer encoder and decoder.
//
// A protocol message must implement the message interface:
// decoder() []decoder
// encode(*buffer)
//
// The decode method returns a slice indexed by field number that gives the
// function to decode that field.
// The encode method encodes its receiver into the given buffer.
//
// The two methods are simple enough to be implemented by hand rather than
// by using a protocol compiler.
//
// See profile.go for examples of messages implementing this interface.
//
// There is no support for groups, message sets, or "has" bits.
package profile
import (
	"errors"
	"strconv"
)
// buffer holds encoder output in data and, while decoding, the state
// of the most recently scanned field (field number, wire type, and the
// varint/fixed value or length-delimited payload).
type buffer struct {
	field int // field number of the current decoded field
	typ   int // wire type of the current decoded field
	u64   uint64
	data  []byte
	tmp   [16]byte // scratch space used when inserting message headers
}

// decoder decodes one field from the buffer state into a message.
type decoder func(*buffer, message) error

// message is the interface implemented by hand-written protobuf
// message types (see the package comment).
type message interface {
	decoder() []decoder
	encode(*buffer)
}
// marshal encodes m into a fresh byte slice.
func marshal(m message) []byte {
	b := new(buffer)
	m.encode(b)
	return b.data
}
// encodeVarint appends x to b.data in base-128 varint encoding,
// least-significant group first, high bit set on all but the last byte.
func encodeVarint(b *buffer, x uint64) {
	for {
		if x < 128 {
			b.data = append(b.data, byte(x))
			return
		}
		b.data = append(b.data, byte(x)|0x80)
		x >>= 7
	}
}
// encodeLength appends a length-delimited (wire type 2) field header.
func encodeLength(b *buffer, tag int, len int) {
	encodeVarint(b, uint64(tag)<<3|2)
	encodeVarint(b, uint64(len))
}

// encodeUint64 appends a varint (wire type 0) field.
func encodeUint64(b *buffer, tag int, x uint64) {
	// append varint to b.data
	encodeVarint(b, uint64(tag)<<3|0)
	encodeVarint(b, x)
}

// encodeUint64s appends one varint field per element (repeated field).
func encodeUint64s(b *buffer, tag int, x []uint64) {
	for _, u := range x {
		encodeUint64(b, tag, u)
	}
}

// encodeUint64Opt appends the field only when nonzero (default elision).
func encodeUint64Opt(b *buffer, tag int, x uint64) {
	if x == 0 {
		return
	}
	encodeUint64(b, tag, x)
}

// encodeInt64 appends a signed value as a plain (non-zigzag) varint.
func encodeInt64(b *buffer, tag int, x int64) {
	u := uint64(x)
	encodeUint64(b, tag, u)
}

// encodeInt64Opt appends the field only when nonzero.
func encodeInt64Opt(b *buffer, tag int, x int64) {
	if x == 0 {
		return
	}
	encodeInt64(b, tag, x)
}

// encodeString appends a length-delimited string field.
func encodeString(b *buffer, tag int, x string) {
	encodeLength(b, tag, len(x))
	b.data = append(b.data, x...)
}

// encodeStrings appends one string field per element (repeated field).
func encodeStrings(b *buffer, tag int, x []string) {
	for _, s := range x {
		encodeString(b, tag, s)
	}
}

// encodeStringOpt appends the field only when non-empty.
func encodeStringOpt(b *buffer, tag int, x string) {
	if x == "" {
		return
	}
	encodeString(b, tag, x)
}

// encodeBool appends a bool as a 0/1 varint field.
func encodeBool(b *buffer, tag int, x bool) {
	if x {
		encodeUint64(b, tag, 1)
	} else {
		encodeUint64(b, tag, 0)
	}
}

// encodeBoolOpt appends the field only when true.
func encodeBoolOpt(b *buffer, tag int, x bool) {
	if x == false {
		return
	}
	encodeBool(b, tag, x)
}

// encodeMessage appends an embedded message field. The body is encoded
// first, then the tag+length header (whose size is not known up front)
// is rotated in front of it via the b.tmp scratch buffer.
func encodeMessage(b *buffer, tag int, m message) {
	n1 := len(b.data)
	m.encode(b)
	n2 := len(b.data)
	encodeLength(b, tag, n2-n1)
	n3 := len(b.data)
	copy(b.tmp[:], b.data[n2:n3])
	copy(b.data[n1+(n3-n2):], b.data[n1:n2])
	copy(b.data[n1:], b.tmp[:n3-n2])
}
// unmarshal decodes data as a single message into m.
func unmarshal(data []byte, m message) (err error) {
	// typ is set to 2 so decodeMessage's checkType accepts the
	// top-level payload as a length-delimited message.
	b := buffer{data: data, typ: 2}
	return decodeMessage(&b, m)
}
// le64 decodes the first 8 bytes of p as a little-endian uint64.
func le64(p []byte) uint64 {
	var v uint64
	for i := 7; i >= 0; i-- {
		v = v<<8 | uint64(p[i])
	}
	return v
}
// le32 decodes the first 4 bytes of p as a little-endian uint32.
func le32(p []byte) uint32 {
	var v uint32
	for i := 3; i >= 0; i-- {
		v = v<<8 | uint32(p[i])
	}
	return v
}
// decodeVarint decodes one base-128 varint from the front of data,
// returning the value and the unconsumed remainder. It fails on empty
// input, truncated varints, and varints longer than 10 bytes.
func decodeVarint(data []byte) (uint64, []byte, error) {
	var u uint64
	for i := 0; i < len(data) && i < 10; i++ {
		c := data[i]
		u |= uint64(c&0x7F) << uint(7*i)
		if c&0x80 == 0 {
			return u, data[i+1:], nil
		}
	}
	return 0, nil, errors.New("bad varint")
}
// decodeField decodes the next field header and payload from data into
// b (setting b.field, b.typ, and b.u64 or b.data), returning the
// unconsumed remainder.
func decodeField(b *buffer, data []byte) ([]byte, error) {
	x, data, err := decodeVarint(data)
	if err != nil {
		return nil, err
	}
	b.field = int(x >> 3)
	b.typ = int(x & 7)
	b.data = nil
	b.u64 = 0
	switch b.typ {
	case 0: // varint
		b.u64, data, err = decodeVarint(data)
		if err != nil {
			return nil, err
		}
	case 1: // 64-bit fixed
		if len(data) < 8 {
			return nil, errors.New("not enough data")
		}
		b.u64 = le64(data[:8])
		data = data[8:]
	case 2: // length-delimited
		var n uint64
		n, data, err = decodeVarint(data)
		if err != nil {
			return nil, err
		}
		if n > uint64(len(data)) {
			return nil, errors.New("too much data")
		}
		b.data = data[:n]
		data = data[n:]
	case 5: // 32-bit fixed
		if len(data) < 4 {
			return nil, errors.New("not enough data")
		}
		b.u64 = uint64(le32(data[:4]))
		data = data[4:]
	default:
		// BUG FIX: string(b.typ) converted the int to a rune, producing a
		// garbage character in the message; format it as a decimal instead.
		return nil, errors.New("unknown type: " + strconv.Itoa(b.typ))
	}

	return data, nil
}
// checkType verifies that the current field's wire type matches typ.
func checkType(b *buffer, typ int) error {
	if b.typ == typ {
		return nil
	}
	return errors.New("type mismatch")
}
// decodeMessage decodes the length-delimited payload currently held in
// b.data into m, dispatching each field to m's decoder table.
func decodeMessage(b *buffer, m message) error {
	if err := checkType(b, 2); err != nil {
		return err
	}
	dec := m.decoder()
	data := b.data
	for len(data) > 0 {
		// pull varint field# + type
		var err error
		data, err = decodeField(b, data)
		if err != nil {
			return err
		}
		// Skip fields the message does not know about.
		if b.field >= len(dec) || dec[b.field] == nil {
			continue
		}
		if err := dec[b.field](b, m); err != nil {
			return err
		}
	}
	return nil
}
// decodeInt64 stores the current varint field into *x.
func decodeInt64(b *buffer, x *int64) error {
	if err := checkType(b, 0); err != nil {
		return err
	}
	*x = int64(b.u64)
	return nil
}

// decodeInt64s appends the current varint field to *x (repeated field).
func decodeInt64s(b *buffer, x *[]int64) error {
	var i int64
	if err := decodeInt64(b, &i); err != nil {
		return err
	}
	*x = append(*x, i)
	return nil
}

// decodeUint64 stores the current varint field into *x.
func decodeUint64(b *buffer, x *uint64) error {
	if err := checkType(b, 0); err != nil {
		return err
	}
	*x = b.u64
	return nil
}

// decodeUint64s appends the current varint field to *x (repeated field).
func decodeUint64s(b *buffer, x *[]uint64) error {
	var u uint64
	if err := decodeUint64(b, &u); err != nil {
		return err
	}
	*x = append(*x, u)
	return nil
}

// decodeString stores the current length-delimited field into *x.
func decodeString(b *buffer, x *string) error {
	if err := checkType(b, 2); err != nil {
		return err
	}
	*x = string(b.data)
	return nil
}

// decodeStrings appends the current string field to *x (repeated field).
func decodeStrings(b *buffer, x *[]string) error {
	var s string
	if err := decodeString(b, &s); err != nil {
		return err
	}
	*x = append(*x, s)
	return nil
}
// decodeBool stores the current varint field into *x, with any nonzero
// value decoding as true.
func decodeBool(b *buffer, x *bool) error {
	if err := checkType(b, 0); err != nil {
		return err
	}
	*x = b.u64 != 0
	return nil
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Implements methods to remove frames from profiles.
package profile
import (
"fmt"
"regexp"
)
// Prune removes all nodes beneath a node matching dropRx, and not
// matching keepRx. If the root node of a Sample matches, the sample
// will have an empty stack.
func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
	// prune marks locations to remove entirely; pruneBeneath marks
	// locations whose callees (deeper stack entries) must be removed.
	prune := make(map[uint64]bool)
	pruneBeneath := make(map[uint64]bool)

	for _, loc := range p.Location {
		var i int
		// Scan the inline chain from outermost (last) to innermost,
		// stopping at the first matching frame.
		for i = len(loc.Line) - 1; i >= 0; i-- {
			if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
				funcName := fn.Name
				// Account for leading '.' on the PPC ELF v1 ABI.
				if funcName[0] == '.' {
					funcName = funcName[1:]
				}
				if dropRx.MatchString(funcName) {
					if keepRx == nil || !keepRx.MatchString(funcName) {
						break
					}
				}
			}
		}

		if i >= 0 {
			// Found matching entry to prune.
			pruneBeneath[loc.ID] = true

			// Remove the matching location.
			if i == len(loc.Line)-1 {
				// Matched the top entry: prune the whole location.
				prune[loc.ID] = true
			} else {
				loc.Line = loc.Line[i+1:]
			}
		}
	}

	// Prune locs from each Sample
	for _, sample := range p.Sample {
		// Scan from the root to the leaves to find the prune location.
		// Do not prune frames before the first user frame, to avoid
		// pruning everything.
		foundUser := false
		for i := len(sample.Location) - 1; i >= 0; i-- {
			id := sample.Location[i].ID
			if !prune[id] && !pruneBeneath[id] {
				foundUser = true
				continue
			}
			if !foundUser {
				continue
			}
			if prune[id] {
				// Drop this frame and everything deeper.
				sample.Location = sample.Location[i+1:]
				break
			}
			if pruneBeneath[id] {
				// Keep this frame, drop everything deeper.
				sample.Location = sample.Location[i:]
				break
			}
		}
	}
}
// RemoveUninteresting prunes and elides profiles using built-in
// tables of uninteresting function names.
//
// The regexps come from the profile's own DropFrames/KeepFrames
// fields; with an empty DropFrames nothing is pruned.
func (p *Profile) RemoveUninteresting() error {
	var keep, drop *regexp.Regexp
	var err error
	if p.DropFrames != "" {
		// Anchor the patterns so they must match the whole name.
		if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil {
			return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err)
		}
		if p.KeepFrames != "" {
			if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil {
				return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err)
			}
		}
		p.Prune(drop, keep)
	}
	return nil
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package report summarizes a performance profile into a
// human-readable report.
package report
import (
"fmt"
"io"
"math"
"os"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
"cmd/pprof/internal/plugin"
"cmd/pprof/internal/profile"
)
// Generate generates a report as directed by the Report.
// obj is only consulted by the formats that need object-file access
// (assembly and weblist output).
func Generate(w io.Writer, rpt *Report, obj plugin.ObjTool) error {
	o := rpt.options

	switch o.OutputFormat {
	case Dot:
		return printDOT(w, rpt)
	case Tree:
		return printTree(w, rpt)
	case Text:
		return printText(w, rpt)
	case Raw:
		fmt.Fprint(w, rpt.prof.String())
		return nil
	case Tags:
		return printTags(w, rpt)
	case Proto:
		return rpt.prof.Write(w)
	case Dis:
		return printAssembly(w, rpt, obj)
	case List:
		return printSource(w, rpt)
	case WebList:
		return printWebSource(w, rpt, obj)
	case Callgrind:
		return printCallgrind(w, rpt)
	}
	return fmt.Errorf("unexpected output format")
}
// printAssembly prints an annotated assembly listing for every symbol
// matching the report's symbol regexp, with flat/cum sample values
// attached to each instruction.
func printAssembly(w io.Writer, rpt *Report, obj plugin.ObjTool) error {
	g, err := newGraph(rpt)
	if err != nil {
		return err
	}

	o := rpt.options
	prof := rpt.prof

	// If the regexp source can be parsed as an address, also match
	// functions that land on that address.
	var address *uint64
	if hex, err := strconv.ParseUint(o.Symbol.String(), 0, 64); err == nil {
		address = &hex
	}

	fmt.Fprintln(w, "Total:", rpt.formatValue(rpt.total))
	symbols := symbolsFromBinaries(prof, g, o.Symbol, address, obj)
	symNodes := nodesPerSymbol(g.ns, symbols)
	// Sort function names for printing.
	var syms objSymbols
	for s := range symNodes {
		syms = append(syms, s)
	}
	sort.Sort(syms)

	// Correlate the symbols from the binary with the profile samples.
	for _, s := range syms {
		sns := symNodes[s]

		// Gather samples for this symbol.
		flatSum, cumSum := sumNodes(sns)

		// Get the function assembly.
		insns, err := obj.Disasm(s.sym.File, s.sym.Start, s.sym.End)
		if err != nil {
			return err
		}

		ns := annotateAssembly(insns, sns, s.base)

		fmt.Fprintf(w, "ROUTINE ======================== %s\n", s.sym.Name[0])
		for _, name := range s.sym.Name[1:] {
			fmt.Fprintf(w, "    AKA ======================== %s\n", name)
		}
		fmt.Fprintf(w, "%10s %10s (flat, cum) %s of Total\n",
			rpt.formatValue(flatSum), rpt.formatValue(cumSum),
			percentage(cumSum, rpt.total))

		for _, n := range ns {
			fmt.Fprintf(w, "%10s %10s %10x: %s\n", valueOrDot(n.flat, rpt), valueOrDot(n.cum, rpt), n.info.address, n.info.name)
		}
	}
	return nil
}
// symbolsFromBinaries examines the binaries listed on the profile
// that have associated samples, and identifies symbols matching rx.
// If address is non-nil, mappings covering that address are examined
// even when none of their samples match rx.
func symbolsFromBinaries(prof *profile.Profile, g graph, rx *regexp.Regexp, address *uint64, obj plugin.ObjTool) []*objSymbol {
	// Object files (keyed by base name, as stored in nodeInfo.objfile)
	// that contain at least one sample whose pretty name matches rx.
	hasSamples := make(map[string]bool)
	// Only examine mappings that have samples that match the
	// regexp. This is an optimization to speed up pprof.
	for _, n := range g.ns {
		if name := n.info.prettyName(); rx.MatchString(name) && n.info.objfile != "" {
			hasSamples[n.info.objfile] = true
		}
	}
	// Walk all mappings looking for matching functions with samples.
	var objSyms []*objSymbol
	for _, m := range prof.Mapping {
		// objfile entries are base names (see newLocInfo), so key by base.
		if !hasSamples[filepath.Base(m.File)] {
			if address == nil || !(m.Start <= *address && *address <= m.Limit) {
				continue
			}
		}
		f, err := obj.Open(m.File, m.Start)
		if err != nil {
			// Report the failure but keep examining other mappings.
			fmt.Printf("%v\n", err)
			continue
		}
		// Find symbols in this binary matching the user regexp.
		var addr uint64
		if address != nil {
			addr = *address
		}
		msyms, err := f.Symbols(rx, addr)
		base := f.Base()
		f.Close()
		if err != nil {
			// Silently skip binaries that cannot be symbolized.
			continue
		}
		for _, ms := range msyms {
			objSyms = append(objSyms,
				&objSymbol{
					sym: ms,
					base: base,
				},
			)
		}
	}
	return objSyms
}
// objSymbol represents a symbol identified from a binary. It includes
// the SymbolInfo from the disasm package and the base that must be
// added to correspond to sample addresses.
type objSymbol struct {
	sym  *plugin.Sym // symbol name(s) and address range from the binary
	base uint64      // offset added to binary addresses to match sample addresses
}
// objSymbols is a wrapper type to enable sorting of []*objSymbol.
type objSymbols []*objSymbol

func (o objSymbols) Len() int { return len(o) }

func (o objSymbols) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

// Less orders symbols by primary name, breaking ties by start address.
func (o objSymbols) Less(i, j int) bool {
	ni, nj := o[i].sym.Name[0], o[j].sym.Name[0]
	if ni == nj {
		return o[i].sym.Start < o[j].sym.Start
	}
	return ni < nj
}
// nodesPerSymbol classifies nodes into a group of symbols: a node
// belongs to every symbol whose address range covers its (base-adjusted)
// address.
func nodesPerSymbol(ns nodes, symbols []*objSymbol) map[*objSymbol]nodes {
	grouped := make(map[*objSymbol]nodes)
	for _, n := range ns {
		for _, s := range symbols {
			if addr := n.info.address - s.base; s.sym.Start <= addr && addr < s.sym.End {
				grouped[s] = append(grouped[s], n)
			}
		}
	}
	return grouped
}
// annotateAssembly annotates a set of assembly instructions with a
// set of samples. It returns a set of nodes to display. base is an
// offset to adjust the sample addresses.
func annotateAssembly(insns []plugin.Inst, samples nodes, base uint64) nodes {
	// Add end marker to simplify printing loop: its address compares
	// greater than any real instruction address. Keyed fields keep the
	// literal valid if plugin.Inst grows fields and satisfy go vet's
	// composite-literal check.
	insns = append(insns, plugin.Inst{Addr: ^uint64(0)})
	// Ensure samples are sorted by address.
	samples.sort(addressOrder)
	var s int // index of the next unconsumed sample
	var asm nodes
	for ix, in := range insns[:len(insns)-1] {
		n := node{
			info: nodeInfo{
				address: in.Addr,
				name:    in.Text,
				file:    trimPath(in.File),
				lineno:  in.Line,
			},
		}
		// Sum all the samples until the next instruction (to account
		// for samples attributed to the middle of an instruction).
		for next := insns[ix+1].Addr; s < len(samples) && samples[s].info.address-base < next; s++ {
			n.flat += samples[s].flat
			n.cum += samples[s].cum
			if samples[s].info.file != "" {
				// Prefer the source location recorded in the sample.
				n.info.file = trimPath(samples[s].info.file)
				n.info.lineno = samples[s].info.lineno
			}
		}
		asm = append(asm, &n)
	}
	return asm
}
// valueOrDot formats a value according to a report, rendering a zero
// value as a single dot.
func valueOrDot(value int64, rpt *Report) string {
	if value != 0 {
		return rpt.formatValue(value)
	}
	return "."
}
// canAccessFile determines if the filename can be opened for reading.
func canAccessFile(path string) bool {
if fi, err := os.Stat(path); err == nil {
return fi.Mode().Perm()&0400 != 0
}
return false
}
// printTags collects all tags referenced in the profile and prints
// them in a sorted table.
func printTags(w io.Writer, rpt *Report) error {
	p := rpt.prof
	// tagMap accumulates key -> value -> total sample weight.
	tagMap := make(map[string]map[string]int64)
	add := func(key, val string, weight int64) {
		valueMap := tagMap[key]
		if valueMap == nil {
			valueMap = make(map[string]int64)
			tagMap[key] = valueMap
		}
		valueMap[val] += weight
	}
	for _, s := range p.Sample {
		for key, vals := range s.Label {
			for _, val := range vals {
				add(key, val, s.Value[0])
			}
		}
		for key, vals := range s.NumLabel {
			for _, nval := range vals {
				// Numeric values are rendered with an auto-selected unit.
				add(key, scaledValueLabel(nval, key, "auto"), s.Value[0])
			}
		}
	}
	// Emit one table per key, keys in sorted order.
	tagKeys := make(tags, 0, len(tagMap))
	for key := range tagMap {
		tagKeys = append(tagKeys, &tag{name: key})
	}
	sort.Sort(tagKeys)
	for _, tagKey := range tagKeys {
		key := tagKey.name
		var total int64
		ts := make(tags, 0, len(tagMap[key]))
		for name, weight := range tagMap[key] {
			total += weight
			ts = append(ts, &tag{name: name, weight: weight})
		}
		sort.Sort(ts)
		fmt.Fprintf(w, "%s: Total %d\n", key, total)
		for _, t := range ts {
			if total > 0 {
				fmt.Fprintf(w, " %8d (%s): %s\n", t.weight,
					percentage(t.weight, total), t.name)
			} else {
				fmt.Fprintf(w, " %8d: %s\n", t.weight, t.name)
			}
		}
		fmt.Fprintln(w)
	}
	return nil
}
// printText prints a flat text report for a profile: a legend followed
// by one row per node with flat, running-flat, and cumulative values.
func printText(w io.Writer, rpt *Report) error {
	g, err := newGraph(rpt)
	if err != nil {
		return err
	}
	origCount, droppedNodes, _ := g.preprocess(rpt)
	for _, line := range legendDetailLabels(rpt, g, origCount, droppedNodes, 0) {
		fmt.Fprintln(w, line)
	}
	fmt.Fprintf(w, "%10s %5s%% %5s%% %10s %5s%%\n",
		"flat", "flat", "sum", "cum", "cum")
	var runningFlat int64
	for _, n := range g.ns {
		runningFlat += n.flat
		fmt.Fprintf(w, "%10s %s %s %10s %s %s\n",
			rpt.formatValue(n.flat),
			percentage(n.flat, rpt.total),
			percentage(runningFlat, rpt.total),
			rpt.formatValue(n.cum),
			percentage(n.cum, rpt.total),
			n.info.prettyName())
	}
	return nil
}
// printCallgrind prints a graph for a profile on callgrind format.
// It clears the node/edge filtering options so the output covers the
// whole profile. NOTE(review): this mutates rpt.options in place, so
// the change is visible to any later report generated from the same
// Report — confirm that is intended.
func printCallgrind(w io.Writer, rpt *Report) error {
	g, err := newGraph(rpt)
	if err != nil {
		return err
	}
	o := rpt.options
	rpt.options.NodeFraction = 0
	rpt.options.EdgeFraction = 0
	rpt.options.NodeCount = 0
	g.preprocess(rpt)
	// Header: event name with its output unit, e.g. "cpu(milliseconds)".
	fmt.Fprintln(w, "events:", o.SampleType+"("+o.OutputUnit+")")
	// files and names hold the callgrind name-compression state.
	files := make(map[string]int)
	names := make(map[string]int)
	for _, n := range g.ns {
		// fl/fn: file and function of the current cost line.
		fmt.Fprintln(w, "fl="+callgrindName(files, n.info.file))
		fmt.Fprintln(w, "fn="+callgrindName(names, n.info.name))
		sv, _ := ScaleValue(n.flat, o.SampleUnit, o.OutputUnit)
		fmt.Fprintf(w, "%d %d\n", n.info.lineno, int(sv))
		// Print outgoing edges.
		for _, out := range sortedEdges(n.out) {
			c, _ := ScaleValue(out.weight, o.SampleUnit, o.OutputUnit)
			count := fmt.Sprintf("%d", int(c))
			callee := out.dest
			// cfl/cfn/calls: callee file, function, and call cost line.
			fmt.Fprintln(w, "cfl="+callgrindName(files, callee.info.file))
			fmt.Fprintln(w, "cfn="+callgrindName(names, callee.info.name))
			fmt.Fprintln(w, "calls="+count, callee.info.lineno)
			fmt.Fprintln(w, n.info.lineno, count)
		}
		fmt.Fprintln(w)
	}
	return nil
}
// callgrindName implements the callgrind naming compression scheme.
// For names not previously seen returns "(N) name", where N is a
// unique index. For names previously seen returns "(N)" where N is
// the index returned the first time.
func callgrindName(names map[string]int, name string) string {
	switch id, seen := names[name]; {
	case name == "":
		return ""
	case seen:
		return fmt.Sprintf("(%d)", id)
	default:
		id = len(names) + 1
		names[name] = id
		return fmt.Sprintf("(%d) %s", id, name)
	}
}
// printTree prints a tree-based report in text form: for each node,
// its callers above, the node itself, and its callees below, framed
// by separator lines.
func printTree(w io.Writer, rpt *Report) error {
	const separator = "----------------------------------------------------------+-------------"
	const legend = " flat flat% sum% cum cum% calls calls% + context "
	g, err := newGraph(rpt)
	if err != nil {
		return err
	}
	origCount, droppedNodes, _ := g.preprocess(rpt)
	fmt.Fprintln(w, strings.Join(legendDetailLabels(rpt, g, origCount, droppedNodes, 0), "\n"))
	fmt.Fprintln(w, separator)
	fmt.Fprintln(w, legend)
	var flatSum int64
	// Optional regexp restricting output to matching nodes.
	rx := rpt.options.Symbol
	for _, n := range g.ns {
		name, flat, cum := n.info.prettyName(), n.flat, n.cum
		// Skip any entries that do not match the regexp (for the "peek" command).
		if rx != nil && !rx.MatchString(name) {
			continue
		}
		fmt.Fprintln(w, separator)
		// Print incoming edges, each as a share of all incoming weight.
		inEdges := sortedEdges(n.in)
		inSum := inEdges.sum()
		for _, in := range inEdges {
			fmt.Fprintf(w, "%50s %s | %s\n", rpt.formatValue(in.weight),
				percentage(in.weight, inSum), in.src.info.prettyName())
		}
		// Print current node.
		flatSum += flat
		fmt.Fprintf(w, "%10s %s %s %10s %s | %s\n",
			rpt.formatValue(flat),
			percentage(flat, rpt.total),
			percentage(flatSum, rpt.total),
			rpt.formatValue(cum),
			percentage(cum, rpt.total),
			name)
		// Print outgoing edges, each as a share of all outgoing weight.
		outEdges := sortedEdges(n.out)
		outSum := outEdges.sum()
		for _, out := range outEdges {
			fmt.Fprintf(w, "%50s %s | %s\n", rpt.formatValue(out.weight),
				percentage(out.weight, outSum), out.dest.info.prettyName())
		}
	}
	if len(g.ns) > 0 {
		fmt.Fprintln(w, separator)
	}
	return nil
}
// printDOT prints an annotated callgraph in DOT format.
func printDOT(w io.Writer, rpt *Report) error {
	g, err := newGraph(rpt)
	if err != nil {
		return err
	}
	origCount, droppedNodes, droppedEdges := g.preprocess(rpt)
	prof := rpt.prof
	// Name the graph after the profiled binary when one is known.
	graphname := "unnamed"
	if len(prof.Mapping) > 0 {
		graphname = filepath.Base(prof.Mapping[0].File)
	}
	fmt.Fprintln(w, `digraph "`+graphname+`" {`)
	fmt.Fprintln(w, `node [style=filled fillcolor="#f8f8f8"]`)
	fmt.Fprintln(w, dotLegend(rpt, g, origCount, droppedNodes, droppedEdges))
	if len(g.ns) == 0 {
		fmt.Fprintln(w, "}")
		return nil
	}
	// Make sure nodes have a unique consistent id.
	nodeIndex := make(map[*node]int)
	// maxFlat scales node font sizes; g.ns[0] is only the initial candidate.
	maxFlat := float64(g.ns[0].flat)
	for i, n := range g.ns {
		nodeIndex[n] = i + 1
		if float64(n.flat) > maxFlat {
			maxFlat = float64(n.flat)
		}
	}
	var edges edgeList
	for _, n := range g.ns {
		node := dotNode(rpt, maxFlat, nodeIndex[n], n)
		fmt.Fprintln(w, node)
		if nodelets := dotNodelets(rpt, nodeIndex[n], n); nodelets != "" {
			fmt.Fprint(w, nodelets)
		}
		// Collect outgoing edges.
		for _, e := range n.out {
			edges = append(edges, e)
		}
	}
	// Sort edges by frequency as a hint to the graph layout engine.
	sort.Sort(edges)
	for _, e := range edges {
		fmt.Fprintln(w, dotEdge(rpt, nodeIndex[e.src], nodeIndex[e.dest], e))
	}
	fmt.Fprintln(w, "}")
	return nil
}
// percentage computes the percentage of total of a value, and encodes
// it as a string. At least two digits of precision are printed.
func percentage(value, total int64) string {
	ratio := 0.0
	if total != 0 {
		ratio = 100 * float64(value) / float64(total)
	}
	if ratio >= 99.95 {
		return " 100%"
	}
	if ratio >= 1.0 {
		return fmt.Sprintf("%5.2f%%", ratio)
	}
	return fmt.Sprintf("%5.2g%%", ratio)
}
// dotLegend generates the overall graph label for a report in DOT format.
func dotLegend(rpt *Report, g graph, origCount, droppedNodes, droppedEdges int) string {
	lines := append(legendLabels(rpt),
		legendDetailLabels(rpt, g, origCount, droppedNodes, droppedEdges)...)
	return fmt.Sprintf(`subgraph cluster_L { L [shape=box fontsize=32 label="%s\l"] }`, strings.Join(lines, `\l`))
}
// legendLabels generates labels exclusive to graph visualization:
// binary name, build ID, sample type, and profile time/duration.
func legendLabels(rpt *Report) []string {
	prof := rpt.prof
	o := rpt.options
	var out []string
	if len(prof.Mapping) > 0 {
		if file := prof.Mapping[0].File; file != "" {
			out = append(out, "File: "+filepath.Base(file))
		}
		if id := prof.Mapping[0].BuildID; id != "" {
			out = append(out, "Build ID: "+id)
		}
	}
	if o.SampleType != "" {
		out = append(out, "Type: "+o.SampleType)
	}
	if prof.TimeNanos != 0 {
		const layout = "Jan 2, 2006 at 3:04pm (MST)"
		out = append(out, "Time: "+time.Unix(0, prof.TimeNanos).Format(layout))
	}
	if prof.DurationNanos != 0 {
		out = append(out, fmt.Sprintf("Duration: %v", time.Duration(prof.DurationNanos)))
	}
	return out
}
// legendDetailLabels generates labels common to graph and text visualization.
func legendDetailLabels(rpt *Report, g graph, origCount, droppedNodes, droppedEdges int) []string {
	nodeFraction := rpt.options.NodeFraction
	edgeFraction := rpt.options.EdgeFraction
	nodeCount := rpt.options.NodeCount
	label := []string{}
	// Sum of flat values of the nodes that survived filtering.
	var flatSum int64
	for _, n := range g.ns {
		flatSum = flatSum + n.flat
	}
	label = append(label, fmt.Sprintf("%s of %s total (%s)", rpt.formatValue(flatSum), rpt.formatValue(rpt.total), percentage(flatSum, rpt.total)))
	if rpt.total > 0 {
		if droppedNodes > 0 {
			label = append(label, genLabel(droppedNodes, "node", "cum",
				rpt.formatValue(int64(float64(rpt.total)*nodeFraction))))
		}
		if droppedEdges > 0 {
			label = append(label, genLabel(droppedEdges, "edge", "freq",
				rpt.formatValue(int64(float64(rpt.total)*edgeFraction))))
		}
		if nodeCount > 0 && nodeCount < origCount {
			// g.ns is sorted, so the last kept node carries the cum cutoff.
			label = append(label, fmt.Sprintf("Showing top %d nodes out of %d (cum >= %s)",
				nodeCount, origCount,
				rpt.formatValue(g.ns[len(g.ns)-1].cum)))
		}
	}
	return label
}
// genLabel composes a "Dropped N kind (criterion <= threshold)" legend
// line, pluralizing the kind when more than one item was dropped.
func genLabel(d int, n, l, f string) string {
	suffix := ""
	if d > 1 {
		suffix = "s"
	}
	return fmt.Sprintf("Dropped %d %s%s (%s <= %s)", d, n, suffix, l, f)
}
// dotNode generates a graph node in DOT format, labeled with the
// function name and its flat/cum values, with the font size scaled by
// the node's share of the largest flat value.
func dotNode(rpt *Report, maxFlat float64, rIndex int, n *node) string {
	flat, cum := n.flat, n.cum
	labels := strings.Split(n.info.prettyName(), "::")
	label := strings.Join(labels, `\n`) + `\n`
	flatValue := rpt.formatValue(flat)
	if flat > 0 {
		label = label + fmt.Sprintf(`%s(%s)`,
			flatValue,
			strings.TrimSpace(percentage(flat, rpt.total)))
	} else {
		label = label + "0"
	}
	cumValue := flatValue
	// Only show the cumulative value when it differs from flat.
	if cum != flat {
		if flat > 0 {
			label = label + `\n`
		} else {
			label = label + " "
		}
		cumValue = rpt.formatValue(cum)
		label = label + fmt.Sprintf(`of %s(%s)`,
			cumValue,
			strings.TrimSpace(percentage(cum, rpt.total)))
	}
	// Scale font sizes from 8 to 24 based on percentage of flat frequency.
	// Use non linear growth to emphasize the size difference.
	baseFontSize, maxFontGrowth := 8, 16.0
	fontSize := baseFontSize
	if maxFlat > 0 && flat > 0 && float64(flat) <= maxFlat {
		fontSize += int(math.Ceil(maxFontGrowth * math.Sqrt(float64(flat)/maxFlat)))
	}
	return fmt.Sprintf(`N%d [label="%s" fontsize=%d shape=box tooltip="%s (%s)"]`,
		rIndex,
		label,
		fontSize, n.info.prettyName(), cumValue)
}
// dotEdge generates a graph edge in DOT format. Heavier edges get a
// higher layout weight and a thicker pen; residual edges are dotted.
func dotEdge(rpt *Report, from, to int, e *edgeInfo) string {
	w := rpt.formatValue(e.weight)
	attr := fmt.Sprintf(`label=" %s"`, w)
	if rpt.total > 0 {
		if weight := 1 + int(e.weight*100/rpt.total); weight > 1 {
			attr = fmt.Sprintf(`%s weight=%d`, attr, weight)
		}
		if width := 1 + int(e.weight*5/rpt.total); width > 1 {
			attr = fmt.Sprintf(`%s penwidth=%d`, attr, width)
		}
	}
	// Tooltip arrow distinguishes direct ("->") from residual ("...") edges.
	arrow := "->"
	if e.residual {
		arrow = "..."
	}
	tooltip := fmt.Sprintf(`"%s %s %s (%s)"`,
		e.src.info.prettyName(), arrow, e.dest.info.prettyName(), w)
	attr = fmt.Sprintf(`%s tooltip=%s labeltooltip=%s`,
		attr, tooltip, tooltip)
	if e.residual {
		attr = attr + ` style="dotted"`
	}
	if len(e.src.tags) > 0 {
		// Separate children further if source has tags.
		attr = attr + " minlen=2"
	}
	return fmt.Sprintf("N%d -> N%d [%s]", from, to, attr)
}
// dotNodelets generates the DOT boxes for the node tags.
func dotNodelets(rpt *Report, rIndex int, n *node) (dot string) {
	const maxNodelets = 4 // Number of nodelets for alphanumeric labels
	const maxNumNodelets = 4 // Number of nodelets for numeric labels
	// Partition tags: no unit => alphanumeric, unit => numeric.
	var ts, nts tags
	for _, t := range n.tags {
		if t.unit == "" {
			ts = append(ts, t)
		} else {
			nts = append(nts, t)
		}
	}
	// Select the top maxNodelets alphanumeric labels by weight
	sort.Sort(ts)
	if len(ts) > maxNodelets {
		ts = ts[:maxNodelets]
	}
	for i, t := range ts {
		weight := rpt.formatValue(t.weight)
		dot += fmt.Sprintf(`N%d_%d [label = "%s" fontsize=8 shape=box3d tooltip="%s"]`+"\n", rIndex, i, t.name, weight)
		dot += fmt.Sprintf(`N%d -> N%d_%d [label=" %s" weight=100 tooltip="\L" labeltooltip="\L"]`+"\n", rIndex, rIndex, i, weight)
	}
	// Collapse numeric labels into maxNumNodelets buckets, of the form:
	// 1MB..2MB, 3MB..5MB, ...
	nts = collapseTags(nts, maxNumNodelets)
	sort.Sort(nts)
	for i, t := range nts {
		weight := rpt.formatValue(t.weight)
		dot += fmt.Sprintf(`NN%d_%d [label = "%s" fontsize=8 shape=box3d tooltip="%s"]`+"\n", rIndex, i, t.name, weight)
		dot += fmt.Sprintf(`N%d -> NN%d_%d [label=" %s" weight=100 tooltip="\L" labeltooltip="\L"]`+"\n", rIndex, rIndex, i, weight)
	}
	return dot
}
// graph summarizes a performance profile into a format that is
// suitable for visualization.
type graph struct {
	ns nodes // graph nodes, ordered by the most recent nodes.sort call
}

// nodes is an ordered collection of graph nodes.
type nodes []*node

// tags represent sample annotations; sorts by decreasing weight,
// breaking ties by name (see Less below).
type tags []*tag

// tagMap indexes tags by label for merging (see findOrAddTag).
type tagMap map[string]*tag

// tag is a single sample annotation with an accumulated weight.
type tag struct {
	name   string
	unit   string // Describe the value, "" for non-numeric tags
	value  int64  // numeric value, 0 for non-numeric tags
	weight int64  // total sample weight attributed to this tag
}

func (t tags) Len() int      { return len(t) }
func (t tags) Swap(i, j int) { t[i], t[j] = t[j], t[i] }

// Less orders by decreasing weight, then by increasing name.
func (t tags) Less(i, j int) bool {
	if t[i].weight == t[j].weight {
		return t[i].name < t[j].name
	}
	return t[i].weight > t[j].weight
}
// node is an entry on a profiling report. It represents a unique
// program location. It can include multiple names to represent
// inlined functions.
type node struct {
	info nodeInfo // Information associated to this entry.
	// values associated to this node.
	// flat is exclusive to this node, cum includes all descendants.
	flat, cum int64
	// in and out contains the nodes immediately reaching or reached by this nodes.
	in, out edgeMap
	// tags provide additional information about subsets of a sample.
	tags tagMap
}
// string renders each tag as "name unit value weight" on its own line.
func (ts tags) string() string {
	var b strings.Builder
	for _, t := range ts {
		fmt.Fprintf(&b, "%s %s %d %d\n", t.name, t.unit, t.value, t.weight)
	}
	return b.String()
}
// nodeInfo holds the identifying attributes of a node; nodes with
// equal nodeInfo values are merged (see nodeMap.findOrInsertNode).
type nodeInfo struct {
	name              string // function name (from the profile's Function record)
	origName          string // system (original) name from the profile
	address           uint64
	file              string
	startLine, lineno int
	inline            bool // frame produced by inlining (see newLocInfo)
	lowPriority       bool // flat==0 pass-through node, trimmed first (see preprocess)
	objfile           string // base name of the mapped object file
	parent            *node // Used only if creating a calltree
}
// addTags folds the labels of sample s into the node's tag set,
// adding weight to each resulting tag's accumulated weight.
func (n *node) addTags(s *profile.Sample, weight int64) {
	// Add a tag with all string labels
	var labels []string
	for key, vals := range s.Label {
		for _, v := range vals {
			labels = append(labels, key+":"+v)
		}
	}
	if len(labels) > 0 {
		// All string labels of a sample form one composite tag.
		sort.Strings(labels)
		l := n.tags.findOrAddTag(strings.Join(labels, `\n`), "", 0)
		l.weight += weight
	}
	// Each numeric label becomes its own tag, formatted with an
	// auto-selected unit.
	for key, nvals := range s.NumLabel {
		for _, v := range nvals {
			label := scaledValueLabel(v, key, "auto")
			l := n.tags.findOrAddTag(label, key, v)
			l.weight += weight
		}
	}
}
// findOrAddTag returns the tag registered under label, creating and
// registering one with the given unit and value on first use.
func (m tagMap) findOrAddTag(label, unit string, value int64) *tag {
	if t := m[label]; t != nil {
		return t
	}
	t := &tag{name: label, unit: unit, value: value}
	m[label] = t
	return t
}
// collapseTags reduces the number of entries in a tagMap by merging
// adjacent nodes into ranges. It uses a greedy approach to merge
// starting with the entries with the lowest weight.
func collapseTags(ts tags, count int) tags {
	if len(ts) <= count {
		return ts
	}
	sort.Sort(ts)
	// Seed one group per heaviest tag (ts is sorted by weight).
	tagGroups := make([]tags, count)
	for i, t := range ts[:count] {
		tagGroups[i] = tags{t}
	}
	// Assign each remaining tag to the group whose seed value is nearest.
	for _, t := range ts[count:] {
		g, d := 0, tagDistance(t, tagGroups[0][0])
		for i := 1; i < count; i++ {
			if nd := tagDistance(t, tagGroups[i][0]); nd < d {
				g, d = i, nd
			}
		}
		tagGroups[g] = append(tagGroups[g], t)
	}
	// Replace each group by a single range tag with the summed weight.
	var nts tags
	for _, g := range tagGroups {
		l, w := tagGroupLabel(g)
		nts = append(nts, &tag{
			name:   l,
			weight: w,
		})
	}
	return nts
}
// tagDistance computes the absolute difference between u's value
// (scaled into t's unit) and t's value.
func tagDistance(t, u *tag) float64 {
	v, _ := ScaleValue(u.value, u.unit, t.unit)
	return math.Abs(v - float64(t.value))
}
// tagGroupLabel returns a label for a group of tags — the single
// tag's own label, or "min..max" over the group — together with the
// group's total weight.
func tagGroupLabel(g tags) (string, int64) {
	if len(g) == 1 {
		t := g[0]
		return scaledValueLabel(t.value, t.unit, "auto"), t.weight
	}
	min := g[0]
	max := g[0]
	w := min.weight
	for _, t := range g[1:] {
		// Compare after scaling into the current extreme's unit.
		if v, _ := ScaleValue(t.value, t.unit, min.unit); int64(v) < min.value {
			min = t
		}
		if v, _ := ScaleValue(t.value, t.unit, max.unit); int64(v) > max.value {
			max = t
		}
		w += t.weight
	}
	return scaledValueLabel(min.value, min.unit, "auto") + ".." +
		scaledValueLabel(max.value, max.unit, "auto"), w
}
// sumNodes totals the flat and cumulative values over a set of nodes.
func sumNodes(ns nodes) (flat, cum int64) {
	for _, n := range ns {
		flat += n.flat
		cum += n.cum
	}
	return flat, cum
}
// edgeMap collects a node's edges, keyed by the node on the far end.
type edgeMap map[*node]*edgeInfo

// edgeInfo contains any attributes to be represented about edges in a graph.
type edgeInfo struct {
	src, dest *node
	// The summary weight of the edge
	weight int64
	// residual edges connect nodes that were connected through a
	// separate node, which has been removed from the report.
	residual bool
}
// bumpWeight increases the weight of an edge. If there isn't such an
// edge in the map one is created.
func bumpWeight(from, to *node, w int64, residual bool) {
	// Invariant: an edge is registered in both endpoints or in neither.
	if from.out[to] != to.in[from] {
		panic(fmt.Errorf("asymmetric edges %v %v", *from, *to))
	}
	if n := from.out[to]; n != nil {
		n.weight += w
		// A direct (non-residual) contribution makes the edge direct.
		if n.residual && !residual {
			n.residual = false
		}
		return
	}
	info := &edgeInfo{src: from, dest: to, weight: w, residual: residual}
	from.out[to] = info
	to.in[from] = info
}
// Output formats.
const (
	Proto     = iota // serialized profile protocol buffer
	Dot              // annotated callgraph in graphviz DOT format
	Tags             // sorted table of profile tags
	Tree             // caller/callee context tree in text form
	Text             // flat text table
	Raw              // raw textual dump of the profile
	Dis              // annotated disassembly
	List             // annotated source listing
	WebList          // annotated source listing as HTML
	Callgrind        // callgrind/kcachegrind format
)
// Options are the formatting and filtering options used to generate a
// profile.
type Options struct {
	OutputFormat int // one of the output format constants above

	CumSort        bool // sort by cumulative value rather than flat
	CallTree       bool // build a context-sensitive call tree (Dot output only)
	PrintAddresses bool
	DropNegative   bool    // drop nodes with negative flat values
	Ratio          float64 // value scaling ratio (used when selecting output units)

	NodeCount    int     // maximum number of nodes to show, 0 for all
	NodeFraction float64 // drop nodes with cum below this fraction of the total
	EdgeFraction float64 // drop edges with weight below this fraction of the total

	SampleType string
	SampleUnit string // Unit for the sample data from the profile.

	OutputUnit string // Units for data formatting in report.

	Symbol *regexp.Regexp // Symbols to include on disassembly report.
}
// newGraph summarizes performance data from a profile into a graph.
// Nodes with identical nodeInfo are merged; when a call tree is
// requested (Dot output), parent chains keep contexts distinct.
func newGraph(rpt *Report) (g graph, err error) {
	prof := rpt.prof
	o := rpt.options
	// Generate a tree for graphical output if requested.
	buildTree := o.CallTree && o.OutputFormat == Dot
	// Pre-expand each location into its (possibly inlined) frames.
	locations := make(map[uint64][]nodeInfo)
	for _, l := range prof.Location {
		locations[l.ID] = newLocInfo(l)
	}
	nm := make(nodeMap)
	for _, sample := range prof.Sample {
		if sample.Location == nil {
			continue
		}
		// Construct list of node names for sample.
		// stack[0] is the leaf frame.
		var stack []nodeInfo
		for _, loc := range sample.Location {
			id := loc.ID
			stack = append(stack, locations[id]...)
		}
		// Upfront pass to update the parent chains, to prevent the
		// merging of nodes with different parents.
		if buildTree {
			var nn *node
			for i := len(stack); i > 0; i-- {
				n := &stack[i-1]
				n.parent = nn
				nn = nm.findOrInsertNode(*n)
			}
		}
		// NOTE(review): if no location ID resolves in locations, stack
		// is empty and stack[0] panics — confirm profiles always yield
		// at least one frame per sample.
		leaf := nm.findOrInsertNode(stack[0])
		weight := rpt.sampleValue(sample)
		leaf.addTags(sample, weight)
		// Aggregate counter data.
		leaf.flat += weight
		// Add cum weight once per distinct node in the stack and bump
		// the caller->callee edge weights along the way.
		seen := make(map[*node]bool)
		var nn *node
		for _, s := range stack {
			n := nm.findOrInsertNode(s)
			if !seen[n] {
				seen[n] = true
				n.cum += weight
				if nn != nil {
					bumpWeight(n, nn, weight, false)
				}
			}
			nn = n
		}
	}
	// Collect new nodes into a report.
	ns := make(nodes, 0, len(nm))
	for _, n := range nm {
		if rpt.options.DropNegative && n.flat < 0 {
			continue
		}
		ns = append(ns, n)
	}
	return graph{ns}, nil
}
// newLocInfo creates a slice of formatted names for a location, one
// entry per line record; a location without line information yields a
// single address-only entry.
func newLocInfo(l *profile.Location) []nodeInfo {
	var objfile string
	if m := l.Mapping; m != nil {
		objfile = filepath.Base(m.File)
	}
	if len(l.Line) == 0 {
		return []nodeInfo{
			{
				address: l.Address,
				objfile: objfile,
			},
		}
	}
	var info []nodeInfo
	// All but the last line entry are marked as inlined frames.
	numInlineFrames := len(l.Line) - 1
	for li, line := range l.Line {
		ni := nodeInfo{
			address: l.Address,
			lineno:  int(line.Line),
			inline:  li < numInlineFrames,
			objfile: objfile,
		}
		if line.Function != nil {
			ni.name = line.Function.Name
			ni.origName = line.Function.SystemName
			ni.file = line.Function.Filename
			ni.startLine = int(line.Function.StartLine)
		}
		info = append(info, ni)
	}
	return info
}
// nodeMap maps from a node info struct to a node. It is used to merge
// report entries with the same info.
type nodeMap map[nodeInfo]*node

// findOrInsertNode returns the node registered for info, allocating
// and registering a fresh empty node on first use.
func (m nodeMap) findOrInsertNode(info nodeInfo) *node {
	if n := m[info]; n != nil {
		return n
	}
	n := &node{
		info: info,
		in:   make(edgeMap),
		out:  make(edgeMap),
		tags: make(tagMap),
	}
	m[info] = n
	return n
}
// preprocess does any required filtering/sorting according to the
// report options. Returns the original node count and statistics on
// the nodes/edges removed.
func (g *graph) preprocess(rpt *Report) (origCount, droppedNodes, droppedEdges int) {
	o := rpt.options
	// Compute total weight of current set of nodes.
	// This is <= rpt.total because of node filtering.
	var totalValue int64
	for _, n := range g.ns {
		totalValue += n.flat
	}
	// Remove nodes with value <= total*nodeFraction
	if nodeFraction := o.NodeFraction; nodeFraction > 0 {
		var removed nodes
		minValue := int64(float64(totalValue) * nodeFraction)
		kept := make(nodes, 0, len(g.ns))
		for _, n := range g.ns {
			if n.cum < minValue {
				removed = append(removed, n)
			} else {
				kept = append(kept, n)
				// Also drop tags below the node cutoff from kept nodes.
				tagsKept := make(map[string]*tag)
				for s, t := range n.tags {
					if t.weight >= minValue {
						tagsKept[s] = t
					}
				}
				n.tags = tagsKept
			}
		}
		droppedNodes = len(removed)
		// No bridging: edges through dropped nodes simply disappear.
		removeNodes(removed, false, false)
		g.ns = kept
	}
	// Remove edges below minimum frequency.
	if edgeFraction := o.EdgeFraction; edgeFraction > 0 {
		minEdge := int64(float64(totalValue) * edgeFraction)
		for _, n := range g.ns {
			for src, e := range n.in {
				if e.weight < minEdge {
					// Drop the edge from both endpoints.
					delete(n.in, src)
					delete(src.out, n)
					droppedEdges++
				}
			}
		}
	}
	sortOrder := flatName
	if o.CumSort {
		// Force cum sorting for graph output, to preserve connectivity.
		sortOrder = cumName
	}
	// Nodes that have flat==0 and a single in/out do not provide much
	// information. Give them first chance to be removed. Do not consider edges
	// from/to nodes that are expected to be removed.
	maxNodes := o.NodeCount
	if o.OutputFormat == Dot {
		if maxNodes > 0 && maxNodes < len(g.ns) {
			sortOrder = cumName
			g.ns.sort(cumName)
			// cum of the first node past the limit is the trim threshold.
			cumCutoff := g.ns[maxNodes].cum
			for _, n := range g.ns {
				if n.flat == 0 {
					if count := countEdges(n.out, cumCutoff); count > 1 {
						continue
					}
					if count := countEdges(n.in, cumCutoff); count != 1 {
						continue
					}
					// Marked nodes sort last under cumName and so get
					// trimmed first.
					n.info.lowPriority = true
				}
			}
		}
	}
	g.ns.sort(sortOrder)
	if maxNodes > 0 {
		origCount = len(g.ns)
		for index, nodes := 0, 0; index < len(g.ns); index++ {
			nodes++
			// For DOT output, count the tags as nodes since we will draw
			// boxes for them.
			if o.OutputFormat == Dot {
				nodes += len(g.ns[index].tags)
			}
			if nodes > maxNodes {
				// Trim to the top n nodes. Create dotted edges to bridge any
				// broken connections.
				removeNodes(g.ns[index:], true, true)
				g.ns = g.ns[:index]
				break
			}
		}
	}
	removeRedundantEdges(g.ns)
	// Select best unit for profile output.
	// Find the appropriate units for the smallest non-zero sample
	if o.OutputUnit == "minimum" && len(g.ns) > 0 {
		var maxValue, minValue int64
		for _, n := range g.ns {
			if n.flat > 0 && (minValue == 0 || n.flat < minValue) {
				minValue = n.flat
			}
			if n.cum > maxValue {
				maxValue = n.cum
			}
		}
		if r := o.Ratio; r > 0 && r != 1 {
			minValue = int64(float64(minValue) * r)
			maxValue = int64(float64(maxValue) * r)
		}
		_, minUnit := ScaleValue(minValue, o.SampleUnit, "minimum")
		_, maxUnit := ScaleValue(maxValue, o.SampleUnit, "minimum")
		unit := minUnit
		if minUnit != maxUnit && minValue*100 < maxValue && o.OutputFormat != Callgrind {
			// Minimum and maximum values have different units. Scale
			// minimum by 100 to use larger units, allowing minimum value to
			// be scaled down to 0.01, except for callgrind reports since
			// they can only represent integer values.
			_, unit = ScaleValue(100*minValue, o.SampleUnit, "minimum")
		}
		if unit != "" {
			o.OutputUnit = unit
		} else {
			o.OutputUnit = o.SampleUnit
		}
	}
	return
}
// countEdges counts the number of edges with weight strictly above
// the specified cutoff. (The previous comment said "below", but the
// code compares e.weight > cutoff.)
func countEdges(el edgeMap, cutoff int64) int {
	count := 0
	for _, e := range el {
		if e.weight > cutoff {
			count++
		}
	}
	return count
}
// removeNodes removes nodes from a report, optionally bridging
// connections between in/out edges and spreading out their weights
// proportionally. residual marks new bridge edges as residual
// (dotted).
func removeNodes(toRemove nodes, bridge, residual bool) {
	for _, n := range toRemove {
		// Detach n from its callers' out-edge maps.
		for ei := range n.in {
			delete(ei.out, n)
		}
		if bridge {
			// Connect each caller to each callee, splitting n's outgoing
			// weight in proportion to the caller's share of n.cum.
			for ei, wi := range n.in {
				for eo, wo := range n.out {
					var weight int64
					if n.cum != 0 {
						weight = int64(float64(wo.weight) * (float64(wi.weight) / float64(n.cum)))
					}
					bumpWeight(ei, eo, weight, residual)
				}
			}
		}
		// Detach n from its callees' in-edge maps.
		for eo := range n.out {
			delete(eo.in, n)
		}
	}
}
// removeRedundantEdges removes residual edges if the destination can
// be reached through another path. This is done to simplify the graph
// while preserving connectivity.
func removeRedundantEdges(ns nodes) {
	// Walk the nodes and outgoing edges in reverse order to prefer
	// removing edges with the lowest weight.
	for i := len(ns); i > 0; i-- {
		n := ns[i-1]
		in := sortedEdges(n.in)
		for j := len(in); j > 0; j-- {
			// Only residual (bridge) edges are candidates for removal.
			if e := in[j-1]; e.residual && isRedundant(e) {
				delete(e.src.out, e.dest)
				delete(e.dest.in, e.src)
			}
		}
	}
}
// isRedundant determines if an edge can be removed without impacting
// connectivity of the whole graph. This is implemented by checking if the
// nodes have a common ancestor after removing the edge.
func isRedundant(e *edgeInfo) bool {
	destPred := predecessors(e, e.dest)
	if len(destPred) == 1 {
		// Only e reaches the destination; removal would disconnect it.
		return false
	}
	srcPred := predecessors(e, e.src)
	// Redundant if another predecessor of dest also reaches src.
	for n := range srcPred {
		if destPred[n] && n != e.dest {
			return true
		}
	}
	return false
}
// predecessors collects all the predecessors to node n, excluding edge e.
// The node itself is always included in the returned set.
func predecessors(e *edgeInfo, n *node) map[*node]bool {
	seen := map[*node]bool{n: true}
	frontier := []*node{n}
	for len(frontier) > 0 {
		cur := frontier[len(frontier)-1]
		frontier = frontier[:len(frontier)-1]
		for _, in := range cur.in {
			if in == e || seen[in.src] {
				continue
			}
			seen[in.src] = true
			frontier = append(frontier, in.src)
		}
	}
	return seen
}
// nodeSorter is a mechanism used to allow a report to be sorted
// in different ways.
type nodeSorter struct {
	rs   nodes
	less func(i, j int) bool // comparison over indexes into rs
}

func (s nodeSorter) Len() int           { return len(s.rs) }
func (s nodeSorter) Swap(i, j int)      { s.rs[i], s.rs[j] = s.rs[j], s.rs[i] }
func (s nodeSorter) Less(i, j int) bool { return s.less(i, j) }
// nodeOrder selects the sort criterion used by nodes.sort.
type nodeOrder int

const (
	flatName     nodeOrder = iota // flat value desc, then name, then cum desc
	flatCumName                   // flat value desc, then cum desc, then name
	cumName                       // cum desc (lowPriority last), then name, then flat desc
	nameOrder                     // name, ascending
	fileOrder                     // file name, ascending
	addressOrder                  // address, ascending
)
// sort reorders the entries in a report based on the specified
// ordering criteria. The result is sorted in decreasing order for
// numeric quantities, alphabetically for text, and increasing for
// addresses.
func (ns nodes) sort(o nodeOrder) error {
	var s nodeSorter
	switch o {
	case flatName:
		s = nodeSorter{ns,
			func(i, j int) bool {
				if iv, jv := ns[i].flat, ns[j].flat; iv != jv {
					return iv > jv
				}
				if ns[i].info.prettyName() != ns[j].info.prettyName() {
					return ns[i].info.prettyName() < ns[j].info.prettyName()
				}
				iv, jv := ns[i].cum, ns[j].cum
				return iv > jv
			},
		}
	case flatCumName:
		s = nodeSorter{ns,
			func(i, j int) bool {
				if iv, jv := ns[i].flat, ns[j].flat; iv != jv {
					return iv > jv
				}
				if iv, jv := ns[i].cum, ns[j].cum; iv != jv {
					return iv > jv
				}
				return ns[i].info.prettyName() < ns[j].info.prettyName()
			},
		}
	case cumName:
		s = nodeSorter{ns,
			func(i, j int) bool {
				// Low-priority nodes always sort last (see preprocess).
				if ns[i].info.lowPriority != ns[j].info.lowPriority {
					return ns[j].info.lowPriority
				}
				if iv, jv := ns[i].cum, ns[j].cum; iv != jv {
					return iv > jv
				}
				if ns[i].info.prettyName() != ns[j].info.prettyName() {
					return ns[i].info.prettyName() < ns[j].info.prettyName()
				}
				iv, jv := ns[i].flat, ns[j].flat
				return iv > jv
			},
		}
	case nameOrder:
		s = nodeSorter{ns,
			func(i, j int) bool {
				return ns[i].info.name < ns[j].info.name
			},
		}
	case fileOrder:
		s = nodeSorter{ns,
			func(i, j int) bool {
				return ns[i].info.file < ns[j].info.file
			},
		}
	case addressOrder:
		s = nodeSorter{ns,
			func(i, j int) bool {
				return ns[i].info.address < ns[j].info.address
			},
		}
	default:
		return fmt.Errorf("report: unrecognized sort ordering: %d", o)
	}
	sort.Sort(s)
	return nil
}
// edgeList is a sortable slice of edges (see Less for the ordering).
type edgeList []*edgeInfo

// sortedEdges return a slice of the edges in the map, sorted for
// visualization. The sort order is first based on the edge weight
// (higher-to-lower) and then by the node names to avoid flakiness.
func sortedEdges(edges map[*node]*edgeInfo) edgeList {
	el := make(edgeList, 0, len(edges))
	for _, w := range edges {
		el = append(el, w)
	}
	sort.Sort(el)
	return el
}

func (el edgeList) Len() int {
	return len(el)
}

// Less orders by decreasing weight, breaking ties on source then
// destination pretty names so output is deterministic.
func (el edgeList) Less(i, j int) bool {
	if el[i].weight != el[j].weight {
		return el[i].weight > el[j].weight
	}
	from1 := el[i].src.info.prettyName()
	from2 := el[j].src.info.prettyName()
	if from1 != from2 {
		return from1 < from2
	}
	to1 := el[i].dest.info.prettyName()
	to2 := el[j].dest.info.prettyName()
	return to1 < to2
}

func (el edgeList) Swap(i, j int) {
	el[i], el[j] = el[j], el[i]
}

// sum totals the weights of all edges in the list.
func (el edgeList) sum() int64 {
	var ret int64
	for _, e := range el {
		ret += e.weight
	}
	return ret
}
// ScaleValue reformats a value from one unit to another, returning
// the scaled value and the unit label to print next to it.
func ScaleValue(value int64, fromUnit, toUnit string) (sv float64, su string) {
	// Negative values are handled via the positive branch. The guard
	// skips math.MinInt64, whose negation overflows back to itself and
	// would otherwise recurse forever.
	if value < 0 && -value > 0 {
		sv, su = ScaleValue(-value, fromUnit, toUnit)
		return -sv, su
	}
	if v, u, ok := memoryLabel(value, fromUnit, toUnit); ok {
		return v, u
	}
	if v, u, ok := timeLabel(value, fromUnit, toUnit); ok {
		return v, u
	}
	// Units that carry no interesting label are printed bare.
	switch toUnit {
	case "count", "sample", "unit", "minimum":
		return float64(value), ""
	}
	return float64(value), toUnit
}

// scaledValueLabel renders value as a compact string in toUnit,
// trimming a trailing ".00" and collapsing signed zero to "0".
func scaledValueLabel(value int64, fromUnit, toUnit string) string {
	v, u := ScaleValue(value, fromUnit, toUnit)
	s := strings.TrimSuffix(fmt.Sprintf("%.2f", v), ".00")
	switch s {
	case "0", "-0":
		return "0"
	}
	return s + u
}
// memoryLabel converts value from fromUnit to toUnit when both look
// like memory units. It returns the scaled value, the label to print,
// and ok=false when fromUnit is not a recognized memory unit.
// toUnit "minimum"/"auto" picks the largest unit that keeps the value
// at or above 1.
func memoryLabel(value int64, fromUnit, toUnit string) (v float64, u string, ok bool) {
	fromUnit = strings.TrimSuffix(strings.ToLower(fromUnit), "s")
	toUnit = strings.TrimSuffix(strings.ToLower(toUnit), "s")

	// Normalize the input to bytes.
	switch fromUnit {
	case "byte", "b":
	case "kilobyte", "kb":
		value *= 1024
	case "megabyte", "mb":
		value *= 1024 * 1024
	case "gigabyte", "gb":
		// Fixed: previously multiplied by 1024*1024 (the megabyte
		// factor), under-scaling gigabyte inputs by a factor of 1024.
		value *= 1024 * 1024 * 1024
	default:
		return 0, "", false
	}

	if toUnit == "minimum" || toUnit == "auto" {
		switch {
		case value < 1024:
			toUnit = "b"
		case value < 1024*1024:
			toUnit = "kb"
		case value < 1024*1024*1024:
			toUnit = "mb"
		default:
			toUnit = "gb"
		}
	}

	var output float64
	switch toUnit {
	default:
		output, toUnit = float64(value), "B"
	case "kb", "kbyte", "kilobyte":
		output, toUnit = float64(value)/1024, "kB"
	case "mb", "mbyte", "megabyte":
		output, toUnit = float64(value)/(1024*1024), "MB"
	case "gb", "gbyte", "gigabyte", "giggabyte":
		// "gigabyte" added: only the misspelled "giggabyte" matched
		// before, so correctly-spelled requests fell through to bytes.
		// The misspelling is kept for backward compatibility.
		output, toUnit = float64(value)/(1024*1024*1024), "GB"
	}
	return output, toUnit, true
}
// timeLabel converts value from fromUnit to toUnit when fromUnit is a
// recognized time unit. It returns the scaled value, the label to
// print, and ok=false for non-time units. toUnit "minimum"/"auto"
// picks a unit appropriate to the magnitude. "cycle" values pass
// through unscaled with no label.
func timeLabel(value int64, fromUnit, toUnit string) (v float64, u string, ok bool) {
	fromUnit = strings.ToLower(fromUnit)
	if len(fromUnit) > 2 {
		fromUnit = strings.TrimSuffix(fromUnit, "s")
	}

	toUnit = strings.ToLower(toUnit)
	if len(toUnit) > 2 {
		toUnit = strings.TrimSuffix(toUnit, "s")
	}

	// Normalize the input to a time.Duration.
	var d time.Duration
	switch fromUnit {
	case "nanosecond", "ns":
		d = time.Duration(value) * time.Nanosecond
	case "microsecond":
		d = time.Duration(value) * time.Microsecond
	case "millisecond", "ms":
		d = time.Duration(value) * time.Millisecond
	case "second", "sec":
		d = time.Duration(value) * time.Second
	case "cycle":
		return float64(value), "", true
	default:
		return 0, "", false
	}

	if toUnit == "minimum" || toUnit == "auto" {
		switch {
		case d < 1*time.Microsecond:
			toUnit = "ns"
		case d < 1*time.Millisecond:
			toUnit = "us"
		case d < 1*time.Second:
			toUnit = "ms"
		case d < 1*time.Minute:
			toUnit = "sec"
		case d < 1*time.Hour:
			toUnit = "min"
		case d < 24*time.Hour:
			toUnit = "hour"
		case d < 15*24*time.Hour:
			toUnit = "day"
		case d < 120*24*time.Hour:
			toUnit = "week"
		default:
			toUnit = "year"
		}
	}

	var output float64
	dd := float64(d)
	switch toUnit {
	case "ns", "nanosecond":
		output, toUnit = dd/float64(time.Nanosecond), "ns"
	case "us", "microsecond":
		output, toUnit = dd/float64(time.Microsecond), "us"
	case "ms", "millisecond":
		output, toUnit = dd/float64(time.Millisecond), "ms"
	case "min", "minute":
		output, toUnit = dd/float64(time.Minute), "mins"
	case "hour", "hr":
		output, toUnit = dd/float64(time.Hour), "hrs"
	case "day":
		output, toUnit = dd/float64(24*time.Hour), "days"
	case "week", "wk":
		output, toUnit = dd/float64(7*24*time.Hour), "wks"
	case "year", "yr":
		// Fixed: the divisor was 365*7*24h, mixing the week factor
		// into the year; a year is 365*24h here.
		output, toUnit = dd/float64(365*24*time.Hour), "yrs"
	default:
		fallthrough
	case "sec", "second", "s":
		output, toUnit = dd/float64(time.Second), "s"
	}
	return output, toUnit, true
}
// prettyName determines the printable name to be used for a node:
// the hex address (if any), the symbol name, the trimmed file path
// with line number, and an "(inline)" marker, joined by spaces. If
// everything is empty, the object file name in brackets is used.
func (info *nodeInfo) prettyName() string {
	var parts []string
	if info.address != 0 {
		parts = append(parts, fmt.Sprintf("%016x", info.address))
	}
	if info.name != "" {
		parts = append(parts, info.name)
	}
	if info.file != "" {
		loc := trimPath(info.file)
		if info.lineno != 0 {
			loc += fmt.Sprintf(":%d", info.lineno)
		}
		parts = append(parts, loc)
	}
	if info.inline {
		parts = append(parts, "(inline)")
	}
	name := strings.Join(parts, " ")
	if name == "" && info.objfile != "" {
		name = "[" + info.objfile + "]"
	}
	return name
}
// New builds a new report indexing the sample values interpreting the
// samples with the provided function.
func New(prof *profile.Profile, options Options, value func(s *profile.Sample) int64, unit string) *Report {
o := &options
if o.SampleUnit == "" {
o.SampleUnit = unit
}
format := func(v int64) string {
if r := o.Ratio; r > 0 && r != 1 {
fv := float64(v) * r
v = int64(fv)
}
return scaledValueLabel(v, o.SampleUnit, o.OutputUnit)
}
return &Report{prof, computeTotal(prof, value), o, value, format}
}
// NewDefault builds a new report indexing the sample values with the
// last value available.
func NewDefault(prof *profile.Profile, options Options) *Report {
index := len(prof.SampleType) - 1
o := &options
if o.SampleUnit == "" {
o.SampleUnit = strings.ToLower(prof.SampleType[index].Unit)
}
value := func(s *profile.Sample) int64 {
return s.Value[index]
}
format := func(v int64) string {
if r := o.Ratio; r > 0 && r != 1 {
fv := float64(v) * r
v = int64(fv)
}
return scaledValueLabel(v, o.SampleUnit, o.OutputUnit)
}
return &Report{prof, computeTotal(prof, value), o, value, format}
}
func computeTotal(prof *profile.Profile, value func(s *profile.Sample) int64) int64 {
var ret int64
for _, sample := range prof.Sample {
ret += value(sample)
}
return ret
}
// Report contains the data and associated routines to extract a
// report from a profile.
type Report struct {
	prof        *profile.Profile
	total       int64 // sum of sampleValue over all samples, computed at construction
	options     *Options
	sampleValue func(*profile.Sample) int64 // extracts the reported value from a sample
	formatValue func(int64) string          // renders a value in the report's output unit
}
// formatTags renders the string and numeric labels of a sample as a
// single sorted, `\n`-separated string (the literal two characters,
// for use in dot labels). Returns false when the sample has no labels.
func (rpt *Report) formatTags(s *profile.Sample) (string, bool) {
	var tags []string
	for key, values := range s.Label {
		for _, v := range values {
			tags = append(tags, key+":"+v)
		}
	}
	for key, values := range s.NumLabel {
		for _, v := range values {
			tags = append(tags, scaledValueLabel(v, key, "auto"))
		}
	}
	if len(tags) == 0 {
		return "", false
	}
	sort.Strings(tags)
	return strings.Join(tags, `\n`), true
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package report
// This file contains routines related to the generation of annotated
// source listings.
import (
"bufio"
"fmt"
"html/template"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"cmd/pprof/internal/plugin"
)
// printSource prints an annotated source listing, including all
// functions with samples that match the regexp rpt.options.symbol.
// The sources are sorted by function name and then by filename to
// eliminate potential nondeterminism.
func printSource(w io.Writer, rpt *Report) error {
	o := rpt.options
	g, err := newGraph(rpt)
	if err != nil {
		return err
	}

	// Identify all the functions that match the regexp provided.
	// Group nodes for each matching function.
	var functions nodes
	functionNodes := make(map[string]nodes)
	for _, n := range g.ns {
		if !o.Symbol.MatchString(n.info.name) {
			continue
		}
		// First node seen for a function also records the function itself.
		if functionNodes[n.info.name] == nil {
			functions = append(functions, n)
		}
		functionNodes[n.info.name] = append(functionNodes[n.info.name], n)
	}
	functions.sort(nameOrder)

	fmt.Fprintf(w, "Total: %s\n", rpt.formatValue(rpt.total))
	for _, fn := range functions {
		name := fn.info.name

		// Identify all the source files associated to this function.
		// Group nodes for each source file.
		var sourceFiles nodes
		fileNodes := make(map[string]nodes)
		for _, n := range functionNodes[name] {
			if n.info.file == "" {
				continue
			}
			if fileNodes[n.info.file] == nil {
				sourceFiles = append(sourceFiles, n)
			}
			fileNodes[n.info.file] = append(fileNodes[n.info.file], n)
		}
		if len(sourceFiles) == 0 {
			// NOTE(review): this writes to os.Stdout rather than w — confirm intentional.
			fmt.Printf("No source information for %s\n", name)
			continue
		}
		sourceFiles.sort(fileOrder)

		// Print each file associated with this function.
		for _, fl := range sourceFiles {
			filename := fl.info.file
			fns := fileNodes[filename]
			flatSum, cumSum := sumNodes(fns)

			// The header is printed even when the source cannot be read;
			// the error then replaces the per-line listing.
			fnodes, path, err := getFunctionSource(name, filename, fns, 0, 0)
			fmt.Fprintf(w, "ROUTINE ======================== %s in %s\n", name, path)
			fmt.Fprintf(w, "%10s %10s (flat, cum) %s of Total\n",
				rpt.formatValue(flatSum), rpt.formatValue(cumSum),
				percentage(cumSum, rpt.total))
			if err != nil {
				fmt.Fprintf(w, " Error: %v\n", err)
				continue
			}
			// One output line per source line, annotated with flat/cum values.
			for _, fn := range fnodes {
				fmt.Fprintf(w, "%10s %10s %6d:%s\n", valueOrDot(fn.flat, rpt), valueOrDot(fn.cum, rpt), fn.info.lineno, fn.info.name)
			}
		}
	}
	return nil
}
// printWebSource prints an annotated HTML source listing, including
// all functions with samples that match the regexp rpt.options.symbol.
// Where available, each source line can expand into its disassembly.
func printWebSource(w io.Writer, rpt *Report, obj plugin.ObjTool) error {
	o := rpt.options
	g, err := newGraph(rpt)
	if err != nil {
		return err
	}

	// If the regexp source can be parsed as an address, also match
	// functions that land on that address.
	var address *uint64
	if hex, err := strconv.ParseUint(o.Symbol.String(), 0, 64); err == nil {
		address = &hex
	}

	// Extract interesting symbols from binary files in the profile and
	// classify samples per symbol.
	symbols := symbolsFromBinaries(rpt.prof, g, o.Symbol, address, obj)
	symNodes := nodesPerSymbol(g.ns, symbols)

	// Sort symbols for printing.
	var syms objSymbols
	for s := range symNodes {
		syms = append(syms, s)
	}
	sort.Sort(syms)

	if len(syms) == 0 {
		return fmt.Errorf("no samples found on routines matching: %s", o.Symbol.String())
	}

	printHeader(w, rpt)
	for _, s := range syms {
		name := s.sym.Name[0]
		// Identify sources associated to a symbol by examining
		// symbol samples. Classify samples per source file.
		var sourceFiles nodes
		fileNodes := make(map[string]nodes)
		for _, n := range symNodes[s] {
			if n.info.file == "" {
				continue
			}
			if fileNodes[n.info.file] == nil {
				sourceFiles = append(sourceFiles, n)
			}
			fileNodes[n.info.file] = append(fileNodes[n.info.file], n)
		}

		if len(sourceFiles) == 0 {
			// NOTE(review): this writes to os.Stdout rather than w — confirm intentional.
			fmt.Printf("No source information for %s\n", name)
			continue
		}
		sourceFiles.sort(fileOrder)

		// Print each file associated with this function.
		for _, fl := range sourceFiles {
			filename := fl.info.file
			fns := fileNodes[filename]

			asm := assemblyPerSourceLine(symbols, fns, filename, obj)
			start, end := sourceCoordinates(asm)

			fnodes, path, err := getFunctionSource(name, filename, fns, start, end)
			if err != nil {
				// Source unavailable: fall back to a dummy listing
				// synthesized from the assembly alone.
				fnodes, path = getMissingFunctionSource(filename, asm, start, end)
			}

			flatSum, cumSum := sumNodes(fnodes)
			printFunctionHeader(w, name, path, flatSum, cumSum, rpt)
			for _, fn := range fnodes {
				printFunctionSourceLine(w, fn, asm[fn.info.lineno], rpt)
			}
			printFunctionClosing(w)
		}
	}
	printPageClosing(w)
	return nil
}
// sourceCoordinates returns the lowest and highest line numbers that
// appear as keys in a per-line assembly map. Both results are 0 when
// the map is empty.
func sourceCoordinates(asm map[int]nodes) (start, end int) {
	for line := range asm {
		if start == 0 || line < start {
			start = line
		}
		if end == 0 || line > end {
			end = line
		}
	}
	return start, end
}
// assemblyPerSourceLine disassembles the binary containing a symbol
// and groups the instructions by the line of src they belong to,
// annotated with samples. Returns an empty map when no symbol matches
// or disassembly fails.
func assemblyPerSourceLine(objSyms []*objSymbol, rs nodes, src string, obj plugin.ObjTool) map[int]nodes {
	assembly := make(map[int]nodes)
	// Identify symbol to use for this collection of samples.
	o := findMatchingSymbol(objSyms, rs)
	if o == nil {
		return assembly
	}

	// Extract assembly for matched symbol
	insns, err := obj.Disasm(o.sym.File, o.sym.Start, o.sym.End)
	if err != nil {
		return assembly
	}

	srcBase := filepath.Base(src)
	currentLine := 0
	for _, an := range annotateAssembly(insns, rs, o.base) {
		// Instructions between two markers of src stay attributed to
		// the most recently seen line of src.
		if filepath.Base(an.info.file) == srcBase {
			currentLine = an.info.lineno
		}
		if currentLine != 0 {
			assembly[currentLine] = append(assembly[currentLine], an)
		}
	}
	return assembly
}
// findMatchingSymbol looks for the symbol that corresponds to a set
// of samples: the first object symbol whose file matches a node's
// objfile and whose address range covers the node's (base-adjusted)
// address. Returns nil when none matches.
func findMatchingSymbol(objSyms []*objSymbol, ns nodes) *objSymbol {
	for _, n := range ns {
		for _, o := range objSyms {
			if filepath.Base(o.sym.File) != n.info.objfile {
				continue
			}
			if off := n.info.address - o.base; o.sym.Start <= off && off <= o.sym.End {
				return o
			}
		}
	}
	return nil
}
// printHeader emits the weblist page preamble: the shared HTML/CSS/JS
// header followed by an HTML-escaped legend and the profile total.
func printHeader(w io.Writer, rpt *Report) {
	fmt.Fprintln(w, weblistPageHeader)

	legend := legendLabels(rpt)
	escaped := make([]string, 0, len(legend))
	for _, l := range legend {
		escaped = append(escaped, template.HTMLEscapeString(l))
	}

	fmt.Fprintf(w, `<div class="legend">%s<br>Total: %s</div>`,
		strings.Join(escaped, "<br>\n"),
		rpt.formatValue(rpt.total),
	)
}
// printFunctionHeader prints a function header for a weblist report.
// The <pre> is wired to pprof_toggle_asm (defined in weblistPageHeader)
// so clicking a source line reveals its assembly. name and path are
// HTML-escaped; sample values are preformatted by the report.
func printFunctionHeader(w io.Writer, name, path string, flatSum, cumSum int64, rpt *Report) {
	fmt.Fprintf(w, `<h1>%s</h1>%s
<pre onClick="pprof_toggle_asm()">
Total: %10s %10s (flat, cum) %s
`,
		template.HTMLEscapeString(name), template.HTMLEscapeString(path),
		rpt.formatValue(flatSum), rpt.formatValue(cumSum),
		percentage(cumSum, rpt.total))
}
// printFunctionSourceLine prints a source line and the corresponding assembly.
func printFunctionSourceLine(w io.Writer, fn *node, assembly nodes, rpt *Report) {
	if len(assembly) == 0 {
		// No assembly for this line: plain "nop"-styled output.
		fmt.Fprintf(w,
			"<span class=line> %6d</span> <span class=nop> %10s %10s %s </span>\n",
			fn.info.lineno,
			valueOrDot(fn.flat, rpt), valueOrDot(fn.cum, rpt),
			template.HTMLEscapeString(fn.info.name))
		return
	}

	// "deadsrc" lines are clickable: the asm span that immediately
	// follows is toggled by pprof_toggle_asm (see weblistPageHeader).
	fmt.Fprintf(w,
		"<span class=line> %6d</span> <span class=deadsrc> %10s %10s %s </span>",
		fn.info.lineno,
		valueOrDot(fn.flat, rpt), valueOrDot(fn.cum, rpt),
		template.HTMLEscapeString(fn.info.name))
	fmt.Fprint(w, "<span class=asm>")
	for _, an := range assembly {
		var fileline string
		class := "disasmloc"
		if an.info.file != "" {
			fileline = fmt.Sprintf("%s:%d", template.HTMLEscapeString(an.info.file), an.info.lineno)
			if an.info.lineno != fn.info.lineno {
				// Instruction attributed to a different source line.
				class = "unimportant"
			}
		}
		fmt.Fprintf(w, " %8s %10s %10s %8x: %-48s <span class=%s>%s</span>\n", "",
			valueOrDot(an.flat, rpt), valueOrDot(an.cum, rpt),
			an.info.address,
			template.HTMLEscapeString(an.info.name),
			class,
			template.HTMLEscapeString(fileline))
	}
	fmt.Fprintln(w, "</span>")
}
// printFunctionClosing prints the end of a function in a weblist
// report, closing the <pre> opened by printFunctionHeader.
func printFunctionClosing(w io.Writer) {
	fmt.Fprintln(w, "</pre>")
}

// printPageClosing prints the end of the page in a weblist report.
func printPageClosing(w io.Writer) {
	fmt.Fprintln(w, weblistPageClosing)
}
// getFunctionSource collects the sources of a function from a source
// file and annotates it with the samples in fns. Returns the sources
// as nodes, using the info.name field to hold the source code.
// start/end bound the listing (0 means derive them from fns), widened
// by a margin of context lines on both sides.
// NOTE(review): assumes fns is non-empty — fns[0] would panic
// otherwise; callers group nodes per file before calling.
func getFunctionSource(fun, file string, fns nodes, start, end int) (nodes, string, error) {
	f, file, err := adjustSourcePath(file)
	if err != nil {
		return nil, file, err
	}
	// Fixed: the opened file was never closed, leaking a descriptor
	// per listed function.
	defer f.Close()

	lineNodes := make(map[int]nodes)

	// Collect source coordinates from profile.
	const margin = 5 // Lines before first/after last sample.
	if start == 0 {
		if fns[0].info.startLine != 0 {
			start = fns[0].info.startLine
		} else {
			start = fns[0].info.lineno - margin
		}
	} else {
		start -= margin
	}
	if end == 0 {
		end = fns[0].info.lineno
	}
	end += margin
	// Widen [start, end] so every sampled line fits with its margin,
	// and bucket the sample nodes by line number.
	for _, n := range fns {
		lineno := n.info.lineno
		nodeStart := n.info.startLine
		if nodeStart == 0 {
			nodeStart = lineno - margin
		}
		nodeEnd := lineno + margin
		if nodeStart < start {
			start = nodeStart
		} else if nodeEnd > end {
			end = nodeEnd
		}
		lineNodes[lineno] = append(lineNodes[lineno], n)
	}

	var src nodes
	buf := bufio.NewReader(f)
	lineno := 1
	for {
		line, err := buf.ReadString('\n')
		if err != nil {
			// A final line without '\n' is still processed; any other
			// error (or an empty read at EOF) aborts.
			if line == "" || err != io.EOF {
				return nil, file, err
			}
		}
		if lineno >= start {
			flat, cum := sumNodes(lineNodes[lineno])

			src = append(src, &node{
				info: nodeInfo{
					name:   strings.TrimRight(line, "\n"),
					lineno: lineno,
				},
				flat: flat,
				cum:  cum,
			})
		}
		lineno++
		if lineno > end {
			break
		}
	}
	return src, file, nil
}
// getMissingFunctionSource creates a dummy function body to stand in
// for unavailable source, with one "???" line per source line in
// [start, end] that has assembly samples.
func getMissingFunctionSource(filename string, asm map[int]nodes, start, end int) (nodes, string) {
	var out nodes
	for line := start; line <= end; line++ {
		stmts := asm[line]
		if len(stmts) == 0 {
			continue
		}
		flat, cum := sumNodes(stmts)
		out = append(out, &node{
			info: nodeInfo{name: "???", lineno: line},
			flat: flat,
			cum:  cum,
		})
	}
	return out, filename
}
// adjustSourcePath adjusts the path for a source file by trimming
// known prefixes and searching for the file on all parents of the
// current working dir. Returns the open file, the path that worked,
// and the error from the original open attempt when nothing did.
func adjustSourcePath(path string) (*os.File, string, error) {
	path = trimPath(path)
	f, err := os.Open(path)
	if err == nil {
		return f, path, nil
	}
	// Walk upward from the working directory, retrying path relative
	// to each ancestor. The inner err deliberately shadows the outer
	// one so the original open error survives for the final return.
	if dir, wderr := os.Getwd(); wderr == nil {
		for {
			parent := filepath.Dir(dir)
			if parent == dir {
				break
			}
			if f, err := os.Open(filepath.Join(parent, path)); err == nil {
				return f, filepath.Join(parent, path), nil
			}
			dir = parent
		}
	}
	return nil, path, err
}
// trimPath cleans up a path by removing prefixes commonly found on
// profiles (paths recorded relative to the profiled process's cwd).
func trimPath(path string) string {
	prefixes := []string{
		"/proc/self/cwd/./",
		"/proc/self/cwd/",
	}
	slashed := filepath.ToSlash(path)
	for _, prefix := range prefixes {
		if strings.HasPrefix(slashed, prefix) {
			return filepath.FromSlash(strings.TrimPrefix(slashed, prefix))
		}
	}
	return path
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package report
// weblistPageHeader opens every weblist HTML page: CSS classes used by
// printFunctionSourceLine (line, nop, deadsrc, asm, ...) and the
// pprof_toggle_asm click handler that shows/hides per-line assembly.
const weblistPageHeader = `
<!DOCTYPE html>
<html>
<head>
<title>Pprof listing</title>
<style type="text/css">
body {
font-family: sans-serif;
}
h1 {
font-size: 1.5em;
margin-bottom: 4px;
}
.legend {
font-size: 1.25em;
}
.line {
color: #aaaaaa;
}
.nop {
color: #aaaaaa;
}
.unimportant {
color: #cccccc;
}
.disasmloc {
color: #000000;
}
.deadsrc {
cursor: pointer;
}
.deadsrc:hover {
background-color: #eeeeee;
}
.livesrc {
color: #0000ff;
cursor: pointer;
}
.livesrc:hover {
background-color: #eeeeee;
}
.asm {
color: #008800;
display: none;
}
</style>
<script type="text/javascript">
function pprof_toggle_asm(e) {
var target;
if (!e) e = window.event;
if (e.target) target = e.target;
else if (e.srcElement) target = e.srcElement;
if (target) {
var asm = target.nextSibling;
if (asm && asm.className == "asm") {
asm.style.display = (asm.style.display == "block" ? "" : "block");
e.preventDefault();
return false;
}
}
}
</script>
</head>
<body>
`

// weblistPageClosing terminates a weblist HTML page.
const weblistPageClosing = `
</body>
</html>
`
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package svg provides tools related to handling of SVG files
package svg
import (
"bytes"
"regexp"
"strings"
)
// Patterns locating the pieces of dot's SVG output that Massage
// rewrites: the opening <svg> tag with its size/viewBox attributes,
// the top-level graph group, and the closing </svg> tag.
var (
	viewBox  = regexp.MustCompile(`<svg\s*width="[^"]+"\s*height="[^"]+"\s*viewBox="[^"]+"`)
	graphId  = regexp.MustCompile(`<g id="graph\d"`)
	svgClose = regexp.MustCompile(`</svg>`)
)
// Massage enhances the SVG output from DOT to provide better panning
// inside a web browser. It uses the SVGPan library, which is accessed
// through the svgPan URL; an empty svgPan leaves the SVG unwrapped.
func Massage(in bytes.Buffer, svgPan string) string {
	svg := in.String()

	// dot occasionally fails to quote an ampersand, producing
	// unparsable SVG; patch those up first.
	svg = strings.Replace(svg, "&;", "&amp;;", -1)

	if svgPan == "" {
		return svg
	}

	// Rewrite dot's
	//    <svg width="___" height="___" viewBox="___" xmlns=...>
	//      <g id="graph0" transform="...">...</g>
	//    </svg>
	// into
	//    <svg width="100%" height="100%" xmlns=...>
	//      <script xlink:href="...svgPan..."/>
	//      <g id="viewport" transform="scale(0.5,0.5) translate(0,0)">
	//        <g id="graph0" transform="...">...</g>
	//      </g>
	//    </svg>
	if loc := viewBox.FindStringIndex(svg); loc != nil {
		svg = svg[:loc[0]] + `<svg width="100%" height="100%"` + svg[loc[1]:]
	}
	if loc := graphId.FindStringIndex(svg); loc != nil {
		svg = svg[:loc[0]] +
			`<script xlink:href="` + svgPan + `"/>` +
			`<g id="viewport" transform="scale(0.5,0.5) translate(0,0)">` +
			svg[loc[0]:]
	}
	if loc := svgClose.FindStringIndex(svg); loc != nil {
		svg = svg[:loc[0]] + `</g>` + svg[loc[0]:]
	}
	return svg
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package symbolizer provides a routine to populate a profile with
// symbol, file and line number information. It relies on the
// addr2liner and demangler packages to do the actual work.
package symbolizer
import (
"fmt"
"os"
"path/filepath"
"strings"
"cmd/pprof/internal/plugin"
"cmd/pprof/internal/profile"
)
// Symbolize adds symbol and line number information to all locations
// in a profile, using obj to resolve addresses per mapping. mode
// enables some options to control symbolization; currently only
// "force" is recognized, which overwrites any existing data.
func Symbolize(mode string, prof *profile.Profile, obj plugin.ObjTool, ui plugin.UI) error {
	force := false
	// Disable some mechanisms based on mode string.
	for _, o := range strings.Split(strings.ToLower(mode), ":") {
		switch o {
		case "force":
			force = true
		default:
		}
	}

	mt, err := newMapping(prof, obj, ui, force)
	if err != nil {
		return err
	}
	defer mt.close()

	// Deduplicate functions across locations by value.
	functions := make(map[profile.Function]*profile.Function)
	for _, l := range mt.prof.Location {
		m := l.Mapping
		segment := mt.segments[m]
		if segment == nil {
			// Nothing to do
			continue
		}

		stack, err := segment.SourceLine(l.Address)
		if err != nil || len(stack) == 0 {
			// No answers from addr2line
			continue
		}

		l.Line = make([]profile.Line, len(stack))
		for i, frame := range stack {
			if frame.Func != "" {
				m.HasFunctions = true
			}
			if frame.File != "" {
				m.HasFilenames = true
			}
			if frame.Line != 0 {
				m.HasLineNumbers = true
			}
			f := &profile.Function{
				Name:       frame.Func,
				SystemName: frame.Func,
				Filename:   frame.File,
			}
			if fp := functions[*f]; fp != nil {
				f = fp
			} else {
				functions[*f] = f
				f.ID = uint64(len(mt.prof.Function)) + 1
				mt.prof.Function = append(mt.prof.Function, f)
			}
			l.Line[i] = profile.Line{
				Function: f,
				Line:     int64(frame.Line),
			}
		}

		// Fixed: was len(stack) > 0, which flagged every symbolized
		// mapping as containing inline frames. Only a multi-frame
		// stack for a single address demonstrates inlining.
		if len(stack) > 1 {
			m.HasInlineFrames = true
		}
	}
	return nil
}
// newMapping builds a mappingTable for prof, opening a local object
// file for every mapping that is referenced by at least one location
// and not already symbolized (unless force is set). Failures to
// locate a file are reported through ui and skipped, never fatal.
func newMapping(prof *profile.Profile, obj plugin.ObjTool, ui plugin.UI, force bool) (*mappingTable, error) {
	mt := &mappingTable{
		prof:     prof,
		segments: make(map[*profile.Mapping]plugin.ObjFile),
	}

	// Only mappings actually referenced by a location matter.
	used := make(map[*profile.Mapping]bool)
	for _, l := range prof.Location {
		used[l.Mapping] = true
	}

	for _, m := range prof.Mapping {
		if !used[m] {
			continue
		}
		if !force && (m.HasFunctions || m.HasFilenames || m.HasLineNumbers) {
			// Already symbolized; do not re-symbolize.
			continue
		}
		f, err := locateFile(obj, m.File, m.BuildID, m.Start)
		if err != nil {
			// Move on to other mappings
			ui.PrintErr("Local symbolization failed for ", filepath.Base(m.File), ": ", err)
			continue
		}
		if fid := f.BuildID(); m.BuildID != "" && fid != "" && fid != m.BuildID {
			// Build ID mismatch - ignore.
			f.Close()
			continue
		}
		mt.segments[m] = f
	}

	return mt, nil
}
// locateFile opens a local file for symbolization. It walks the
// directories of $PPROF_BINARY_PATH (default $HOME/pprof/binaries),
// trying $BUILDID/$BASENAME then $BASENAME in each (the build-id form
// only when a build id is available), and finally falls back to the
// original file path.
func locateFile(obj plugin.ObjTool, file, buildID string, start uint64) (plugin.ObjFile, error) {
	searchPath := os.Getenv("PPROF_BINARY_PATH")
	if searchPath == "" {
		// Use $HOME/pprof/binaries as default directory for local symbolization binaries
		searchPath = filepath.Join(os.Getenv("HOME"), "pprof", "binaries")
	}

	// Collect names to search: {buildid/basename, basename}
	baseName := filepath.Base(file)
	names := []string{baseName}
	if buildID != "" {
		names = []string{filepath.Join(buildID, baseName), baseName}
	}

	for _, dir := range filepath.SplitList(searchPath) {
		for nameIndex, name := range names {
			candidate := filepath.Join(dir, name)
			f, err := obj.Open(candidate, start)
			if err != nil {
				continue
			}
			fileBuildID := f.BuildID()
			if buildID == "" || buildID == fileBuildID {
				return f, nil
			}
			f.Close()
			if nameIndex == 0 {
				// If this is the first name, the path includes the build id. Report inconsistency.
				return nil, fmt.Errorf("found file %s with inconsistent build id %s", candidate, fileBuildID)
			}
		}
	}

	// Try original file name
	f, err := obj.Open(file, start)
	if err == nil && buildID != "" {
		if fileBuildID := f.BuildID(); fileBuildID != "" && fileBuildID != buildID {
			// Mismatched build IDs, ignore
			f.Close()
			return nil, fmt.Errorf("mismatched build ids %s != %s", fileBuildID, buildID)
		}
	}
	return f, err
}
// mappingTable contains the mechanisms for symbolization of a
// profile: the profile being symbolized and an open object file for
// each mapping that could be located locally (see newMapping).
type mappingTable struct {
	prof     *profile.Profile
	segments map[*profile.Mapping]plugin.ObjFile
}

// close releases the object files opened for each mapping.
func (mt *mappingTable) close() {
	for _, segment := range mt.segments {
		segment.Close()
	}
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package symbolz symbolizes a profile using the output from the symbolz
// service.
package symbolz
import (
"bytes"
"fmt"
"io"
"net/url"
"regexp"
"strconv"
"strings"
"cmd/pprof/internal/profile"
)
// symbolzRE matches one line of symbolz output: a hex address
// followed by whitespace and the symbol name, e.g. "0x400abc main.init".
var (
	symbolzRE = regexp.MustCompile(`(0x[[:xdigit:]]+)\s+(.*)`)
)
// Symbolize symbolizes profile p by parsing data returned by a
// symbolz handler. syms receives the symbolz query (hex addresses
// separated by '+') and returns the symbolz output in a string. It
// symbolizes all locations based on their addresses, regardless of
// mapping.
func Symbolize(source string, syms func(string, string) ([]byte, error), p *profile.Profile) error {
	if source = symbolz(source, p); source == "" {
		// If the source is not a recognizable URL, do nothing.
		return nil
	}

	// Construct query of addresses to symbolize: only locations that
	// have an address but no line information yet.
	var a []string
	for _, l := range p.Location {
		if l.Address != 0 && len(l.Line) == 0 {
			a = append(a, fmt.Sprintf("%#x", l.Address))
		}
	}

	if len(a) == 0 {
		// No addresses to symbolize.
		return nil
	}
	lines := make(map[uint64]profile.Line)
	functions := make(map[string]*profile.Function)
	// NOTE(review): an error from syms itself is silently ignored and
	// the profile is left unsymbolized — confirm this best-effort
	// behavior is intended.
	if b, err := syms(source, strings.Join(a, "+")); err == nil {
		buf := bytes.NewBuffer(b)
		for {
			l, err := buf.ReadString('\n')

			if err != nil {
				if err == io.EOF {
					break
				}
				return err
			}

			// Each useful line is "0xADDR name"; anything else is skipped.
			if symbol := symbolzRE.FindStringSubmatch(l); len(symbol) == 3 {
				addr, err := strconv.ParseUint(symbol[1], 0, 64)
				if err != nil {
					return fmt.Errorf("unexpected parse failure %s: %v", symbol[1], err)
				}

				// Deduplicate functions by name; a new name gets the
				// next sequential ID and is appended to p.Function.
				name := symbol[2]
				fn := functions[name]
				if fn == nil {
					fn = &profile.Function{
						ID:         uint64(len(p.Function) + 1),
						Name:       name,
						SystemName: name,
					}
					functions[name] = fn
					p.Function = append(p.Function, fn)
				}

				lines[addr] = profile.Line{Function: fn}
			}
		}
	}

	// Attach the parsed lines to every location with a matching address.
	for _, l := range p.Location {
		if line, ok := lines[l.Address]; ok {
			l.Line = []profile.Line{line}
			if l.Mapping != nil {
				l.Mapping.HasFunctions = true
			}
		}
	}
	return nil
}
// symbolz returns the corresponding symbolz source for a profile URL:
// the last path element is replaced by "symbol" (when the remaining
// path ends in "pprof") or "symbolz". Returns "" when source is not a
// URL with a host and a path separator.
func symbolz(source string, p *profile.Profile) string {
	u, err := url.Parse(source)
	if err != nil || u.Host == "" {
		return ""
	}
	last := strings.LastIndex(u.Path, "/")
	if last == -1 {
		return ""
	}
	dir := u.Path[:last]
	if strings.HasSuffix(dir, "pprof") {
		u.Path = dir + "/symbol"
	} else {
		u.Path = dir + "/symbolz"
	}
	return u.String()
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tempfile provides tools to create and delete temporary files
package tempfile
import (
"fmt"
"os"
"path/filepath"
"sync"
)
// New returns an unused filename for output files, of the form
// dir/prefixNNNsuffix with NNN the first free index starting at 001.
// It gives up after 9999 attempts.
func New(dir, prefix, suffix string) (*os.File, error) {
	for index := 1; index < 10000; index++ {
		path := filepath.Join(dir, fmt.Sprintf("%s%03d%s", prefix, index, suffix))
		// O_EXCL makes the existence check and creation atomic; the
		// Stat-then-Create pattern it replaces could race with another
		// process claiming the same name.
		f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
		if err == nil {
			return f, nil
		}
		if !os.IsExist(err) {
			return nil, err
		}
	}
	// Give up
	return nil, fmt.Errorf("could not create file of the form %s%03d%s", prefix, 1, suffix)
}
// tempFiles holds paths registered for deferred deletion, guarded by
// tempFilesMu.
var (
	tempFilesMu sync.Mutex
	tempFiles   []string
)

// DeferDelete marks a file to be deleted by the next call to Cleanup.
func DeferDelete(path string) {
	tempFilesMu.Lock()
	defer tempFilesMu.Unlock()
	tempFiles = append(tempFiles, path)
}

// Cleanup removes any temporary files selected for deferred cleaning;
// removal errors are ignored (best effort).
func Cleanup() {
	tempFilesMu.Lock()
	defer tempFilesMu.Unlock()
	for _, f := range tempFiles {
		os.Remove(f)
	}
	tempFiles = nil
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"debug/gosym"
"flag"
"fmt"
"os"
"regexp"
"strings"
"cmd/internal/objfile"
"cmd/pprof/internal/commands"
"cmd/pprof/internal/driver"
"cmd/pprof/internal/fetch"
"cmd/pprof/internal/plugin"
"cmd/pprof/internal/profile"
"cmd/pprof/internal/symbolizer"
"cmd/pprof/internal/symbolz"
)
// main wires the Go object-file tooling into the generic pprof driver.
func main() {
	var extra map[string]*commands.Command // no added Go-specific commands
	if err := driver.PProf(flags{}, fetch.Fetcher, symbolize, new(objTool), plugin.StandardUI(), extra); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
	}
}
// symbolize attempts to symbolize profile p.
// If the source is a local binary, it tries using symbolizer and obj.
// If the source is a URL, it fetches symbol information using symbolz.
// mode is a ':'-separated option list; "none"/"no" disables
// symbolization entirely.
func symbolize(mode, source string, p *profile.Profile, obj plugin.ObjTool, ui plugin.UI) error {
	remote, local := true, true
	for _, o := range strings.Split(strings.ToLower(mode), ":") {
		switch o {
		case "none", "no":
			return nil
		case "local":
			remote, local = false, true
		case "remote":
			remote, local = true, false
		case "", "force":
			// -force is recognized by symbolizer.Symbolize; nothing to do here.
		default:
			ui.PrintErr("ignoring unrecognized symbolization option: " + mode)
			ui.PrintErr("expecting -symbolize=[local|remote|none][:force]")
		}
	}

	var err error
	if local {
		// Symbolize using binutils.
		if err = symbolizer.Symbolize(mode, p, obj, ui); err == nil {
			return nil
		}
	}
	if remote {
		err = symbolz.Symbolize(source, fetch.PostURL, p)
	}
	return err
}
// flags implements the driver.FlagPackage interface using the builtin flag package.
type flags struct {
}

// Bool registers a boolean flag with the given name, default, and usage.
func (flags) Bool(name string, def bool, usage string) *bool {
	return flag.Bool(name, def, usage)
}

// Int registers an integer flag.
func (flags) Int(name string, def int, usage string) *int {
	return flag.Int(name, def, usage)
}

// Float64 registers a float64 flag.
func (flags) Float64(name string, def float64, usage string) *float64 {
	return flag.Float64(name, def, usage)
}

// String registers a string flag.
func (flags) String(name, def, usage string) *string {
	return flag.String(name, def, usage)
}

// Parse parses the command line, prints usage when no arguments
// remain, and returns the non-flag arguments.
func (flags) Parse(usage func()) []string {
	flag.Usage = usage
	flag.Parse()
	args := flag.Args()
	if len(args) == 0 {
		usage()
	}
	return args
}

// ExtraUsage returns no additional usage text.
func (flags) ExtraUsage() string {
	return ""
}
// objTool implements plugin.ObjTool using Go libraries
// (instead of invoking GNU binutils).
type objTool struct{}

// Open opens name with the Go objfile package and wraps it as a
// plugin.ObjFile. start is ignored: there is no shared-library
// relocation support.
func (*objTool) Open(name string, start uint64) (plugin.ObjFile, error) {
	of, err := objfile.Open(name)
	if err != nil {
		return nil, err
	}
	return &file{name: name, file: of}, nil
}

// Demangle is a no-op: Go symbols are not mangled, so the returned
// map is always empty.
func (*objTool) Demangle(names []string) (map[string]string, error) {
	return map[string]string{}, nil
}

// Disasm is not implemented for the Go toolchain yet.
func (*objTool) Disasm(file string, start, end uint64) ([]plugin.Inst, error) {
	return nil, fmt.Errorf("disassembly not supported")
}

// SetConfig is a no-op. config normally names the binaries to invoke,
// which this implementation never does.
func (*objTool) SetConfig(config string) {
}
// file implements plugin.ObjFile using Go libraries
// (instead of invoking GNU binutils).
// A file represents a single executable being analyzed.
type file struct {
	name string        // path passed to Open
	sym  []objfile.Sym // symbol table, loaded lazily by Symbols
	file *objfile.File // underlying object file handle
	pcln *gosym.Table  // PC-to-line table, loaded lazily by SourceLine
}
// Name returns the path originally passed to Open.
func (f *file) Name() string {
	return f.name
}

// Base returns the load base address, always 0 here.
func (f *file) Base() uint64 {
	// No support for shared libraries.
	return 0
}

// BuildID returns the binary's build ID, always empty here.
func (f *file) BuildID() string {
	// No support for build ID.
	return ""
}
// SourceLine resolves addr to a single source frame using the Go
// PC-line table, which is loaded lazily on first use. Returns an
// error when addr has no line information.
func (f *file) SourceLine(addr uint64) ([]plugin.Frame, error) {
	if f.pcln == nil {
		tab, err := f.file.PCLineTable()
		if err != nil {
			return nil, err
		}
		f.pcln = tab
	}
	src, line, fn := f.pcln.PCToLine(addr)
	if fn == nil {
		return nil, fmt.Errorf("no line information for PC=%#x", addr)
	}
	return []plugin.Frame{{
		Func: fn.Name,
		File: src,
		Line: line,
	}}, nil
}
// Symbols returns the symbols matching r (nil matches all) and, when
// addr is non-zero, containing addr in their [Addr, Addr+Size) range.
// The symbol table is loaded lazily on first use.
func (f *file) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, error) {
	if f.sym == nil {
		syms, err := f.file.Symbols()
		if err != nil {
			return nil, err
		}
		f.sym = syms
	}
	var matches []*plugin.Sym
	for _, s := range f.sym {
		if r != nil && !r.MatchString(s.Name) {
			continue
		}
		if addr != 0 && !(s.Addr <= addr && addr < s.Addr+uint64(s.Size)) {
			continue
		}
		matches = append(matches, &plugin.Sym{
			Name:  []string{s.Name},
			File:  f.name,
			Start: s.Addr,
			End:   s.Addr + uint64(s.Size) - 1,
		})
	}
	return matches, nil
}
// Close closes the underlying object file.
// NOTE(review): the underlying Close error is dropped — confirm
// callers do not need it.
func (f *file) Close() error {
	f.file.Close()
	return nil
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment