
Source file src/cmd/covdata/metamerge.go

Documentation: cmd/covdata

     1  // Copyright 2022 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package main
     6  
     7  // This file contains functions and APIs that support merging of
     8  // meta-data information. It helps implement the "merge", "subtract",
     9  // and "intersect" subcommands.
    10  
    11  import (
    12  	"crypto/md5"
    13  	"fmt"
    14  	"internal/coverage"
    15  	"internal/coverage/calloc"
    16  	"internal/coverage/cmerge"
    17  	"internal/coverage/decodecounter"
    18  	"internal/coverage/decodemeta"
    19  	"internal/coverage/encodecounter"
    20  	"internal/coverage/encodemeta"
    21  	"internal/coverage/slicewriter"
    22  	"io"
    23  	"os"
    24  	"path/filepath"
    25  	"sort"
    26  	"time"
    27  	"unsafe"
    28  )
    29  
    30  // metaMerge provides state and methods to help manage the process
    31  // of selecting or merging meta-data files. There are three cases
    32  // of interest here: the "-pcombine" flag provided by merge, the
    33  // "-pkg" option accepted by all of merge/subtract/intersect, and
    34  // a vanilla merge with no package selection.
    35  //
    36  // In the -pcombine case, we're essentially glomming together all the
    37  // meta-data for all packages and all functions, meaning that
    38  // everything we see in a given package needs to be added into the
    39  // meta-data file builder; we emit a single meta-data file at the end
    40  // of the run.
    41  //
    42  // In the -pkg case, we will typically emit a single meta-data file
    43  // per input pod, where that new meta-data file contains entries for
    44  // just the selected packages.
    45  //
    46  // In the third case (vanilla merge with no combining or package
    47  // selection) we can carry over meta-data files without touching them
    48  // at all (only counter data files will be merged).
    49  type metaMerge struct {
    50  	calloc.BatchCounterAlloc
    51  	cmerge.Merger
    52  	// maps package import path to package state
    53  	pkm map[string]*pkstate
    54  	// list of packages
    55  	pkgs []*pkstate
    56  	// current package state
    57  	p *pkstate
    58  	// current pod state
    59  	pod *podstate
    60  	// counter data file osargs/goos/goarch state
    61  	astate *argstate
    62  }
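
As a point of reference, the three cases described above correspond to
different invocations of the covdata tool. The following standalone sketch
(not part of this file) shows how each mode might be reached, assuming the
documented -i/-o flags plus the -pkg/-pcombine options named above; the
directory and package paths are placeholders.

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	invocations := [][]string{
		// Vanilla merge: meta-data files are carried over untouched.
		{"tool", "covdata", "merge", "-i=dir1,dir2", "-o=out"},
		// Select merge: per-pod meta-data rewritten to cover only the chosen packages.
		{"tool", "covdata", "merge", "-i=dir1,dir2", "-o=out", "-pkg=example.com/mypkg"},
		// Combined merge: a single meta-data file for everything, emitted at end of run.
		{"tool", "covdata", "merge", "-i=dir1,dir2", "-o=out", "-pcombine"},
	}
	for _, args := range invocations {
		out, err := exec.Command("go", args...).CombinedOutput()
		fmt.Printf("go %v: err=%v\n%s", args, err, out)
	}
}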
    63  
    64  // pkstate holds per-package state accumulated during a merge.
    65  type pkstate struct {
    66  	// index of package within meta-data file.
    67  	pkgIdx uint32
    68  	// this maps function index within the package to counter data payload
    69  	ctab map[uint32]decodecounter.FuncPayload
    70  	// pointer to meta-data blob for package
    71  	mdblob []byte
    72  	// filled in only for -pcombine merges
    73  	*pcombinestate
    74  }
    75  
    76  type podstate struct {
    77  	pmm      map[pkfunc]decodecounter.FuncPayload // merged counter payloads, keyed by pkg/func index
    78  	mdf      string                               // name of the meta-data file for this pod
    79  	mfr      *decodemeta.CoverageMetaFileReader   // file-level reader for the meta-data file
    80  	fileHash [16]byte                             // hash of the meta-data file contents
    81  }
    82  
    83  type pkfunc struct {
    84  	pk, fcn uint32
    85  }
    86  
    87  // pcombinestate holds additional per-package state used only for "-pcombine" merges.
    88  type pcombinestate struct {
    89  	// Meta-data builder for the package.
    90  	cmdb *encodemeta.CoverageMetaDataBuilder
    91  	// Maps function meta-data hash to new function index in the
    92  	// new version of the package we're building.
    93  	ftab map[[16]byte]uint32
    94  }
    95  
    96  func newMetaMerge() *metaMerge {
    97  	return &metaMerge{
    98  		pkm:    make(map[string]*pkstate),
    99  		astate: &argstate{},
   100  	}
   101  }
   102  
   103  func (mm *metaMerge) visitMetaDataFile(mdf string, mfr *decodemeta.CoverageMetaFileReader) {
   104  	dbgtrace(2, "visitMetaDataFile(mdf=%s)", mdf)
   105  
   106  	// Record meta-data file name.
   107  	mm.pod.mdf = mdf
   108  	// Keep a pointer to the file-level reader.
   109  	mm.pod.mfr = mfr
   110  	// Record file hash.
   111  	mm.pod.fileHash = mfr.FileHash()
   112  	// Counter mode and granularity -- detect and record clashes here.
   113  	newgran := mfr.CounterGranularity()
   114  	newmode := mfr.CounterMode()
   115  	if err := mm.SetModeAndGranularity(mdf, newmode, newgran); err != nil {
   116  		fatal("%v", err)
   117  	}
   118  }
   119  
   120  func (mm *metaMerge) beginCounterDataFile(cdr *decodecounter.CounterDataReader) {
   121  	state := argvalues{
   122  		osargs: cdr.OsArgs(),
   123  		goos:   cdr.Goos(),
   124  		goarch: cdr.Goarch(),
   125  	}
   126  	mm.astate.Merge(state)
   127  }
   128  
   129  func copyMetaDataFile(inpath, outpath string) {
   130  	inf, err := os.Open(inpath)
   131  	if err != nil {
   132  		fatal("opening input meta-data file %s: %v", inpath, err)
   133  	}
   134  	defer inf.Close()
   135  
   136  	fi, err := inf.Stat()
   137  	if err != nil {
   138  		fatal("accessing input meta-data file %s: %v", inpath, err)
   139  	}
   140  
   141  	outf, err := os.OpenFile(outpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode())
   142  	if err != nil {
   143  		fatal("opening output meta-data file %s: %v", outpath, err)
   144  	}
   145  
   146  	_, err = io.Copy(outf, inf)
   147  	outf.Close()
   148  	if err != nil {
   149  		fatal("writing output meta-data file %s: %v", outpath, err)
   150  	}
   151  }
   152  
   153  func (mm *metaMerge) beginPod() {
   154  	mm.pod = &podstate{
   155  		pmm: make(map[pkfunc]decodecounter.FuncPayload),
   156  	}
   157  }
   158  
   159  // endPod handles actions needed when we're done visiting all of
   160  // the things in a pod -- counter files and meta-data file. There are
   161  // three cases of interest here:
   162  //
   163  // Case 1: in an unconditional merge (we're not selecting a specific set of
   164  // packages using "-pkg", and the "-pcombine" option is not in use),
   165  // we can simply copy over the meta-data file from input to output.
   166  //
   167  // Case 2: if this is a select merge ("-pkg" is in effect), then at
   168  // this point we write out a new, smaller meta-data file that includes
   169  // only the packages of interest, and we also emit a merged counter
   170  // data file.
   171  //
   172  // Case 3: if "-pcombine" is in effect, we don't write anything at
   173  // this point (all writes will happen at the end of the run).
   174  func (mm *metaMerge) endPod(pcombine bool) {
   175  	if pcombine {
   176  		// Just clear out the pod data; we'll do all the
   177  		// heavy lifting at the end.
   178  		mm.pod = nil
   179  		return
   180  	}
   181  
   182  	finalHash := mm.pod.fileHash
   183  	if matchpkg != nil {
   184  		// Emit modified meta-data file for this pod.
   185  		finalHash = mm.emitMeta(*outdirflag, pcombine)
   186  	} else {
   187  		// Copy meta-data file for this pod to the output directory.
   188  		inpath := mm.pod.mdf
   189  		mdfbase := filepath.Base(mm.pod.mdf)
   190  		outpath := filepath.Join(*outdirflag, mdfbase)
   191  		copyMetaDataFile(inpath, outpath)
   192  	}
   193  
   194  	// Emit accumulated counter data for this pod.
   195  	mm.emitCounters(*outdirflag, finalHash)
   196  
   197  	// Reset package state.
   198  	mm.pkm = make(map[string]*pkstate)
   199  	mm.pkgs = nil
   200  	mm.pod = nil
   201  
   202  	// Reset counter mode and granularity
   203  	mm.ResetModeAndGranularity()
   204  }
   205  
   206  // emitMeta encodes and writes out a new coverage meta-data file,
   207  // either for a single pod when "-pkg" package selection is in effect,
   208  // or for the entire run when merging with the "-pcombine" flag.
   209  func (mm *metaMerge) emitMeta(outdir string, pcombine bool) [16]byte {
   210  	fh := md5.New()
   211  	blobs := [][]byte{}
   212  	tlen := uint64(unsafe.Sizeof(coverage.MetaFileHeader{}))
   213  	for _, p := range mm.pkgs {
   214  		var blob []byte
   215  		if pcombine {
   216  			mdw := &slicewriter.WriteSeeker{}
   217  			p.cmdb.Emit(mdw)
   218  			blob = mdw.BytesWritten()
   219  		} else {
   220  			blob = p.mdblob
   221  		}
   222  		ph := md5.Sum(blob)
   223  		blobs = append(blobs, blob)
   224  		if _, err := fh.Write(ph[:]); err != nil {
   225  			panic(fmt.Sprintf("internal error: md5 sum failed: %v", err))
   226  		}
   227  		tlen += uint64(len(blob))
   228  	}
   229  	var finalHash [16]byte
   230  	fhh := fh.Sum(nil)
   231  	copy(finalHash[:], fhh)
   232  
   233  	// Open meta-file for writing.
   234  	fn := fmt.Sprintf("%s.%x", coverage.MetaFilePref, finalHash)
   235  	fpath := filepath.Join(outdir, fn)
   236  	mf, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
   237  	if err != nil {
   238  		fatal("unable to open output meta-data file %s: %v", fpath, err)
   239  	}
   240  
   241  	// Encode and write.
   242  	mfw := encodemeta.NewCoverageMetaFileWriter(fpath, mf)
   243  	err = mfw.Write(finalHash, blobs, mm.Mode(), mm.Granularity())
   244  	if err != nil {
   245  		fatal("error writing %s: %v\n", fpath, err)
   246  	}
   247  	return finalHash
   248  }
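
The final hash computed in emitMeta is a hash of per-package hashes: each
package blob is summed with MD5, the digests are fed into a second MD5, and
the result both names the output file and is handed to the meta-file writer.
A minimal standalone sketch of that scheme follows; the "covmeta" prefix is
an assumption based on the documented meta-data file naming (the real code
uses coverage.MetaFilePref).

package main

import (
	"crypto/md5"
	"fmt"
)

// combinedHash hashes each blob individually, then hashes the
// concatenation of those digests to produce a file-level hash,
// mirroring the loop in emitMeta above.
func combinedHash(blobs [][]byte) [16]byte {
	fh := md5.New()
	for _, blob := range blobs {
		ph := md5.Sum(blob)
		fh.Write(ph[:])
	}
	var finalHash [16]byte
	copy(finalHash[:], fh.Sum(nil))
	return finalHash
}

func main() {
	blobs := [][]byte{[]byte("meta-data for pkg A"), []byte("meta-data for pkg B")}
	fmt.Printf("covmeta.%x\n", combinedHash(blobs))
}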
   249  
   250  func (mm *metaMerge) emitCounters(outdir string, metaHash [16]byte) {
   251  	// Open output file. The file naming scheme is intended to mimic
   252  	// that used when running a coverage-instrumented binary, for
   253  	// consistency (however the process ID is not meaningful here, so
   254  	// use a value of zero).
   255  	var dummyPID int
   256  	fn := fmt.Sprintf(coverage.CounterFileTempl, coverage.CounterFilePref, metaHash, dummyPID, time.Now().UnixNano())
   257  	fpath := filepath.Join(outdir, fn)
   258  	cf, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
   259  	if err != nil {
   260  		fatal("opening counter data file %s: %v", fpath, err)
   261  	}
   262  	defer func() {
   263  		if err := cf.Close(); err != nil {
   264  			fatal("error closing output counter data file %s: %v", fpath, err)
   265  		}
   266  	}()
   267  
   268  	args := mm.astate.ArgsSummary()
   269  	cfw := encodecounter.NewCoverageDataWriter(cf, coverage.CtrULeb128)
   270  	if err := cfw.Write(metaHash, args, mm); err != nil {
   271  		fatal("counter file write failed: %v", err)
   272  	}
   273  	mm.astate = &argstate{}
   274  }
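
For reference, a standalone sketch of the counter data file name produced
above. The "covcounters" prefix and the "<prefix>.<metahash>.<pid>.<nanotime>"
layout are assumptions based on the documented naming scheme; the real code
uses coverage.CounterFilePref and coverage.CounterFileTempl, and substitutes
a process ID of zero as noted in the comment.

package main

import (
	"fmt"
	"time"
)

func main() {
	// In covdata this is the hash of the emitted (or copied) meta-data file.
	var metaHash [16]byte
	dummyPID := 0
	fn := fmt.Sprintf("%s.%x.%d.%d", "covcounters", metaHash, dummyPID, time.Now().UnixNano())
	fmt.Println(fn)
}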
   275  
   276  // VisitFuncs is used while writing the counter data files; it
   277  // implements the 'VisitFuncs' method required by the
   278  // encodecounter.CounterVisitor interface.
   279  func (mm *metaMerge) VisitFuncs(f encodecounter.CounterVisitorFn) error {
   280  	if *verbflag >= 4 {
   281  		fmt.Printf("counterVisitor invoked\n")
   282  	}
   283  	// For each package, for each function, construct counter
   284  	// array and then call "f" on it.
   285  	for pidx, p := range mm.pkgs {
   286  		fids := make([]int, 0, len(p.ctab))
   287  		for fid := range p.ctab {
   288  			fids = append(fids, int(fid))
   289  		}
   290  		sort.Ints(fids)
   291  		if *verbflag >= 4 {
   292  			fmt.Printf("fids for pk=%d: %+v\n", pidx, fids)
   293  		}
   294  		for _, fid := range fids {
   295  			fp := p.ctab[uint32(fid)]
   296  			if *verbflag >= 4 {
   297  				fmt.Printf("counter write for pk=%d fid=%d len(ctrs)=%d\n", pidx, fid, len(fp.Counters))
   298  			}
   299  			if err := f(uint32(pidx), uint32(fid), fp.Counters); err != nil {
   300  				return err
   301  			}
   302  		}
   303  	}
   304  	return nil
   305  }
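
VisitFuncs iterates over a map keyed by function index, so it sorts the keys
first to keep the emitted counter data deterministic. A minimal standalone
sketch of that pattern, with a plain map standing in for a pkstate.ctab:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Function index -> counter values.
	ctab := map[uint32][]uint32{3: {1, 0, 2}, 1: {5}, 2: {0, 7}}
	fids := make([]int, 0, len(ctab))
	for fid := range ctab {
		fids = append(fids, int(fid))
	}
	// Go map iteration order is randomized; sorting makes the output stable.
	sort.Ints(fids)
	for _, fid := range fids {
		fmt.Printf("fid=%d counters=%v\n", fid, ctab[uint32(fid)])
	}
}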
   306  
   307  func (mm *metaMerge) visitPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32, pcombine bool) {
   308  	p, ok := mm.pkm[pd.PackagePath()]
   309  	if !ok {
   310  		p = &pkstate{
   311  			pkgIdx: uint32(len(mm.pkgs)),
   312  		}
   313  		mm.pkgs = append(mm.pkgs, p)
   314  		mm.pkm[pd.PackagePath()] = p
   315  		if pcombine {
   316  			p.pcombinestate = new(pcombinestate)
   317  			cmdb, err := encodemeta.NewCoverageMetaDataBuilder(pd.PackagePath(), pd.PackageName(), pd.ModulePath())
   318  			if err != nil {
   319  				fatal("error creating meta-data builder: %v", err)
   320  			}
   321  			dbgtrace(2, "install new pkm entry for package %s pk=%d", pd.PackagePath(), pkgIdx)
   322  			p.cmdb = cmdb
   323  			p.ftab = make(map[[16]byte]uint32)
   324  		} else {
   325  			var err error
   326  			p.mdblob, err = mm.pod.mfr.GetPackagePayload(pkgIdx, nil)
   327  			if err != nil {
   328  				fatal("error extracting package %d payload from %s: %v",
   329  					pkgIdx, mm.pod.mdf, err)
   330  			}
   331  		}
   332  		p.ctab = make(map[uint32]decodecounter.FuncPayload)
   333  	}
   334  	mm.p = p
   335  }
   336  
   337  func (mm *metaMerge) visitFuncCounterData(data decodecounter.FuncPayload) {
   338  	key := pkfunc{pk: data.PkgIdx, fcn: data.FuncIdx}
   339  	val := mm.pod.pmm[key]
   340  	// FIXME: in theory either A) len(val.Counters) is zero, or B)
   341  	// the two lengths are equal. Assert if not? Of course, we could
   342  	// see odd stuff if there is source file skew.
   343  	if *verbflag > 4 {
   344  		fmt.Printf("visit pk=%d fid=%d len(counters)=%d\n", data.PkgIdx, data.FuncIdx, len(data.Counters))
   345  	}
   346  	if len(val.Counters) < len(data.Counters) {
   347  		t := val.Counters
   348  		val.Counters = mm.AllocateCounters(len(data.Counters))
   349  		copy(val.Counters, t)
   350  	}
   351  	err, overflow := mm.MergeCounters(val.Counters, data.Counters)
   352  	if err != nil {
   353  		fatal("%v", err)
   354  	}
   355  	if overflow {
   356  		warn("uint32 overflow during counter merge")
   357  	}
   358  	mm.pod.pmm[key] = val
   359  }
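
A simplified standalone stand-in for what the MergeCounters calls in this
file do for "count"-mode counters: element-wise addition in 64 bits with
saturation and an overflow report. This is a sketch only; the real
cmerge.Merger also handles other counter modes and validates its inputs.

package main

import (
	"fmt"
	"math"
)

// mergeCounters adds src into dst element-wise, saturating at the
// uint32 maximum and reporting whether any element overflowed.
func mergeCounters(dst, src []uint32) (overflow bool) {
	for i := range src {
		sum := uint64(dst[i]) + uint64(src[i])
		if sum > math.MaxUint32 {
			sum = math.MaxUint32
			overflow = true
		}
		dst[i] = uint32(sum)
	}
	return overflow
}

func main() {
	dst := []uint32{1, math.MaxUint32}
	src := []uint32{2, 5}
	ov := mergeCounters(dst, src)
	fmt.Println("merged:", dst, "overflow:", ov)
}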
   360  
   361  func (mm *metaMerge) visitFunc(pkgIdx uint32, fnIdx uint32, fd *coverage.FuncDesc, verb string, pcombine bool) {
   362  	if *verbflag >= 3 {
   363  		fmt.Printf("visit pk=%d fid=%d func %s\n", pkgIdx, fnIdx, fd.Funcname)
   364  	}
   365  
   366  	var counters []uint32
   367  	key := pkfunc{pk: pkgIdx, fcn: fnIdx}
   368  	v, haveCounters := mm.pod.pmm[key]
   369  	if haveCounters {
   370  		counters = v.Counters
   371  	}
   372  
   373  	if pcombine {
   374  		// If the merge is running in "combine programs" mode, then hash
   375  		// the function and look it up in the package ftab to see if we've
   376  		// encountered it before. If we haven't, then register it with the
   377  		// meta-data builder.
   378  		fnhash := encodemeta.HashFuncDesc(fd)
   379  		gfidx, ok := mm.p.ftab[fnhash]
   380  		if !ok {
   381  			// We haven't seen this function before; add it to
   382  			// the meta-data.
   383  			gfidx = uint32(mm.p.cmdb.AddFunc(*fd))
   384  			mm.p.ftab[fnhash] = gfidx
   385  			if *verbflag >= 3 {
   386  				fmt.Printf("new meta entry for fn %s fid=%d\n", fd.Funcname, gfidx)
   387  			}
   388  		}
   389  		fnIdx = gfidx
   390  	}
   391  	if !haveCounters {
   392  		return
   393  	}
   394  
   395  	// Install counters in package ctab.
   396  	gfp, ok := mm.p.ctab[fnIdx]
   397  	if ok {
   398  		if verb == "subtract" || verb == "intersect" {
   399  			panic("should never see this for intersect/subtract")
   400  		}
   401  		if *verbflag >= 3 {
   402  			fmt.Printf("counter merge for %s fidx=%d\n", fd.Funcname, fnIdx)
   403  		}
   404  		// Merge.
   405  		err, overflow := mm.MergeCounters(gfp.Counters, counters)
   406  		if err != nil {
   407  			fatal("%v", err)
   408  		}
   409  		if overflow {
   410  			warn("uint32 overflow during counter merge")
   411  		}
   412  		mm.p.ctab[fnIdx] = gfp
   413  	} else {
   414  		if *verbflag >= 3 {
   415  			fmt.Printf("null merge for %s fidx %d\n", fd.Funcname, fnIdx)
   416  		}
   417  		gfp := v
   418  		gfp.PkgIdx = mm.p.pkgIdx
   419  		gfp.FuncIdx = fnIdx
   420  		mm.p.ctab[fnIdx] = gfp
   421  	}
   422  }
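
The "-pcombine" path above dedupes functions across inputs by hashing their
meta-data and reusing the index assigned when a function was first seen. A
small standalone sketch of that table-building pattern, with plain strings
standing in for coverage.FuncDesc and encodemeta.HashFuncDesc:

package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	ftab := make(map[[16]byte]uint32) // function hash -> assigned index
	var nextIdx uint32
	for _, fn := range []string{"pkg.A", "pkg.B", "pkg.A"} {
		h := md5.Sum([]byte(fn))
		idx, ok := ftab[h]
		if !ok {
			idx = nextIdx
			ftab[h] = idx
			nextIdx++
		}
		fmt.Printf("%s -> index %d (seen before: %v)\n", fn, idx, ok)
	}
}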
   423  
