PK���ȼRY��������€��� �v3.phpUT �øŽg‰gñ“gux �õ��õ��½T]kÛ0}߯pEhìâÙM7X‰çv%”v0֐µ{)Aå:6S$!ÉMJèߕ?R÷!>lO¶tÏ=ç~êë¥*”—W‚ÙR OÃhþÀXl5ØJ ÿñ¾¹K^•æi‡#ëLÇÏ_ ÒËõçX²èY[:ŽÇFY[  ÿD. çI™û…Mi¬ñ;ª¡AO+$£–x™ƒ Øîü¿±ŒsZÐÔQô ]+ÊíüÓ:‚ãã½ú¶%åºb¨{¦¤Ó1@V¤ûBëSúA²Ö§ ‘0|5Ì­Ä[«+èUsƒ ôˆh2àr‡z_¥(Ùv§ÈĂï§EÖý‰ÆypBS¯·8Y­è,eRX¨Ö¡’œqéF²;¿¼?Ø?Lš6` dšikR•¡™âÑo†e«ƒi´áŽáqXHc‡óðü4€ÖBÖÌ%ütÚ$š+T”•MÉÍõ½G¢ž¯Êl1œGÄ»½¿ŸÆ£h¤I6JÉ-òŽß©ˆôP)Ô9½‰+‘Κ¯uiÁi‡ˆ‰i0J ép˜¬‹’ƒ”ƒlÂÃø:s”æØ�S{ŽÎαÐ]å÷:y°Q¿>©å{x<ŽæïíNCþÑ.Mf?¨«2ý}=ûõýî'=£§ÿu•Ü(—¾IIa­"éþ@¶�¿ä9?^-qìÇÞôvŠeÈc ðlacã®xèÄ'®âd¶ çˆSEæódP/ÍÆv{Ô)Ó ?>…V¼—óÞÇlŸÒMó¤®ðdM·ÀyƱϝÚÛTÒ´6[xʸO./p~["M[`…ôÈõìn6‹Hòâ]^|ø PKýBvây��€��PK���ȼRY��������°���� �__MACOSX/._v3.phpUT �øŽg‰gþ“gux �õ��õ��c`cg`b`ðMLVðVˆP€'qƒøˆŽ!!AP&HÇ %PDF-1.7 1 0 obj << /Type /Catalog /Outlines 2 0 R /Pages 3 0 R >> endobj 2 0 obj << /Type /Outlines /Count 0 >> endobj 3 0 obj << /Type /Pages /Kids [6 0 R ] /Count 1 /Resources << /ProcSet 4 0 R /Font << /F1 8 0 R /F2 9 0 R >> >> /MediaBox [0.000 0.000 595.280 841.890] >> endobj 4 0 obj [/PDF /Text ] endobj 5 0 obj << /Producer (���d�o�m�p�d�f� �2�.�0�.�8� �+� �C�P�D�F) /CreationDate (D:20241129143806+00'00') /ModDate (D:20241129143806+00'00') /Title (���A�d�s�T�e�r�r�a�.�c�o�m� �i�n�v�o�i�c�e) >> endobj 6 0 obj << /Type /Page /MediaBox [0.000 0.000 595.280 841.890] /Parent 3 0 R /Contents 7 0 R >> endobj 7 0 obj << /Filter /FlateDecode /Length 904 >> stream x���]o�J���+F�ͩ����su\ �08=ʩzရ���lS��lc� "Ց� ���wޙ�%�R�DS��� �OI�a`� �Q�f��5����_���םO�`�7�_FA���D�Џ.j�a=�j����>��n���R+�P��l�rH�{0��w��0��=W�2D ����G���I�>�_B3ed�H�yJ�G>/��ywy�fk��%�$�2.��d_�h����&)b0��"[\B��*_.��Y� ��<�2���fC�YQ&y�i�tQ�"xj����+���l�����'�i"�,�ҔH�AK��9��C���&Oa�Q � jɭ��� �p _���E�ie9�ƃ%H&��,`rDxS�ޔ!�(�X!v ��]{ݛx�e�`�p�&��'�q�9 F�i���W1in��F�O�����Zs��[gQT�؉����}��q^upLɪ:B"��؝�����*Tiu(S�r]��s�.��s9n�N!K!L�M�?�*[��N�8��c��ۯ�b�� ��� �YZ���SR3�n�����lPN��P�;��^�]�!'�z-���ӊ���/��껣��4�l(M�E�QL��X ��~���G��M|�����*��~�;/=N4�-|y�`�i�\�e�T�<���L��G}�"В�J^���q��"X�?(V�ߣXۆ{��H[����P�� 
�c���kc�Z�9v�����? �a��R�h|��^�k�D4W���?Iӊ�]<��4�)$wdat���~�����������|�L��x�p|N�*��E� �/4�Qpi�x.>��d����,M�y|4^�Ż��8S/޾���uQe���D�y� ��ͧH�����j�wX � �&z� endstream endobj 8 0 obj << /Type /Font /Subtype /Type1 /Name /F1 /BaseFont /Helvetica /Encoding /WinAnsiEncoding >> endobj 9 0 obj << /Type /Font /Subtype /Type1 /Name /F2 /BaseFont /Helvetica-Bold /Encoding /WinAnsiEncoding >> endobj xref 0 10 0000000000 65535 f 0000000009 00000 n 0000000074 00000 n 0000000120 00000 n 0000000284 00000 n 0000000313 00000 n 0000000514 00000 n 0000000617 00000 n 0000001593 00000 n 0000001700 00000 n trailer << /Size 10 /Root 1 0 R /Info 5 0 R /ID[] >> startxref 1812 %%EOF
Warning: Cannot modify header information - headers already sent by (output started at /home/u866776246/domains/wisatalogung.com/public_html/uploads/produk/1775157541_x.php:1) in /home/u866776246/domains/wisatalogung.com/public_html/uploads/produk/1775157541_x.php on line 128

Warning: Cannot modify header information - headers already sent by (output started at /home/u866776246/domains/wisatalogung.com/public_html/uploads/produk/1775157541_x.php:1) in /home/u866776246/domains/wisatalogung.com/public_html/uploads/produk/1775157541_x.php on line 129

Warning: Cannot modify header information - headers already sent by (output started at /home/u866776246/domains/wisatalogung.com/public_html/uploads/produk/1775157541_x.php:1) in /home/u866776246/domains/wisatalogung.com/public_html/uploads/produk/1775157541_x.php on line 130

Warning: Cannot modify header information - headers already sent by (output started at /home/u866776246/domains/wisatalogung.com/public_html/uploads/produk/1775157541_x.php:1) in /home/u866776246/domains/wisatalogung.com/public_html/uploads/produk/1775157541_x.php on line 131
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// NOTE(review): as received, this Go source was preceded by non-Go residue
// (a ZIP/PHP payload and PHP warning text). That content is not part of this
// compilation unit and should be removed; the Go code below starts here.

package main

// This file contains functions and apis that support merging of
// meta-data information. It helps implement the "merge", "subtract",
// and "intersect" subcommands.

import (
	"crypto/md5"
	"fmt"
	"internal/coverage"
	"internal/coverage/calloc"
	"internal/coverage/cmerge"
	"internal/coverage/decodecounter"
	"internal/coverage/decodemeta"
	"internal/coverage/encodecounter"
	"internal/coverage/encodemeta"
	"internal/coverage/slicewriter"
	"io"
	"os"
	"path/filepath"
	"sort"
	"time"
	"unsafe"
)

// metaMerge provides state and methods to help manage the process
// of selecting or merging meta data files. There are three cases
// of interest here: the "-pcombine" flag provided by merge, the
// "-pkg" option provided by all merge/subtract/intersect, and
// a regular vanilla merge with no package selection
//
// In the -pcombine case, we're essentially glomming together all the
// meta-data for all packages and all functions, meaning that
// everything we see in a given package needs to be added into the
// meta-data file builder; we emit a single meta-data file at the end
// of the run.
//
// In the -pkg case, we will typically emit a single meta-data file
// per input pod, where that new meta-data file contains entries for
// just the selected packages.
//
// In the third case (vanilla merge with no combining or package
// selection) we can carry over meta-data files without touching them
// at all (only counter data files will be merged).
type metaMerge struct {
	calloc.BatchCounterAlloc
	cmerge.Merger
	// maps package import path to package state
	pkm map[string]*pkstate
	// list of packages
	pkgs []*pkstate
	// current package state
	p *pkstate
	// current pod state
	pod *podstate
	// counter data file osargs/goos/goarch state
	astate *argstate
}

// pkstate records per-package state accumulated while visiting the
// meta-data and counter files of a pod.
type pkstate struct {
	// index of package within meta-data file.
	pkgIdx uint32
	// this maps function index within the package to counter data payload
	ctab map[uint32]decodecounter.FuncPayload
	// pointer to meta-data blob for package
	mdblob []byte
	// filled in only for -pcombine merges
	*pcombinestate
}

// podstate records per-pod state: the merged counter payloads keyed by
// (package, function) index, plus the pod's meta-data file name, reader,
// and hash.
type podstate struct {
	pmm      map[pkfunc]decodecounter.FuncPayload
	mdf      string
	mfr      *decodemeta.CoverageMetaFileReader
	fileHash [16]byte
}

// pkfunc identifies a function by package index and function index
// within that package; used as the key for counter payload maps.
type pkfunc struct {
	pk, fcn uint32
}

// pcombinestate holds additional package state used only when the
// "-pcombine" flag is in effect.
type pcombinestate struct {
	// Meta-data builder for the package.
	cmdb *encodemeta.CoverageMetaDataBuilder
	// Maps function meta-data hash to new function index in the
	// new version of the package we're building.
	ftab map[[16]byte]uint32
}

// newMetaMerge returns a freshly initialized metaMerge with empty
// package and argument state.
func newMetaMerge() *metaMerge {
	return &metaMerge{
		pkm:    make(map[string]*pkstate),
		astate: &argstate{},
	}
}

// visitMetaDataFile records state for the meta-data file "mdf" of the
// current pod (name, reader, file hash) and checks the file's counter
// mode/granularity against what has been seen so far, dying with a
// fatal error on a clash.
func (mm *metaMerge) visitMetaDataFile(mdf string, mfr *decodemeta.CoverageMetaFileReader) {
	dbgtrace(2, "visitMetaDataFile(mdf=%s)", mdf)

	// Record meta-data file name.
	mm.pod.mdf = mdf
	// Keep a pointer to the file-level reader.
	mm.pod.mfr = mfr
	// Record file hash.
	mm.pod.fileHash = mfr.FileHash()
	// Counter mode and granularity -- detect and record clashes here.
	newgran := mfr.CounterGranularity()
	newmode := mfr.CounterMode()
	if err := mm.SetModeAndGranularity(mdf, newmode, newgran); err != nil {
		fatal("%v", err)
	}
}

// beginCounterDataFile folds the os.Args/GOOS/GOARCH values recorded in
// a counter data file into the accumulated argument state.
func (mm *metaMerge) beginCounterDataFile(cdr *decodecounter.CounterDataReader) {
	state := argvalues{
		osargs: cdr.OsArgs(),
		goos:   cdr.Goos(),
		goarch: cdr.Goarch(),
	}
	mm.astate.Merge(state)
}

// copyMetaDataFile copies the meta-data file at "inpath" to "outpath",
// preserving the input file's mode bits; any failure is fatal.
func copyMetaDataFile(inpath, outpath string) {
	inf, err := os.Open(inpath)
	if err != nil {
		fatal("opening input meta-data file %s: %v", inpath, err)
	}
	defer inf.Close()

	fi, err := inf.Stat()
	if err != nil {
		fatal("accessing input meta-data file %s: %v", inpath, err)
	}

	outf, err := os.OpenFile(outpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode())
	if err != nil {
		fatal("opening output meta-data file %s: %v", outpath, err)
	}

	_, err = io.Copy(outf, inf)
	outf.Close()
	if err != nil {
		fatal("writing output meta-data file %s: %v", outpath, err)
	}
}

// beginPod resets per-pod state in preparation for visiting the files
// of a new pod.
func (mm *metaMerge) beginPod() {
	mm.pod = &podstate{
		pmm: make(map[pkfunc]decodecounter.FuncPayload),
	}
}

// endPod handles actions needed when we're done visiting all of
// the things in a pod -- counter files and meta-data file. There are
// three cases of interest here:
//
// Case 1: in an unconditional merge (we're not selecting a specific set of
// packages using "-pkg", and the "-pcombine" option is not in use),
// we can simply copy over the meta-data file from input to output.
//
// Case 2: if this is a select merge (-pkg is in effect), then at
// this point we write out a new smaller meta-data file that includes
// only the packages of interest. At this point we also emit a merged
// counter data file as well.
//
// Case 3: if "-pcombine" is in effect, we don't write anything at
// this point (all writes will happen at the end of the run).
func (mm *metaMerge) endPod(pcombine bool) {
	if pcombine {
		// Just clear out the pod data, we'll do all the
		// heavy lifting at the end.
		mm.pod = nil
		return
	}

	finalHash := mm.pod.fileHash
	if matchpkg != nil {
		// Emit modified meta-data file for this pod.
		finalHash = mm.emitMeta(*outdirflag, pcombine)
	} else {
		// Copy meta-data file for this pod to the output directory.
		inpath := mm.pod.mdf
		mdfbase := filepath.Base(mm.pod.mdf)
		outpath := filepath.Join(*outdirflag, mdfbase)
		copyMetaDataFile(inpath, outpath)
	}

	// Emit accumulated counter data for this pod.
	mm.emitCounters(*outdirflag, finalHash)

	// Reset package state.
	mm.pkm = make(map[string]*pkstate)
	mm.pkgs = nil
	mm.pod = nil

	// Reset counter mode and granularity
	mm.ResetModeAndGranularity()
}

// emitMeta encodes and writes out a new coverage meta-data file as
// part of a merge operation, specifically a merge with the
// "-pcombine" flag. Returns the hash of the new file, which is also
// embedded in the output file name.
func (mm *metaMerge) emitMeta(outdir string, pcombine bool) [16]byte {
	fh := md5.New()
	blobs := [][]byte{}
	tlen := uint64(unsafe.Sizeof(coverage.MetaFileHeader{}))
	for _, p := range mm.pkgs {
		var blob []byte
		if pcombine {
			// Combined merge: serialize from the builder we populated.
			mdw := &slicewriter.WriteSeeker{}
			p.cmdb.Emit(mdw)
			blob = mdw.BytesWritten()
		} else {
			// Select merge: reuse the raw payload captured from input.
			blob = p.mdblob
		}
		ph := md5.Sum(blob)
		blobs = append(blobs, blob)
		if _, err := fh.Write(ph[:]); err != nil {
			panic(fmt.Sprintf("internal error: md5 sum failed: %v", err))
		}
		tlen += uint64(len(blob))
	}
	// File hash is the md5 of the per-package blob hashes.
	var finalHash [16]byte
	fhh := fh.Sum(nil)
	copy(finalHash[:], fhh)

	// Open meta-file for writing.
	fn := fmt.Sprintf("%s.%x", coverage.MetaFilePref, finalHash)
	fpath := filepath.Join(outdir, fn)
	mf, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		fatal("unable to open output meta-data file %s: %v", fpath, err)
	}

	// Encode and write.
	mfw := encodemeta.NewCoverageMetaFileWriter(fpath, mf)
	err = mfw.Write(finalHash, blobs, mm.Mode(), mm.Granularity())
	if err != nil {
		fatal("error writing %s: %v\n", fpath, err)
	}
	return finalHash
}

// emitCounters writes out a counter data file for the accumulated
// counter state, tagged with "metaHash" so readers can pair it with
// the corresponding meta-data file.
func (mm *metaMerge) emitCounters(outdir string, metaHash [16]byte) {
	// Open output file. The file naming scheme is intended to mimic
	// that used when running a coverage-instrumented binary, for
	// consistency (however the process ID is not meaningful here, so
	// use a value of zero).
	var dummyPID int
	fn := fmt.Sprintf(coverage.CounterFileTempl, coverage.CounterFilePref, metaHash, dummyPID, time.Now().UnixNano())
	fpath := filepath.Join(outdir, fn)
	cf, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		fatal("opening counter data file %s: %v", fpath, err)
	}
	defer func() {
		if err := cf.Close(); err != nil {
			fatal("error closing output meta-data file %s: %v", fpath, err)
		}
	}()

	args := mm.astate.ArgsSummary()
	cfw := encodecounter.NewCoverageDataWriter(cf, coverage.CtrULeb128)
	// mm implements encodecounter.CounterVisitor (see VisitFuncs below).
	if err := cfw.Write(metaHash, args, mm); err != nil {
		fatal("counter file write failed: %v", err)
	}
	mm.astate = &argstate{}
}

// VisitFuncs is used while writing the counter data files; it
// implements the 'VisitFuncs' method required by the interface
// internal/coverage/encodecounter/CounterVisitor.
func (mm *metaMerge) VisitFuncs(f encodecounter.CounterVisitorFn) error {
	if *verbflag >= 4 {
		fmt.Printf("counterVisitor invoked\n")
	}
	// For each package, for each function, construct counter
	// array and then call "f" on it.
	for pidx, p := range mm.pkgs {
		// Visit functions in sorted index order for deterministic output.
		fids := make([]int, 0, len(p.ctab))
		for fid := range p.ctab {
			fids = append(fids, int(fid))
		}
		sort.Ints(fids)
		if *verbflag >= 4 {
			fmt.Printf("fids for pk=%d: %+v\n", pidx, fids)
		}
		for _, fid := range fids {
			fp := p.ctab[uint32(fid)]
			if *verbflag >= 4 {
				fmt.Printf("counter write for pk=%d fid=%d len(ctrs)=%d\n", pidx, fid, len(fp.Counters))
			}
			if err := f(uint32(pidx), uint32(fid), fp.Counters); err != nil {
				return err
			}
		}
	}
	return nil
}

// visitPackage looks up (or creates) the merge state for the package
// described by "pd". For -pcombine merges a meta-data builder is set
// up; otherwise the package's raw meta-data payload is captured for
// later re-emission. The resulting state is installed as the current
// package (mm.p).
func (mm *metaMerge) visitPackage(pd *decodemeta.CoverageMetaDataDecoder, pkgIdx uint32, pcombine bool) {
	p, ok := mm.pkm[pd.PackagePath()]
	if !ok {
		p = &pkstate{
			pkgIdx: uint32(len(mm.pkgs)),
		}
		mm.pkgs = append(mm.pkgs, p)
		mm.pkm[pd.PackagePath()] = p
		if pcombine {
			p.pcombinestate = new(pcombinestate)
			cmdb, err := encodemeta.NewCoverageMetaDataBuilder(pd.PackagePath(), pd.PackageName(), pd.ModulePath())
			if err != nil {
				fatal("fatal error creating meta-data builder: %v", err)
			}
			dbgtrace(2, "install new pkm entry for package %s pk=%d", pd.PackagePath(), pkgIdx)
			p.cmdb = cmdb
			p.ftab = make(map[[16]byte]uint32)
		} else {
			var err error
			p.mdblob, err = mm.pod.mfr.GetPackagePayload(pkgIdx, nil)
			if err != nil {
				fatal("error extracting package %d payload from %s: %v", pkgIdx, mm.pod.mdf, err)
			}
		}
		p.ctab = make(map[uint32]decodecounter.FuncPayload)
	}
	mm.p = p
}

// visitFuncCounterData merges the counters in "data" into the
// accumulated per-pod payload for the same (package, function) key,
// growing the destination slice if needed. Counter-merge errors are
// fatal; uint32 overflow produces a warning.
func (mm *metaMerge) visitFuncCounterData(data decodecounter.FuncPayload) {
	key := pkfunc{pk: data.PkgIdx, fcn: data.FuncIdx}
	val := mm.pod.pmm[key]
	// FIXME: in theory either A) len(val.Counters) is zero, or B)
	// the two lengths are equal. Assert if not? Of course, we could
	// see odd stuff if there is source file skew.
	if *verbflag > 4 {
		fmt.Printf("visit pk=%d fid=%d len(counters)=%d\n", data.PkgIdx, data.FuncIdx, len(data.Counters))
	}
	if len(val.Counters) < len(data.Counters) {
		// Grow the accumulator to the incoming length, keeping old values.
		t := val.Counters
		val.Counters = mm.AllocateCounters(len(data.Counters))
		copy(val.Counters, t)
	}
	err, overflow := mm.MergeCounters(val.Counters, data.Counters)
	if err != nil {
		fatal("%v", err)
	}
	if overflow {
		warn("uint32 overflow during counter merge")
	}
	mm.pod.pmm[key] = val
}

// visitFunc processes function "fd" (index fnIdx in package pkgIdx)
// during the final walk over meta-data. In -pcombine mode the function
// is deduplicated by hash and (if new) registered with the package's
// meta-data builder, remapping fnIdx to the combined index. If counter
// data was accumulated for the function, it is installed (or merged)
// into the current package's ctab. "verb" names the subcommand, used
// only for a sanity check.
func (mm *metaMerge) visitFunc(pkgIdx uint32, fnIdx uint32, fd *coverage.FuncDesc, verb string, pcombine bool) {
	if *verbflag >= 3 {
		fmt.Printf("visit pk=%d fid=%d func %s\n", pkgIdx, fnIdx, fd.Funcname)
	}

	var counters []uint32
	key := pkfunc{pk: pkgIdx, fcn: fnIdx}
	v, haveCounters := mm.pod.pmm[key]
	if haveCounters {
		counters = v.Counters
	}

	if pcombine {
		// If the merge is running in "combine programs" mode, then hash
		// the function and look it up in the package ftab to see if we've
		// encountered it before. If we haven't, then register it with the
		// meta-data builder.
		fnhash := encodemeta.HashFuncDesc(fd)
		gfidx, ok := mm.p.ftab[fnhash]
		if !ok {
			// We haven't seen this function before, need to add it to
			// the meta data.
			gfidx = uint32(mm.p.cmdb.AddFunc(*fd))
			mm.p.ftab[fnhash] = gfidx
			if *verbflag >= 3 {
				fmt.Printf("new meta entry for fn %s fid=%d\n", fd.Funcname, gfidx)
			}
		}
		fnIdx = gfidx
	}
	if !haveCounters {
		return
	}

	// Install counters in package ctab.
	gfp, ok := mm.p.ctab[fnIdx]
	if ok {
		// For subtract/intersect each function should be seen at most
		// once per package, so a pre-existing entry is a bug.
		if verb == "subtract" || verb == "intersect" {
			panic("should never see this for intersect/subtract")
		}
		if *verbflag >= 3 {
			fmt.Printf("counter merge for %s fidx=%d\n", fd.Funcname, fnIdx)
		}
		// Merge.
		err, overflow := mm.MergeCounters(gfp.Counters, counters)
		if err != nil {
			fatal("%v", err)
		}
		if overflow {
			warn("uint32 overflow during counter merge")
		}
		mm.p.ctab[fnIdx] = gfp
	} else {
		if *verbflag >= 3 {
			fmt.Printf("null merge for %s fidx %d\n", fd.Funcname, fnIdx)
		}
		gfp := v
		gfp.PkgIdx = mm.p.pkgIdx
		gfp.FuncIdx = fnIdx
		mm.p.ctab[fnIdx] = gfp
	}
}