1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76 package pprof
77
78 import (
79 "bufio"
80 "cmp"
81 "fmt"
82 "internal/abi"
83 "internal/goexperiment"
84 "internal/profilerecord"
85 "io"
86 "runtime"
87 "slices"
88 "sort"
89 "strings"
90 "sync"
91 "text/tabwriter"
92 "time"
93 "unsafe"
94 )
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
// A Profile is a stack trace profile collection, either one of the
// built-in runtime-backed profiles or a user-defined profile populated
// via Add/Remove.
type Profile struct {
	name string                      // profile name, used for registry lookup and output headers
	mu   sync.Mutex                  // guards m
	m    map[any][]uintptr           // user profiles: stacks recorded by Add, keyed by Add's value
	count func() int                 // built-in profiles only: current number of records
	write func(io.Writer, int) error // built-in profiles only: serialize at the given debug level
}
181
182
// profiles is the registry of all known profiles, keyed by name.
// The map is populated lazily by lockProfiles.
var profiles struct {
	mu sync.Mutex
	m  map[string]*Profile
}
187
// Built-in profiles, backed directly by the runtime via their
// count/write functions.

var goroutineProfile = &Profile{
	name:  "goroutine",
	count: countGoroutine,
	write: writeGoroutine,
}

// goroutineLeakProfile is only registered when the GoroutineLeakProfile
// experiment is enabled (see lockProfiles).
var goroutineLeakProfile = &Profile{
	name:  "goroutineleak",
	count: runtime_goroutineleakcount,
	write: writeGoroutineLeak,
}

var threadcreateProfile = &Profile{
	name:  "threadcreate",
	count: countThreadCreate,
	write: writeThreadCreate,
}

var heapProfile = &Profile{
	name:  "heap",
	count: countHeap,
	write: writeHeap,
}

// allocsProfile shares the heap profile's data but defaults the proto
// sample type to alloc_space (see writeAlloc).
var allocsProfile = &Profile{
	name:  "allocs",
	count: countHeap,
	write: writeAlloc,
}

var blockProfile = &Profile{
	name:  "block",
	count: countBlock,
	write: writeBlock,
}

var mutexProfile = &Profile{
	name:  "mutex",
	count: countMutex,
	write: writeMutex,
}
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
// goroutineLeakProfileLock serializes goroutine leak profile collection
// (see writeGoroutineLeak, which runs a GC while holding it).
var goroutineLeakProfileLock sync.Mutex
254
255 func lockProfiles() {
256 profiles.mu.Lock()
257 if profiles.m == nil {
258
259 profiles.m = map[string]*Profile{
260 "goroutine": goroutineProfile,
261 "threadcreate": threadcreateProfile,
262 "heap": heapProfile,
263 "allocs": allocsProfile,
264 "block": blockProfile,
265 "mutex": mutexProfile,
266 }
267 if goexperiment.GoroutineLeakProfile {
268 profiles.m["goroutineleak"] = goroutineLeakProfile
269 }
270 }
271 }
272
// unlockProfiles releases the registry lock taken by lockProfiles.
func unlockProfiles() {
	profiles.mu.Unlock()
}
276
277
278
279
280
281
282
283 func NewProfile(name string) *Profile {
284 lockProfiles()
285 defer unlockProfiles()
286 if name == "" {
287 panic("pprof: NewProfile with empty name")
288 }
289 if profiles.m[name] != nil {
290 panic("pprof: NewProfile name already in use: " + name)
291 }
292 p := &Profile{
293 name: name,
294 m: map[any][]uintptr{},
295 }
296 profiles.m[name] = p
297 return p
298 }
299
300
// Lookup returns the profile with the given name,
// or nil if no such profile exists.
func Lookup(name string) *Profile {
	lockProfiles()
	defer unlockProfiles()
	return profiles.m[name]
}
306
307
308 func Profiles() []*Profile {
309 lockProfiles()
310 defer unlockProfiles()
311
312 all := make([]*Profile, 0, len(profiles.m))
313 for _, p := range profiles.m {
314
315 all = append(all, p)
316 }
317
318 slices.SortFunc(all, func(a, b *Profile) int {
319 return strings.Compare(a.name, b.name)
320 })
321 return all
322 }
323
324
// Name returns this profile's name, which can be passed to Lookup
// to reobtain the profile.
func (p *Profile) Name() string {
	return p.name
}
328
329
// Count returns the number of execution stacks currently in the profile.
func (p *Profile) Count() int {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.count != nil {
		// Built-in profile: ask the runtime.
		return p.count()
	}
	// User profile: one stack per value added.
	return len(p.m)
}
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
// Add adds the current execution stack to the profile, associated with value.
// value is stored in an internal map, so it must be suitable for use as a
// map key, and the stack is retained until a matching Remove call.
// Add panics if called on a built-in profile, or if value was already added.
//
// skip is the number of stack frames to omit, as in runtime.Callers:
// passing 0 starts the trace at the caller of Add.
func (p *Profile) Add(value any, skip int) {
	if p.name == "" {
		panic("pprof: use of uninitialized Profile")
	}
	if p.write != nil {
		panic("pprof: Add called on built-in Profile " + p.name)
	}

	stk := make([]uintptr, 32)
	// skip+1 also omits Add itself from the trace.
	n := runtime.Callers(skip+1, stk[:])
	stk = stk[:n]
	if len(stk) == 0 {
		// The value for skip is too large, and there's no stack trace to
		// record; fall back to a single placeholder frame.
		stk = []uintptr{abi.FuncPCABIInternal(lostProfileEvent)}
	}

	p.mu.Lock()
	defer p.mu.Unlock()
	if p.m[value] != nil {
		panic("pprof: Profile.Add of duplicate value")
	}
	p.m[value] = stk
}
379
380
381
// Remove removes the execution stack associated with value from the profile.
// It is a no-op if the value is not in the profile.
func (p *Profile) Remove(value any) {
	p.mu.Lock()
	defer p.mu.Unlock()
	delete(p.m, value)
}
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
// WriteTo writes a pprof-formatted snapshot of the profile to w.
// If a write to w returns an error, WriteTo returns that error.
// Otherwise, WriteTo returns nil.
//
// debug selects the output format: 0 produces the binary proto format,
// larger values produce a legacy text format with extra detail
// (interpreted by the per-profile write function for built-ins, and by
// printCountProfile for user profiles).
func (p *Profile) WriteTo(w io.Writer, debug int) error {
	if p.name == "" {
		panic("pprof: use of zero Profile")
	}
	if p.write != nil {
		// Built-in profile: delegate to its runtime-backed writer.
		return p.write(w, debug)
	}

	// Snapshot the stacks under the lock, then release it before the
	// potentially slow formatting work below.
	p.mu.Lock()
	all := make([][]uintptr, 0, len(p.m))
	for _, stk := range p.m {
		all = append(all, stk)
	}
	p.mu.Unlock()

	// Map iteration order is random; sort for deterministic output.
	slices.SortFunc(all, slices.Compare)

	return printCountProfile(w, debug, p.name, stackProfile(all))
}
424
// stackProfile adapts a plain slice of stacks to the countProfile
// interface. It carries no labels.
type stackProfile [][]uintptr

func (x stackProfile) Len() int              { return len(x) }
func (x stackProfile) Stack(i int) []uintptr { return x[i] }
func (x stackProfile) Label(i int) *labelMap { return nil }
430
431
432
433
434
// A countProfile is a set of stack traces to be printed as counts
// grouped by stack trace. Label returns the labels associated with
// stack i, or nil if there are none.
type countProfile interface {
	Len() int
	Stack(i int) []uintptr
	Label(i int) *labelMap
}
440
441
442
443
444 func expandInlinedFrames(dst, pcs []uintptr) int {
445 cf := runtime.CallersFrames(pcs)
446 var n int
447 for n < len(dst) {
448 f, more := cf.Next()
449
450
451 dst[n] = f.PC + 1
452 n++
453 if !more {
454 break
455 }
456 }
457 return n
458 }
459
460
461
462
463
// printCountCycleProfile outputs block profile records (for block or
// mutex profiles) in pprof proto format. Cycle counts are converted to
// nanoseconds because the proto expects (count, time) rather than
// (count, cycles).
func printCountCycleProfile(w io.Writer, countName, cycleName string, records []profilerecord.BlockProfileRecord) error {
	// Output profile in protobuf form.
	b := newProfileBuilder(w)
	b.pbValueType(tagProfile_PeriodType, countName, "count")
	b.pb.int64Opt(tagProfile_Period, 1)
	b.pbValueType(tagProfile_SampleType, countName, "count")
	b.pbValueType(tagProfile_SampleType, cycleName, "nanoseconds")

	cpuGHz := float64(pprof_cyclesPerSecond()) / 1e9

	values := []int64{0, 0}
	var locs []uint64
	expandedStack := pprof_makeProfStack()
	for _, r := range records {
		values[0] = r.Count
		// cycles / (cycles per nanosecond) = nanoseconds.
		values[1] = int64(float64(r.Cycles) / cpuGHz)

		// Expand inlined frames before emitting locations.
		n := expandInlinedFrames(expandedStack, r.Stack)
		locs = b.appendLocsForStack(locs[:0], expandedStack[:n])
		b.pbSample(values, locs, nil)
	}
	return b.build()
}
488
489
490
// printCountProfile prints a countProfile at the specified debug level.
// The output is compressed proto format unless debug is positive, in
// which case a legacy text format is used.
func printCountProfile(w io.Writer, debug int, name string, p countProfile) error {
	// Build a count of each distinct (stack, labels) pair, keyed by a
	// textual rendering of the pair.
	var buf strings.Builder
	key := func(stk []uintptr, lbls *labelMap) string {
		buf.Reset()
		fmt.Fprintf(&buf, "@")
		for _, pc := range stk {
			fmt.Fprintf(&buf, " %#x", pc)
		}
		if lbls != nil {
			buf.WriteString("\n# labels: ")
			buf.WriteString(lbls.String())
		}
		return buf.String()
	}
	count := map[string]int{} // key -> number of occurrences
	index := map[string]int{} // key -> index of first occurrence in p
	var keys []string
	n := p.Len()
	for i := 0; i < n; i++ {
		k := key(p.Stack(i), p.Label(i))
		if count[k] == 0 {
			index[k] = i
			keys = append(keys, k)
		}
		count[k]++
	}

	// Most frequent stacks first; ties broken by key.
	sort.Sort(&keysByCount{keys, count})

	if debug > 0 {
		// Print debug profile in legacy format.
		tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
		fmt.Fprintf(tw, "%s profile: total %d\n", name, p.Len())
		for _, k := range keys {
			fmt.Fprintf(tw, "%d %s\n", count[k], k)
			printStackRecord(tw, p.Stack(index[k]), false)
		}
		return tw.Flush()
	}

	// Output profile in protobuf form.
	b := newProfileBuilder(w)
	b.pbValueType(tagProfile_PeriodType, name, "count")
	b.pb.int64Opt(tagProfile_Period, 1)
	b.pbValueType(tagProfile_SampleType, name, "count")

	values := []int64{0}
	var locs []uint64
	for _, k := range keys {
		values[0] = int64(count[k])

		// For count profiles, all stack addresses are return PCs,
		// which is what appendLocsForStack expects.
		locs = b.appendLocsForStack(locs[:0], p.Stack(index[k]))
		idx := index[k]
		var labels func()
		if p.Label(idx) != nil {
			labels = func() {
				for _, lbl := range p.Label(idx).Set.List {
					b.pbLabel(tagSample_Label, lbl.Key, lbl.Value, 0)
				}
			}
		}
		b.pbSample(values, locs, labels)
	}
	return b.build()
}
558
559
// keysByCount sorts keys with higher counts first, breaking ties by key
// in ascending order. It implements sort.Interface.
type keysByCount struct {
	keys  []string
	count map[string]int
}

func (x *keysByCount) Len() int      { return len(x.keys) }
func (x *keysByCount) Swap(i, j int) { x.keys[i], x.keys[j] = x.keys[j], x.keys[i] }
func (x *keysByCount) Less(i, j int) bool {
	a, b := x.keys[i], x.keys[j]
	switch ca, cb := x.count[a], x.count[b]; {
	case ca > cb:
		return true
	case ca < cb:
		return false
	default:
		// Equal counts: order by key.
		return a < b
	}
}
575
576
577
// printStackRecord prints the function + source line information for a
// single stack trace. Unless allFrames is set, leading runtime frames
// (and runtime.goexit) are hidden.
func printStackRecord(w io.Writer, stk []uintptr, allFrames bool) {
	show := allFrames
	frames := runtime.CallersFrames(stk)
	for {
		frame, more := frames.Next()
		name := frame.Function
		if name == "" {
			show = true
			fmt.Fprintf(w, "#\t%#x\n", frame.PC)
		} else if name != "runtime.goexit" && (show || !(strings.HasPrefix(name, "runtime.") || strings.HasPrefix(name, "internal/runtime/"))) {
			// Hide runtime.goexit and any runtime functions at the
			// beginning. This is useful mainly for allocation traces.
			show = true
			fmt.Fprintf(w, "#\t%#x\t%s+%#x\t%s:%d\n", frame.PC, name, frame.PC-frame.Entry, frame.File, frame.Line)
		}
		if !more {
			break
		}
	}
	if !show {
		// We didn't print anything; do it again,
		// and this time include runtime functions.
		printStackRecord(w, stk, true)
		return
	}
	fmt.Fprintf(w, "\n")
}
605
606
607
608
609
// WriteHeapProfile is shorthand for Lookup("heap").WriteTo(w, 0).
// It is preserved for backwards compatibility.
func WriteHeapProfile(w io.Writer) error {
	return writeHeap(w, 0)
}
613
614
// countHeap returns the number of records in the heap profile,
// including records with zero in-use bytes.
func countHeap() int {
	n, _ := runtime.MemProfile(nil, true)
	return n
}
619
620
// writeHeap writes the current runtime heap profile to w.
func writeHeap(w io.Writer, debug int) error {
	return writeHeapInternal(w, debug, "")
}
624
625
626
// writeAlloc writes the current runtime heap profile to w with the
// total allocation space as the default sample type.
func writeAlloc(w io.Writer, debug int) error {
	return writeHeapInternal(w, debug, "alloc_space")
}
630
// writeHeapInternal writes the heap profile to w. With debug == 0 it
// emits proto format (via writeHeapProto); otherwise it emits the
// legacy text format, followed by a runtime.MemStats dump.
func writeHeapInternal(w io.Writer, debug int, defaultSampleType string) error {
	var memStats *runtime.MemStats
	if debug != 0 {
		// Read mem stats first, so that our other allocations
		// do not appear in the statistics.
		memStats = new(runtime.MemStats)
		runtime.ReadMemStats(memStats)
	}

	// Find out how many records there are (the call with nil),
	// allocate that many records, and fetch the profile. There's a
	// race—more records might be added between the two calls—so
	// allocate a few extra and retry until the fetch succeeds.
	var p []profilerecord.MemProfileRecord
	n, ok := pprof_memProfileInternal(nil, true)
	for {
		p = make([]profilerecord.MemProfileRecord, n+50)
		n, ok = pprof_memProfileInternal(p, true)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again with the larger n.
	}

	if debug == 0 {
		return writeHeapProto(w, p, int64(runtime.MemProfileRate), defaultSampleType)
	}

	slices.SortFunc(p, func(a, b profilerecord.MemProfileRecord) int {
		return cmp.Compare(a.InUseBytes(), b.InUseBytes())
	})

	b := bufio.NewWriter(w)
	tw := tabwriter.NewWriter(b, 1, 8, 1, '\t', 0)
	w = tw

	var total runtime.MemProfileRecord
	for i := range p {
		r := &p[i]
		total.AllocBytes += r.AllocBytes
		total.AllocObjects += r.AllocObjects
		total.FreeBytes += r.FreeBytes
		total.FreeObjects += r.FreeObjects
	}

	// Technically the rate is MemProfileRate not 2*MemProfileRate,
	// but early versions of the C++ heap profiler reported
	// 2*MemProfileRate, so that's what legacy-format consumers have
	// come to expect.
	rate := 2 * runtime.MemProfileRate

	// Ensure the displayed alloc total differs from the in-use total;
	// NOTE(review): presumably equal totals would make the header
	// ambiguous to legacy pprof parsers — confirm against the parser.
	inUseBytes := total.InUseBytes()
	allocBytes := total.AllocBytes
	if inUseBytes == allocBytes {
		allocBytes++
	}

	fmt.Fprintf(w, "heap profile: %d: %d [%d: %d] @ heap/%d\n",
		total.InUseObjects(), inUseBytes,
		total.AllocObjects, allocBytes,
		rate)

	for i := range p {
		r := &p[i]
		fmt.Fprintf(w, "%d: %d [%d: %d] @",
			r.InUseObjects(), r.InUseBytes(),
			r.AllocObjects, r.AllocBytes)
		for _, pc := range r.Stack {
			fmt.Fprintf(w, " %#x", pc)
		}
		fmt.Fprintf(w, "\n")
		printStackRecord(w, r.Stack, false)
	}

	// Print memstats information too.
	// Pprof will ignore, but useful for people.
	s := memStats
	fmt.Fprintf(w, "\n# runtime.MemStats\n")
	fmt.Fprintf(w, "# Alloc = %d\n", s.Alloc)
	fmt.Fprintf(w, "# TotalAlloc = %d\n", s.TotalAlloc)
	fmt.Fprintf(w, "# Sys = %d\n", s.Sys)
	fmt.Fprintf(w, "# Lookups = %d\n", s.Lookups)
	fmt.Fprintf(w, "# Mallocs = %d\n", s.Mallocs)
	fmt.Fprintf(w, "# Frees = %d\n", s.Frees)

	fmt.Fprintf(w, "# HeapAlloc = %d\n", s.HeapAlloc)
	fmt.Fprintf(w, "# HeapSys = %d\n", s.HeapSys)
	fmt.Fprintf(w, "# HeapIdle = %d\n", s.HeapIdle)
	fmt.Fprintf(w, "# HeapInuse = %d\n", s.HeapInuse)
	fmt.Fprintf(w, "# HeapReleased = %d\n", s.HeapReleased)
	fmt.Fprintf(w, "# HeapObjects = %d\n", s.HeapObjects)

	fmt.Fprintf(w, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
	fmt.Fprintf(w, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
	fmt.Fprintf(w, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
	fmt.Fprintf(w, "# BuckHashSys = %d\n", s.BuckHashSys)
	fmt.Fprintf(w, "# GCSys = %d\n", s.GCSys)
	fmt.Fprintf(w, "# OtherSys = %d\n", s.OtherSys)

	fmt.Fprintf(w, "# NextGC = %d\n", s.NextGC)
	fmt.Fprintf(w, "# LastGC = %d\n", s.LastGC)
	fmt.Fprintf(w, "# PauseNs = %d\n", s.PauseNs)
	fmt.Fprintf(w, "# PauseEnd = %d\n", s.PauseEnd)
	fmt.Fprintf(w, "# NumGC = %d\n", s.NumGC)
	fmt.Fprintf(w, "# NumForcedGC = %d\n", s.NumForcedGC)
	fmt.Fprintf(w, "# GCCPUFraction = %v\n", s.GCCPUFraction)
	fmt.Fprintf(w, "# DebugGC = %v\n", s.DebugGC)

	// Also report max RSS (platform-specific; defined elsewhere in
	// the package).
	addMaxRSS(w)

	tw.Flush()
	return b.Flush()
}
757
758
// countThreadCreate returns the size of the current ThreadCreateProfile.
func countThreadCreate() int {
	n, _ := runtime.ThreadCreateProfile(nil)
	return n
}
763
764
// writeThreadCreate writes the current runtime ThreadCreateProfile to w.
func writeThreadCreate(w io.Writer, debug int) error {
	// The thread-create profile carries no labels, so the fetch
	// function ignores the labels slice and wraps the label-free
	// runtime call.
	return writeRuntimeProfile(w, debug, "threadcreate", func(p []profilerecord.StackRecord, _ []unsafe.Pointer) (n int, ok bool) {
		return pprof_threadCreateInternal(p)
	})
}
773
774
// countGoroutine returns the number of goroutines.
func countGoroutine() int {
	return runtime.NumGoroutine()
}
778
779
// writeGoroutine writes the current runtime GoroutineProfile to w.
// At debug >= 2 it prints full text stack dumps instead of a profile.
func writeGoroutine(w io.Writer, debug int) error {
	if debug >= 2 {
		return writeGoroutineStacks(w)
	}
	return writeRuntimeProfile(w, debug, "goroutine", pprof_goroutineProfileWithLabels)
}
786
787
788
// writeGoroutineLeak writes the goroutine leak profile to w.
// At debug >= 2 it prints full text stack dumps instead of a profile.
func writeGoroutineLeak(w io.Writer, debug int) error {
	// Serialize leak-profile collection: the GC run below is global
	// state, so concurrent writers must take turns.
	goroutineLeakProfileLock.Lock()
	defer goroutineLeakProfileLock.Unlock()

	// Run the leak-detecting GC so the leak information is fresh
	// before the profile is collected.
	runtime_goroutineLeakGC()

	// NOTE(review): at debug >= 2 this dumps all goroutine stacks,
	// not only leaked ones — presumably intentional, mirroring
	// writeGoroutine; confirm.
	if debug >= 2 {
		return writeGoroutineStacks(w)
	}

	return writeRuntimeProfile(w, debug, "goroutineleak", pprof_goroutineLeakProfileWithLabels)
}
812
813 func writeGoroutineStacks(w io.Writer) error {
814
815
816
817 buf := make([]byte, 1<<20)
818 for i := 0; ; i++ {
819 n := runtime.Stack(buf, true)
820 if n < len(buf) {
821 buf = buf[:n]
822 break
823 }
824 if len(buf) >= 64<<20 {
825
826 break
827 }
828 buf = make([]byte, 2*len(buf))
829 }
830 _, err := w.Write(buf)
831 return err
832 }
833
// writeRuntimeProfile fetches a runtime profile via fetch and writes it
// to w using printCountProfile.
func writeRuntimeProfile(w io.Writer, debug int, name string, fetch func([]profilerecord.StackRecord, []unsafe.Pointer) (int, bool)) error {
	// Find out how many records there are (fetch(nil, nil)),
	// allocate that many records, and get the data.
	// There's a race—more records might be added between
	// the two calls—so allocate a few extra records for safety
	// and also try again if we're very unlucky.
	// The loop should only execute one iteration in the common case.
	var p []profilerecord.StackRecord
	var labels []unsafe.Pointer
	n, ok := fetch(nil, nil)

	for {
		// Allocate room for a slightly bigger profile,
		// in case a few more entries have been added
		// since the call to fetch.
		p = make([]profilerecord.StackRecord, n+10)
		labels = make([]unsafe.Pointer, n+10)
		n, ok = fetch(p, labels)
		if ok {
			p = p[0:n]
			break
		}
		// Profile grew; try again.
	}

	return printCountProfile(w, debug, name, &runtimeProfile{p, labels})
}
861
// runtimeProfile pairs runtime stack records with their per-goroutine
// label pointers, implementing countProfile.
type runtimeProfile struct {
	stk    []profilerecord.StackRecord
	labels []unsafe.Pointer
}

func (p *runtimeProfile) Len() int              { return len(p.stk) }
func (p *runtimeProfile) Stack(i int) []uintptr { return p.stk[i].Stack }
func (p *runtimeProfile) Label(i int) *labelMap { return (*labelMap)(p.labels[i]) }
870
// cpu holds the CPU profiling state: whether profiling is active and a
// channel used to wait for the profileWriter goroutine to finish.
var cpu struct {
	sync.Mutex
	profiling bool
	done      chan bool
}
876
877
878
879
880
881
882
883
884
885
886
887
// StartCPUProfile enables CPU profiling for the current process.
// While profiling, the profile is buffered and written to w.
// StartCPUProfile returns an error if profiling is already enabled.
func StartCPUProfile(w io.Writer) error {
	// The runtime routines allow a variable profiling rate,
	// but in practice operating systems cannot trigger signals
	// at more than about 500 Hz, and our processing of the
	// signal is not cheap (mostly getting the stack trace).
	// 100 Hz is a reasonable choice: it is frequent enough to
	// produce useful data, rare enough not to bog down the
	// system, and a nice round number to make it easy to
	// convert sample counts to seconds.
	const hz = 100

	cpu.Lock()
	defer cpu.Unlock()
	if cpu.done == nil {
		cpu.done = make(chan bool)
	}
	// Double-check under the lock.
	if cpu.profiling {
		return fmt.Errorf("cpu profiling already in use")
	}
	cpu.profiling = true
	runtime.SetCPUProfileRate(hz)
	// profileWriter drains the runtime's sample buffer into w until
	// profiling is stopped, then signals cpu.done.
	go profileWriter(w)
	return nil
}
914
915
916
917
918
919
// readProfile, provided by the runtime (no Go body here), returns the
// next chunk of binary CPU profiling stack trace data and its tags,
// blocking until data is available. eof reports that profiling is off
// and all buffered data has been returned.
func readProfile() (data []uint64, tags []unsafe.Pointer, eof bool)
921
// profileWriter drains CPU profile data from the runtime into a proto
// profile written to w, until readProfile reports EOF. It signals
// cpu.done when finished.
func profileWriter(w io.Writer) {
	b := newProfileBuilder(w)
	var err error
	for {
		if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
			// Pace the read loop on Darwin/iOS.
			// NOTE(review): presumably readProfile does not block
			// there — confirm against the runtime implementation.
			time.Sleep(100 * time.Millisecond)
		}
		data, tags, eof := readProfile()
		// Remember only the first conversion error; keep draining so
		// the runtime buffer is consumed through EOF.
		if e := b.addCPUData(data, tags); e != nil && err == nil {
			err = e
		}
		if eof {
			break
		}
	}
	if err != nil {
		// The runtime should never produce an invalid or truncated
		// profile. It drops records that can't fit into its log
		// buffers.
		panic("runtime/pprof: converting profile: " + err.Error())
	}
	b.build()
	cpu.done <- true
}
946
947
948
949
// StopCPUProfile stops the current CPU profile, if any.
// StopCPUProfile only returns after all the writes for the
// profile have completed.
func StopCPUProfile() {
	cpu.Lock()
	defer cpu.Unlock()

	if !cpu.profiling {
		return
	}
	cpu.profiling = false
	runtime.SetCPUProfileRate(0)
	// Wait for profileWriter to drain the buffer and finish.
	<-cpu.done
}
961
962
// countBlock returns the number of records in the blocking profile.
func countBlock() int {
	n, _ := runtime.BlockProfile(nil)
	return n
}
967
968
// countMutex returns the number of records in the mutex profile.
func countMutex() int {
	n, _ := runtime.MutexProfile(nil)
	return n
}
973
974
// writeBlock writes the current blocking profile to w.
func writeBlock(w io.Writer, debug int) error {
	return writeProfileInternal(w, debug, "contention", pprof_blockProfileInternal)
}
978
979
// writeMutex writes the current mutex profile to w.
func writeMutex(w io.Writer, debug int) error {
	return writeProfileInternal(w, debug, "mutex", pprof_mutexProfileInternal)
}
983
984
985 func writeProfileInternal(w io.Writer, debug int, name string, runtimeProfile func([]profilerecord.BlockProfileRecord) (int, bool)) error {
986 var p []profilerecord.BlockProfileRecord
987 n, ok := runtimeProfile(nil)
988 for {
989 p = make([]profilerecord.BlockProfileRecord, n+50)
990 n, ok = runtimeProfile(p)
991 if ok {
992 p = p[:n]
993 break
994 }
995 }
996
997 slices.SortFunc(p, func(a, b profilerecord.BlockProfileRecord) int {
998 return cmp.Compare(b.Cycles, a.Cycles)
999 })
1000
1001 if debug <= 0 {
1002 return printCountCycleProfile(w, "contentions", "delay", p)
1003 }
1004
1005 b := bufio.NewWriter(w)
1006 tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
1007 w = tw
1008
1009 fmt.Fprintf(w, "--- %v:\n", name)
1010 fmt.Fprintf(w, "cycles/second=%v\n", pprof_cyclesPerSecond())
1011 if name == "mutex" {
1012 fmt.Fprintf(w, "sampling period=%d\n", runtime.SetMutexProfileFraction(-1))
1013 }
1014 expandedStack := pprof_makeProfStack()
1015 for i := range p {
1016 r := &p[i]
1017 fmt.Fprintf(w, "%v %v @", r.Cycles, r.Count)
1018 n := expandInlinedFrames(expandedStack, r.Stack)
1019 stack := expandedStack[:n]
1020 for _, pc := range stack {
1021 fmt.Fprintf(w, " %#x", pc)
1022 }
1023 fmt.Fprint(w, "\n")
1024 if debug > 0 {
1025 printStackRecord(w, stack, true)
1026 }
1027 }
1028
1029 if tw != nil {
1030 tw.Flush()
1031 }
1032 return b.Flush()
1033 }
1034
1035
// The declarations below have no Go body: they are implemented in the
// runtime package and linked into this package (presumably via
// go:linkname directives not visible in this view of the file).

func pprof_goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool)

func pprof_goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool)

func pprof_cyclesPerSecond() int64

func pprof_memProfileInternal(p []profilerecord.MemProfileRecord, inuseZero bool) (n int, ok bool)

func pprof_blockProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool)

func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool)

func pprof_threadCreateInternal(p []profilerecord.StackRecord) (n int, ok bool)

func pprof_fpunwindExpand(dst, src []uintptr) int

func pprof_makeProfStack() []uintptr
1061
View as plain text