runtime: rewrite lots of foo_Bar(f, ...) into f.bar(...)

Applies to types fixAlloc, mCache, mCentral, mHeap, mSpan, and
mSpanList.
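
The pattern in miniature (a toy sketch with invented names, not
actual runtime identifiers; 13 mirrors the runtime's _PageShift):

	package main

	import "fmt"

	type span struct{ npages uintptr }

	// Old style: a free function whose first parameter is the receiver.
	func span_Size(s *span) uintptr { return s.npages << 13 }

	// New style: the same logic as a method on the type.
	func (s *span) size() uintptr { return s.npages << 13 }

	func main() {
		s := &span{npages: 2}
		fmt.Println(span_Size(s)) // old call shape: Type_Method(t, ...)
		fmt.Println(s.size())     // new call shape: t.method(...)
	}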

Two special cases:

1. mHeap_Scavenge() previously didn't take an *mheap parameter (it
fetched &mheap_ itself), so this CL handles it specially: it becomes
a method on *mheap, and callers now pass the global explicitly, as
mheap_.scavenge(...).

2. mHeap_Free() would have collided with mheap's "free" field (Go
rejects a struct field and a method with the same name), so it's
been renamed to (*mheap).freeSpan to parallel its underlying
(*mheap).freeSpanLocked method. Both cases are sketched below.
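
Both special cases in miniature (a hedged sketch with made-up names;
the real changes are in the mheap.go hunks below):

	package main

	import "fmt"

	type heap struct {
		free [4]int // a field already named "free" ...
	}

	var heap_ heap // package-level singleton, like runtime's mheap_

	// Case 1: the old function reached for the global itself; as a
	// method it takes the receiver, and callers write heap_.scavenge().
	func (h *heap) scavenge() { h.free = [4]int{} }

	// Case 2: this method cannot also be named "free" (field/method
	// name clash), so the CL picks freeSpan, matching freeSpanLocked.
	func (h *heap) freeSpan(n int) { h.free[n]++ }

	func main() {
		heap_.freeSpan(1)
		fmt.Println(heap_.free) // [0 1 0 0]
		heap_.scavenge()
		fmt.Println(heap_.free) // [0 0 0 0]
	}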

Change-Id: I325938554cca432c166fe9d9d689af2bbd68de4b
Reviewed-on: https://go-review.googlesource.com/16221
Reviewed-by: Ian Lance Taylor <iant@golang.org>
Run-TryBot: Matthew Dempsky <mdempsky@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go
index f8f88c6..0a62c67 100644
--- a/src/runtime/heapdump.go
+++ b/src/runtime/heapdump.go
@@ -643,7 +643,7 @@
 	for i := uintptr(0); i < uintptr(mheap_.nspan); i++ {
 		s := h_allspans[i]
 		if s.state == _MSpanInUse {
-			mSpan_EnsureSwept(s)
+			s.ensureSwept()
 		}
 	}
 	memclr(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 365422a..efaa46f 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -76,9 +76,6 @@
 //	   or the page heap can avoid zeroing altogether.
 //	2. the cost of zeroing when reusing a small object is
 //	   charged to the mutator, not the garbage collector.
-//
-// This code was written with an eye toward translating to Go
-// in the future.  Methods have the form Type_Method(Type *t, ...).
 
 package runtime
 
@@ -359,7 +356,7 @@
 	}
 
 	// Initialize the rest of the allocator.
-	mHeap_Init(&mheap_, spansSize)
+	mheap_.init(spansSize)
 	_g_ := getg()
 	_g_.m.mcache = allocmcache()
 }
@@ -387,7 +384,7 @@
 	return sysReserve(nil, n, reserved)
 }
 
-func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
+func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
 	if n > h.arena_end-h.arena_used {
 		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
 		// Reserve some more space.
@@ -409,8 +406,8 @@
 				// Our pages are bigger than hardware pages.
 				h.arena_end = p + p_size
 				used := p + (-uintptr(p) & (_PageSize - 1))
-				mHeap_MapBits(h, used)
-				mHeap_MapSpans(h, used)
+				h.mapBits(used)
+				h.mapSpans(used)
 				h.arena_used = used
 				h.arena_reserved = reserved
 			} else {
@@ -424,8 +421,8 @@
 		// Keep taking from our reservation.
 		p := h.arena_used
 		sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
-		mHeap_MapBits(h, p+n)
-		mHeap_MapSpans(h, p+n)
+		h.mapBits(p + n)
+		h.mapSpans(p + n)
 		h.arena_used = p + n
 		if raceenabled {
 			racemapshadow(unsafe.Pointer(p), n)
@@ -460,8 +457,8 @@
 	p_end := p + p_size
 	p += -p & (_PageSize - 1)
 	if uintptr(p)+n > h.arena_used {
-		mHeap_MapBits(h, p+n)
-		mHeap_MapSpans(h, p+n)
+		h.mapBits(p + n)
+		h.mapSpans(p + n)
 		h.arena_used = p + n
 		if p_end > h.arena_end {
 			h.arena_end = p_end
@@ -600,7 +597,7 @@
 			v := s.freelist
 			if v.ptr() == nil {
 				systemstack(func() {
-					mCache_Refill(c, tinySizeClass)
+					c.refill(tinySizeClass)
 				})
 				shouldhelpgc = true
 				s = c.alloc[tinySizeClass]
@@ -632,7 +629,7 @@
 			v := s.freelist
 			if v.ptr() == nil {
 				systemstack(func() {
-					mCache_Refill(c, int32(sizeclass))
+					c.refill(int32(sizeclass))
 				})
 				shouldhelpgc = true
 				s = c.alloc[sizeclass]
@@ -757,7 +754,7 @@
 	// pays the debt down to npage pages.
 	deductSweepCredit(npages*_PageSize, npages)
 
-	s := mHeap_Alloc(&mheap_, npages, 0, true, flag&_FlagNoZero == 0)
+	s := mheap_.alloc(npages, 0, true, flag&_FlagNoZero == 0)
 	if s == nil {
 		throw("out of memory")
 	}
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 11cb6e2..ba123ea 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -131,7 +131,7 @@
 // after observing the change to arena_used.
 //
 //go:nowritebarrier
-func mHeap_MapBits(h *mheap, arena_used uintptr) {
+func (h *mheap) mapBits(arena_used uintptr) {
 	// Caller has added extra mappings to the arena.
 	// Add extra mappings of bitmap words as needed.
 	// We allocate extra bitmap pieces in chunks of bitmapChunk.
diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go
index 7424691..4df1361 100644
--- a/src/runtime/mcache.go
+++ b/src/runtime/mcache.go
@@ -63,7 +63,7 @@
 
 func allocmcache() *mcache {
 	lock(&mheap_.lock)
-	c := (*mcache)(fixAlloc_Alloc(&mheap_.cachealloc))
+	c := (*mcache)(mheap_.cachealloc.alloc())
 	unlock(&mheap_.lock)
 	memclr(unsafe.Pointer(c), unsafe.Sizeof(*c))
 	for i := 0; i < _NumSizeClasses; i++ {
@@ -75,7 +75,7 @@
 
 func freemcache(c *mcache) {
 	systemstack(func() {
-		mCache_ReleaseAll(c)
+		c.releaseAll()
 		stackcache_clear(c)
 
 		// NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
@@ -85,14 +85,14 @@
 
 		lock(&mheap_.lock)
 		purgecachedstats(c)
-		fixAlloc_Free(&mheap_.cachealloc, unsafe.Pointer(c))
+		mheap_.cachealloc.free(unsafe.Pointer(c))
 		unlock(&mheap_.lock)
 	})
 }
 
 // Gets a span that has a free object in it and assigns it
 // to be the cached span for the given sizeclass.  Returns this span.
-func mCache_Refill(c *mcache, sizeclass int32) *mspan {
+func (c *mcache) refill(sizeclass int32) *mspan {
 	_g_ := getg()
 
 	_g_.m.locks++
@@ -106,7 +106,7 @@
 	}
 
 	// Get a new cached span from the central lists.
-	s = mCentral_CacheSpan(&mheap_.central[sizeclass].mcentral)
+	s = mheap_.central[sizeclass].mcentral.cacheSpan()
 	if s == nil {
 		throw("out of memory")
 	}
@@ -119,11 +119,11 @@
 	return s
 }
 
-func mCache_ReleaseAll(c *mcache) {
+func (c *mcache) releaseAll() {
 	for i := 0; i < _NumSizeClasses; i++ {
 		s := c.alloc[i]
 		if s != &emptymspan {
-			mCentral_UncacheSpan(&mheap_.central[i].mcentral, s)
+			mheap_.central[i].mcentral.uncacheSpan(s)
 			c.alloc[i] = &emptymspan
 		}
 	}
diff --git a/src/runtime/mcentral.go b/src/runtime/mcentral.go
index a09801a..418a5ff 100644
--- a/src/runtime/mcentral.go
+++ b/src/runtime/mcentral.go
@@ -23,14 +23,14 @@
 }
 
 // Initialize a single central free list.
-func mCentral_Init(c *mcentral, sizeclass int32) {
+func (c *mcentral) init(sizeclass int32) {
 	c.sizeclass = sizeclass
-	mSpanList_Init(&c.nonempty)
-	mSpanList_Init(&c.empty)
+	c.nonempty.init()
+	c.empty.init()
 }
 
 // Allocate a span to use in an MCache.
-func mCentral_CacheSpan(c *mcentral) *mspan {
+func (c *mcentral) cacheSpan() *mspan {
 	// Deduct credit for this span allocation and sweep if necessary.
 	deductSweepCredit(uintptr(class_to_size[c.sizeclass]), 0)
 
@@ -40,10 +40,10 @@
 	var s *mspan
 	for s = c.nonempty.first; s != nil; s = s.next {
 		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
-			mSpanList_Remove(&c.nonempty, s)
-			mSpanList_InsertBack(&c.empty, s)
+			c.nonempty.remove(s)
+			c.empty.insertBack(s)
 			unlock(&c.lock)
-			mSpan_Sweep(s, true)
+			s.sweep(true)
 			goto havespan
 		}
 		if s.sweepgen == sg-1 {
@@ -51,8 +51,8 @@
 			continue
 		}
 		// we have a nonempty span that does not require sweeping, allocate from it
-		mSpanList_Remove(&c.nonempty, s)
-		mSpanList_InsertBack(&c.empty, s)
+		c.nonempty.remove(s)
+		c.empty.insertBack(s)
 		unlock(&c.lock)
 		goto havespan
 	}
@@ -61,11 +61,11 @@
 		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
 			// we have an empty span that requires sweeping,
 			// sweep it and see if we can free some space in it
-			mSpanList_Remove(&c.empty, s)
+			c.empty.remove(s)
 			// swept spans are at the end of the list
-			mSpanList_InsertBack(&c.empty, s)
+			c.empty.insertBack(s)
 			unlock(&c.lock)
-			mSpan_Sweep(s, true)
+			s.sweep(true)
 			if s.freelist.ptr() != nil {
 				goto havespan
 			}
@@ -85,12 +85,12 @@
 	unlock(&c.lock)
 
 	// Replenish central list if empty.
-	s = mCentral_Grow(c)
+	s = c.grow()
 	if s == nil {
 		return nil
 	}
 	lock(&c.lock)
-	mSpanList_InsertBack(&c.empty, s)
+	c.empty.insertBack(s)
 	unlock(&c.lock)
 
 	// At this point s is a non-empty span, queued at the end of the empty list,
@@ -113,7 +113,7 @@
 }
 
 // Return span from an MCache.
-func mCentral_UncacheSpan(c *mcentral, s *mspan) {
+func (c *mcentral) uncacheSpan(s *mspan) {
 	lock(&c.lock)
 
 	s.incache = false
@@ -125,8 +125,8 @@
 	cap := int32((s.npages << _PageShift) / s.elemsize)
 	n := cap - int32(s.ref)
 	if n > 0 {
-		mSpanList_Remove(&c.empty, s)
-		mSpanList_Insert(&c.nonempty, s)
+		c.empty.remove(s)
+		c.nonempty.insert(s)
 	}
 	unlock(&c.lock)
 }
@@ -137,7 +137,7 @@
 // the latest generation.
 // If preserve=true, don't return the span to heap nor relink in MCentral lists;
 // caller takes care of it.
-func mCentral_FreeSpan(c *mcentral, s *mspan, n int32, start gclinkptr, end gclinkptr, preserve bool) bool {
+func (c *mcentral) freeSpan(s *mspan, n int32, start gclinkptr, end gclinkptr, preserve bool) bool {
 	if s.incache {
 		throw("freespan into cached span")
 	}
@@ -151,7 +151,7 @@
 	if preserve {
 		// preserve is set only when called from MCentral_CacheSpan above,
 		// the span must be in the empty list.
-		if !mSpan_InList(s) {
+		if !s.inList() {
 			throw("can't preserve unlinked span")
 		}
 		atomic.Store(&s.sweepgen, mheap_.sweepgen)
@@ -162,8 +162,8 @@
 
 	// Move to nonempty if necessary.
 	if wasempty {
-		mSpanList_Remove(&c.empty, s)
-		mSpanList_Insert(&c.nonempty, s)
+		c.empty.remove(s)
+		c.nonempty.insert(s)
 	}
 
 	// delay updating sweepgen until here.  This is the signal that
@@ -178,22 +178,22 @@
 	}
 
 	// s is completely freed, return it to the heap.
-	mSpanList_Remove(&c.nonempty, s)
+	c.nonempty.remove(s)
 	s.needzero = 1
 	s.freelist = 0
 	unlock(&c.lock)
 	heapBitsForSpan(s.base()).initSpan(s.layout())
-	mHeap_Free(&mheap_, s, 0)
+	mheap_.freeSpan(s, 0)
 	return true
 }
 
 // Fetch a new span from the heap and carve into objects for the free list.
-func mCentral_Grow(c *mcentral) *mspan {
+func (c *mcentral) grow() *mspan {
 	npages := uintptr(class_to_allocnpages[c.sizeclass])
 	size := uintptr(class_to_size[c.sizeclass])
 	n := (npages << _PageShift) / size
 
-	s := mHeap_Alloc(&mheap_, npages, c.sizeclass, false, true)
+	s := mheap_.alloc(npages, c.sizeclass, false, true)
 	if s == nil {
 		return nil
 	}
diff --git a/src/runtime/mfixalloc.go b/src/runtime/mfixalloc.go
index 54d4a74..8653a6a 100644
--- a/src/runtime/mfixalloc.go
+++ b/src/runtime/mfixalloc.go
@@ -40,7 +40,7 @@
 
 // Initialize f to allocate objects of the given size,
 // using the allocator to obtain chunks of memory.
-func fixAlloc_Init(f *fixalloc, size uintptr, first func(arg, p unsafe.Pointer), arg unsafe.Pointer, stat *uint64) {
+func (f *fixalloc) init(size uintptr, first func(arg, p unsafe.Pointer), arg unsafe.Pointer, stat *uint64) {
 	f.size = size
 	f.first = first
 	f.arg = arg
@@ -51,7 +51,7 @@
 	f.stat = stat
 }
 
-func fixAlloc_Alloc(f *fixalloc) unsafe.Pointer {
+func (f *fixalloc) alloc() unsafe.Pointer {
 	if f.size == 0 {
 		print("runtime: use of FixAlloc_Alloc before FixAlloc_Init\n")
 		throw("runtime: internal error")
@@ -78,7 +78,7 @@
 	return v
 }
 
-func fixAlloc_Free(f *fixalloc, p unsafe.Pointer) {
+func (f *fixalloc) free(p unsafe.Pointer) {
 	f.inuse -= f.size
 	v := (*mlink)(p)
 	v.next = f.list
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index 812ba60..e670689 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -47,7 +47,7 @@
 		sg := mheap_.sweepgen
 		for _, s := range work.spans {
 			if s.sweepgen != sg && s.state == _MSpanInUse {
-				mSpan_EnsureSwept(s)
+				s.ensureSwept()
 			}
 		}
 	}
@@ -105,7 +105,7 @@
 			continue
 		}
 		npages := s.npages
-		if !mSpan_Sweep(s, false) {
+		if !s.sweep(false) {
 			npages = 0
 		}
 		_g_.m.locks--
@@ -129,7 +129,7 @@
 
 // Returns only when span s has been swept.
 //go:nowritebarrier
-func mSpan_EnsureSwept(s *mspan) {
+func (s *mspan) ensureSwept() {
 	// Caller must disable preemption.
 	// Otherwise when this function returns the span can become unswept again
 	// (if GC is triggered on another goroutine).
@@ -144,7 +144,7 @@
 	}
 	// The caller must be sure that the span is a MSpanInUse span.
 	if atomic.Cas(&s.sweepgen, sg-2, sg-1) {
-		mSpan_Sweep(s, false)
+		s.sweep(false)
 		return
 	}
 	// unfortunate condition, and we don't have efficient means to wait
@@ -159,7 +159,7 @@
 // If preserve=true, don't return it to heap nor relink in MCentral lists;
 // caller takes care of it.
 //TODO go:nowritebarrier
-func mSpan_Sweep(s *mspan, preserve bool) bool {
+func (s *mspan) sweep(preserve bool) bool {
 	// It's critical that we enter this function with preemption disabled,
 	// GC must not start while we are in the middle of this function.
 	_g_ := getg()
@@ -312,7 +312,7 @@
 	}
 	if nfree > 0 {
 		c.local_nsmallfree[cl] += uintptr(nfree)
-		res = mCentral_FreeSpan(&mheap_.central[cl].mcentral, s, int32(nfree), head, end, preserve)
+		res = mheap_.central[cl].mcentral.freeSpan(s, int32(nfree), head, end, preserve)
 		// MCentral_FreeSpan updates sweepgen
 	} else if freeToHeap {
 		// Free large span to heap
@@ -335,7 +335,7 @@
 			s.limit = 0 // prevent mlookup from finding this span
 			sysFault(unsafe.Pointer(uintptr(s.start<<_PageShift)), size)
 		} else {
-			mHeap_Free(&mheap_, s, 1)
+			mheap_.freeSpan(s, 1)
 		}
 		c.local_nlargefree++
 		c.local_largefree += size
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 359e62f..2feba43 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -246,7 +246,7 @@
 		unlock(&mheap_.lock)
 	}
 
-	s := mHeap_LookupMaybe(&mheap_, unsafe.Pointer(v))
+	s := mheap_.lookupMaybe(unsafe.Pointer(v))
 	if sp != nil {
 		*sp = s
 	}
@@ -285,22 +285,22 @@
 }
 
 // Initialize the heap.
-func mHeap_Init(h *mheap, spans_size uintptr) {
-	fixAlloc_Init(&h.spanalloc, unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
-	fixAlloc_Init(&h.cachealloc, unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
-	fixAlloc_Init(&h.specialfinalizeralloc, unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
-	fixAlloc_Init(&h.specialprofilealloc, unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
+func (h *mheap) init(spans_size uintptr) {
+	h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
+	h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
+	h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
+	h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
 
 	// h->mapcache needs no init
 	for i := range h.free {
-		mSpanList_Init(&h.free[i])
-		mSpanList_Init(&h.busy[i])
+		h.free[i].init()
+		h.busy[i].init()
 	}
 
-	mSpanList_Init(&h.freelarge)
-	mSpanList_Init(&h.busylarge)
+	h.freelarge.init()
+	h.busylarge.init()
 	for i := range h.central {
-		mCentral_Init(&h.central[i].mcentral, int32(i))
+		h.central[i].mcentral.init(int32(i))
 	}
 
 	sp := (*slice)(unsafe.Pointer(&h_spans))
@@ -317,7 +317,7 @@
 // Waiting to update arena_used until after the memory has been mapped
 // avoids faults when other threads try access the bitmap immediately
 // after observing the change to arena_used.
-func mHeap_MapSpans(h *mheap, arena_used uintptr) {
+func (h *mheap) mapSpans(arena_used uintptr) {
 	// Map spans array, PageSize at a time.
 	n := arena_used
 	n -= h.arena_start
@@ -332,18 +332,18 @@
 
 // Sweeps spans in list until reclaims at least npages into heap.
 // Returns the actual number of pages reclaimed.
-func mHeap_ReclaimList(h *mheap, list *mSpanList, npages uintptr) uintptr {
+func (h *mheap) reclaimList(list *mSpanList, npages uintptr) uintptr {
 	n := uintptr(0)
 	sg := mheap_.sweepgen
 retry:
 	for s := list.first; s != nil; s = s.next {
 		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
-			mSpanList_Remove(list, s)
+			list.remove(s)
 			// swept spans are at the end of the list
-			mSpanList_InsertBack(list, s)
+			list.insertBack(s)
 			unlock(&h.lock)
 			snpages := s.npages
-			if mSpan_Sweep(s, false) {
+			if s.sweep(false) {
 				n += snpages
 			}
 			lock(&h.lock)
@@ -366,17 +366,17 @@
 
 // Sweeps and reclaims at least npage pages into heap.
 // Called before allocating npage pages.
-func mHeap_Reclaim(h *mheap, npage uintptr) {
+func (h *mheap) reclaim(npage uintptr) {
 	// First try to sweep busy spans with large objects of size >= npage,
 	// this has good chances of reclaiming the necessary space.
 	for i := int(npage); i < len(h.busy); i++ {
-		if mHeap_ReclaimList(h, &h.busy[i], npage) != 0 {
+		if h.reclaimList(&h.busy[i], npage) != 0 {
 			return // Bingo!
 		}
 	}
 
 	// Then -- even larger objects.
-	if mHeap_ReclaimList(h, &h.busylarge, npage) != 0 {
+	if h.reclaimList(&h.busylarge, npage) != 0 {
 		return // Bingo!
 	}
 
@@ -384,7 +384,7 @@
 	// One such object is not enough, so we need to reclaim several of them.
 	reclaimed := uintptr(0)
 	for i := 0; i < int(npage) && i < len(h.busy); i++ {
-		reclaimed += mHeap_ReclaimList(h, &h.busy[i], npage-reclaimed)
+		reclaimed += h.reclaimList(&h.busy[i], npage-reclaimed)
 		if reclaimed >= npage {
 			return
 		}
@@ -407,7 +407,7 @@
 
 // Allocate a new span of npage pages from the heap for GC'd memory
 // and record its size class in the HeapMap and HeapMapCache.
-func mHeap_Alloc_m(h *mheap, npage uintptr, sizeclass int32, large bool) *mspan {
+func (h *mheap) alloc_m(npage uintptr, sizeclass int32, large bool) *mspan {
 	_g_ := getg()
 	if _g_ != _g_.m.g0 {
 		throw("_mheap_alloc not on g0 stack")
@@ -424,7 +424,7 @@
 		// If GC kept a bit for whether there were any marks
 		// in a span, we could release these free spans
 		// at the end of GC and eliminate this entirely.
-		mHeap_Reclaim(h, npage)
+		h.reclaim(npage)
 	}
 
 	// transfer stats from cache to global
@@ -435,7 +435,7 @@
 	memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
 	_g_.m.mcache.local_tinyallocs = 0
 
-	s := mHeap_AllocSpanLocked(h, npage)
+	s := h.allocSpanLocked(npage)
 	if s != nil {
 		// Record span info, because gc needs to be
 		// able to map interior pointer to containing span.
@@ -466,9 +466,9 @@
 			memstats.heap_live += uint64(npage << _PageShift)
 			// Swept spans are at the end of lists.
 			if s.npages < uintptr(len(h.free)) {
-				mSpanList_InsertBack(&h.busy[s.npages], s)
+				h.busy[s.npages].insertBack(s)
 			} else {
-				mSpanList_InsertBack(&h.busylarge, s)
+				h.busylarge.insertBack(s)
 			}
 		}
 	}
@@ -494,13 +494,13 @@
 	return s
 }
 
-func mHeap_Alloc(h *mheap, npage uintptr, sizeclass int32, large bool, needzero bool) *mspan {
+func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool) *mspan {
 	// Don't do any operations that lock the heap on the G stack.
 	// It might trigger stack growth, and the stack growth code needs
 	// to be able to allocate heap.
 	var s *mspan
 	systemstack(func() {
-		s = mHeap_Alloc_m(h, npage, sizeclass, large)
+		s = h.alloc_m(npage, sizeclass, large)
 	})
 
 	if s != nil {
@@ -512,13 +512,13 @@
 	return s
 }
 
-func mHeap_AllocStack(h *mheap, npage uintptr) *mspan {
+func (h *mheap) allocStack(npage uintptr) *mspan {
 	_g_ := getg()
 	if _g_ != _g_.m.g0 {
 		throw("mheap_allocstack not on g0 stack")
 	}
 	lock(&h.lock)
-	s := mHeap_AllocSpanLocked(h, npage)
+	s := h.allocSpanLocked(npage)
 	if s != nil {
 		s.state = _MSpanStack
 		s.freelist = 0
@@ -534,14 +534,14 @@
 // Allocates a span of the given size.  h must be locked.
 // The returned span has been removed from the
 // free list, but its state is still MSpanFree.
-func mHeap_AllocSpanLocked(h *mheap, npage uintptr) *mspan {
+func (h *mheap) allocSpanLocked(npage uintptr) *mspan {
 	var list *mSpanList
 	var s *mspan
 
 	// Try in fixed-size lists up to max.
 	for i := int(npage); i < len(h.free); i++ {
 		list = &h.free[i]
-		if !mSpanList_IsEmpty(list) {
+		if !list.isEmpty() {
 			s = list.first
 			goto HaveSpan
 		}
@@ -549,12 +549,12 @@
 
 	// Best fit in list of large spans.
 	list = &h.freelarge
-	s = mHeap_AllocLarge(h, npage)
+	s = h.allocLarge(npage)
 	if s == nil {
-		if !mHeap_Grow(h, npage) {
+		if !h.grow(npage) {
 			return nil
 		}
-		s = mHeap_AllocLarge(h, npage)
+		s = h.allocLarge(npage)
 		if s == nil {
 			return nil
 		}
@@ -568,8 +568,8 @@
 	if s.npages < npage {
 		throw("MHeap_AllocLocked - bad npages")
 	}
-	mSpanList_Remove(list, s)
-	if mSpan_InList(s) {
+	list.remove(s)
+	if s.inList() {
 		throw("still in list")
 	}
 	if s.npreleased > 0 {
@@ -580,8 +580,8 @@
 
 	if s.npages > npage {
 		// Trim extra and put it back in the heap.
-		t := (*mspan)(fixAlloc_Alloc(&h.spanalloc))
-		mSpan_Init(t, s.start+pageID(npage), s.npages-npage)
+		t := (*mspan)(h.spanalloc.alloc())
+		t.init(s.start+pageID(npage), s.npages-npage)
 		s.npages = npage
 		p := uintptr(t.start)
 		p -= (h.arena_start >> _PageShift)
@@ -593,7 +593,7 @@
 		t.needzero = s.needzero
 		s.state = _MSpanStack // prevent coalescing with s
 		t.state = _MSpanStack
-		mHeap_FreeSpanLocked(h, t, false, false, s.unusedsince)
+		h.freeSpanLocked(t, false, false, s.unusedsince)
 		s.state = _MSpanFree
 	}
 	s.unusedsince = 0
@@ -608,14 +608,14 @@
 	memstats.heap_idle -= uint64(npage << _PageShift)
 
 	//println("spanalloc", hex(s.start<<_PageShift))
-	if mSpan_InList(s) {
+	if s.inList() {
 		throw("still in list")
 	}
 	return s
 }
 
 // Allocate a span of exactly npage pages from the list of large spans.
-func mHeap_AllocLarge(h *mheap, npage uintptr) *mspan {
+func (h *mheap) allocLarge(npage uintptr) *mspan {
 	return bestFit(&h.freelarge, npage, nil)
 }
 
@@ -638,7 +638,7 @@
 // returning whether it worked.
 //
 // h must be locked.
-func mHeap_Grow(h *mheap, npage uintptr) bool {
+func (h *mheap) grow(npage uintptr) bool {
 	// Ask for a big chunk, to reduce the number of mappings
 	// the operating system needs to track; also amortizes
 	// the overhead of an operating system mapping.
@@ -649,11 +649,11 @@
 		ask = _HeapAllocChunk
 	}
 
-	v := mHeap_SysAlloc(h, ask)
+	v := h.sysAlloc(ask)
 	if v == nil {
 		if ask > npage<<_PageShift {
 			ask = npage << _PageShift
-			v = mHeap_SysAlloc(h, ask)
+			v = h.sysAlloc(ask)
 		}
 		if v == nil {
 			print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")
@@ -663,8 +663,8 @@
 
 	// Create a fake "in use" span and free it, so that the
 	// right coalescing happens.
-	s := (*mspan)(fixAlloc_Alloc(&h.spanalloc))
-	mSpan_Init(s, pageID(uintptr(v)>>_PageShift), ask>>_PageShift)
+	s := (*mspan)(h.spanalloc.alloc())
+	s.init(pageID(uintptr(v)>>_PageShift), ask>>_PageShift)
 	p := uintptr(s.start)
 	p -= (h.arena_start >> _PageShift)
 	for i := p; i < p+s.npages; i++ {
@@ -673,14 +673,14 @@
 	atomic.Store(&s.sweepgen, h.sweepgen)
 	s.state = _MSpanInUse
 	h.pagesInUse += uint64(npage)
-	mHeap_FreeSpanLocked(h, s, false, true, 0)
+	h.freeSpanLocked(s, false, true, 0)
 	return true
 }
 
 // Look up the span at the given address.
 // Address is guaranteed to be in map
 // and is guaranteed to be start or end of span.
-func mHeap_Lookup(h *mheap, v unsafe.Pointer) *mspan {
+func (h *mheap) lookup(v unsafe.Pointer) *mspan {
 	p := uintptr(v)
 	p -= h.arena_start
 	return h_spans[p>>_PageShift]
@@ -693,7 +693,7 @@
 // valid for allocated spans.  Free spans may have
 // other garbage in their middles, so we have to
 // check for that.
-func mHeap_LookupMaybe(h *mheap, v unsafe.Pointer) *mspan {
+func (h *mheap) lookupMaybe(v unsafe.Pointer) *mspan {
 	if uintptr(v) < h.arena_start || uintptr(v) >= h.arena_used {
 		return nil
 	}
@@ -708,7 +708,7 @@
 }
 
 // Free the span back into the heap.
-func mHeap_Free(h *mheap, s *mspan, acct int32) {
+func (h *mheap) freeSpan(s *mspan, acct int32) {
 	systemstack(func() {
 		mp := getg().m
 		lock(&h.lock)
@@ -724,7 +724,7 @@
 		if gcBlackenEnabled != 0 {
 			gcController.revise()
 		}
-		mHeap_FreeSpanLocked(h, s, true, true, 0)
+		h.freeSpanLocked(s, true, true, 0)
 		if trace.enabled {
 			traceHeapAlloc()
 		}
@@ -732,7 +732,7 @@
 	})
 }
 
-func mHeap_FreeStack(h *mheap, s *mspan) {
+func (h *mheap) freeStack(s *mspan) {
 	_g_ := getg()
 	if _g_ != _g_.m.g0 {
 		throw("mheap_freestack not on g0 stack")
@@ -740,12 +740,12 @@
 	s.needzero = 1
 	lock(&h.lock)
 	memstats.stacks_inuse -= uint64(s.npages << _PageShift)
-	mHeap_FreeSpanLocked(h, s, true, true, 0)
+	h.freeSpanLocked(s, true, true, 0)
 	unlock(&h.lock)
 }
 
 // s must be on a busy list (h.busy or h.busylarge) or unlinked.
-func mHeap_FreeSpanLocked(h *mheap, s *mspan, acctinuse, acctidle bool, unusedsince int64) {
+func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
 	switch s.state {
 	case _MSpanStack:
 		if s.ref != 0 {
@@ -768,8 +768,8 @@
 		memstats.heap_idle += uint64(s.npages << _PageShift)
 	}
 	s.state = _MSpanFree
-	if mSpan_InList(s) {
-		mSpanList_Remove(mHeap_BusyList(h, s.npages), s)
+	if s.inList() {
+		h.busyList(s.npages).remove(s)
 	}
 
 	// Stamp newly unused spans. The scavenger will use that
@@ -792,9 +792,9 @@
 			s.needzero |= t.needzero
 			p -= t.npages
 			h_spans[p] = s
-			mSpanList_Remove(mHeap_FreeList(h, t.npages), t)
+			h.freeList(t.npages).remove(t)
 			t.state = _MSpanDead
-			fixAlloc_Free(&h.spanalloc, unsafe.Pointer(t))
+			h.spanalloc.free(unsafe.Pointer(t))
 		}
 	}
 	if (p+s.npages)*ptrSize < h.spans_mapped {
@@ -804,24 +804,24 @@
 			s.npreleased += t.npreleased
 			s.needzero |= t.needzero
 			h_spans[p+s.npages-1] = s
-			mSpanList_Remove(mHeap_FreeList(h, t.npages), t)
+			h.freeList(t.npages).remove(t)
 			t.state = _MSpanDead
-			fixAlloc_Free(&h.spanalloc, unsafe.Pointer(t))
+			h.spanalloc.free(unsafe.Pointer(t))
 		}
 	}
 
 	// Insert s into appropriate list.
-	mSpanList_Insert(mHeap_FreeList(h, s.npages), s)
+	h.freeList(s.npages).insert(s)
 }
 
-func mHeap_FreeList(h *mheap, npages uintptr) *mSpanList {
+func (h *mheap) freeList(npages uintptr) *mSpanList {
 	if npages < uintptr(len(h.free)) {
 		return &h.free[npages]
 	}
 	return &h.freelarge
 }
 
-func mHeap_BusyList(h *mheap, npages uintptr) *mSpanList {
+func (h *mheap) busyList(npages uintptr) *mSpanList {
 	if npages < uintptr(len(h.free)) {
 		return &h.busy[npages]
 	}
@@ -838,7 +838,7 @@
 		return 0
 	}
 
-	if mSpanList_IsEmpty(list) {
+	if list.isEmpty() {
 		return 0
 	}
 
@@ -855,8 +855,7 @@
 	return sumreleased
 }
 
-func mHeap_Scavenge(k int32, now, limit uint64) {
-	h := &mheap_
+func (h *mheap) scavenge(k int32, now, limit uint64) {
 	lock(&h.lock)
 	var sumreleased uintptr
 	for i := 0; i < len(h.free); i++ {
@@ -878,11 +877,11 @@
 //go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
 func runtime_debug_freeOSMemory() {
 	gcStart(gcForceBlockMode, false)
-	systemstack(func() { mHeap_Scavenge(-1, ^uint64(0), 0) })
+	systemstack(func() { mheap_.scavenge(-1, ^uint64(0), 0) })
 }
 
 // Initialize a new span with the given start and npages.
-func mSpan_Init(span *mspan, start pageID, npages uintptr) {
+func (span *mspan) init(start pageID, npages uintptr) {
 	span.next = nil
 	span.prev = nil
 	span.list = nil
@@ -901,17 +900,17 @@
 	span.needzero = 0
 }
 
-func mSpan_InList(span *mspan) bool {
+func (span *mspan) inList() bool {
 	return span.prev != nil
 }
 
 // Initialize an empty doubly-linked list.
-func mSpanList_Init(list *mSpanList) {
+func (list *mSpanList) init() {
 	list.first = nil
 	list.last = &list.first
 }
 
-func mSpanList_Remove(list *mSpanList, span *mspan) {
+func (list *mSpanList) remove(span *mspan) {
 	if span.prev == nil || span.list != list {
 		println("failed MSpanList_Remove", span, span.prev, span.list, list)
 		throw("MSpanList_Remove")
@@ -929,11 +928,11 @@
 	span.list = nil
 }
 
-func mSpanList_IsEmpty(list *mSpanList) bool {
+func (list *mSpanList) isEmpty() bool {
 	return list.first == nil
 }
 
-func mSpanList_Insert(list *mSpanList, span *mspan) {
+func (list *mSpanList) insert(span *mspan) {
 	if span.next != nil || span.prev != nil || span.list != nil {
 		println("failed MSpanList_Insert", span, span.next, span.prev, span.list)
 		throw("MSpanList_Insert")
@@ -949,7 +948,7 @@
 	span.list = list
 }
 
-func mSpanList_InsertBack(list *mSpanList, span *mspan) {
+func (list *mSpanList) insertBack(span *mspan) {
 	if span.next != nil || span.prev != nil || span.list != nil {
 		println("failed MSpanList_InsertBack", span, span.next, span.prev, span.list)
 		throw("MSpanList_InsertBack")
@@ -983,7 +982,7 @@
 // (The add will fail only if a record with the same p and s->kind
 //  already exists.)
 func addspecial(p unsafe.Pointer, s *special) bool {
-	span := mHeap_LookupMaybe(&mheap_, p)
+	span := mheap_.lookupMaybe(p)
 	if span == nil {
 		throw("addspecial on invalid pointer")
 	}
@@ -992,7 +991,7 @@
 	// Sweeping accesses the specials list w/o locks, so we have
 	// to synchronize with it. And it's just much safer.
 	mp := acquirem()
-	mSpan_EnsureSwept(span)
+	span.ensureSwept()
 
 	offset := uintptr(p) - uintptr(span.start<<_PageShift)
 	kind := s.kind
@@ -1031,7 +1030,7 @@
 // Returns the record if the record existed, nil otherwise.
 // The caller must FixAlloc_Free the result.
 func removespecial(p unsafe.Pointer, kind uint8) *special {
-	span := mHeap_LookupMaybe(&mheap_, p)
+	span := mheap_.lookupMaybe(p)
 	if span == nil {
 		throw("removespecial on invalid pointer")
 	}
@@ -1040,7 +1039,7 @@
 	// Sweeping accesses the specials list w/o locks, so we have
 	// to synchronize with it. And it's just much safer.
 	mp := acquirem()
-	mSpan_EnsureSwept(span)
+	span.ensureSwept()
 
 	offset := uintptr(p) - uintptr(span.start<<_PageShift)
 
@@ -1078,7 +1077,7 @@
 // Adds a finalizer to the object p.  Returns true if it succeeded.
 func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
 	lock(&mheap_.speciallock)
-	s := (*specialfinalizer)(fixAlloc_Alloc(&mheap_.specialfinalizeralloc))
+	s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
 	unlock(&mheap_.speciallock)
 	s.special.kind = _KindSpecialFinalizer
 	s.fn = f
@@ -1110,7 +1109,7 @@
 
 	// There was an old finalizer
 	lock(&mheap_.speciallock)
-	fixAlloc_Free(&mheap_.specialfinalizeralloc, unsafe.Pointer(s))
+	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
 	unlock(&mheap_.speciallock)
 	return false
 }
@@ -1122,7 +1121,7 @@
 		return // there wasn't a finalizer to remove
 	}
 	lock(&mheap_.speciallock)
-	fixAlloc_Free(&mheap_.specialfinalizeralloc, unsafe.Pointer(s))
+	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
 	unlock(&mheap_.speciallock)
 }
 
@@ -1135,7 +1134,7 @@
 // Set the heap profile bucket associated with addr to b.
 func setprofilebucket(p unsafe.Pointer, b *bucket) {
 	lock(&mheap_.speciallock)
-	s := (*specialprofile)(fixAlloc_Alloc(&mheap_.specialprofilealloc))
+	s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
 	unlock(&mheap_.speciallock)
 	s.special.kind = _KindSpecialProfile
 	s.b = b
@@ -1152,13 +1151,13 @@
 		sf := (*specialfinalizer)(unsafe.Pointer(s))
 		queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
 		lock(&mheap_.speciallock)
-		fixAlloc_Free(&mheap_.specialfinalizeralloc, unsafe.Pointer(sf))
+		mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
 		unlock(&mheap_.speciallock)
 	case _KindSpecialProfile:
 		sp := (*specialprofile)(unsafe.Pointer(s))
 		mProf_Free(sp.b, size)
 		lock(&mheap_.speciallock)
-		fixAlloc_Free(&mheap_.specialprofilealloc, unsafe.Pointer(sp))
+		mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
 		unlock(&mheap_.speciallock)
 	default:
 		throw("bad special kind")
diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go
index 6e1116b..7422690 100644
--- a/src/runtime/mstats.go
+++ b/src/runtime/mstats.go
@@ -325,7 +325,7 @@
 		if c == nil {
 			continue
 		}
-		mCache_ReleaseAll(c)
+		c.releaseAll()
 		stackcache_clear(c)
 	}
 }
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 94443b5..74158a4 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -3379,7 +3379,7 @@
 		}
 		// scavenge heap once in a while
 		if lastscavenge+scavengelimit/2 < now {
-			mHeap_Scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit))
+			mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit))
 			lastscavenge = now
 			nscavenge++
 		}
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index cce371c..f217564 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -160,9 +160,9 @@
 		throw("cache size must be a multiple of page size")
 	}
 	for i := range stackpool {
-		mSpanList_Init(&stackpool[i])
+		stackpool[i].init()
 	}
-	mSpanList_Init(&stackFreeQueue)
+	stackFreeQueue.init()
 }
 
 // Allocates a stack from the free pool.  Must be called with
@@ -172,7 +172,7 @@
 	s := list.first
 	if s == nil {
 		// no free stacks.  Allocate another span worth.
-		s = mHeap_AllocStack(&mheap_, _StackCacheSize>>_PageShift)
+		s = mheap_.allocStack(_StackCacheSize >> _PageShift)
 		if s == nil {
 			throw("out of memory")
 		}
@@ -187,7 +187,7 @@
 			x.ptr().next = s.freelist
 			s.freelist = x
 		}
-		mSpanList_Insert(list, s)
+		list.insert(s)
 	}
 	x := s.freelist
 	if x.ptr() == nil {
@@ -197,20 +197,20 @@
 	s.ref++
 	if s.freelist.ptr() == nil {
 		// all stacks in s are allocated.
-		mSpanList_Remove(list, s)
+		list.remove(s)
 	}
 	return x
 }
 
 // Adds stack x to the free pool.  Must be called with stackpoolmu held.
 func stackpoolfree(x gclinkptr, order uint8) {
-	s := mHeap_Lookup(&mheap_, unsafe.Pointer(x))
+	s := mheap_.lookup(unsafe.Pointer(x))
 	if s.state != _MSpanStack {
 		throw("freeing stack not in a stack span")
 	}
 	if s.freelist.ptr() == nil {
 		// s will now have a free stack
-		mSpanList_Insert(&stackpool[order], s)
+		stackpool[order].insert(s)
 	}
 	x.ptr().next = s.freelist
 	s.freelist = x
@@ -231,9 +231,9 @@
 		//    pointer into a free span.
 		//
 		// By not freeing, we prevent step #4 until GC is done.
-		mSpanList_Remove(&stackpool[order], s)
+		stackpool[order].remove(s)
 		s.freelist = 0
-		mHeap_FreeStack(&mheap_, s)
+		mheap_.freeStack(s)
 	}
 }
 
@@ -357,7 +357,7 @@
 		}
 		v = unsafe.Pointer(x)
 	} else {
-		s := mHeap_AllocStack(&mheap_, round(uintptr(n), _PageSize)>>_PageShift)
+		s := mheap_.allocStack(round(uintptr(n), _PageSize) >> _PageShift)
 		if s == nil {
 			throw("out of memory")
 		}
@@ -424,7 +424,7 @@
 			c.stackcache[order].size += n
 		}
 	} else {
-		s := mHeap_Lookup(&mheap_, v)
+		s := mheap_.lookup(v)
 		if s.state != _MSpanStack {
 			println(hex(s.start<<_PageShift), v)
 			throw("bad span state")
@@ -432,7 +432,7 @@
 		if gcphase == _GCoff {
 			// Free the stack immediately if we're
 			// sweeping.
-			mHeap_FreeStack(&mheap_, s)
+			mheap_.freeStack(s)
 		} else {
 			// Otherwise, add it to a list of stack spans
 			// to be freed at the end of GC.
@@ -441,7 +441,7 @@
 			// these spans as stacks, like we do for small
 			// stack spans. (See issue #11466.)
 			lock(&stackpoolmu)
-			mSpanList_Insert(&stackFreeQueue, s)
+			stackFreeQueue.insert(s)
 			unlock(&stackpoolmu)
 		}
 	}
@@ -1001,19 +1001,19 @@
 		for s := list.first; s != nil; {
 			next := s.next
 			if s.ref == 0 {
-				mSpanList_Remove(list, s)
+				list.remove(s)
 				s.freelist = 0
-				mHeap_FreeStack(&mheap_, s)
+				mheap_.freeStack(s)
 			}
 			s = next
 		}
 	}
 
 	// Free queued stack spans.
-	for !mSpanList_IsEmpty(&stackFreeQueue) {
+	for !stackFreeQueue.isEmpty() {
 		s := stackFreeQueue.first
-		mSpanList_Remove(&stackFreeQueue, s)
-		mHeap_FreeStack(&mheap_, s)
+		stackFreeQueue.remove(s)
+		mheap_.freeStack(s)
 	}
 
 	unlock(&stackpoolmu)