// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.h for an overview.
//
// The MCentral doesn't actually contain the list of free objects; the MSpan does.
// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).
13package runtime
14
// Central list of free objects of a given size.
//
// All fields below are protected by lock; every function in this file
// takes lock before touching the nonempty/empty lists.
type mcentral struct {
	lock      mutex
	sizeclass int32
	nonempty  mspan // list of spans with a free object
	empty     mspan // list of spans with no free objects (or cached in an mcache)
}
22
Russ Cox1e2d2f02014-11-11 17:05:02 -050023// Initialize a single central free list.
24func mCentral_Init(c *mcentral, sizeclass int32) {
25 c.sizeclass = sizeclass
26 mSpanList_Init(&c.nonempty)
27 mSpanList_Init(&c.empty)
28}
29
// Allocate a span to use in an MCache.
//
// On success the returned span has at least one free object, has been
// moved to the back of c's empty list, and has incache set; c's lock is
// not held on return. Returns nil only if mCentral_Grow fails.
//
// Sweep-generation convention, as used below (sg == mheap_.sweepgen):
// a span with sweepgen == sg-2 still needs sweeping this cycle, and
// winning the cas to sg-1 claims the right to sweep it here; a span
// already at sg-1 is being swept by the background sweeper and is skipped.
func mCentral_CacheSpan(c *mcentral) *mspan {
	lock(&c.lock)
	sg := mheap_.sweepgen
retry:
	var s *mspan
	for s = c.nonempty.next; s != &c.nonempty; s = s.next {
		if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
			// Claimed an unswept nonempty span: move it to the empty
			// list (it is about to be cached), then sweep it outside
			// the lock. A nonempty span keeps free objects after sweep.
			mSpanList_Remove(s)
			mSpanList_InsertBack(&c.empty, s)
			unlock(&c.lock)
			mSpan_Sweep(s, true)
			goto havespan
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		mSpanList_Remove(s)
		mSpanList_InsertBack(&c.empty, s)
		unlock(&c.lock)
		goto havespan
	}

	for s = c.empty.next; s != &c.empty; s = s.next {
		if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			mSpanList_Remove(s)
			// swept spans are at the end of the list
			mSpanList_InsertBack(&c.empty, s)
			unlock(&c.lock)
			mSpan_Sweep(s, true)
			if s.freelist.ptr() != nil {
				// Sweeping freed at least one object.
				goto havespan
			}
			lock(&c.lock)
			// the span is still empty after sweep
			// it is already in the empty list, so just retry
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	unlock(&c.lock)

	// Replenish central list if empty.
	s = mCentral_Grow(c)
	if s == nil {
		// Heap allocation failed; propagate to caller.
		return nil
	}
	lock(&c.lock)
	mSpanList_InsertBack(&c.empty, s)
	unlock(&c.lock)

	// At this point s is a non-empty span, queued at the end of the empty list,
	// c is unlocked.
havespan:
	// cap is the total number of object slots in the span;
	// n = cap - ref is the number of free objects remaining.
	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.ref)
	if n == 0 {
		throw("empty span")
	}
	if s.freelist.ptr() == nil {
		throw("freelist empty")
	}
	s.incache = true
	return s
}
105
// Return span from an MCache.
//
// Clears incache and, if the span still has free objects, relinks it
// onto c's nonempty list so other caches can allocate from it.
func mCentral_UncacheSpan(c *mcentral, s *mspan) {
	lock(&c.lock)

	s.incache = false

	if s.ref == 0 {
		// NOTE(review): given n = cap - ref below, ref appears to count
		// allocated objects, so ref == 0 here would mean the cache held a
		// fully free span — an invariant violation. Confirm against mcache.
		throw("uncaching full span")
	}

	// n is the number of free object slots left in the span.
	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.ref)
	if n > 0 {
		// Still has free objects: make it visible to allocators again.
		mSpanList_Remove(s)
		mSpanList_Insert(&c.nonempty, s)
	}
	unlock(&c.lock)
}
124
// Free n objects from a span s back into the central free list c.
// Called during sweep.
// Returns true if the span was returned to heap. Sets sweepgen to
// the latest generation.
// If preserve=true, don't return the span to heap nor relink in MCentral lists;
// caller takes care of it.
//
// start/end delimit an already-linked chain of n freed objects that is
// spliced onto the front of s's free list.
func mCentral_FreeSpan(c *mcentral, s *mspan, n int32, start gclinkptr, end gclinkptr, preserve bool) bool {
	if s.incache {
		throw("freespan into cached span")
	}

	// Add the objects back to s's free list.
	wasempty := s.freelist.ptr() == nil
	end.ptr().next = s.freelist
	s.freelist = start
	s.ref -= uint16(n)

	if preserve {
		// preserve is set only when called from MCentral_CacheSpan above,
		// the span must be in the empty list.
		if s.next == nil {
			throw("can't preserve unlinked span")
		}
		atomicstore(&s.sweepgen, mheap_.sweepgen)
		return false
	}

	lock(&c.lock)

	// Move to nonempty if necessary.
	if wasempty {
		mSpanList_Remove(s)
		mSpanList_Insert(&c.nonempty, s)
	}

	// delay updating sweepgen until here. This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	atomicstore(&s.sweepgen, mheap_.sweepgen)

	if s.ref != 0 {
		// Span still has allocated objects; keep it in c's lists.
		unlock(&c.lock)
		return false
	}

	// s is completely freed, return it to the heap.
	mSpanList_Remove(s)
	s.needzero = 1 // memory must be zeroed before reuse
	s.freelist = 0
	unlock(&c.lock)
	// Clear the heap bitmap for the span before giving it back.
	heapBitsForSpan(s.base()).clearSpan(s.layout())
	mHeap_Free(&mheap_, s, 0)
	return true
}
180
// Fetch a new span from the heap and carve into objects for the free list.
//
// Returns nil if the heap allocation fails. The returned span has all n
// objects of c's size class threaded onto its free list and its heap
// bitmap initialized; it is not yet linked into any of c's lists.
func mCentral_Grow(c *mcentral) *mspan {
	// Pages per span and object size for this size class come from the
	// class tables.
	npages := uintptr(class_to_allocnpages[c.sizeclass])
	size := uintptr(class_to_size[c.sizeclass])
	n := (npages << _PageShift) / size

	s := mHeap_Alloc(&mheap_, npages, c.sizeclass, false, true)
	if s == nil {
		return nil
	}

	// Carve the span into n objects and link them into a free list by
	// writing each object's "next" pointer into the object itself.
	p := uintptr(s.start << _PageShift)
	s.limit = p + size*n
	head := gclinkptr(p)
	tail := gclinkptr(p)
	// i==0 iteration already done
	for i := uintptr(1); i < n; i++ {
		p += size
		tail.ptr().next = gclinkptr(p)
		tail = gclinkptr(p)
	}
	if s.freelist.ptr() != nil {
		// A fresh span from the heap must not already own a free list.
		throw("freelist not empty")
	}
	tail.ptr().next = 0
	s.freelist = head
	// Initialize the heap bitmap for the new span.
	heapBitsForSpan(s.base()).initSpan(s.layout())
	return s
}