// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.h for an overview.
//
// The MCentral doesn't actually contain the list of free objects; the MSpan does.
// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).
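//
// A span's position in these lists interacts with the sweeper. The sweep-state
// encoding (documented with the sweepgen field in mheap) is assumed below:
// sweepgen == mheap_.sweepgen-2 means the span still needs sweeping,
// sweepgen == mheap_.sweepgen-1 means it is currently being swept, and
// sweepgen == mheap_.sweepgen means it is swept and ready to use.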

package runtime

// Central list of free objects of a given size.
type mcentral struct {
	lock      mutex
	sizeclass int32
	nonempty  mspan // list of spans with a free object
	empty     mspan // list of spans with no free objects (or cached in an mcache)
}
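
// nonempty and empty are dummy list headers; mSpanList_Init is assumed to point
// a header back at itself, so the loops in mCentral_CacheSpan below walk s.next
// until they come back around to the header.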

// Initialize a single central free list.
func mCentral_Init(c *mcentral, sizeclass int32) {
	c.sizeclass = sizeclass
	mSpanList_Init(&c.nonempty)
	mSpanList_Init(&c.empty)
}
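
// A hedged sketch of how the heap is expected to wire these up (field names
// assumed from mheap): one mcentral per size class, initialized once at
// startup, roughly
//
//	for i := range mheap_.central {
//		mCentral_Init(&mheap_.central[i].mcentral, int32(i))
//	}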

// Allocate a span to use in an MCache.
func mCentral_CacheSpan(c *mcentral) *mspan {
	lock(&c.lock)
	sg := mheap_.sweepgen
retry:
	var s *mspan
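	// First try the nonempty list: claim and sweep a span that still needs
	// sweeping (sweepgen == sg-2), skip spans the background sweeper is
	// working on (sg-1), and take any already-swept span as is.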
	for s = c.nonempty.next; s != &c.nonempty; s = s.next {
		if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
			mSpanList_Remove(s)
			mSpanList_InsertBack(&c.empty, s)
			unlock(&c.lock)
			mSpan_Sweep(s, true)
			goto havespan
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by the background sweeper, skip it
			continue
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		mSpanList_Remove(s)
		mSpanList_InsertBack(&c.empty, s)
		unlock(&c.lock)
		goto havespan
	}

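	// No luck on the nonempty list. Try spans on the empty list that still
	// need sweeping: sweeping may turn up freed objects we can allocate from.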
	for s = c.empty.next; s != &c.empty; s = s.next {
		if s.sweepgen == sg-2 && cas(&s.sweepgen, sg-2, sg-1) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			mSpanList_Remove(s)
			// swept spans are at the end of the list
			mSpanList_InsertBack(&c.empty, s)
			unlock(&c.lock)
			mSpan_Sweep(s, true)
			if s.freelist.ptr() != nil {
				goto havespan
			}
			lock(&c.lock)
			// the span is still empty after the sweep;
			// it is already in the empty list, so just retry
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by the background sweeper, skip it
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	unlock(&c.lock)

	// Replenish central list if empty.
	s = mCentral_Grow(c)
	if s == nil {
		return nil
	}
	lock(&c.lock)
	mSpanList_InsertBack(&c.empty, s)
	unlock(&c.lock)

	// At this point s is a non-empty span, queued at the end of the empty list,
	// and c is unlocked.
havespan:
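	// cap is the total number of objects the span can hold;
	// n is how many of them are still free.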
	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.ref)
	if n == 0 {
		throw("empty span")
	}
	if s.freelist.ptr() == nil {
		throw("freelist empty")
	}
	s.incache = true
	return s
}
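
// A hedged sketch of the caller side (names assumed from mcache): when an
// MCache runs out of objects for a size class, its refill path looks roughly
// like
//
//	s := mCentral_CacheSpan(&mheap_.central[sizeclass].mcentral)
//	if s == nil {
//		throw("out of memory")
//	}
//	c.alloc[sizeclass] = s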

// Return span from an MCache.
func mCentral_UncacheSpan(c *mcentral, s *mspan) {
	lock(&c.lock)

	s.incache = false

	if s.ref == 0 {
		throw("uncaching full span")
	}

	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.ref)
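	// If the span still has free objects, put it back on the nonempty list
	// so future mCentral_CacheSpan calls can find it.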
	if n > 0 {
		mSpanList_Remove(s)
		mSpanList_Insert(&c.nonempty, s)
	}
	unlock(&c.lock)
}

// Free n objects from a span s back into the central free list c.
// Called during sweep.
// Returns true if the span was returned to the heap. Sets sweepgen to
// the latest generation.
// If preserve=true, don't return the span to the heap or relink it in the
// MCentral lists; the caller takes care of that.
func mCentral_FreeSpan(c *mcentral, s *mspan, n int32, start gclinkptr, end gclinkptr, preserve bool) bool {
	if s.incache {
		throw("freespan into cached span")
	}

	// Add the objects back to s's free list.
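	// start..end is the chain of n objects the sweeper found to be free;
	// splicing it onto the front of s.freelist is O(1) regardless of n.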
	wasempty := s.freelist.ptr() == nil
	end.ptr().next = s.freelist
	s.freelist = start
	s.ref -= uint16(n)

	if preserve {
		// preserve is set only when called from mCentral_CacheSpan above;
		// the span must be in the empty list.
		if s.next == nil {
			throw("can't preserve unlinked span")
		}
		atomicstore(&s.sweepgen, mheap_.sweepgen)
		return false
	}

	lock(&c.lock)

	// Move to nonempty if necessary.
	if wasempty {
		mSpanList_Remove(s)
		mSpanList_Insert(&c.nonempty, s)
	}

	// Delay updating sweepgen until here. This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above).
	atomicstore(&s.sweepgen, mheap_.sweepgen)

	if s.ref != 0 {
		unlock(&c.lock)
		return false
	}

	// s is completely freed, return it to the heap.
	mSpanList_Remove(s)
	s.needzero = 1
	s.freelist = 0
	unlock(&c.lock)
	heapBitsForSpan(s.base()).clearSpan(s.layout())
	mHeap_Free(&mheap_, s, 0)
	return true
}

// Fetch a new span from the heap and carve it into objects for the free list.
func mCentral_Grow(c *mcentral) *mspan {
	npages := uintptr(class_to_allocnpages[c.sizeclass])
	size := uintptr(class_to_size[c.sizeclass])
	n := (npages << _PageShift) / size

	s := mHeap_Alloc(&mheap_, npages, c.sizeclass, false, true)
	if s == nil {
		return nil
	}

	p := uintptr(s.start << _PageShift)
	s.limit = p + size*n
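	// Thread all n objects into a singly linked free list. head and tail
	// both start at the first object; each later object is appended at tail.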
	head := gclinkptr(p)
	tail := gclinkptr(p)
	// i==0 iteration already done
	for i := uintptr(1); i < n; i++ {
		p += size
		tail.ptr().next = gclinkptr(p)
		tail = gclinkptr(p)
	}
	if s.freelist.ptr() != nil {
		throw("freelist not empty")
	}
	tail.ptr().next = 0
	s.freelist = head
	heapBitsForSpan(s.base()).initSpan(s.layout())
	return s
}