Source file src/runtime/malloc.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator.
//
// This was originally based on tcmalloc, but has diverged quite a bit.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes, each of which
// has its own free set of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using a free bitmap.
//
// The allocator's data structures are:
//
//	fixalloc: a free-list allocator for fixed-size off-heap objects,
//		used to manage storage used by the allocator.
//	mheap: the malloc heap, managed at page (8192-byte) granularity.
//	mspan: a run of in-use pages managed by the mheap.
//	mcentral: collects all spans of a given size class.
//	mcache: a per-P cache of mspans with free space.
//	mstats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding mspan in this P's mcache.
//	   Scan the mspan's free bitmap to find a free slot.
//	   If there is a free slot, allocate it.
//	   This can all be done without acquiring a lock.
//
//	2. If the mspan has no free slots, obtain a new mspan
//	   from the mcentral's list of mspans of the required size
//	   class that have free space.
//	   Obtaining a whole span amortizes the cost of locking
//	   the mcentral.
//
//	3. If the mcentral's mspan list is empty, obtain a run
//	   of pages from the mheap to use for the mspan.
//
//	4. If the mheap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
//
// Sweeping an mspan and freeing objects on it proceeds up a similar
// hierarchy:
//
//	1. If the mspan is being swept in response to allocation, it
//	   is returned to the mcache to satisfy the allocation.
//
//	2. Otherwise, if the mspan still has allocated objects in it,
//	   it is placed on the mcentral free list for the mspan's size
//	   class.
//
//	3. Otherwise, if all objects in the mspan are free, the mspan's
//	   pages are returned to the mheap and the mspan is now dead.
//
// Allocating and freeing a large object uses the mheap
// directly, bypassing the mcache and mcentral.
//
// If mspan.needzero is false, then free object slots in the mspan are
// already zeroed. Otherwise if needzero is true, objects are zeroed as
// they are allocated. There are various benefits to delaying zeroing
// this way:
//
//	1. Stack frame allocation can avoid zeroing altogether.
//
//	2. It exhibits better temporal locality, since the program is
//	   probably about to write to the memory.
//
//	3. We don't zero pages that never get reused.
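
// For a rough sense of scale in step 1 above: a 33-byte allocation is
// rounded up to the 48-byte size class, and a one-page (8192-byte) span of
// that class holds 8192/48 = 170 object slots, handed out one free-bitmap
// slot at a time from the owning P's mcache (see sizeclasses.go for the
// full table of classes).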

// Virtual memory layout
//
// The heap consists of a set of arenas, which are 64MB on 64-bit and
// 4MB on 32-bit (heapArenaBytes). Each arena's start address is also
// aligned to the arena size.
//
// Each arena has an associated heapArena object that stores the
// metadata for that arena: the heap bitmap for all words in the arena
// and the span map for all pages in the arena. heapArena objects are
// themselves allocated off-heap.
//
// Since arenas are aligned, the address space can be viewed as a
// series of arena frames. The arena map (mheap_.arenas) maps from
// arena frame number to *heapArena, or nil for parts of the address
// space not backed by the Go heap. The arena map is structured as a
// two-level array consisting of a "L1" arena map and many "L2" arena
// maps; however, since arenas are large, on many architectures the
// arena map consists of a single, large L2 map.
//
// The arena map covers the entire possible address space, allowing
// the Go heap to use any part of the address space. The allocator
// attempts to keep arenas contiguous so that large spans (and hence
// large objects) can cross arenas.
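
// As a sketch of what this layout buys us (the helpers arenaIndex,
// arenaIdx.l1, and arenaIdx.l2 are defined in mheap.go, not in this file;
// this is only an approximation of what they compute), mapping an address
// to its arena metadata is just a few integer operations:
//
//	ai := arenaIdx((p - arenaBaseOffset) / heapArenaBytes) // arena frame number
//	ha := mheap_.arenas[ai.l1()][ai.l2()]                  // *heapArena, or nil
//
// On platforms with a single L2 map, ai.l1() is always 0 and the first
// index effectively compiles away.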

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/math"
	"runtime/internal/sys"
	"unsafe"
)

const (
	debugMalloc = false

	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift
	pageSize  = _PageSize
	pageMask  = _PageMask
	// By construction, single page spans of the smallest object class
	// have the most objects per span.
	maxObjsPerSpan = pageSize / 8

	concurrentSweep = _ConcurrentSweep

	_PageSize = 1 << _PageShift
	_PageMask = _PageSize - 1

	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
	_64bit = 1 << (^uintptr(0) >> 63) / 2

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
	_TinySize      = 16
	_TinySizeClass = int8(2)

	_FixAllocChunk = 16 << 10 // Chunk size for FixAlloc

	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//	OS               | FixedStack | NumStackOrders
	//	-----------------+------------+---------------
	//	linux/darwin/bsd | 2KB        | 4
	//	windows/32       | 4KB        | 3
	//	windows/64       | 8KB        | 2
	//	plan9            | 4KB        | 3
	_NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9

	// heapAddrBits is the number of bits in a heap address. On
	// amd64, addresses are sign-extended beyond heapAddrBits. On
	// other arches, they are zero-extended.
	//
	// On most 64-bit platforms, we limit this to 48 bits based on a
	// combination of hardware and OS limitations.
	//
	// amd64 hardware limits addresses to 48 bits, sign-extended
	// to 64 bits. Addresses where the top 16 bits are not either
	// all 0 or all 1 are "non-canonical" and invalid. Because of
	// these "negative" addresses, we offset addresses by 1<<47
	// (arenaBaseOffset) on amd64 before computing indexes into
	// the heap arenas index. In 2017, amd64 hardware added
	// support for 57 bit addresses; however, currently only Linux
	// supports this extension and the kernel will never choose an
	// address above 1<<47 unless mmap is called with a hint
	// address above 1<<47 (which we never do).
	//
	// arm64 hardware (as of ARMv8) limits user addresses to 48
	// bits, in the range [0, 1<<48).
	//
	// ppc64, mips64, and s390x support arbitrary 64 bit addresses
	// in hardware. On Linux, Go leans on stricter OS limits. Based
	// on Linux's processor.h, the user address space is limited as
	// follows on 64-bit architectures:
	//
	//	Architecture  Name              Maximum Value (exclusive)
	//	---------------------------------------------------------------------
	//	amd64         TASK_SIZE_MAX     0x007ffffffff000 (47 bit addresses)
	//	arm64         TASK_SIZE_64      0x01000000000000 (48 bit addresses)
	//	ppc64{,le}    TASK_SIZE_USER64  0x00400000000000 (46 bit addresses)
	//	mips64{,le}   TASK_SIZE64       0x00010000000000 (40 bit addresses)
	//	s390x         TASK_SIZE         1<<64            (64 bit addresses)
	//
	// These limits may increase over time, but are currently at
	// most 48 bits except on s390x. On all architectures, Linux
	// starts placing mmap'd regions at addresses that are
	// significantly below 48 bits, so even if it's possible to
	// exceed Go's 48 bit limit, it's extremely unlikely in
	// practice.
	//
	// On 32-bit platforms, we accept the full 32-bit address
	// space because doing so is cheap.
	// mips32 only has access to the low 2GB of virtual memory, so
	// we further limit it to 31 bits.
	//
	// On ios/arm64, although 64-bit pointers are presumably
	// available, pointers are truncated to 33 bits. Furthermore,
	// only the top 4 GiB of the address space are actually available
	// to the application, but we allow the whole 33 bits anyway for
	// simplicity.
	// TODO(mknyszek): Consider limiting it to 32 bits and using
	// arenaBaseOffset to offset into the top 4 GiB.
	//
	// WebAssembly currently has a limit of 4GB linear memory.
	heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosIos*sys.GoarchArm64))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 33*sys.GoosIos*sys.GoarchArm64

	// maxAlloc is the maximum size of an allocation. On 64-bit,
	// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
	// 32-bit, however, this is one less than 1<<32 because the
	// number of bytes in the address space doesn't actually fit
	// in a uintptr.
	maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1

	// The number of bits in a heap address, the size of heap
	// arenas, and the L1 and L2 arena map sizes are related by
	//
	//	(1 << addr bits) = arena size * L1 entries * L2 entries
	//
	// Currently, we balance these as follows:
	//
	//	Platform        Addr bits  Arena size  L1 entries  L2 entries
	//	--------------  ---------  ----------  ----------  -----------
	//	*/64-bit           48         64MB          1        4M (32MB)
	//	windows/64-bit     48          4MB         64        1M  (8MB)
	//	ios/arm64          33          4MB          1      2048  (8KB)
	//	*/32-bit           32          4MB          1      1024  (4KB)
	//	*/mips(le)         31          4MB          1       512  (2KB)
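	//
	// As a worked check of that relation, on linux/amd64 (using the
	// constants defined below): heapAddrBits = 48, heapArenaBytes = 1<<26
	// (64MB), arenaL1Bits = 0 and arenaL2Bits = 48 - 26 - 0 = 22, so the
	// single L2 map has 1<<22 = 4M entries and
	// (1<<26) * 1 * (1<<22) = 1<<48, matching the table above.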

	// heapArenaBytes is the size of a heap arena. The heap
	// consists of mappings of size heapArenaBytes, aligned to
	// heapArenaBytes. The initial heap mapping is one arena.
	//
	// This is currently 64MB on 64-bit non-Windows and 4MB on
	// 32-bit and on Windows. We use smaller arenas on Windows
	// because all committed memory is charged to the process,
	// even if it's not touched. Hence, for processes with small
	// heaps, the mapped arena space needs to be commensurate.
	// This is particularly important with the race detector,
	// since it significantly amplifies the cost of committed
	// memory.
	heapArenaBytes = 1 << logHeapArenaBytes

	// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
	// prefer using heapArenaBytes where possible (we need the
	// constant to compute some other constants).
	logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoarchWasm)*(1-sys.GoosIos*sys.GoarchArm64)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (2+20)*sys.GoarchWasm + (2+20)*sys.GoosIos*sys.GoarchArm64

	// heapArenaBitmapBytes is the size of each heap arena's bitmap.
	heapArenaBitmapBytes = heapArenaBytes / (sys.PtrSize * 8 / 2)

	pagesPerArena = heapArenaBytes / pageSize

	// arenaL1Bits is the number of bits of the arena number
	// covered by the first level arena map.
	//
	// This number should be small, since the first level arena
	// map requires PtrSize*(1<<arenaL1Bits) of space in the
	// binary's BSS. It can be zero, in which case the first level
	// index is effectively unused. There is a performance benefit
	// to this, since the generated code can be more efficient,
	// but it comes at the cost of having a large L2 mapping.
	//
	// We use the L1 map on 64-bit Windows because the arena size
	// is small, but the address space is still 48 bits, and
	// there's a high cost to having a large L2.
	arenaL1Bits = 6 * (_64bit * sys.GoosWindows)

	// arenaL2Bits is the number of bits of the arena number
	// covered by the second level arena index.
	//
	// The size of each arena map allocation is proportional to
	// 1<<arenaL2Bits, so it's important that this not be too
	// large. 48 bits leads to 32MB arena index allocations, which
	// is about the practical threshold.
	arenaL2Bits = heapAddrBits - logHeapArenaBytes - arenaL1Bits

	// arenaL1Shift is the number of bits to shift an arena frame
	// number by to compute an index into the first level arena map.
	arenaL1Shift = arenaL2Bits

	// arenaBits is the total bits in a combined arena map index.
	// This is split between the index into the L1 arena map and
	// the L2 arena map.
	arenaBits = arenaL1Bits + arenaL2Bits

	// arenaBaseOffset is the pointer value that corresponds to
	// index 0 in the heap arena map.
	//
	// On amd64, the address space is 48 bits, sign extended to 64
	// bits. This offset lets us handle "negative" addresses (or
	// high addresses if viewed as unsigned).
	//
	// On aix/ppc64, this offset allows us to keep heapAddrBits at
	// 48. Otherwise, it would have to be 60 in order to handle mmap
	// addresses (in the range 0x0a00000000000000 - 0x0afffffffffffff),
	// but in that case the memory reserved in (s *pageAlloc).init for
	// chunks causes significant slowdowns.
	//
	// On other platforms, the user address space is contiguous
	// and starts at 0, so no offset is necessary.
	arenaBaseOffset = 0xffff800000000000*sys.GoarchAmd64 + 0x0a00000000000000*sys.GoosAix
	// A typed version of this constant that will make it into DWARF (for viewcore).
	arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 cpus.
	_MaxGcproc = 32

	// minLegalPointer is the smallest possible legal pointer.
	// This is the smallest possible architectural page size,
	// since we assume that the first page is never mapped.
	//
	// This should agree with minZeroPage in the compiler.
	minLegalPointer uintptr = 4096
)

// physPageSize is the size in bytes of the OS's physical pages.
// Mapping and unmapping operations must be done at multiples of
// physPageSize.
//
// This must be set by the OS init code (typically in osinit) before
// mallocinit.
var physPageSize uintptr

// physHugePageSize is the size in bytes of the OS's default physical huge
// page size whose allocation is opaque to the application. It is assumed
// and verified to be a power of two.
//
// If set, this must be set by the OS init code (typically in osinit) before
// mallocinit. However, setting it at all is optional, and leaving the default
// value is always safe (though potentially less efficient).
//
// Since physHugePageSize is always assumed to be a power of two,
// physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
// The purpose of physHugePageShift is to avoid doing divisions in
// performance critical functions.
var (
	physHugePageSize  uintptr
	physHugePageShift uint
)

// OS memory management abstraction layer
//
// Regions of the address space managed by the runtime may be in one of four
// states at any given time:
// 1) None - Unreserved and unmapped, the default state of any region.
// 2) Reserved - Owned by the runtime, but accessing it would cause a fault.
//               Does not count against the process' memory footprint.
// 3) Prepared - Reserved, intended not to be backed by physical memory (though
//               an OS may implement this lazily). Can transition efficiently to
//               Ready. Accessing memory in such a region is undefined (may
//               fault, may give back unexpected zeroes, etc.).
// 4) Ready - may be accessed safely.
//
// This set of states is more than is strictly necessary to support all the
// currently supported platforms. One could get by with just None, Reserved, and
// Ready. However, the Prepared state gives us flexibility for performance
// purposes. For example, on POSIX-y operating systems, Reserved is usually a
// private anonymous mmap'd region with PROT_NONE set, and to transition
// to Ready would require setting PROT_READ|PROT_WRITE. However the
// underspecification of Prepared lets us use just MADV_FREE to transition from
// Ready to Prepared. Thus with the Prepared state we can set the permission
// bits just once early on, and we can efficiently tell the OS that it's free to
// take pages away from us when we don't strictly need them.
//
// For each OS there is a common set of helpers defined that transition
// memory regions between these states. The helpers are as follows:
//
// sysAlloc transitions an OS-chosen region of memory from None to Ready.
// More specifically, it obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte. This memory is always immediately available for use.
//
// sysFree transitions a memory region from any state to None. Therefore, it
// returns memory unconditionally. It is used if an out-of-memory error has been
// detected midway through an allocation or to carve out an aligned section of
// the address space. sysFree may be a no-op only if sysReserve always
// returns a memory region aligned to the heap allocator's alignment
// restrictions.
//
// sysReserve transitions a memory region from None to Reserved. It reserves
// address space in such a way that it would cause a fatal fault upon access
// (either via permissions or not committing the memory). Such a reservation is
// thus never backed by physical memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but sysReserve can still choose another
// location if that one is unavailable.
// NOTE: sysReserve returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysReserve.
//
// sysMap transitions a memory region from Reserved to Prepared. It ensures the
// memory region can be efficiently transitioned to Ready.
//
// sysUsed transitions a memory region from Prepared to Ready. It notifies the
// operating system that the memory region is needed and ensures that the region
// may be safely accessed. This is typically a no-op on systems that don't have
// an explicit commit step and hard over-commit limits, but is critical on
// Windows, for example.
//
// sysUnused transitions a memory region from Ready to Prepared. It notifies the
// operating system that the physical pages backing this memory region are no
// longer needed and can be reused for other purposes. The contents of a
// sysUnused memory region are considered forfeit and the region must not be
// accessed again until sysUsed is called.
//
// sysFault transitions a memory region from Ready or Prepared to Reserved. It
// marks a region such that it will always fault if accessed. Used only for
// debugging the runtime.
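
// As an illustrative sketch of how these helpers compose (not a real call
// site; see mheap.go and the mem_*.go files for the actual uses), a chunk of
// heap address space typically moves through the states like this:
//
//	v := sysReserve(nil, n)          // None -> Reserved
//	sysMap(v, n, &memstats.heap_sys) // Reserved -> Prepared
//	sysUsed(v, n)                    // Prepared -> Ready; safe to access
//	...
//	sysUnused(v, n)                  // Ready -> Prepared; OS may reclaim pages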

func mallocinit() {
	if class_to_size[_TinySizeClass] != _TinySize {
		throw("bad TinySizeClass")
	}

	testdefersizes()

	if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 {
		// heapBits expects modular arithmetic on bitmap
		// addresses to work.
		throw("heapArenaBitmapBytes not a power of 2")
	}

	// Copy class sizes out for statistics table.
	for i := range class_to_size {
		memstats.by_size[i].size = uint32(class_to_size[i])
	}

	// Check physPageSize.
	if physPageSize == 0 {
		// The OS init code failed to fetch the physical page size.
439 throw("failed to get system page size") 440 } 441 if physPageSize > maxPhysPageSize { 442 print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n") 443 throw("bad system page size") 444 } 445 if physPageSize < minPhysPageSize { 446 print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n") 447 throw("bad system page size") 448 } 449 if physPageSize&(physPageSize-1) != 0 { 450 print("system page size (", physPageSize, ") must be a power of 2\n") 451 throw("bad system page size") 452 } 453 if physHugePageSize&(physHugePageSize-1) != 0 { 454 print("system huge page size (", physHugePageSize, ") must be a power of 2\n") 455 throw("bad system huge page size") 456 } 457 if physHugePageSize > maxPhysHugePageSize { 458 // physHugePageSize is greater than the maximum supported huge page size. 459 // Don't throw here, like in the other cases, since a system configured 460 // in this way isn't wrong, we just don't have the code to support them. 461 // Instead, silently set the huge page size to zero. 462 physHugePageSize = 0 463 } 464 if physHugePageSize != 0 { 465 // Since physHugePageSize is a power of 2, it suffices to increase 466 // physHugePageShift until 1<<physHugePageShift == physHugePageSize. 467 for 1<<physHugePageShift != physHugePageSize { 468 physHugePageShift++ 469 } 470 } 471 if pagesPerArena%pagesPerSpanRoot != 0 { 472 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n") 473 throw("bad pagesPerSpanRoot") 474 } 475 if pagesPerArena%pagesPerReclaimerChunk != 0 { 476 print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n") 477 throw("bad pagesPerReclaimerChunk") 478 } 479 480 // Initialize the heap. 481 mheap_.init() 482 mcache0 = allocmcache() 483 lockInit(&gcBitsArenas.lock, lockRankGcBitsArenas) 484 lockInit(&proflock, lockRankProf) 485 lockInit(&globalAlloc.mutex, lockRankGlobalAlloc) 486 487 // Create initial arena growth hints. 488 if sys.PtrSize == 8 { 489 // On a 64-bit machine, we pick the following hints 490 // because: 491 // 492 // 1. Starting from the middle of the address space 493 // makes it easier to grow out a contiguous range 494 // without running in to some other mapping. 495 // 496 // 2. This makes Go heap addresses more easily 497 // recognizable when debugging. 498 // 499 // 3. Stack scanning in gccgo is still conservative, 500 // so it's important that addresses be distinguishable 501 // from other data. 502 // 503 // Starting at 0x00c0 means that the valid memory addresses 504 // will begin 0x00c0, 0x00c1, ... 505 // In little-endian, that's c0 00, c1 00, ... None of those are valid 506 // UTF-8 sequences, and they are otherwise as far away from 507 // ff (likely a common byte) as possible. If that fails, we try other 0xXXc0 508 // addresses. An earlier attempt to use 0x11f8 caused out of memory errors 509 // on OS X during thread allocations. 0x00c0 causes conflicts with 510 // AddressSanitizer which reserves all memory up to 0x0100. 511 // These choices reduce the odds of a conservative garbage collector 512 // not collecting memory because some non-pointer block of memory 513 // had a bit pattern that matched a memory address. 
		for i := 0x7f; i >= 0; i-- {
			var p uintptr
			switch {
			case raceenabled:
				// The TSAN runtime requires the heap
				// to be in the range [0x00c000000000,
				// 0x00e000000000).
				p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
				if p >= uintptrMask&0x00e000000000 {
					continue
				}
			case GOARCH == "arm64" && GOOS == "ios":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			case GOOS == "aix":
				if i == 0 {
					// We don't use addresses directly after 0x0A00000000000000
					// to avoid collisions with other mmaps done by non-Go programs.
					continue
				}
				p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
			hint.addr = p
			hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
		}
	} else {
		// On a 32-bit machine, we're much more concerned
		// about keeping the usable heap contiguous.
		// Hence:
		//
		// 1. We reserve space for all heapArenas up front so
		// they don't get interleaved with the heap. They're
		// ~258MB, so this isn't too bad. (We could reserve a
		// smaller amount of space up front if this is a
		// problem.)
		//
		// 2. We hint the heap to start right above the end of
		// the binary so we have the best chance of keeping it
		// contiguous.
		//
		// 3. We try to stake out a reasonably large initial
		// heap reservation.

		const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
		meta := uintptr(sysReserve(nil, arenaMetaSize))
		if meta != 0 {
			mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
		}

		// We want to start the arena low, but if we're linked
		// against C code, it's possible global constructors
		// have called malloc and adjusted the process' brk.
		// Query the brk so we can avoid trying to map the
		// region over it (which will cause the kernel to put
		// the region somewhere else, likely at a high
		// address).
		procBrk := sbrk0()

		// If we ask for the end of the data segment but the
		// operating system requires a little more space
		// before we can start allocating, it will give out a
		// slightly higher pointer. Except QEMU, which is
		// buggy, as usual: it won't adjust the pointer
		// upward. So adjust it upward a little bit ourselves:
		// 1/4 MB to get away from the running binary image.
		p := firstmoduledata.end
		if p < procBrk {
			p = procBrk
		}
		if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
			p = mheap_.heapArenaAlloc.end
		}
		p = alignUp(p+(256<<10), heapArenaBytes)
		// Because we're worried about fragmentation on
		// 32-bit, we try to make a large initial reservation.
		arenaSizes := []uintptr{
			512 << 20,
			256 << 20,
			128 << 20,
		}
		for _, arenaSize := range arenaSizes {
			a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
			if a != nil {
				mheap_.arena.init(uintptr(a), size, false)
				p = mheap_.arena.end // For hint below
				break
			}
		}
		hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
		hint.addr = p
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
	}
}

// sysAlloc allocates heap arena space for at least n bytes. The
// returned pointer is always heapArenaBytes-aligned and backed by
// h.arenas metadata. The returned size is always a multiple of
// heapArenaBytes. sysAlloc returns nil on failure.
// There is no corresponding free function.
//
// sysAlloc returns a memory region in the Reserved state. This region must
// be transitioned to Prepared and then Ready before use.
//
// h must be locked.
func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
	assertLockHeld(&h.lock)

	n = alignUp(n, heapArenaBytes)

	// First, try the arena pre-reservation.
	v = h.arena.alloc(n, heapArenaBytes, &memstats.heap_sys)
	if v != nil {
		size = n
		goto mapped
	}

	// Try to grow the heap at a hint address.
	for h.arenaHints != nil {
		hint := h.arenaHints
		p := hint.addr
		if hint.down {
			p -= n
		}
		if p+n < p {
			// We can't use this, so don't ask.
			v = nil
		} else if arenaIndex(p+n-1) >= 1<<arenaBits {
			// Outside addressable heap. Can't use.
			v = nil
		} else {
			v = sysReserve(unsafe.Pointer(p), n)
		}
		if p == uintptr(v) {
			// Success. Update the hint.
			if !hint.down {
				p += n
			}
			hint.addr = p
			size = n
			break
		}
		// Failed. Discard this hint and try the next.
		//
		// TODO: This would be cleaner if sysReserve could be
		// told to only return the requested address. In
		// particular, this is already how Windows behaves, so
		// it would simplify things there.
		if v != nil {
			sysFree(v, n, nil)
		}
		h.arenaHints = hint.next
		h.arenaHintAlloc.free(unsafe.Pointer(hint))
	}

	if size == 0 {
		if raceenabled {
			// The race detector assumes the heap lives in
			// [0x00c000000000, 0x00e000000000), but we
			// just ran out of hints in this region. Give
			// a nice failure.
			throw("too many address space collisions for -race mode")
		}

		// All of the hints failed, so we'll take any
		// (sufficiently aligned) address the kernel will give
		// us.
		v, size = sysReserveAligned(nil, n, heapArenaBytes)
		if v == nil {
			return nil, 0
		}

		// Create new hints for extending this region.
		hint := (*arenaHint)(h.arenaHintAlloc.alloc())
		hint.addr, hint.down = uintptr(v), true
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
		hint = (*arenaHint)(h.arenaHintAlloc.alloc())
		hint.addr = uintptr(v) + size
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
	}

	// Check for bad pointers or pointers we can't use.
	{
		var bad string
		p := uintptr(v)
		if p+size < p {
			bad = "region exceeds uintptr range"
		} else if arenaIndex(p) >= 1<<arenaBits {
			bad = "base outside usable address space"
		} else if arenaIndex(p+size-1) >= 1<<arenaBits {
			bad = "end outside usable address space"
		}
		if bad != "" {
			// This should be impossible on most architectures,
			// but it would be really confusing to debug.
			print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
			throw("memory reservation exceeds address space limit")
		}
	}

	if uintptr(v)&(heapArenaBytes-1) != 0 {
		throw("misrounded allocation in sysAlloc")
	}

mapped:
	// Create arena metadata.
	for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
		l2 := h.arenas[ri.l1()]
		if l2 == nil {
			// Allocate an L2 arena map.
			l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), sys.PtrSize, nil))
			if l2 == nil {
				throw("out of memory allocating heap arena map")
			}
			atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
		}

		if l2[ri.l2()] != nil {
			throw("arena already initialized")
		}
		var r *heapArena
		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
		if r == nil {
			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
			if r == nil {
				throw("out of memory allocating heap arena metadata")
			}
		}

		// Add the arena to the arenas list.
		if len(h.allArenas) == cap(h.allArenas) {
			size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize
			if size == 0 {
				size = physPageSize
			}
			newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gcMiscSys))
			if newArray == nil {
				throw("out of memory allocating allArenas")
			}
			oldSlice := h.allArenas
			*(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)}
			copy(h.allArenas, oldSlice)
			// Do not free the old backing array because
			// there may be concurrent readers. Since we
			// double the array each time, this can lead
			// to at most 2x waste.
		}
		h.allArenas = h.allArenas[:len(h.allArenas)+1]
		h.allArenas[len(h.allArenas)-1] = ri

		// Store atomically just in case an object from the
		// new heap arena becomes visible before the heap lock
		// is released (which shouldn't happen, but there's
		// little downside to this).
		atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
	}

	// Tell the race detector about the new heap memory.
	if raceenabled {
		racemapshadow(v, size)
	}

	return
}

// sysReserveAligned is like sysReserve, but the returned pointer is
// aligned to align bytes. It may reserve either n or n+align bytes,
// so it returns the size that was reserved.
func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
	// Since the alignment is rather large in uses of this
	// function, we're not likely to get it by chance, so we ask
	// for a larger region and remove the parts we don't need.
	retries := 0
retry:
	p := uintptr(sysReserve(v, size+align))
	switch {
	case p == 0:
		return nil, 0
	case p&(align-1) == 0:
		// We got lucky and got an aligned region, so we can
		// use the whole thing.
		return unsafe.Pointer(p), size + align
	case GOOS == "windows":
		// On Windows we can't release pieces of a
		// reservation, so we release the whole thing and
		// re-reserve the aligned sub-region. This may race,
		// so we may have to try again.
		sysFree(unsafe.Pointer(p), size+align, nil)
		p = alignUp(p, align)
		p2 := sysReserve(unsafe.Pointer(p), size)
		if p != uintptr(p2) {
			// Must have raced. Try again.
			sysFree(p2, size, nil)
			if retries++; retries == 100 {
				throw("failed to allocate aligned heap memory; too many retries")
			}
			goto retry
		}
		// Success.
		return p2, size
	default:
		// Trim off the unaligned parts.
		pAligned := alignUp(p, align)
		sysFree(unsafe.Pointer(p), pAligned-p, nil)
		end := pAligned + size
		endLen := (p + size + align) - end
		if endLen > 0 {
			sysFree(unsafe.Pointer(end), endLen, nil)
		}
		return unsafe.Pointer(pAligned), size
	}
}

// base address for all 0-byte allocations
var zerobase uintptr

// nextFreeFast returns the next free object if one is quickly available.
// Otherwise it returns 0.
func nextFreeFast(s *mspan) gclinkptr {
	theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache?
	if theBit < 64 {
		result := s.freeindex + uintptr(theBit)
		if result < s.nelems {
			freeidx := result + 1
			if freeidx%64 == 0 && freeidx != s.nelems {
				return 0
			}
			s.allocCache >>= uint(theBit + 1)
			s.freeindex = freeidx
			s.allocCount++
			return gclinkptr(result*s.elemsize + s.base())
		}
	}
	return 0
}

// nextFree returns the next free object from the cached span if one is available.
// Otherwise it refills the cache with a span with an available object and
// returns that object along with a flag indicating that this was a heavy
// weight allocation. If it is a heavy weight allocation the caller must
// determine whether a new GC cycle needs to be started or if the GC is active
// whether this goroutine needs to assist the GC.
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
	s = c.alloc[spc]
	shouldhelpgc = false
	freeIndex := s.nextFreeIndex()
	if freeIndex == s.nelems {
		// The span is full.
		if uintptr(s.allocCount) != s.nelems {
			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
		}
		c.refill(spc)
		shouldhelpgc = true
		s = c.alloc[spc]

		freeIndex = s.nextFreeIndex()
	}

	if freeIndex >= s.nelems {
		throw("freeIndex is not valid")
	}

	v = gclinkptr(freeIndex*s.elemsize + s.base())
	s.allocCount++
	if uintptr(s.allocCount) > s.nelems {
		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
		throw("s.allocCount > s.nelems")
	}
	return
}
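
// nextFreeFast above leans on s.allocCache: a 64-bit window of the span's
// allocation bitmap, stored complemented and shifted so that bit 0
// corresponds to s.freeindex and a 1 bit means "slot is free". A rough
// sketch of the lookup it performs (the cache refill logic lives in
// mbitmap.go):
//
//	bit := sys.Ctz64(s.allocCache) // index of the lowest 1 bit = next free slot
//	obj := s.base() + (s.freeindex+uintptr(bit))*s.elemsize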

// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if gcphase == _GCmarktermination {
		throw("mallocgc called with gcphase == _GCmarktermination")
	}

	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}

	if debug.malloc {
		if debug.sbrk != 0 {
			align := uintptr(16)
			if typ != nil {
				// TODO(austin): This should be just
				//   align = uintptr(typ.align)
				// but that's only 4 on 32-bit platforms,
				// even if there's a uint64 field in typ (see #599).
				// This causes 64-bit atomic accesses to panic.
				// Hence, we use stricter alignment that matches
				// the normal allocator better.
				if size&7 == 0 {
					align = 8
				} else if size&3 == 0 {
					align = 4
				} else if size&1 == 0 {
					align = 2
				} else {
					align = 1
				}
			}
			return persistentalloc(size, align, &memstats.other_sys)
		}

		if inittrace.active && inittrace.id == getg().goid {
			// Init functions are executed sequentially in a single goroutine.
			inittrace.allocs += 1
		}
	}

	// assistG is the G to charge for this allocation, or nil if
	// GC is not currently active.
	var assistG *g
	if gcBlackenEnabled != 0 {
		// Charge the current user G for this allocation.
		assistG = getg()
		if assistG.m.curg != nil {
			assistG = assistG.m.curg
		}
		// Charge the allocation against the G. We'll account
		// for internal fragmentation at the end of mallocgc.
		assistG.gcAssistBytes -= int64(size)

		if assistG.gcAssistBytes < 0 {
			// This G is in debt. Assist the GC to correct
			// this before allocating. This must happen
			// before disabling preemption.
			gcAssistAlloc(assistG)
		}
	}

	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	shouldhelpgc := false
	dataSize := size
	c := getMCache()
	if c == nil {
		throw("mallocgc called without a P or outside bootstrapping")
	}
	var span *mspan
	var x unsafe.Pointer
	noscan := typ == nil || typ.ptrdata == 0
	// In some cases block zeroing can profitably (for latency reduction purposes)
	// be delayed till preemption is possible; isZeroed tracks that state.
	isZeroed := true
	if size <= maxSmallSize {
		if noscan && size < maxTinySize {
			// Tiny allocator.
			//
			// Tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be noscan (don't have pointers), this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// Size of the memory block used for combining (maxTinySize) is tunable.
			// Current setting is 16 bytes, which relates to 2x worst case memory
			// wastage (when all but one subobject is unreachable).
			// 8 bytes would result in no wastage at all, but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst case wastage.
			// The best case winning is 8x regardless of block size.
			//
			// Objects obtained from tiny allocator must not be freed explicitly.
			// So when an object will be freed explicitly, we ensure that
			// its size >= maxTinySize.
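			//
			// For example (illustrative only), two back-to-back noscan
			// allocations of 8 bytes each can share one 16-byte tiny
			// block; that block's memory is reclaimed only once both
			// objects are unreachable.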
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in such a case it allows setting
			// finalizers for an inner byte of a memory block.
			//
			// The main targets of tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces number of allocations by ~12% and
			// reduces heap size by ~20%.
			off := c.tinyoffset
			// Align tiny pointer for required (conservative) alignment.
			if size&7 == 0 {
				off = alignUp(off, 8)
			} else if sys.PtrSize == 4 && size == 12 {
				// Conservatively align 12-byte objects to 8 bytes on 32-bit
				// systems so that an object whose first field is a 64-bit
				// value is aligned to 8 bytes and does not cause a fault on
				// atomic access. See issue 37262.
				// TODO(mknyszek): Remove this workaround if/when issue 36606
				// is resolved.
				off = alignUp(off, 8)
			} else if size&3 == 0 {
				off = alignUp(off, 4)
			} else if size&1 == 0 {
				off = alignUp(off, 2)
			}
			if off+size <= maxTinySize && c.tiny != 0 {
				// The object fits into existing tiny block.
				x = unsafe.Pointer(c.tiny + off)
				c.tinyoffset = off + size
				c.tinyAllocs++
				mp.mallocing = 0
				releasem(mp)
				return x
			}
			// Allocate a new maxTinySize block.
			span = c.alloc[tinySpanClass]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(tinySpanClass)
			}
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on amount of remaining free space.
			if !raceenabled && (size < c.tinyoffset || c.tiny == 0) {
				// Note: disabled when race detector is on, see comment near end of this function.
				c.tiny = uintptr(x)
				c.tinyoffset = size
			}
			size = maxTinySize
		} else {
			var sizeclass uint8
			if size <= smallSizeMax-8 {
				sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
			} else {
				sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
			}
			size = uintptr(class_to_size[sizeclass])
			spc := makeSpanClass(sizeclass, noscan)
			span = c.alloc[spc]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(spc)
			}
			x = unsafe.Pointer(v)
			if needzero && span.needzero != 0 {
				memclrNoHeapPointers(unsafe.Pointer(v), size)
			}
		}
	} else {
		shouldhelpgc = true
		// For large allocations, keep track of zeroed state so that
		// bulk zeroing can happen later in a preemptible context.
		span, isZeroed = c.allocLarge(size, needzero && !noscan, noscan)
		span.freeindex = 1
		span.allocCount = 1
		x = unsafe.Pointer(span.base())
		size = span.elemsize
	}

	var scanSize uintptr
	if !noscan {
		// If allocating a defer+arg block, now that we've picked a malloc size
		// large enough to hold everything, cut the "asked for" size down to
		// just the defer header, so that the GC bitmap will record the arg block
		// as containing nothing at all (as if it were unused space at the end of
		// a malloc block caused by size rounding).
		// The defer arg areas are scanned as part of scanstack.
		if typ == deferType {
			dataSize = unsafe.Sizeof(_defer{})
		}
		heapBitsSetType(uintptr(x), size, dataSize, typ)
		if dataSize > typ.size {
			// Array allocation. If there are any
			// pointers, GC has to scan to the last
			// element.
			if typ.ptrdata != 0 {
				scanSize = dataSize - typ.size + typ.ptrdata
			}
		} else {
			scanSize = typ.ptrdata
		}
		c.scanAlloc += scanSize
	}
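
	// To make the scanSize computation above concrete (a hypothetical
	// example, not code from this file): for an array allocation of 4
	// elements of a struct laid out like {p *T; buf [24]byte}, typ.size is
	// 32, typ.ptrdata is 8, and dataSize is 128, so
	// scanSize = 128 - 32 + 8 = 104: the GC only needs to scan through the
	// pointer-bearing prefix of the last element.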

	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	// Allocate black during GC.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase != _GCoff {
		gcmarknewobject(span, uintptr(x), size, scanSize)
	}

	if raceenabled {
		racemalloc(x, size)
	}

	if msanenabled {
		msanmalloc(x, size)
	}

	if rate := MemProfileRate; rate > 0 {
		// Note cache c only valid while m acquired; see #47302
		if rate != 1 && size < c.nextSample {
			c.nextSample -= size
		} else {
			profilealloc(mp, x, size)
		}
	}
	mp.mallocing = 0
	releasem(mp)

	// Pointerfree data can be zeroed late in a context where preemption can occur.
	// x will keep the memory alive.
	if !isZeroed && needzero {
		memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
	}

	if debug.malloc {
		if debug.allocfreetrace != 0 {
			tracealloc(x, size, typ)
		}

		if inittrace.active && inittrace.id == getg().goid {
			// Init functions are executed sequentially in a single goroutine.
			inittrace.bytes += uint64(size)
		}
	}

	if assistG != nil {
		// Account for internal fragmentation in the assist
		// debt now that we know it.
		assistG.gcAssistBytes -= int64(size - dataSize)
	}

	if shouldhelpgc {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	if raceenabled && noscan && dataSize < maxTinySize {
		// Pad tinysize allocations so they are aligned with the end
		// of the tinyalloc region. This ensures that any arithmetic
		// that goes off the top end of the object will be detectable
		// by checkptr (issue 38872).
		// Note that we disable tinyalloc when raceenabled for this to work.
		// TODO: This padding is only performed when the race detector
		// is enabled. It would be nice to enable it if any package
		// was compiled with checkptr, but there's no easy way to
		// detect that (especially at compile time).
		// TODO: enable this padding for all allocations, not just
		// tinyalloc ones. It's tricky because of pointer maps.
		// Maybe just all noscan objects?
		x = add(x, size-dataSize)
	}

	return x
}
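
// To trace one small allocation end to end (an illustrative walk-through,
// not additional behavior): a statement like
//
//	p := new(int64)
//
// is lowered by the compiler to a call to newobject (defined below), which
// calls mallocgc(8, typ, true). int64 contains no pointers, so the
// allocation is noscan, and since 8 < maxTinySize it is served by the tiny
// allocator out of the current P's mcache without taking any locks.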

// memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
// on chunks of the buffer to be zeroed, with opportunities for preemption
// along the way. memclrNoHeapPointers contains no safepoints and also
// cannot be preemptively scheduled, so this provides a still-efficient
// block copy that can also be preempted on a reasonable granularity.
//
// Use this with care; if the data being cleared is tagged to contain
// pointers, this allows the GC to run before it is all cleared.
func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
	v := uintptr(x)
	// got this from benchmarking. 128k is too small, 512k is too large.
	const chunkBytes = 256 * 1024
	vsize := v + size
	for voff := v; voff < vsize; voff = voff + chunkBytes {
		if getg().preempt {
			// may hold locks, e.g., profiling
			goschedguarded()
		}
		// clear min(avail, lump) bytes
		n := vsize - voff
		if n > chunkBytes {
			n = chunkBytes
		}
		memclrNoHeapPointers(unsafe.Pointer(voff), n)
	}
}

// implementation of new builtin
// compiler (both frontend and SSA backend) knows the signature
// of this function
func newobject(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)
}

//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)
}

//go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)
}

// newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n int) unsafe.Pointer {
	if n == 1 {
		return mallocgc(typ.size, typ, true)
	}
	mem, overflow := math.MulUintptr(typ.size, uintptr(n))
	if overflow || mem > maxAlloc || n < 0 {
		panic(plainError("runtime: allocation size out of range"))
	}
	return mallocgc(mem, typ, true)
}

//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}

func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	c := getMCache()
	if c == nil {
		throw("profilealloc called without a P or outside bootstrapping")
	}
	c.nextSample = nextSample()
	mProf_Malloc(x, size)
}

// nextSample returns the next sampling point for heap profiling. The goal is
// to sample allocations on average every MemProfileRate bytes, but with a
// completely random distribution over the allocation timeline; this
// corresponds to a Poisson process with parameter MemProfileRate. In Poisson
// processes, the distance between two samples follows the exponential
// distribution (with mean MemProfileRate), so the best return value is a
// random number taken from an exponential distribution whose mean is
// MemProfileRate.
func nextSample() uintptr {
	if MemProfileRate == 1 {
		// Callers assign our return value to
		// mcache.next_sample, but next_sample is not used
		// when the rate is 1. So avoid the math below and
		// just return something.
		return 0
	}
	if GOOS == "plan9" {
		// Plan 9 doesn't support floating point in note handler.
		if g := getg(); g == g.m.gsignal {
			return nextSampleNoFP()
		}
	}

	return uintptr(fastexprand(MemProfileRate))
}
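
// For a sense of how nextSample is consumed (a sketch of the pattern used
// in mallocgc and profilealloc above, not a separate API): each mcache keeps
// a byte budget in c.nextSample, and with the default MemProfileRate of
// 512*1024 roughly one allocation per 512KiB of allocated bytes is sampled:
//
//	if size < c.nextSample {
//		c.nextSample -= size // not sampled; spend the budget
//	} else {
//		profilealloc(mp, x, size) // sampled; record it and draw a new budget
//	}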

// fastexprand returns a random number from an exponential distribution with
// the specified mean.
func fastexprand(mean int) int32 {
	// Avoid overflow. Maximum possible step is
	// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
	switch {
	case mean > 0x7000000:
		mean = 0x7000000
	case mean == 0:
		return 0
	}

	// Take a random sample of the exponential distribution exp(-x/mean).
	// The probability density function is (1/mean)*exp(-x/mean), so the CDF is
	// p = 1 - exp(-x/mean), so
	// q = 1 - p == exp(-x/mean)
	// log_e(q) = -x/mean
	// x = -log_e(q) * mean
	// x = log_2(q) * (-log_e(2)) * mean   ; Using log_2 for efficiency
	const randomBitCount = 26
	q := fastrand()%(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(mean))) + 1
}

// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() uintptr {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow
		rate = 0x3fffffff
	}
	if rate != 0 {
		return uintptr(fastrand() % uint32(2*rate))
	}
	return 0
}
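
// As a quick sanity check of that sampling math (illustrative numbers only):
// with the default MemProfileRate of 512*1024, fastexprand draws gaps whose
// mean is ~512KiB, while the median gap is ln(2) * 512KiB ≈ 0.69 * 512KiB
// ≈ 355KiB, as expected for an exponential distribution.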

type persistentAlloc struct {
	base *notInHeap
	off  uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}

// persistentChunkSize is the number of bytes we allocate when we grow
// a persistentAlloc.
const persistentChunkSize = 256 << 10

// persistentChunks is a list of all the persistent chunks we have
// allocated. The list is maintained through the first word in the
// persistent chunk. This is updated atomically.
var persistentChunks *notInHeap

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
// The returned memory will be zeroed.
//
// Consider marking persistentalloc'd types go:notinheap.
func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	var p *notInHeap
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return unsafe.Pointer(p)
}

// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
	const (
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return (*notInHeap)(sysAlloc(size, sysStat))
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = alignUp(persistent.off, align)
	if persistent.off+size > persistentChunkSize || persistent.base == nil {
		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}

		// Add the new chunk to the persistentChunks list.
		for {
			chunks := uintptr(unsafe.Pointer(persistentChunks))
			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
				break
			}
		}
		persistent.off = alignUp(sys.PtrSize, align)
	}
	p := persistent.base.add(persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		sysStat.add(int64(size))
		memstats.other_sys.add(-int64(size))
	}
	return p
}

// inPersistentAlloc reports whether p points to memory allocated by
// persistentalloc. This must be nosplit because it is called by the
// cgo checker code, which is called by the write barrier code.
//go:nosplit
func inPersistentAlloc(p uintptr) bool {
	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
	for chunk != 0 {
		if p >= chunk && p < chunk+persistentChunkSize {
			return true
		}
		chunk = *(*uintptr)(unsafe.Pointer(chunk))
	}
	return false
}

// linearAlloc is a simple linear allocator that pre-reserves a region
// of memory and then optionally maps that region into the Ready state
// as needed.
//
// The caller is responsible for locking.
type linearAlloc struct {
	next   uintptr // next free byte
	mapped uintptr // one byte past end of mapped space
	end    uintptr // end of reserved space

	mapMemory bool // transition memory from Reserved to Ready if true
}

func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
	if base+size < base {
		// Chop off the last byte. The runtime isn't prepared
		// to deal with situations where the bounds could overflow.
		// Leave that memory reserved, though, so we don't map it
		// later.
		size -= 1
	}
	l.next, l.mapped = base, base
	l.end = base + size
	l.mapMemory = mapMemory
}

func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	p := alignUp(l.next, align)
	if p+size > l.end {
		return nil
	}
	l.next = p + size
	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
		if l.mapMemory {
			// Transition from Reserved to Prepared to Ready.
			sysMap(unsafe.Pointer(l.mapped), pEnd-l.mapped, sysStat)
			sysUsed(unsafe.Pointer(l.mapped), pEnd-l.mapped)
		}
		l.mapped = pEnd
	}
	return unsafe.Pointer(p)
}

// notInHeap is off-heap memory allocated by a lower-level allocator
// like sysAlloc or persistentAlloc.
//
// In general, it's better to use real types marked as go:notinheap,
// but this serves as a generic type for situations where that isn't
// possible (like in the allocators).
//
// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
//
//go:notinheap
type notInHeap struct{}

func (p *notInHeap) add(bytes uintptr) *notInHeap {
	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
}
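
// A sketch of how linearAlloc is meant to be used (mirroring the calls made
// from mallocinit and mheap.sysAlloc above; base and n here are placeholders,
// not values defined in this file):
//
//	var la linearAlloc
//	la.init(base, n, true)                      // reserve [base, base+n)
//	p := la.alloc(4096, 8, &memstats.other_sys) // bump-allocate, mapping pages lazily
//	if p == nil {
//		// reservation exhausted
//	}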