// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build aix darwin netbsd openbsd plan9 solaris windows

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// This implementation depends on OS-specific implementations of
//
//	func semacreate(mp *m)
//		Create a semaphore for mp, if it does not already have one.
//
//	func semasleep(ns int64) int32
//		If ns < 0, acquire m's semaphore and return 0.
//		If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	func semawakeup(mp *m)
//		Wake up mp, which is or will soon be sleeping on its semaphore.
const (
	locked uintptr = 1

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)

func lock(l *mutex) {
	lockWithRank(l, getLockRank(l))
}

func lock2(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	if atomic.Casuintptr(&l.key, 0, locked) {
		return
	}
	semacreate(gp.m)

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
Loop:
	for i := 0; ; i++ {
		v := atomic.Loaduintptr(&l.key)
		if v&locked == 0 {
			// Unlocked. Try to lock.
			if atomic.Casuintptr(&l.key, v, v|locked) {
				return
			}
			i = 0
		}
		if i < spin {
			procyield(active_spin_cnt)
		} else if i < spin+passive_spin {
			osyield()
		} else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for {
				gp.m.nextwaitm = muintptr(v &^ locked)
				if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
					break
				}
				v = atomic.Loaduintptr(&l.key)
				if v&locked == 0 {
					continue Loop
				}
			}
			if v&locked != 0 {
				// Queued. Wait.
				semasleep(-1)
				i = 0
			}
		}
	}
}

func unlock(l *mutex) {
	unlockWithRank(l)
}

//go:nowritebarrier
// We might not be holding a p in this code.
func unlock2(l *mutex) {
	gp := getg()
	var mp *m
	for {
		v := atomic.Loaduintptr(&l.key)
		if v == locked {
			if atomic.Casuintptr(&l.key, locked, 0) {
				break
			}
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = muintptr(v &^ locked).ptr()
			if atomic.Casuintptr(&l.key, v, uintptr(mp.nextwaitm)) {
				// Dequeued an M. Wake it.
				semawakeup(mp)
				break
			}
		}
	}
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}
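
// A minimal sketch of typical use of this mutex, assuming a hypothetical
// variable l: lock spins briefly, then queues the current M on l.key and
// sleeps on its per-M semaphore until unlock dequeues and wakes it.
//
//	var l mutex
//	lock(&l)
//	// ... critical section ...
//	unlock(&l)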

// One-time notifications.
func noteclear(n *note) {
	if GOOS == "aix" {
		// On AIX, semaphores might not synchronize the memory in some
		// rare cases. See issue #30189.
		atomic.Storeuintptr(&n.key, 0)
	} else {
		n.key = 0
	}
}

func notewakeup(n *note) {
	var v uintptr
	for {
		v = atomic.Loaduintptr(&n.key)
		if atomic.Casuintptr(&n.key, v, locked) {
			break
		}
	}

	// Successfully set waitm to locked.
	// What was it before?
	switch {
	case v == 0:
		// Nothing was waiting. Done.
	case v == locked:
		// Two notewakeups! Not allowed.
		throw("notewakeup - double wakeup")
	default:
		// Must be the waiting m. Wake it up.
		semawakeup((*m)(unsafe.Pointer(v)))
	}
}

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	semacreate(gp.m)
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notesleep - waitm out of sync")
		}
		return
	}
	// Queued. Sleep.
	gp.m.blocked = true
	if *cgo_yield == nil {
		semasleep(-1)
	} else {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		const ns = 10e6
		for atomic.Loaduintptr(&n.key) == 0 {
			semasleep(ns)
			asmcgocall(*cgo_yield, nil)
		}
	}
	gp.m.blocked = false
}

//go:nosplit
func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
	// gp and deadline are logically local variables, but they are written
	// as parameters so that the stack space they require is charged
	// to the caller.
	// This reduces the nosplit footprint of notetsleep_internal.
	gp = getg()

	// Register for wakeup on n->waitm.
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notetsleep - waitm out of sync")
		}
		return true
	}
	if ns < 0 {
		// Queued. Sleep.
		gp.m.blocked = true
		if *cgo_yield == nil {
			semasleep(-1)
		} else {
			// Sleep in arbitrary-but-moderate intervals to poll libc interceptors.
			const ns = 10e6
			for semasleep(ns) < 0 {
				asmcgocall(*cgo_yield, nil)
			}
		}
		gp.m.blocked = false
		return true
	}

	deadline = nanotime() + ns
	for {
		// Registered. Sleep.
		gp.m.blocked = true
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		if semasleep(ns) >= 0 {
			gp.m.blocked = false
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true
		}
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - nanotime()
		if ns <= 0 {
			break
		}
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for {
		v := atomic.Loaduintptr(&n.key)
		switch v {
		case uintptr(unsafe.Pointer(gp.m)):
			// No wakeup yet; unregister if possible.
			if atomic.Casuintptr(&n.key, v, 0) {
				return false
			}
		case locked:
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			gp.m.blocked = true
			if semasleep(-1) < 0 {
				throw("runtime: unable to acquire - semaphore out of sync")
			}
			gp.m.blocked = false
			return true
		default:
			throw("runtime: unexpected waitm - semaphore out of sync")
		}
	}
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notetsleep not on g0")
	}
	semacreate(gp.m)
	return notetsleep_internal(n, ns, nil, 0)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}
	semacreate(gp.m)
	entersyscallblock()
	ok := notetsleep_internal(n, ns, nil, 0)
	exitsyscall()
	return ok
}

func beforeIdle(int64) (*g, bool) {
	return nil, false
}

func checkTimeouts() {}
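
// A minimal sketch of the one-time notification API above, assuming a
// hypothetical note variable and exactly one notewakeup per noteclear
// (notesleep must run on g0):
//
//	var done note
//	noteclear(&done)  // arm the note
//	// ... some other M eventually calls notewakeup(&done) ...
//	notesleep(&done)  // blocks on the M's semaphore until the wakeup arrives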
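
// A timed variant from a user goroutine, sketched under the same assumptions
// (hypothetical names; ns is in nanoseconds):
//
//	var n note
//	noteclear(&n)
//	if notetsleepg(&n, 1e9) { // wait up to one second
//		// woken by notewakeup before the deadline
//	} else {
//		// timed out; the M was unregistered from n.key before returning
//	}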