// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build aix darwin netbsd openbsd plan9 solaris windows

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// This implementation depends on OS-specific implementations of
//
//	func semacreate(mp *m)
//		Create a semaphore for mp, if it does not already have one.
//
//	func semasleep(ns int64) int32
//		If ns < 0, acquire m's semaphore and return 0.
//		If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	func semawakeup(mp *m)
//		Wake up mp, which is or will soon be sleeping on its semaphore.
//
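// The mutex state lives in mutex.key: the low bit (locked) is set while the
// mutex is held, and the remaining bits, when nonzero, are the address of the
// m at the head of the list of Ms blocked on the mutex, linked through
// m.nextwaitm.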
const (
	locked uintptr = 1

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)
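// For illustration only: a port might satisfy the contract above with a
// per-m counting semaphore roughly as sketched below. The waitsema field and
// the sema_* calls are placeholders for whatever primitive the OS actually
// provides; the real implementations live in the per-OS source files.
//
//	func semacreate(mp *m) {
//		if mp.waitsema != 0 {
//			return // already created
//		}
//		mp.waitsema = sema_create(0) // count 0: initially unavailable
//	}
//
//	func semawakeup(mp *m) {
//		sema_post(mp.waitsema) // allow one pending or future semasleep to return 0
//	}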

func lock(l *mutex) {
	lockWithRank(l, getLockRank(l))
}

func lock2(l *mutex) {
	gp := getg()
	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	if atomic.Casuintptr(&l.key, 0, locked) {
		return
	}
	semacreate(gp.m)

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
Loop:
	for i := 0; ; i++ {
		v := atomic.Loaduintptr(&l.key)
		if v&locked == 0 {
			// Unlocked. Try to lock.
			if atomic.Casuintptr(&l.key, v, v|locked) {
				return
			}
			i = 0
		}
		if i < spin {
			procyield(active_spin_cnt)
		} else if i < spin+passive_spin {
			osyield()
		} else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for {
				gp.m.nextwaitm = muintptr(v &^ locked)
				if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
					break
				}
				v = atomic.Loaduintptr(&l.key)
				if v&locked == 0 {
					continue Loop
				}
			}
			if v&locked != 0 {
				// Queued. Wait.
				semasleep(-1)
				i = 0
			}
		}
			}
		}
	}
}

func unlock(l *mutex) {
	unlockWithRank(l)
}

//go:nowritebarrier
// We might not be holding a p in this code.
func unlock2(l *mutex) {
	gp := getg()
	var mp *m
	for {
		v := atomic.Loaduintptr(&l.key)
		if v == locked {
			if atomic.Casuintptr(&l.key, locked, 0) {
				break
			}
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = muintptr(v &^ locked).ptr()
			if atomic.Casuintptr(&l.key, v, uintptr(mp.nextwaitm)) {
				// Dequeued an M.  Wake it.
				semawakeup(mp)
				break
			}
		}
	}
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}

// One-time notifications.
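// A note's key is 0 when cleared, holds the waiting m's address while an m is
// parked in notesleep/notetsleep, and is set to locked once notewakeup has
// run. Typical use: one m calls noteclear(&n) and later notesleep(&n) (or
// notetsleep); another m calls notewakeup(&n) exactly once to release it.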
func noteclear(n *note) {
	if GOOS == "aix" {
		// On AIX, semaphores might not synchronize the memory in some
		// rare cases. See issue #30189.
		atomic.Storeuintptr(&n.key, 0)
	} else {
		n.key = 0
	}
}

func notewakeup(n *note) {
	var v uintptr
	for {
		v = atomic.Loaduintptr(&n.key)
		if atomic.Casuintptr(&n.key, v, locked) {
			break
		}
	}

	// Successfully set waitm to locked.
	// What was it before?
	switch {
	case v == 0:
		// Nothing was waiting. Done.
	case v == locked:
		// Two notewakeups! Not allowed.
		throw("notewakeup - double wakeup")
	default:
		// Must be the waiting m. Wake it up.
		semawakeup((*m)(unsafe.Pointer(v)))
	}
}

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	semacreate(gp.m)
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notesleep - waitm out of sync")
		}
		return
	}
	// Queued. Sleep.
	gp.m.blocked = true
	if *cgo_yield == nil {
		semasleep(-1)
	} else {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		const ns = 10e6
		for atomic.Loaduintptr(&n.key) == 0 {
			semasleep(ns)
			asmcgocall(*cgo_yield, nil)
		}
	}
	gp.m.blocked = false
}

//go:nosplit
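// notetsleep_internal sleeps on n for at most ns nanoseconds (forever if
// ns < 0). It reports whether the note was signaled by notewakeup before the
// deadline; false means the deadline passed and the m has been unregistered.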
func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
	// gp and deadline are logically local variables, but they are written
	// as parameters so that the stack space they require is charged
	// to the caller.
	// This reduces the nosplit footprint of notetsleep_internal.
	gp = getg()

	// Register for wakeup on n->waitm.
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notetsleep - waitm out of sync")
		}
		return true
	}
	if ns < 0 {
		// Queued. Sleep.
		gp.m.blocked = true
		if *cgo_yield == nil {
			semasleep(-1)
		} else {
			// Sleep in arbitrary-but-moderate intervals to poll libc interceptors.
			const ns = 10e6
			for semasleep(ns) < 0 {
				asmcgocall(*cgo_yield, nil)
			}
		}
		gp.m.blocked = false
		return true
	}

	deadline = nanotime() + ns
	for {
		// Registered. Sleep.
		gp.m.blocked = true
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		if semasleep(ns) >= 0 {
			gp.m.blocked = false
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true
		}
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - nanotime()
		if ns <= 0 {
			break
		}
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for {
		v := atomic.Loaduintptr(&n.key)
		switch v {
		case uintptr(unsafe.Pointer(gp.m)):
			// No wakeup yet; unregister if possible.
			if atomic.Casuintptr(&n.key, v, 0) {
				return false
			}
		case locked:
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			gp.m.blocked = true
			if semasleep(-1) < 0 {
				throw("runtime: unable to acquire - semaphore out of sync")
			}
			gp.m.blocked = false
			return true
		default:
			throw("runtime: unexpected waitm - semaphore out of sync")
		}
	}
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notetsleep not on g0")
	}
	semacreate(gp.m)
	return notetsleep_internal(n, ns, nil, 0)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}
	semacreate(gp.m)
	entersyscallblock()
	ok := notetsleep_internal(n, ns, nil, 0)
	exitsyscall()
	return ok
}

func beforeIdle(int64) (*g, bool) {
	return nil, false
}

func checkTimeouts() {}