package bbolt

import (
	"bytes"
	"fmt"
	"sort"
	"unsafe"
)

// node represents an in-memory, deserialized page.
type node struct {
	bucket     *Bucket
	isLeaf     bool
	unbalanced bool
	spilled    bool
	key        []byte
	pgid       pgid
	parent     *node
	children   nodes
	inodes     inodes
}

// root returns the top-level node this node is attached to.
func (n *node) root() *node {
	if n.parent == nil {
		return n
	}
	return n.parent.root()
}

// minKeys returns the minimum number of inodes this node should have.
func (n *node) minKeys() int {
	if n.isLeaf {
		return 1
	}
	return 2
}

// size returns the size of the node after serialization.
func (n *node) size() int {
	sz, elsz := pageHeaderSize, n.pageElementSize()
	for i := 0; i < len(n.inodes); i++ {
		item := &n.inodes[i]
		sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
	}
	return int(sz)
}
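
// Illustrative example: for a leaf node holding two inodes, size() works out to
//
//	pageHeaderSize + 2*leafPageElementSize +
//		len(k1) + len(v1) + len(k2) + len(v2)
//
// i.e. the fixed page header, one element header per inode, and the raw
// key/value bytes appended after the headers.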

// sizeLessThan returns true if the node is less than a given size.
// This is an optimization to avoid calculating a large node when we only need
// to know if it fits inside a certain page size.
func (n *node) sizeLessThan(v uintptr) bool {
	sz, elsz := pageHeaderSize, n.pageElementSize()
	for i := 0; i < len(n.inodes); i++ {
		item := &n.inodes[i]
		sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
		if sz >= v {
			return false
		}
	}
	return true
}

// pageElementSize returns the size of each page element based on the type of node.
func (n *node) pageElementSize() uintptr {
	if n.isLeaf {
		return leafPageElementSize
	}
	return branchPageElementSize
}

// childAt returns the child node at a given index.
func (n *node) childAt(index int) *node {
	if n.isLeaf {
		panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
	}
	return n.bucket.node(n.inodes[index].pgid, n)
}

// childIndex returns the index of a given child node.
func (n *node) childIndex(child *node) int {
	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
	return index
}

// numChildren returns the number of children.
func (n *node) numChildren() int {
	return len(n.inodes)
}

// nextSibling returns the next node with the same parent.
func (n *node) nextSibling() *node {
	if n.parent == nil {
		return nil
	}
	index := n.parent.childIndex(n)
	if index >= n.parent.numChildren()-1 {
		return nil
	}
	return n.parent.childAt(index + 1)
}

// prevSibling returns the previous node with the same parent.
func (n *node) prevSibling() *node {
	if n.parent == nil {
		return nil
	}
	index := n.parent.childIndex(n)
	if index == 0 {
		return nil
	}
	return n.parent.childAt(index - 1)
}

// put inserts a key/value.
func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) {
	if pgId >= n.bucket.tx.meta.pgid {
		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.pgid))
	} else if len(oldKey) <= 0 {
		panic("put: zero-length old key")
	} else if len(newKey) <= 0 {
		panic("put: zero-length new key")
	}

	// Find insertion index.
	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })

	// Add capacity and shift nodes if we don't have an exact match and need to insert.
	exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
	if !exact {
		n.inodes = append(n.inodes, inode{})
		copy(n.inodes[index+1:], n.inodes[index:])
	}

	inode := &n.inodes[index]
	inode.flags = flags
	inode.key = newKey
	inode.value = value
	inode.pgid = pgId
	_assert(len(inode.key) > 0, "put: zero-length inode key")
}
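
// Rough usage sketch: a leaf insert passes the same key for oldKey and newKey
// with a zero pgid, while a branch separator update during spill passes the
// child's previous key, its new first key, a nil value, and the child's page
// id (child here stands for any spilled child node):
//
//	n.put(key, key, value, 0, 0)                            // leaf insert/replace
//	n.put(oldKey, child.inodes[0].key, nil, child.pgid, 0)  // branch separator update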

// del removes a key from the node.
func (n *node) del(key []byte) {
	// Find index of key.
	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })

	// Exit if the key isn't found.
	if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
		return
	}

	// Delete inode from the node.
	n.inodes = append(n.inodes[:index], n.inodes[index+1:]...)

	// Mark the node as needing rebalancing.
	n.unbalanced = true
}

// read initializes the node from a page.
func (n *node) read(p *page) {
	n.pgid = p.id
	n.isLeaf = ((p.flags & leafPageFlag) != 0)
	n.inodes = make(inodes, int(p.count))

	for i := 0; i < int(p.count); i++ {
		inode := &n.inodes[i]
		if n.isLeaf {
			elem := p.leafPageElement(uint16(i))
			inode.flags = elem.flags
			inode.key = elem.key()
			inode.value = elem.value()
		} else {
			elem := p.branchPageElement(uint16(i))
			inode.pgid = elem.pgid
			inode.key = elem.key()
		}
		_assert(len(inode.key) > 0, "read: zero-length inode key")
	}

	// Save first key so we can find the node in the parent when we spill.
	if len(n.inodes) > 0 {
		n.key = n.inodes[0].key
		_assert(len(n.key) > 0, "read: zero-length node key")
	} else {
		n.key = nil
	}
}

// write writes the items onto one or more pages.
func (n *node) write(p *page) {
	// Initialize page.
	if n.isLeaf {
		p.flags |= leafPageFlag
	} else {
		p.flags |= branchPageFlag
	}

	if len(n.inodes) >= 0xFFFF {
		panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
	}
	p.count = uint16(len(n.inodes))

	// Stop here if there are no items to write.
	if p.count == 0 {
		return
	}

	// Loop over each item and write it to the page.
	// off tracks the offset into p of the start of the next data.
	off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes))
	for i, item := range n.inodes {
		_assert(len(item.key) > 0, "write: zero-length inode key")

		// Create a slice to write into of needed size and advance
		// byte pointer for next iteration.
		sz := len(item.key) + len(item.value)
		b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz)
		off += uintptr(sz)

		// Write the page element.
		if n.isLeaf {
			elem := p.leafPageElement(uint16(i))
			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
			elem.flags = item.flags
			elem.ksize = uint32(len(item.key))
			elem.vsize = uint32(len(item.value))
		} else {
			elem := p.branchPageElement(uint16(i))
			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
			elem.ksize = uint32(len(item.key))
			elem.pgid = item.pgid
			_assert(elem.pgid != p.id, "write: circular dependency occurred")
		}

		// Write data for the element to the end of the page.
		l := copy(b, item.key)
		copy(b[l:], item.value)
	}

	// DEBUG ONLY: n.dump()
}
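
// Resulting page layout, summarized from the code above:
//
//	| page header | element headers (count × element size) | key0 value0 | key1 value1 | ... |
//
// Each element header's pos field is the offset from that header to its
// key/value bytes, so readers can locate the data without scanning the page.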

// split breaks up a node into multiple smaller nodes, if appropriate.
// This should only be called from the spill() function.
func (n *node) split(pageSize uintptr) []*node {
	var nodes []*node

	node := n
	for {
		// Split node into two.
		a, b := node.splitTwo(pageSize)
		nodes = append(nodes, a)

		// If we can't split then exit the loop.
		if b == nil {
			break
		}

		// Set node to b so it gets split on the next iteration.
		node = b
	}

	return nodes
}

// splitTwo breaks up a node into two smaller nodes, if appropriate.
// This should only be called from the split() function.
func (n *node) splitTwo(pageSize uintptr) (*node, *node) {
	// Ignore the split if the page doesn't have at least enough nodes for
	// two pages or if the nodes can fit in a single page.
	if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
		return n, nil
	}

	// Determine the threshold before starting a new node.
	var fillPercent = n.bucket.FillPercent
	if fillPercent < minFillPercent {
		fillPercent = minFillPercent
	} else if fillPercent > maxFillPercent {
		fillPercent = maxFillPercent
	}
	threshold := int(float64(pageSize) * fillPercent)

	// Determine split position and sizes of the two pages.
	splitIndex, _ := n.splitIndex(threshold)

	// Split node into two separate nodes.
	// If there's no parent then we'll need to create one.
	if n.parent == nil {
		n.parent = &node{bucket: n.bucket, children: []*node{n}}
	}

	// Create a new node and add it to the parent.
	next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
	n.parent.children = append(n.parent.children, next)

	// Split inodes across two nodes.
	next.inodes = n.inodes[splitIndex:]
	n.inodes = n.inodes[:splitIndex]

	// Update the statistics.
	n.bucket.tx.stats.Split++

	return n, next
}
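
// Worked example (assuming the package defaults of minFillPercent = 0.1,
// maxFillPercent = 1.0 and a FillPercent of 0.5): with a 4096-byte page the
// split threshold is int(4096*0.5) = 2048, so n keeps inodes until adding one
// more would exceed ~2KB (but never fewer than minKeysPerPage), and the rest
// move to the new sibling node.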

// splitIndex finds the position where a page will fill a given threshold.
// It returns the index as well as the size of the first page.
// This is only called from split().
func (n *node) splitIndex(threshold int) (index, sz uintptr) {
	sz = pageHeaderSize

	// Loop until we only have the minimum number of keys required for the second page.
	for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
		index = uintptr(i)
		inode := n.inodes[i]
		elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value))

		// If we have at least the minimum number of keys and adding another
		// node would put us over the threshold then exit and return.
		if index >= minKeysPerPage && sz+elsize > uintptr(threshold) {
			break
		}

		// Add the element size to the total size.
		sz += elsize
	}

	return
}

// spill writes the nodes to dirty pages and splits nodes as it goes.
// Returns an error if dirty pages cannot be allocated.
func (n *node) spill() error {
	var tx = n.bucket.tx
	if n.spilled {
		return nil
	}

	// Spill child nodes first. Child nodes can materialize sibling nodes in
	// the case of split-merge so we cannot use a range loop. We have to check
	// the children size on every loop iteration.
	sort.Sort(n.children)
	for i := 0; i < len(n.children); i++ {
		if err := n.children[i].spill(); err != nil {
			return err
		}
	}

	// We no longer need the child list because it's only used for spill tracking.
	n.children = nil

	// Split nodes into appropriate sizes. The first node will always be n.
	var nodes = n.split(uintptr(tx.db.pageSize))
	for _, node := range nodes {
		// Add node's page to the freelist if it's not new.
		if node.pgid > 0 {
			tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
			node.pgid = 0
		}

		// Allocate contiguous space for the node.
		p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize)
		if err != nil {
			return err
		}

		// Write the node.
		if p.id >= tx.meta.pgid {
			panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
		}
		node.pgid = p.id
		node.write(p)
		node.spilled = true

		// Insert into parent inodes.
		if node.parent != nil {
			var key = node.key
			if key == nil {
				key = node.inodes[0].key
			}

			node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
			node.key = node.inodes[0].key
			_assert(len(node.key) > 0, "spill: zero-length node key")
		}

		// Update the statistics.
		tx.stats.Spill++
	}

	// If the root node split and created a new root then we need to spill that
	// as well. We'll clear out the children to make sure it doesn't try to respill.
	if n.parent != nil && n.parent.pgid == 0 {
		n.children = nil
		return n.parent.spill()
	}

	return nil
}
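
// Spill flow, summarized from the code above: children are spilled first (in
// key order), this node is then split into page-sized pieces, and for each
// piece the old page (if any) is freed, a fresh dirty page is allocated and
// written, and the parent's separator inode is updated. A brand-new root
// produced by splitting (a parent with pgid == 0) is spilled last.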

// rebalance attempts to combine the node with sibling nodes if the node fill
// size is below a threshold or if there are not enough keys.
func (n *node) rebalance() {
	if !n.unbalanced {
		return
	}
	n.unbalanced = false

	// Update statistics.
	n.bucket.tx.stats.Rebalance++

	// Ignore if node is above threshold (25%) and has enough keys.
	var threshold = n.bucket.tx.db.pageSize / 4
	if n.size() > threshold && len(n.inodes) > n.minKeys() {
		return
	}

	// Root node has special handling.
	if n.parent == nil {
		// If root node is a branch and only has one node then collapse it.
		if !n.isLeaf && len(n.inodes) == 1 {
			// Move root's child up.
			child := n.bucket.node(n.inodes[0].pgid, n)
			n.isLeaf = child.isLeaf
			n.inodes = child.inodes[:]
			n.children = child.children

			// Reparent all child nodes being moved.
			for _, inode := range n.inodes {
				if child, ok := n.bucket.nodes[inode.pgid]; ok {
					child.parent = n
				}
			}

			// Remove old child.
			child.parent = nil
			delete(n.bucket.nodes, child.pgid)
			child.free()
		}

		return
	}

	// If node has no keys then just remove it.
	if n.numChildren() == 0 {
		n.parent.del(n.key)
		n.parent.removeChild(n)
		delete(n.bucket.nodes, n.pgid)
		n.free()
		n.parent.rebalance()
		return
	}

	_assert(n.parent.numChildren() > 1, "parent must have at least 2 children")

	// Destination node is right sibling if idx == 0, otherwise left sibling.
	var target *node
	var useNextSibling = (n.parent.childIndex(n) == 0)
	if useNextSibling {
		target = n.nextSibling()
	} else {
		target = n.prevSibling()
	}

	// If both this node and the target node are too small then merge them.
	if useNextSibling {
		// Reparent all child nodes being moved.
		for _, inode := range target.inodes {
			if child, ok := n.bucket.nodes[inode.pgid]; ok {
				child.parent.removeChild(child)
				child.parent = n
				child.parent.children = append(child.parent.children, child)
			}
		}

		// Copy over inodes from target and remove target.
		n.inodes = append(n.inodes, target.inodes...)
		n.parent.del(target.key)
		n.parent.removeChild(target)
		delete(n.bucket.nodes, target.pgid)
		target.free()
	} else {
		// Reparent all child nodes being moved.
		for _, inode := range n.inodes {
			if child, ok := n.bucket.nodes[inode.pgid]; ok {
				child.parent.removeChild(child)
				child.parent = target
				child.parent.children = append(child.parent.children, child)
			}
		}

		// Copy over inodes to target and remove node.
		target.inodes = append(target.inodes, n.inodes...)
		n.parent.del(n.key)
		n.parent.removeChild(n)
		delete(n.bucket.nodes, n.pgid)
		n.free()
	}

	// Either this node or the target node was deleted from the parent so rebalance it.
	n.parent.rebalance()
}
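
// Merge direction, summarized: when this node is its parent's first child
// (childIndex == 0), the right sibling's inodes are pulled into this node and
// the sibling is removed; otherwise this node's inodes are pushed into the
// left sibling and this node is removed. Either way the parent lost a child,
// so it is rebalanced in turn.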

// removes a node from the list of in-memory children.
// This does not affect the inodes.
func (n *node) removeChild(target *node) {
	for i, child := range n.children {
		if child == target {
			n.children = append(n.children[:i], n.children[i+1:]...)
			return
		}
	}
}

// dereference causes the node to copy all its inode key/value references to heap memory.
// This is required when the mmap is reallocated so inodes are not pointing to stale data.
func (n *node) dereference() {
	if n.key != nil {
		key := make([]byte, len(n.key))
		copy(key, n.key)
		n.key = key
		_assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
	}

	for i := range n.inodes {
		inode := &n.inodes[i]

		key := make([]byte, len(inode.key))
		copy(key, inode.key)
		inode.key = key
		_assert(len(inode.key) > 0, "dereference: zero-length inode key")

		value := make([]byte, len(inode.value))
		copy(value, inode.value)
		inode.value = value
	}

	// Recursively dereference children.
	for _, child := range n.children {
		child.dereference()
	}

	// Update statistics.
	n.bucket.tx.stats.NodeDeref++
}

// free adds the node's underlying page to the freelist.
func (n *node) free() {
	if n.pgid != 0 {
		n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
		n.pgid = 0
	}
}

// dump writes the contents of the node to STDERR for debugging purposes.
/*
func (n *node) dump() {
	// Write node header.
	var typ = "branch"
	if n.isLeaf {
		typ = "leaf"
	}
	warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))

	// Write out abbreviated version of each item.
	for _, item := range n.inodes {
		if n.isLeaf {
			if item.flags&bucketLeafFlag != 0 {
				bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
				warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
			} else {
				warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
			}
		} else {
			warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
		}
	}
	warn("")
}
*/

type nodes []*node

func (s nodes) Len() int      { return len(s) }
func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s nodes) Less(i, j int) bool {
	return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1
}

// inode represents an internal node inside of a node.
// It can be used to point to elements in a page or point
// to an element which hasn't been added to a page yet.
type inode struct {
	flags uint32
	pgid  pgid
	key   []byte
	value []byte
}

type inodes []inode