// NOTE(review): the following three lines are repository-hosting web-page text
// accidentally captured into this source file; commented out so the file compiles.
// 代码拉取完成,页面将自动刷新
// 同步操作将从 menuiis/gkit 强制同步,此操作会覆盖自 Fork 仓库以来所做的任何修改,且无法恢复!!!
// 确定后同步将在后台操作,完成时将刷新页面,请耐心等待。
// Copyright 2021 ByteDance Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package skipmap is a high-performance, scalable, concurrent-safe map based on skip-list.
// In the typical pattern(100000 operations, 90%LOAD 9%STORE 1%DELETE, 8C16T), the skipmap
// up to 10x faster than the built-in sync.Map.
package skipmap
import (
"sync"
"sync/atomic"
"unsafe"
)
// Int64Map represents a map based on skip list in ascending order.
type Int64Map struct {
	header       *int64Node // sentinel head node; searches start here and never compare its key
	length       int64      // element count, read/written atomically
	highestLevel int64      // highest level for now, loaded/CAS'd atomically
}
// int64Node is a single entry of the skip list.
type int64Node struct {
	key   int64
	value unsafe.Pointer // *interface{}, read/written atomically via loadVal/storeVal
	next  optionalArray  // [level]*int64Node successor pointers, one per level
	mu    sync.Mutex     // guards structural updates to this node's next pointers
	flags bitflag        // fullyLinked / marked state bits
	level uint32         // number of levels this node participates in
}
// newInt64Node allocates a node holding key/value that participates in
// `level` levels of the skip list. Levels beyond the inline capacity (op1)
// require the overflow pointer array to be allocated as well.
func newInt64Node(key int64, value interface{}, level int) *int64Node {
	n := &int64Node{
		level: uint32(level),
		key:   key,
	}
	if level > op1 {
		n.next.extra = new([op2]unsafe.Pointer)
	}
	n.storeVal(value)
	return n
}
// storeVal atomically replaces the node's value. The value is boxed into a
// freshly allocated interface{} so concurrent readers never see a torn write.
func (n *int64Node) storeVal(value interface{}) {
	atomic.StorePointer(&n.value, unsafe.Pointer(&value))
}
// loadVal atomically loads the node's current value.
func (n *int64Node) loadVal() interface{} {
	return *(*interface{})(atomic.LoadPointer(&n.value))
}
// loadNext returns the successor at level i using a plain (non-atomic) read.
// Callers in this file use it only while holding n.mu or before the node is
// published into the list.
func (n *int64Node) loadNext(i int) *int64Node {
	return (*int64Node)(n.next.load(i))
}
// storeNext sets the successor at level i using a plain (non-atomic) write.
// Callers in this file use it only on nodes not yet published into the list.
func (n *int64Node) storeNext(i int, node *int64Node) {
	n.next.store(i, unsafe.Pointer(node))
}
// atomicLoadNext returns the successor at level i with an atomic load,
// safe for lock-free readers.
func (n *int64Node) atomicLoadNext(i int) *int64Node {
	return (*int64Node)(n.next.atomicLoad(i))
}
// atomicStoreNext sets the successor at level i with an atomic store,
// making the update visible to lock-free readers.
func (n *int64Node) atomicStoreNext(i int, node *int64Node) {
	n.next.atomicStore(i, unsafe.Pointer(node))
}
// lessthan reports whether the node's key sorts strictly before key.
func (n *int64Node) lessthan(key int64) bool {
	return n.key < key
}
// equal reports whether the node's key equals key.
func (n *int64Node) equal(key int64) bool {
	return n.key == key
}
// NewInt64 returns an empty int64 skipmap.
func NewInt64() *Int64Map {
	head := newInt64Node(0, "", maxLevel)
	head.flags.SetTrue(fullyLinked)
	m := &Int64Map{
		highestLevel: defaultHighestLevel,
		header:       head,
	}
	return m
}
// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap.
// The returned preds and succs always satisfy preds[i].key < key <= succs[i].key (for every level
// visited before a match; a nil succs[i] means no successor at that level).
// (without fullpath, if find the node will return immediately — levels below the
// match are left unfilled, so preds/succs are only complete down to that level)
//
// The returned node may be marked or not yet fully linked; callers must check its flags.
func (s *Int64Map) findNode(key int64, preds *[maxLevel]*int64Node, succs *[maxLevel]*int64Node) *int64Node {
	x := s.header
	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
		succ := x.atomicLoadNext(i)
		// Advance along this level while the successor's key is still below key.
		for succ != nil && succ.lessthan(key) {
			x = succ
			succ = x.atomicLoadNext(i)
		}
		preds[i] = x
		succs[i] = succ
		// Check if the key already in the skipmap.
		if succ != nil && succ.equal(key) {
			return succ
		}
	}
	return nil
}
// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list.
// The returned preds and succs always satisfy preds[i].key < key <= succs[i].key (for every level;
// unlike findNode, the full path is recorded even after a match is found).
// The return value is the index of the highest level at which the key was found, or -1.
func (s *Int64Map) findNodeDelete(key int64, preds *[maxLevel]*int64Node, succs *[maxLevel]*int64Node) int {
	// lFound represents the index of the first layer at which it found a node.
	lFound, x := -1, s.header
	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
		succ := x.atomicLoadNext(i)
		// Advance along this level while the successor's key is still below key.
		for succ != nil && succ.lessthan(key) {
			x = succ
			succ = x.atomicLoadNext(i)
		}
		preds[i] = x
		succs[i] = succ
		// Check if the key already in the skip list.
		if lFound == -1 && succ != nil && succ.equal(key) {
			lFound = i
		}
	}
	return lFound
}
// unlockInt64 releases the locks held on preds[0..highestLevel].
// Adjacent levels frequently share the same predecessor node, so each
// distinct node is unlocked exactly once.
func unlockInt64(preds [maxLevel]*int64Node, highestLevel int) {
	var last *int64Node
	for level := highestLevel; level >= 0; level-- {
		p := preds[level]
		if p == last {
			// Same node as the level above — already unlocked.
			continue
		}
		p.mu.Unlock()
		last = p
	}
}
// Store sets the value for a key.
//
// If the key already exists and is not being deleted, the value is swapped
// in place. Otherwise a new node is linked in using the optimistic lazy
// skip-list insertion scheme: search lock-free, lock the per-level
// predecessors, re-validate, then link and publish.
func (s *Int64Map) Store(key int64, value interface{}) {
	level := s.randomlevel()
	var preds, succs [maxLevel]*int64Node
	for {
		nodeFound := s.findNode(key, &preds, &succs)
		if nodeFound != nil { // indicating the key is already in the skip-list
			if !nodeFound.flags.Get(marked) {
				// We don't need to care about whether or not the node is fully linked,
				// just replace the value.
				nodeFound.storeVal(value)
				return
			}
			// If the node is marked, represents some other goroutines is in the process of deleting this node,
			// we need to add this node in next loop.
			continue
		}
		// Add this node into skip list.
		var (
			highestLocked        = -1 // the highest level being locked by this process
			valid                = true
			pred, succ, prevPred *int64Node
		)
		for layer := 0; valid && layer < level; layer++ {
			pred = preds[layer]   // target node's previous node
			succ = succs[layer]   // target node's next node
			if pred != prevPred { // the node in this layer could be locked by previous loop
				pred.mu.Lock()
				highestLocked = layer
				prevPred = pred
			}
			// valid check if there is another node has inserted into the skip list in this layer during this process.
			// It is valid if:
			// 1. The previous node and next node both are not marked.
			// 2. The previous node's next node is succ in this layer.
			// (plain loadNext is safe here: pred.mu is held, so no writer can race)
			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
		}
		if !valid {
			// The structure changed under us; drop the locks and retry from a fresh search.
			unlockInt64(preds, highestLocked)
			continue
		}
		nn := newInt64Node(key, value, level)
		for layer := 0; layer < level; layer++ {
			nn.storeNext(layer, succs[layer])
			preds[layer].atomicStoreNext(layer, nn)
		}
		// Publish the node to readers only after every level is linked.
		nn.flags.SetTrue(fullyLinked)
		unlockInt64(preds, highestLocked)
		atomic.AddInt64(&s.length, 1)
		return
	}
}
// randomlevel draws a random level for a new node and, when that level
// exceeds the map's current highest level, raises highestLevel via CAS.
func (s *Int64Map) randomlevel() int {
	level := randomLevel()
	for {
		current := atomic.LoadInt64(&s.highestLevel)
		// Done when the level is already covered, or our raise succeeded;
		// otherwise another goroutine moved highestLevel — re-read and retry.
		if int64(level) <= current || atomic.CompareAndSwapInt64(&s.highestLevel, current, int64(level)) {
			return level
		}
	}
}
// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
//
// Load is entirely lock-free: it walks the list with atomic next-pointer
// loads and only reports a node that is fully linked and not marked.
func (s *Int64Map) Load(key int64) (value interface{}, ok bool) {
	x := s.header
	for i := int(atomic.LoadInt64(&s.highestLevel)) - 1; i >= 0; i-- {
		nex := x.atomicLoadNext(i)
		for nex != nil && nex.lessthan(key) {
			x = nex
			nex = x.atomicLoadNext(i)
		}
		// Check if the key already in the skip list.
		if nex != nil && nex.equal(key) {
			// A half-inserted or being-deleted node is treated as absent.
			if nex.flags.MGet(fullyLinked|marked, fullyLinked) {
				return nex.loadVal(), true
			}
			return nil, false
		}
	}
	return nil, false
}
// LoadAndDelete deletes the value for a key, returning the previous value if any.
// The loaded result reports whether the key was present.
// (Modified from Delete)
//
// Deletion is two-phase: first the node is logically deleted by setting the
// marked flag under the node's own lock; then the per-level predecessors are
// locked and validated so the node can be physically unlinked.
func (s *Int64Map) LoadAndDelete(key int64) (value interface{}, loaded bool) {
	var (
		nodeToDelete *int64Node
		isMarked     bool // represents if this operation mark the node
		topLayer     = -1
		preds, succs [maxLevel]*int64Node
	)
	for {
		lFound := s.findNodeDelete(key, &preds, &succs)
		if isMarked || // this process mark this node or we can find this node in the skip list
			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
			if !isMarked { // we don't mark this node for now
				nodeToDelete = succs[lFound]
				topLayer = lFound
				nodeToDelete.mu.Lock()
				if nodeToDelete.flags.Get(marked) {
					// The node is marked by another process,
					// the physical deletion will be accomplished by another process.
					nodeToDelete.mu.Unlock()
					return nil, false
				}
				// Logical deletion: after this point the node is invisible to Load.
				nodeToDelete.flags.SetTrue(marked)
				isMarked = true
			}
			// Accomplish the physical deletion.
			var (
				highestLocked        = -1 // the highest level being locked by this process
				valid                = true
				pred, succ, prevPred *int64Node
			)
			for layer := 0; valid && (layer <= topLayer); layer++ {
				pred, succ = preds[layer], succs[layer]
				if pred != prevPred { // the node in this layer could be locked by previous loop
					pred.mu.Lock()
					highestLocked = layer
					prevPred = pred
				}
				// valid check if there is another node has inserted into the skip list in this layer
				// during this process, or the previous is deleted by another process.
				// It is valid if:
				// 1. the previous node exists.
				// 2. no another node has inserted into the skip list in this layer.
				valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ
			}
			if !valid {
				// Predecessors changed; release and retry (the mark survives across loops).
				unlockInt64(preds, highestLocked)
				continue
			}
			for i := topLayer; i >= 0; i-- {
				// Now we own the `nodeToDelete`, no other goroutine will modify it.
				// So we don't need `nodeToDelete.loadNext`
				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
			}
			nodeToDelete.mu.Unlock()
			unlockInt64(preds, highestLocked)
			atomic.AddInt64(&s.length, -1)
			return nodeToDelete.loadVal(), true
		}
		return nil, false
	}
}
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
// (Modified from Store)
//
// The node level is drawn lazily on the first insertion attempt; if it
// exceeds the highest level observed at entry, the loop restarts so the
// search path covers the new level.
func (s *Int64Map) LoadOrStore(key int64, value interface{}) (actual interface{}, loaded bool) {
	var (
		level        int
		preds, succs [maxLevel]*int64Node
		hl           = int(atomic.LoadInt64(&s.highestLevel))
	)
	for {
		nodeFound := s.findNode(key, &preds, &succs)
		if nodeFound != nil { // indicating the key is already in the skip-list
			if !nodeFound.flags.Get(marked) {
				// We don't need to care about whether or not the node is fully linked,
				// just return the value.
				return nodeFound.loadVal(), true
			}
			// If the node is marked, represents some other goroutines is in the process of deleting this node,
			// we need to add this node in next loop.
			continue
		}
		// Add this node into skip list.
		var (
			highestLocked        = -1 // the highest level being locked by this process
			valid                = true
			pred, succ, prevPred *int64Node
		)
		if level == 0 {
			level = s.randomlevel()
			if level > hl {
				// If the highest level is updated, usually means that many goroutines
				// are inserting items. Hopefully we can find a better path in next loop.
				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
				// but this strategy's performance is almost the same as the existing method.
				continue
			}
		}
		for layer := 0; valid && layer < level; layer++ {
			pred = preds[layer]   // target node's previous node
			succ = succs[layer]   // target node's next node
			if pred != prevPred { // the node in this layer could be locked by previous loop
				pred.mu.Lock()
				highestLocked = layer
				prevPred = pred
			}
			// valid check if there is another node has inserted into the skip list in this layer during this process.
			// It is valid if:
			// 1. The previous node and next node both are not marked.
			// 2. The previous node's next node is succ in this layer.
			valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ
		}
		if !valid {
			// The structure changed under us; drop the locks and retry.
			unlockInt64(preds, highestLocked)
			continue
		}
		nn := newInt64Node(key, value, level)
		for layer := 0; layer < level; layer++ {
			nn.storeNext(layer, succs[layer])
			preds[layer].atomicStoreNext(layer, nn)
		}
		// Publish the node to readers only after every level is linked.
		nn.flags.SetTrue(fullyLinked)
		unlockInt64(preds, highestLocked)
		atomic.AddInt64(&s.length, 1)
		return value, false
	}
}
// LoadOrStoreLazy returns the existing value for the key if present.
// Otherwise, it stores and returns the given value from f, f will only be called once.
// (f is invoked after validation succeeds, while the predecessor locks are
// held — f should therefore be cheap and must not call back into this map.)
// The loaded result is true if the value was loaded, false if stored.
// (Modified from LoadOrStore)
func (s *Int64Map) LoadOrStoreLazy(key int64, f func() interface{}) (actual interface{}, loaded bool) {
	var (
		level        int
		preds, succs [maxLevel]*int64Node
		hl           = int(atomic.LoadInt64(&s.highestLevel))
	)
	for {
		nodeFound := s.findNode(key, &preds, &succs)
		if nodeFound != nil { // indicating the key is already in the skip-list
			if !nodeFound.flags.Get(marked) {
				// We don't need to care about whether or not the node is fully linked,
				// just return the value.
				return nodeFound.loadVal(), true
			}
			// If the node is marked, represents some other goroutines is in the process of deleting this node,
			// we need to add this node in next loop.
			continue
		}
		// Add this node into skip list.
		var (
			highestLocked        = -1 // the highest level being locked by this process
			valid                = true
			pred, succ, prevPred *int64Node
		)
		if level == 0 {
			level = s.randomlevel()
			if level > hl {
				// If the highest level is updated, usually means that many goroutines
				// are inserting items. Hopefully we can find a better path in next loop.
				// TODO(zyh): consider filling the preds if s.header[level].next == nil,
				// but this strategy's performance is almost the same as the existing method.
				continue
			}
		}
		for layer := 0; valid && layer < level; layer++ {
			pred = preds[layer]   // target node's previous node
			succ = succs[layer]   // target node's next node
			if pred != prevPred { // the node in this layer could be locked by previous loop
				pred.mu.Lock()
				highestLocked = layer
				prevPred = pred
			}
			// valid check if there is another node has inserted into the skip list in this layer during this process.
			// It is valid if:
			// 1. The previous node and next node both are not marked.
			// 2. The previous node's next node is succ in this layer.
			valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked))
		}
		if !valid {
			// The structure changed under us; drop the locks and retry.
			unlockInt64(preds, highestLocked)
			continue
		}
		// Validation passed — this is the single point where f is called.
		value := f()
		nn := newInt64Node(key, value, level)
		for layer := 0; layer < level; layer++ {
			nn.storeNext(layer, succs[layer])
			preds[layer].atomicStoreNext(layer, nn)
		}
		// Publish the node to readers only after every level is linked.
		nn.flags.SetTrue(fullyLinked)
		unlockInt64(preds, highestLocked)
		atomic.AddInt64(&s.length, 1)
		return value, false
	}
}
// Delete deletes the value for a key.
// It reports whether this call performed the deletion.
//
// Deletion is two-phase: the node is first logically deleted (marked) under
// its own lock, then physically unlinked under the per-level predecessor locks.
func (s *Int64Map) Delete(key int64) bool {
	var (
		nodeToDelete *int64Node
		isMarked     bool // represents if this operation mark the node
		topLayer     = -1
		preds, succs [maxLevel]*int64Node
	)
	for {
		lFound := s.findNodeDelete(key, &preds, &succs)
		if isMarked || // this process mark this node or we can find this node in the skip list
			lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound {
			if !isMarked { // we don't mark this node for now
				nodeToDelete = succs[lFound]
				topLayer = lFound
				nodeToDelete.mu.Lock()
				if nodeToDelete.flags.Get(marked) {
					// The node is marked by another process,
					// the physical deletion will be accomplished by another process.
					nodeToDelete.mu.Unlock()
					return false
				}
				// Logical deletion: after this point the node is invisible to Load.
				nodeToDelete.flags.SetTrue(marked)
				isMarked = true
			}
			// Accomplish the physical deletion.
			var (
				highestLocked        = -1 // the highest level being locked by this process
				valid                = true
				pred, succ, prevPred *int64Node
			)
			for layer := 0; valid && (layer <= topLayer); layer++ {
				pred, succ = preds[layer], succs[layer]
				if pred != prevPred { // the node in this layer could be locked by previous loop
					pred.mu.Lock()
					highestLocked = layer
					prevPred = pred
				}
				// valid check if there is another node has inserted into the skip list in this layer
				// during this process, or the previous is deleted by another process.
				// It is valid if:
				// 1. the previous node exists.
				// 2. no another node has inserted into the skip list in this layer.
				// NOTE(review): LoadAndDelete performs the same check with the plain
				// loadNext (pred.mu is held here); atomicLoadNext is the stricter
				// variant — presumably equivalent, confirm before unifying.
				valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ
			}
			if !valid {
				// Predecessors changed; release and retry (the mark survives across loops).
				unlockInt64(preds, highestLocked)
				continue
			}
			for i := topLayer; i >= 0; i-- {
				// Now we own the `nodeToDelete`, no other goroutine will modify it.
				// So we don't need `nodeToDelete.loadNext`
				preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i))
			}
			nodeToDelete.mu.Unlock()
			unlockInt64(preds, highestLocked)
			atomic.AddInt64(&s.length, -1)
			return true
		}
		return false
	}
}
// Range calls f sequentially for each key and value present in the skipmap.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently, Range may reflect any mapping for that key
// from any point during the Range call.
func (s *Int64Map) Range(f func(key int64, value interface{}) bool) {
	// Walk the bottom level, which links every node in ascending key order.
	for node := s.header.atomicLoadNext(0); node != nil; node = node.atomicLoadNext(0) {
		// Skip nodes that are half-inserted or logically deleted.
		if !node.flags.MGet(fullyLinked|marked, fullyLinked) {
			continue
		}
		if !f(node.key, node.loadVal()) {
			return
		}
	}
}
// Len returns the number of elements in the skipmap.
// The count is read atomically and may be stale as soon as it is returned
// when the map is being mutated concurrently.
func (s *Int64Map) Len() int {
	return int(atomic.LoadInt64(&s.length))
}
// NOTE(review): the following two lines are repository-hosting page text
// accidentally captured into this file; commented out so the file compiles.
// 此处可能存在不合适展示的内容,页面不予展示。您可通过相关编辑功能自查并修改。
// 如您确认内容无涉及 不当用语 / 纯广告导流 / 暴力 / 低俗色情 / 侵权 / 盗版 / 虚假 / 无价值内容或违法国家有关法律法规的内容,可点击提交进行申诉,我们将尽快为您处理。