Mirror of https://github.com/charlienet/go-mixed.git (synced 2025-07-18 08:32:40 +08:00)
Compare commits
33 Commits
SHA1:
04aecd4abc
1f8789e7eb
299e09f8e3
ea846a321a
278c8b4cb7
cf30b4eb4c
f3ca69f159
bd85140a78
b76be4ce6b
6e24cf5bdc
abe445f5e6
ac346274c1
823cd62148
d49c02924c
9203c3719f
132bb0d0e2
716a199c9b
f043d2e5a7
5b4f8097d6
1071ad3694
35751f7fdb
135b3a983b
52fabedd66
792458a185
23865214c8
ebd76d2df6
93352f03c1
9c86470fa1
37e9cabde8
886723997e
44304f5b16
f061b2efeb
dcd803b4f2
1  .gitignore  (vendored)
@@ -1,2 +1,3 @@
*.o
fs/logs/**
.idea/**
120  bloom/bloom.go
@@ -1,51 +1,113 @@
package bloom

import "github.com/bits-and-blooms/bitset"

import (
	"math"

	"github.com/charlienet/go-mixed/bytesconv"
	"github.com/charlienet/go-mixed/expr"
	"github.com/charlienet/go-mixed/hash"
	"github.com/go-redis/redis/v8"
)

const DEFAULT_SIZE = 2 << 24

var seeds = []uint{7, 11, 13, 31, 37, 61}

type simplehash struct {
	cap  uint
	seed uint

type bitStore interface {
	Clear()
	Set(pos ...uint) error
	Test(pos ...uint) (bool, error)
}

type BloomFilter struct {
	set   *bitset.BitSet
	funcs [6]simplehash
	bits  uint     // size of the bloom filter
	funcs uint     // number of hash functions
	store bitStore // bitmap store
}

func NewBloomFilter() *BloomFilter {
	bf := new(BloomFilter)
	for i := 0; i < len(bf.funcs); i++ {
		bf.funcs[i] = simplehash{DEFAULT_SIZE, seeds[i]}

type bloomOptions struct {
	redisClient *redis.Client
	redisKey    string
}

type option func(*bloomOptions)

func WithRedis(redis *redis.Client, key string) option {
	return func(bo *bloomOptions) {
		bo.redisClient = redis
		bo.redisKey = key
	}
	bf.set = bitset.New(DEFAULT_SIZE)
}

// New initializes a bloom filter
// https://pages.cs.wisc.edu/~cao/papers/summary-cache/node8.html
func New(expectedInsertions uint, fpp float64, opts ...option) *BloomFilter {
	opt := &bloomOptions{}

	for _, f := range opts {
		f(opt)
	}

	bits := optimalNumOfBits(expectedInsertions, fpp)
	k := optimalNumOfHashFunctions(bits, expectedInsertions)

	bf := &BloomFilter{
		bits:  bits,
		funcs: k,
		store: expr.Ternary[bitStore](
			opt.redisClient == nil,
			newMemStore(bits),
			newRedisStore(opt.redisClient, opt.redisKey, bits)),
	}

	return bf
}

func (bf *BloomFilter) Add(value string) {
	for _, f := range bf.funcs {
		bf.set.Set(f.hash(value))
	}
func (bf *BloomFilter) Add(data string) {
	offsets := bf.geOffsets([]byte(data))
	bf.store.Set(offsets...)
}

func (bf *BloomFilter) Contains(value string) bool {
	if value == "" {
		return false
	}
	ret := true
	for _, f := range bf.funcs {
		ret = ret && bf.set.Test(f.hash(value))
	}
	return ret
func (bf *BloomFilter) ExistString(data string) (bool, error) {
	return bf.Exists(bytesconv.StringToBytes(data))
}

func (s simplehash) hash(value string) uint {
	var result uint = 0
	for i := 0; i < len(value); i++ {
		result = result*s.seed + uint(value[i])
func (bf *BloomFilter) Exists(data []byte) (bool, error) {
	if data == nil || len(data) == 0 {
		return false, nil
	}
	return (s.cap - 1) & result

	offsets := bf.geOffsets(data)
	isSet, err := bf.store.Test(offsets...)
	if err != nil {
		return false, err
	}

	return isSet, nil
}

func (bf *BloomFilter) geOffsets(data []byte) []uint {
	offsets := make([]uint, bf.funcs)
	for i := uint(0); i < bf.funcs; i++ {
		offsets[i] = uint(hash.Murmur3(append(data, byte(i))) % uint64(bf.bits))
	}

	return offsets
}

// Clear empties the bloom filter
func (bf *BloomFilter) Clear() {
	bf.store.Clear()
}

// optimalNumOfBits computes the optimal bitmap length,
// n: expected number of inserted elements,
// p: expected false-positive probability
func optimalNumOfBits(n uint, p float64) uint {
	return (uint)(-float64(n) * math.Log(p) / (math.Log(2) * math.Log(2)))
}

// optimalNumOfHashFunctions computes the number of hash functions
func optimalNumOfHashFunctions(m, n uint) uint {
	return uint(math.Round(float64(m) / float64(n) * math.Log(2)))
}
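For reference, the constructor-based API introduced above (New, WithRedis, Add, ExistString) can be used roughly like the following sketch; the Redis address and key are placeholders, not values from the diff:

package main

import (
	"fmt"

	"github.com/charlienet/go-mixed/bloom"
	"github.com/go-redis/redis/v8"
)

func main() {
	// In-memory filter: ~1000 expected insertions, 3% false-positive rate.
	f := bloom.New(1000, 0.03)
	f.Add("hello")
	fmt.Println(f.ExistString("hello")) // true <nil>

	// Redis-backed filter; address and key are illustrative placeholders.
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	rf := bloom.New(10000, 0.01, bloom.WithRedis(client, "bloom:example"))
	rf.Add("world")
	fmt.Println(rf.ExistString("world"))
}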
@@ -2,19 +2,121 @@ package bloom_test

import (
	"fmt"
	"math"
	"strconv"
	"testing"

	"github.com/charlienet/go-mixed/bloom"
	"github.com/charlienet/go-mixed/rand"
	"github.com/charlienet/go-mixed/sys"
	"github.com/go-redis/redis/v8"
	"github.com/stretchr/testify/assert"
)

const ()

func TestBloom(t *testing.T) {
	b := bloom.NewBloomFilter()
	b := bloom.New(1000, 0.03)

	for i := 0; i < 1000000; i++ {
		b.Add(strconv.Itoa(i))
	}

	fmt.Println(b.Contains(strconv.Itoa(9999)))
	fmt.Println(b.Contains("ss"))
	v := "6943553521463296-1635402930"

	t.Log(b.ExistString(v))
	b.Add(v)
	t.Log(b.ExistString(v))

	isSet, err := b.ExistString(strconv.Itoa(9999))
	fmt.Println("value contained in the filter:", isSet, err)

	isSet, err = b.ExistString("ss")
	fmt.Println("value not contained in the filter:", isSet, err)

	t.Log(sys.ShowMemUsage())
}

func TestOptimize(t *testing.T) {

	expectedInsertions := 1000000 // expected number of stored items
	falseProbability := 0.00002   // expected error rate
	bits := uint(float64(-expectedInsertions) * math.Log(falseProbability) / (math.Log(2) * math.Log(2)))
	hashSize := uint(math.Round(float64(bits) / float64(expectedInsertions) * math.Log(2)))

	t.Log(bits)
	t.Log(hashSize)
}

func TestRedis(t *testing.T) {
	client := redis.NewClient(&redis.Options{
		Addr:     "192.168.2.222:6379",
		Password: "123456",
	})

	bf := bloom.New(10000, 0.03, bloom.WithRedis(client, "bloom:test"))

	for i := 0; i < 100; i++ {
		bf.Add(strconv.Itoa(i))
	}

	for i := 0; i < 100; i++ {
		isSet, err := bf.ExistString(strconv.Itoa(i))
		if err != nil {
			t.Fatal(err)
		}

		if !isSet {
			t.Log(i, isSet)
		}
	}

	for i := 101; i < 200; i++ {
		isSet, err := bf.ExistString(strconv.Itoa(i))
		t.Log(isSet, err)
	}
}

func TestClear(t *testing.T) {
	bf := bloom.New(1000, 0.03)

	v := "abc"
	bf.Add(v)
	isSet, _ := bf.ExistString(v)
	assert.True(t, isSet)

	bf.Clear()
	isSet, _ = bf.ExistString(v)
	assert.False(t, isSet)
}

func TestParallel(t *testing.T) {
	f := bloom.New(1000, 0.03)

	for i := 0; i < 10000; i++ {
		v := rand.Hex.Generate(10)

		f.Add(v)
		isSet, _ := f.ExistString(v)

		assert.True(t, isSet)
	}
}

func BenchmarkFilter(b *testing.B) {
	f := bloom.New(1000, 0.03)

	b.RunParallel(func(p *testing.PB) {
		for p.Next() {
			v := rand.Hex.Generate(10)
			f.Add(v)

			f.ExistString(v)

			// assert.True(b, f.Contains(v))

			// assert.True(b, f.Contains(v))
		}
	})

}
51  bloom/mem_store.go  (new file)
@@ -0,0 +1,51 @@
package bloom

import (
	"github.com/bits-and-blooms/bitset"
	"github.com/charlienet/go-mixed/locker"
)

type memStore struct {
	size uint
	set  *bitset.BitSet  // in-memory bitmap
	lock locker.RWLocker // sync lock
}

func newMemStore(size uint) *memStore {
	return &memStore{
		size: size,
		set:  bitset.New(size),
		lock: locker.NewRWLocker(),
	}
}

func (s *memStore) Clear() {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.set.ClearAll()
}

func (s *memStore) Set(offsets ...uint) error {
	s.lock.Lock()
	defer s.lock.Unlock()

	for _, p := range offsets {
		s.set.Set(p)
	}

	return nil
}

func (s *memStore) Test(offsets ...uint) (bool, error) {
	s.lock.RLock()
	defer s.lock.RUnlock()

	for _, p := range offsets {
		if !s.set.Test(p) {
			return false, nil
		}
	}

	return true, nil
}
116  bloom/redis_store.go  (new file)
@@ -0,0 +1,116 @@
package bloom

import (
	"context"
	"errors"
	"strconv"
	"time"

	"github.com/go-redis/redis/v8"
)

const (
	// ARGV: array of offsets
	// KEYS[1]: key used for the setbit operation
	// set all positions to 1
	setScript = `
for _, offset in ipairs(ARGV) do
	redis.call("setbit", KEYS[1], offset, 1)
end
`

	// ARGV: array of offsets
	// KEYS[1]: key used for the setbit operation
	// check whether all positions are 1
	testScript = `
for _, offset in ipairs(ARGV) do
	if tonumber(redis.call("getbit", KEYS[1], offset)) == 0 then
		return false
	end
end
return true
`
)

var ErrTooLargeOffset = errors.New("offset exceeds the maximum")

var _ bitStore = &redisBitSet{}

// redisBitSet stores the bitmap in Redis
type redisBitSet struct {
	store *redis.Client
	key   string
	bits  uint
}

func newRedisStore(store *redis.Client, key string, bits uint) *redisBitSet {
	return &redisBitSet{
		store: store,
		key:   key,
		bits:  bits,
	}
}

func (s *redisBitSet) Set(offsets ...uint) error {
	args, err := s.buildOffsetArgs(offsets)
	if err != nil {
		return err
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*500)
	defer cancel()

	_, err = s.store.Eval(ctx, setScript, []string{s.key}, args).Result()

	// go-redis returns redis.Nil when the key does not exist,
	// so that case needs special handling
	if err == redis.Nil {
		return nil
	} else if err != nil {
		return err
	}

	return nil
}

func (s *redisBitSet) Test(offsets ...uint) (bool, error) {
	args, err := s.buildOffsetArgs(offsets)
	if err != nil {
		return false, err
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*500)
	defer cancel()

	resp, err := s.store.Eval(ctx, testScript, []string{s.key}, args).Result()

	// the key does not exist, meaning nothing has been stored yet
	if err == redis.Nil {
		return false, nil
	} else if err != nil {
		return false, err
	}

	exists, ok := resp.(int64)
	if !ok {
		return false, nil
	}

	return exists == 1, nil
}

func (s *redisBitSet) Clear() {

}

func (r *redisBitSet) buildOffsetArgs(offsets []uint) ([]string, error) {
	args := make([]string, 0, len(offsets))
	for _, offset := range offsets {
		if offset >= r.bits {
			return nil, ErrTooLargeOffset
		}

		args = append(args, strconv.FormatUint(uint64(offset), 10))
	}
	return args, nil
}
24  bloom/redis_store_test.go  (new file)
@@ -0,0 +1,24 @@
package bloom

import (
	"testing"

	"github.com/go-redis/redis/v8"
)

func TestRedisStore(t *testing.T) {
	client := redis.NewClient(&redis.Options{
		Addr:     "192.168.2.222:6379",
		Password: "123456",
	})

	store := newRedisStore(client, "abcdef", 10000)
	err := store.Set(1, 2, 3, 9, 1223)
	if err != nil {
		t.Fatal(err)
	}

	t.Log(store.Test(1))
	t.Log(store.Test(1, 2, 3))
	t.Log(store.Test(4, 5, 8))
}
@@ -5,10 +5,21 @@ import (
	"encoding/hex"
)

const hextable = "0123456789ABCDEF"
const hexTable = "0123456789ABCDEF"

type BytesResult []byte

// FromHexString decodes from a hexadecimal string
func FromHexString(s string) (BytesResult, error) {
	b, err := hex.DecodeString(s)
	return BytesResult(b), err
}

func FromBase64String(s string) (BytesResult, error) {
	b, err := base64.StdEncoding.DecodeString(s)
	return BytesResult(b), err
}

func (r BytesResult) Hex() string {
	return hex.EncodeToString(r)
}
@@ -16,9 +27,11 @@ func (r BytesResult) Hex() string {
func (r BytesResult) UppercaseHex() string {
	dst := make([]byte, hex.EncodedLen(len(r)))
	j := 0
	for _, v := range r {
		dst[j] = hextable[v>>4]
		dst[j+1] = hextable[v&0x0f]

	re := r[:]
	for _, v := range re {
		dst[j] = hexTable[v>>4]
		dst[j+1] = hexTable[v&0x0f]
		j += 2
	}

@@ -20,3 +20,8 @@ func Decode(b []byte, out any) error {
	dec := gob.NewDecoder(buf)
	return dec.Decode(out)
}

func MsgPackage() {

	// msgpack.NewEncoder()
}
3  cache/big_cache.go  (vendored)
@@ -58,7 +58,8 @@ func (c *bigCacheClient) Set(key string, entry []byte, expire time.Duration) err
}

func (c *bigCacheClient) Delete(keys ...string) error {
	for _, k := range keys {
	ks := keys[:]
	for _, k := range ks {
		if err := c.cache.Delete(k); err != nil {
			return err
		}
83  cache/cache.go  (vendored)
@@ -3,9 +3,12 @@ package cache
import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/charlienet/go-mixed/bytesconv"
	"github.com/charlienet/go-mixed/json"
	"github.com/charlienet/go-mixed/locker"
	"github.com/charlienet/go-mixed/logx"
)

@@ -14,13 +17,15 @@ var ErrNotFound = errors.New("key not found")
type LoadFunc func(context.Context) (any, error)

type Cache struct {
	prefix           string           // key prefix
	retry            int              // number of retries when fetching the resource
	mem              MemCache         // in-memory cache
	distributdCache  DistributdCache  // distributed cache
	publishSubscribe PublishSubscribe // publish/subscribe
	qps              *qps             //
	logger           logx.Logger      // logging
	prefix           string            // key prefix
	retry            int               // number of retries when fetching the resource
	mem              MemCache          // in-memory cache
	distributdCache  DistributdCache   // distributed cache
	publishSubscribe PublishSubscribe  // publish/subscribe
	lock             locker.ChanLocker // resource lock
	stats            *Stats            // cache hit counters
	qps              *qps              // access counter
	logger           logx.Logger       // logging
}

func NewCache(opts ...option) *Cache {
@@ -112,14 +117,72 @@ func (c *Cache) getFromMem(key string, out any) error {

// load data from the cache
func (c *Cache) getFromCache() {

	// load data from the cache
	// 1. check whether it exists in memory
	// 2. check whether it exists in the distributed cache
}

// load data from the data source
func (c *Cache) getFromSource(ctx context.Context, key string, fn LoadFunc) {
func (c *Cache) getFromSource(ctx context.Context, key string, fn LoadFunc) error {

	// 1. try to acquire the resource lock; on success, load the data
	// 2. if the lock was not acquired, wait and read from the cache
	fn(ctx)
	ch, ok := c.lock.Get(key)
	if ok {
		defer c.lock.Release(key)

		v, err := fn(ctx)
		if err != nil {
			return fmt.Errorf("load from source err:%v", err)
		}

		// store the loaded value into the multi-level cache
		_ = v

		return nil
	}

	// wait until the data has been loaded
	select {
	case <-ch:

		// if no result was obtained, fetch again
		return c.getFromSource(ctx, key, fn)
	}
}

func (c *Cache) marshal(value any) ([]byte, error) {
	switch value := value.(type) {
	case nil:
		return nil, nil
	case []byte:
		return value, nil
	case string:
		return []byte(value), nil
	}

	b, err := json.Marshal(value)
	return b, err
}

func (c *Cache) unmarshal(b []byte, value any) error {
	if len(b) == 0 {
		return nil
	}

	switch value := value.(type) {
	case nil:
		return nil
	case *[]byte:
		clone := make([]byte, len(b))
		copy(clone, b)
		*value = clone
		return nil
	case *string:
		*value = string(b)
		return nil
	}

	err := json.Unmarshal(b, value)
	return err
}
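The getFromSource flow above is a single-flight pattern built on locker.ChanLocker: one caller wins the per-key lock and loads from the source, the others block on the returned channel and then re-read. A stripped-down sketch of the same idea using only the standard library follows; every name in it is illustrative and not part of the package:

package main

import (
	"fmt"
	"sync"
)

type chanLock struct {
	mu sync.Mutex
	m  map[string]chan struct{}
}

// acquire returns (ch, true) when the caller won the right to load the key,
// otherwise (ch, false) where ch is closed once the winner releases the key.
func (l *chanLock) acquire(key string) (chan struct{}, bool) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if ch, ok := l.m[key]; ok {
		return ch, false
	}
	ch := make(chan struct{})
	l.m[key] = ch
	return ch, true
}

func (l *chanLock) release(key string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if ch, ok := l.m[key]; ok {
		close(ch)
		delete(l.m, key)
	}
}

func main() {
	lock := &chanLock{m: map[string]chan struct{}{}}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if ch, ok := lock.acquire("user:1"); ok {
				defer lock.release("user:1")
				fmt.Println(i, "loaded from source") // only one goroutine loads
			} else {
				<-ch // wait for the winner, then read from the cache
				fmt.Println(i, "waited and re-read")
			}
		}(i)
	}
	wg.Wait()
}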
7  cache/local_cache.go  (vendored, new file)
@@ -0,0 +1,7 @@
package cache

type LocalCache interface {
	Set(key string, data []byte)
	Get(key string) ([]byte, bool)
	Del(key string)
}
6  cache/readme.md  (vendored)
@@ -11,3 +11,9 @@
3. Cache penetration: when no data is found in the data source, cache an empty value.
4. Cache avalanche: when putting resources into the cache, add a random offset to the expiration time so entries do not all expire at the same moment.
5. Auto renewal: when the second-level cache is accessed, extend the lifetime of the resource in use.

## Usage

```go
Cache.Get(key, dist, func() (bool,error){}, options func(){})
```
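Point 4 above (avalanche protection) comes down to adding random jitter to each entry's TTL. A minimal illustration of the idea, not code from this repository, with the 10% jitter fraction chosen arbitrarily:

package example

import (
	"math/rand"
	"time"
)

// addJitter extends the base TTL by a random offset of up to 10% of its
// length so that entries written together do not all expire at once.
func addJitter(base time.Duration) time.Duration {
	if base <= 0 {
		return base
	}
	return base + time.Duration(rand.Int63n(int64(base)/10+1))
}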
20  cache/stats.go  (vendored, new file)
@@ -0,0 +1,20 @@
package cache

import "sync/atomic"

type Stats struct {
	Hits   uint64
	Misses uint64
}

func (s *Stats) AddHits() {
	atomic.AddUint64(&s.Hits, 1)
}

func (s *Stats) AddMisses() {
	atomic.AddUint64(&s.Misses, 1)
}

func (c *Cache) Stats() *Stats {
	return c.stats
}
52  cache/tiny_lfu.go  (vendored, new file)
@@ -0,0 +1,52 @@
package cache

import (
	"time"

	"github.com/charlienet/go-mixed/locker"
	"github.com/vmihailenco/go-tinylfu"
)

type TinyLFU struct {
	mu  locker.Locker
	lfu *tinylfu.T
	ttl time.Duration
}

func NewTinyLFU(size int, ttl time.Duration) *TinyLFU {
	return &TinyLFU{
		mu:  locker.NewLocker(),
		lfu: tinylfu.New(size, 100000),
		ttl: ttl,
	}
}

func (c *TinyLFU) Set(key string, b []byte, expire time.Duration) {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.lfu.Set(&tinylfu.Item{
		Key:      key,
		Value:    b,
		ExpireAt: time.Now().Add(c.ttl),
	})
}

func (c *TinyLFU) Get(key string) ([]byte, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()

	val, ok := c.lfu.Get(key)
	if !ok {
		return nil, false
	}

	return val.([]byte), true
}

func (c *TinyLFU) Del(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.lfu.Del(key)
}
57  cache/tiny_lfu_test.go  (vendored, new file)
@@ -0,0 +1,57 @@
package cache_test

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/charlienet/go-mixed/cache"
	"github.com/charlienet/go-mixed/rand"
)

func TestTinyGet(t *testing.T) {
	strFor := func(i int) string {
		return fmt.Sprintf("a string %d", i)
	}
	keyName := func(i int) string {
		return fmt.Sprintf("key-%00000d", i)
	}

	mycache := cache.NewTinyLFU(1000, 1*time.Second)
	size := 50000
	// Put a bunch of stuff in the cache with a TTL of 1 second
	for i := 0; i < size; i++ {
		key := keyName(i)
		mycache.Set(key, []byte(strFor(i)), time.Second*2)
	}

	// Read stuff for a bit longer than the TTL - that's when the corruption occurs
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	done := ctx.Done()

loop:
	for {
		select {
		case <-done:
			// this is expected
			break loop
		default:
			i := rand.Intn(size)
			key := keyName(i)

			b, ok := mycache.Get(key)
			if !ok {
				continue loop
			}

			got := string(b)
			expected := strFor(i)
			if got != expected {
				t.Fatalf("expected=%q got=%q key=%q", expected, got, key)
			}
		}
	}
}
210  calendar/calendar.go  (new file)
@@ -0,0 +1,210 @@
package calendar

import (
	"time"
)

var WeekStartDay time.Weekday = time.Sunday

type Calendar struct {
	time.Time
	weekStartsAt time.Weekday
}

func BeginningOfMinute() Calendar {
	return Create(time.Now()).BeginningOfMinute()
}

func BeginningOfHour() Calendar {
	return Create(time.Now()).BeginningOfHour()
}

func BeginningOfDay() Calendar {
	return Create(time.Now()).BeginningOfDay()
}

func BeginningOfWeek() Calendar {
	return Create(time.Now()).BeginningOfWeek()
}

func BeginningOfMonth() Calendar {
	return Create(time.Now()).BeginningOfMonth()
}

func BeginningOfQuarter() Calendar {
	return Create(time.Now()).BeginningOfQuarter()
}

func BeginningOfYear() Calendar {
	return Create(time.Now()).BeginningOfYear()
}

func EndOfMinute() Calendar {
	return Create(time.Now()).EndOfMinute()
}

func EndOfHour() Calendar {
	return Create(time.Now()).EndOfHour()
}

func EndOfDay() Calendar {
	return Create(time.Now()).EndOfDay()
}

func EndOfWeek() Calendar {
	return Create(time.Now()).EndOfWeek()
}

func EndOfMonth() Calendar {
	return Create(time.Now()).EndOfMonth()
}

func EndOfQuarter() Calendar {
	return Create(time.Now()).EndOfQuarter()
}

func EndOfYear() Calendar {
	return Create(time.Now()).EndOfYear()
}

func (c Calendar) WeekStartsAt(day time.Weekday) Calendar {
	return Calendar{
		Time:         c.Time,
		weekStartsAt: day,
	}
}

func (c Calendar) BeginningOfMinute() Calendar {
	return Calendar{Time: c.Truncate(time.Minute)}

}

func (c Calendar) BeginningOfHour() Calendar {
	y, m, d := c.Date()
	return Calendar{
		Time:         time.Date(y, m, d, c.Hour(), 0, 0, 0, c.Location()),
		weekStartsAt: c.weekStartsAt,
	}
}

func (c Calendar) BeginningOfDay() Calendar {
	y, m, d := c.Date()

	return Calendar{
		Time: time.Date(y, m, d, 0, 0, 0, 0, c.Location()),
	}
}

func (c Calendar) BeginningOfWeek() Calendar {
	t := c.BeginningOfDay()
	weekday := int(t.Weekday())

	if c.weekStartsAt != time.Sunday {
		weekStartDayInt := int(c.weekStartsAt)

		if weekday < weekStartDayInt {
			weekday = weekday + 7 - weekStartDayInt
		} else {
			weekday = weekday - weekStartDayInt
		}
	}

	return Calendar{
		Time:         t.AddDate(0, 0, -weekday),
		weekStartsAt: c.weekStartsAt,
	}
}

func (c Calendar) BeginningOfMonth() Calendar {
	y, m, _ := c.Date()

	return Calendar{
		Time:         time.Date(y, m, 1, 0, 0, 0, 0, c.Location()),
		weekStartsAt: c.weekStartsAt,
	}
}

func (c Calendar) BeginningOfQuarter() Calendar {
	month := c.BeginningOfMonth()
	offset := (int(month.Month()) - 1) % 3

	return Calendar{
		Time:         month.AddDate(0, -offset, 0),
		weekStartsAt: c.weekStartsAt,
	}
}

func (c Calendar) BeginningOfYear() Calendar {
	y, _, _ := c.Date()

	return Calendar{
		Time:         time.Date(y, time.January, 1, 0, 0, 0, 0, c.Location()),
		weekStartsAt: c.weekStartsAt}
}

func (c Calendar) EndOfMinute() Calendar {
	n := c.BeginningOfMinute()

	return Calendar{
		Time:         n.Add(time.Minute - time.Nanosecond),
		weekStartsAt: c.weekStartsAt,
	}
}

func (c Calendar) EndOfHour() Calendar {
	n := c.BeginningOfHour()

	return Calendar{
		Time:         n.Add(time.Hour - time.Nanosecond),
		weekStartsAt: c.weekStartsAt,
	}
}

func (c Calendar) EndOfDay() Calendar {
	y, m, d := c.Date()

	return Calendar{
		Time:         time.Date(y, m, d, 23, 59, 59, int(time.Second-time.Nanosecond), c.Location()),
		weekStartsAt: c.weekStartsAt,
	}
}

func (c Calendar) EndOfWeek() Calendar {
	n := c.BeginningOfWeek()

	return Calendar{
		Time:         n.AddDate(0, 0, 7).Add(-time.Nanosecond),
		weekStartsAt: c.weekStartsAt,
	}
}

func (c Calendar) EndOfMonth() Calendar {
	n := c.BeginningOfMonth()

	return Calendar{
		Time:         n.AddDate(0, 1, 0).Add(-time.Nanosecond),
		weekStartsAt: c.weekStartsAt,
	}
}

func (c Calendar) EndOfQuarter() Calendar {
	n := c.BeginningOfQuarter()

	return Calendar{
		Time:         n.AddDate(0, 3, 0).Add(-time.Nanosecond),
		weekStartsAt: c.weekStartsAt,
	}
}

func (c Calendar) EndOfYear() Calendar {
	n := c.BeginningOfYear()

	return Calendar{
		Time:         n.AddDate(1, 0, 0).Add(-time.Nanosecond),
		weekStartsAt: c.weekStartsAt,
	}
}

func (c Calendar) ToTime() time.Time {
	return c.Time
}
38  calendar/calendar_test.go  (new file)
@@ -0,0 +1,38 @@
package calendar_test

import (
	"testing"
	"time"

	"github.com/charlienet/go-mixed/calendar"
	"github.com/stretchr/testify/assert"
)

var format = "2006-01-02 15:04:05.999999999"

func TestToday(t *testing.T) {
	t.Log(calendar.Today())
}

func TestBeginningOf(t *testing.T) {
	a := assert.New(t)

	n := time.Date(2022, 11, 9, 14, 28, 34, 123456789, time.UTC)
	a.Equal("2022-11-06 00:00:00", calendar.Create(n).BeginningOfWeek().String())
	a.Equal("2022-11-07 00:00:00", calendar.Create(n).WeekStartsAt(time.Monday).BeginningOfWeek().Format(format))
	a.Equal("2022-11-09 14:00:00", calendar.Create(n).BeginningOfHour().Format(format))
	a.Equal("2022-11-01 00:00:00", calendar.Create(n).BeginningOfMonth().Format(format))
	a.Equal("2022-10-01 00:00:00", calendar.Create(n).BeginningOfQuarter().Format(format))
}

func TestEndOf(t *testing.T) {
	a := assert.New(t)

	n := time.Date(2022, 11, 9, 14, 28, 34, 123456789, time.UTC)
	a.Equal("2022-11-09 14:28:59.999999999", calendar.Create(n).EndOfMinute().Format(format))
	a.Equal("2022-11-09 14:59:59.999999999", calendar.Create(n).EndOfHour().Format(format))
	a.Equal("2022-11-09 23:59:59.999999999", calendar.Create(n).EndOfDay().Format(format))
	a.Equal("2022-11-30 23:59:59.999999999", calendar.Create(n).EndOfMonth().Format(format))
	a.Equal("2022-12-31 23:59:59.999999999", calendar.Create(n).EndOfQuarter().Format(format))
	a.Equal("2022-12-31 23:59:59.999999999", calendar.Create(n).EndOfYear().Format(format))
}
40  calendar/creator.go  (new file)
@@ -0,0 +1,40 @@
package calendar

import "time"

func Now() Calendar {
	return Create(time.Now())
}

func Today() Calendar {
	return Now().BeginningOfDay()
}

func Create(t time.Time) Calendar {
	return Calendar{
		Time:         t,
		weekStartsAt: WeekStartDay,
	}
}

func CreateFromTimestamp(timestamp int64) Calendar {
	return Create(time.Unix(timestamp, 0))
}

func CreateFromTimestampMilli(timestamp int64) Calendar {
	return Create(time.Unix(timestamp/1e3, (timestamp%1e3)*1e6))
}

func CreateFromTimestampMicro(timestamp int64) Calendar {
	return Create(time.Unix(timestamp/1e6, (timestamp%1e6)*1e3))
}

func CreateFromTimestampNano(timestamp int64) Calendar {
	return Create(time.Unix(timestamp/1e9, timestamp%1e9))
}

func create(year, month, day, hour, minute, second, nanosecond int) Calendar {
	return Calendar{
		Time: time.Date(year, time.Month(month), day, hour, minute, second, nanosecond, time.Local),
	}
}
@@ -1,4 +1,4 @@
package dateconv
package calendar

import "time"
74  calendar/output.go  (new file)
@@ -0,0 +1,74 @@
package calendar

import "time"

// layout template constants
const (
	ANSICLayout              = time.ANSIC
	UnixDateLayout           = time.UnixDate
	RubyDateLayout           = time.RubyDate
	RFC822Layout             = time.RFC822
	RFC822ZLayout            = time.RFC822Z
	RFC850Layout             = time.RFC850
	RFC1123Layout            = time.RFC1123
	RFC1123ZLayout           = time.RFC1123Z
	RssLayout                = time.RFC1123Z
	KitchenLayout            = time.Kitchen
	RFC2822Layout            = time.RFC1123Z
	CookieLayout             = "Monday, 02-Jan-2006 15:04:05 MST"
	RFC3339Layout            = "2006-01-02T15:04:05Z07:00"
	RFC3339MilliLayout       = "2006-01-02T15:04:05.999Z07:00"
	RFC3339MicroLayout       = "2006-01-02T15:04:05.999999Z07:00"
	RFC3339NanoLayout        = "2006-01-02T15:04:05.999999999Z07:00"
	ISO8601Layout            = "2006-01-02T15:04:05-07:00"
	ISO8601MilliLayout       = "2006-01-02T15:04:05.999-07:00"
	ISO8601MicroLayout       = "2006-01-02T15:04:05.999999-07:00"
	ISO8601NanoLayout        = "2006-01-02T15:04:05.999999999-07:00"
	RFC1036Layout            = "Mon, 02 Jan 06 15:04:05 -0700"
	RFC7231Layout            = "Mon, 02 Jan 2006 15:04:05 GMT"
	DayDateTimeLayout        = "Mon, Jan 2, 2006 3:04 PM"
	DateTimeLayout           = "2006-01-02 15:04:05"
	DateTimeMilliLayout      = "2006-01-02 15:04:05.999"
	DateTimeMicroLayout      = "2006-01-02 15:04:05.999999"
	DateTimeNanoLayout       = "2006-01-02 15:04:05.999999999"
	ShortDateTimeLayout      = "20060102150405"
	ShortDateTimeMilliLayout = "20060102150405.999"
	ShortDateTimeMicroLayout = "20060102150405.999999"
	ShortDateTimeNanoLayout  = "20060102150405.999999999"
	DateLayout               = "2006-01-02"
	DateMilliLayout          = "2006-01-02.999"
	DateMicroLayout          = "2006-01-02.999999"
	DateNanoLayout           = "2006-01-02.999999999"
	ShortDateLayout          = "20060102"
	ShortDateMilliLayout     = "20060102.999"
	ShortDateMicroLayout     = "20060102.999999"
	ShortDateNanoLayout      = "20060102.999999999"
	TimeLayout               = "15:04:05"
	TimeMilliLayout          = "15:04:05.999"
	TimeMicroLayout          = "15:04:05.999999"
	TimeNanoLayout           = "15:04:05.999999999"
	ShortTimeLayout          = "150405"
	ShortTimeMilliLayout     = "150405.999"
	ShortTimeMicroLayout     = "150405.999999"
	ShortTimeNanoLayout      = "150405.999999999"
)

func (c Calendar) String() string {
	return c.ToDateTimeString()
}

func (c Calendar) ToDateTimeString() string {
	return c.Format(DateTimeLayout)
}

func (c Calendar) ToDateTimeInt() int {
	return c.ToShortDateInt()*1000000 + c.Hour()*10000 + c.Minute()*100 + c.Second()
}

func (c Calendar) ToShortDateInt() int {
	return c.Year()*10000 + int(c.Month())*100 + c.Day()
}

func (c Calendar) ToMonthInt() int {
	return c.Year()*100 + int(c.Month())
}
19  calendar/output_test.go  (new file)
@@ -0,0 +1,19 @@
package calendar_test

import (
	"testing"
	"time"

	"github.com/charlienet/go-mixed/calendar"
	"github.com/stretchr/testify/assert"
)

func TestDayInt(t *testing.T) {
	assert := assert.New(t)

	n := time.Date(2022, 11, 9, 14, 28, 34, 123456789, time.UTC)

	assert.Equal(20221109, calendar.Create(n).ToShortDateInt())
	assert.Equal(202211, calendar.Create(n).ToMonthInt())
	assert.Equal(20221109142834, calendar.Create(n).ToDateTimeInt())
}
24  cleanup_guard/cleanup_guard.go  (new file)
@@ -0,0 +1,24 @@
package cleanupguard

import "sync"

type CleanupGuard struct {
	enable bool
	fn     func()
	mutex  sync.Mutex
}

// NewCleanupGuard creates a new cleanup guard
func NewCleanupGuard(fn func()) CleanupGuard {
	return CleanupGuard{fn: fn, enable: true}
}

func (g *CleanupGuard) Enable() {
	g.mutex.Lock()
	defer g.mutex.Unlock()
	g.enable = true
}

func (g *CleanupGuard) Run() {
	g.fn()
}
15  collections/deque/deque.go  (new file)
@@ -0,0 +1,15 @@
package deque

import "github.com/charlienet/go-mixed/locker"

type Deque[T any] struct {
	locker locker.RWLocker
}

func New[T any]() *Deque[T] {
	return &Deque[T]{
		locker: locker.EmptyLocker,
	}
}

1  collections/deque/deque_test.go  (new file)
@@ -0,0 +1 @@
package deque_test
199  collections/list/array_list.go  (new file)
@@ -0,0 +1,199 @@
package list

import (
	"github.com/charlienet/go-mixed/locker"
)

const minCapacity = 16

type ArrayList[T any] struct {
	buf    []T
	head   int
	tail   int
	minCap int
	list[T]
}

func NewArrayList[T any](elems ...T) *ArrayList[T] {
	minCap := minCapacity

	size := len(elems)
	for minCap < size {
		minCap <<= 1
	}

	var tail int = size
	var buf []T

	if len(elems) > 0 {
		buf = make([]T, minCap)
		copy(buf, elems)
	}

	l := &ArrayList[T]{
		list:   list[T]{size: size, locker: locker.EmptyLocker},
		buf:    buf,
		tail:   tail,
		minCap: minCap,
	}

	// for _, v := range elems {
	// 	l.PushBack(v)
	// }

	return l
}

func (l *ArrayList[T]) PushFront(v T) {
	l.locker.Lock()
	defer l.locker.Unlock()

	l.grow()

	l.head = l.prev(l.head)
	l.buf[l.head] = v
	l.size++
}

func (l *ArrayList[T]) PushBack(v T) {
	l.locker.Lock()
	defer l.locker.Unlock()

	l.grow()

	l.buf[l.tail] = v

	l.tail = l.next(l.tail)
	l.size++
}

func (l *ArrayList[T]) PopFront() T {
	l.locker.Lock()
	defer l.locker.Unlock()

	if l.size <= 0 {
		panic("list: PopFront() called on empty list")
	}
	ret := l.buf[l.head]
	var zero T
	l.buf[l.head] = zero

	l.head = l.next(l.head)
	l.size--

	l.shrink()
	return ret
}

func (l *ArrayList[T]) PopBack() T {
	l.locker.Lock()
	defer l.locker.Unlock()

	l.tail = l.prev(l.tail)

	ret := l.buf[l.tail]
	var zero T
	l.buf[l.tail] = zero
	l.size--

	l.shrink()
	return ret
}

func (l *ArrayList[T]) RemoveAt(at int) T {
	if at < 0 || at >= l.Size() {
		panic(ErrorOutOffRange)
	}

	l.locker.Lock()
	defer l.locker.Unlock()

	rm := (l.head + at) & (len(l.buf) - 1)
	if at*2 < l.size {
		for i := 0; i < at; i++ {
			prev := l.prev(rm)
			l.buf[prev], l.buf[rm] = l.buf[rm], l.buf[prev]
			rm = prev
		}
		return l.PopFront()
	}
	swaps := l.size - at - 1
	for i := 0; i < swaps; i++ {
		next := l.next(rm)
		l.buf[rm], l.buf[next] = l.buf[next], l.buf[rm]
		rm = next
	}
	return l.PopBack()
}

func (l *ArrayList[T]) Front() T {
	l.locker.RLock()
	defer l.locker.RUnlock()

	return l.buf[l.head]
}

func (l *ArrayList[T]) Back() T {
	l.locker.RLock()
	defer l.locker.RUnlock()

	return l.buf[l.tail]
}

func (l *ArrayList[T]) ForEach(fn func(T)) {
	l.locker.RLock()
	defer l.locker.RUnlock()

	n := l.head
	for i := 0; i < l.size; i++ {
		fn(l.buf[n])

		n = l.next(n)
	}
}

func (q *ArrayList[T]) prev(i int) int {
	return (i - 1) & (len(q.buf) - 1)
}

func (l *ArrayList[T]) next(i int) int {
	return (i + 1) & (len(l.buf) - 1)
}

func (l *ArrayList[T]) grow() {
	if l.size != len(l.buf) {
		return
	}
	if len(l.buf) == 0 {
		if l.minCap == 0 {
			l.minCap = minCapacity
		}
		l.buf = make([]T, l.minCap)
		return
	}

	l.resize()
}

func (l *ArrayList[T]) shrink() {
	if len(l.buf) > l.minCap && (l.size<<2) == len(l.buf) {
		l.resize()
	}
}

// resize resizes the list to fit exactly twice its current contents. This is
// used to grow the list when it is full, and also to shrink it when it is
// only a quarter full.
func (l *ArrayList[T]) resize() {
	newBuf := make([]T, l.size<<1)
	if l.tail > l.head {
		copy(newBuf, l.buf[l.head:l.tail])
	} else {
		n := copy(newBuf, l.buf[l.head:])
		copy(newBuf[n:], l.buf[:l.tail])
	}

	l.head = 0
	l.tail = l.size
	l.buf = newBuf
}
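The prev/next helpers above rely on the ring buffer's length always being a power of two, so `index & (len(buf) - 1)` wraps an index the same way a modulo would, including when stepping backwards past zero. A small standalone sketch of that trick (names and values are illustrative, not from the diff):

package main

import "fmt"

func main() {
	const size = 16 // power of two, as ArrayList guarantees for its buffer
	mask := size - 1

	fmt.Println((0 - 1) & mask)     // 15: stepping back from index 0 wraps to the end
	fmt.Println((15 + 1) & mask)    // 0:  stepping forward from 15 wraps to the start
	fmt.Println(20%size, 20&mask)   // 4 4: the mask equals %size for non-negative values
}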
43  collections/list/array_list_test.go  (new file)
@@ -0,0 +1,43 @@
package list_test

import (
	"testing"

	"github.com/charlienet/go-mixed/collections/list"
)

func TestNewArrayList(t *testing.T) {
	l := list.NewArrayList(1, 2, 3)

	l.ForEach(func(i int) {
		t.Log(i)
	})

}

func TestArrayPushBack(t *testing.T) {
	l := list.NewArrayList[int]()

	l.PushBack(1)
	l.PushBack(2)
	l.PushBack(3)

	l.ForEach(func(i int) {
		t.Log(i)
	})
}

func TestArrayPushFront(t *testing.T) {
	l := list.NewArrayList[int]()

	l.PushFront(1)
	l.PushFront(2)
	l.PushFront(3)

	l.PushBack(99)
	l.PushBack(88)

	l.ForEach(func(i int) {
		t.Log(i)
	})
}
163  collections/list/linked_list.go  (new file)
@@ -0,0 +1,163 @@
package list

import (
	"github.com/charlienet/go-mixed/locker"
)

type LinkedList[T any] struct {
	list[T]
	front, tail *LinkedNode[T]
}

type LinkedNode[T any] struct {
	Value      T
	Prev, Next *LinkedNode[T]
}

// NewLinkedList initializes a linked list
func NewLinkedList[T any](elems ...T) *LinkedList[T] {
	l := &LinkedList[T]{
		list: list[T]{locker: locker.EmptyLocker},
	}

	for _, e := range elems {
		l.pushBackNode(&LinkedNode[T]{Value: e})
	}

	return l
}

func (l *LinkedList[T]) PushBack(v T) *LinkedList[T] {
	l.locker.Lock()
	defer l.locker.Unlock()

	l.pushBackNode(&LinkedNode[T]{Value: v})

	return l
}

func (l *LinkedList[T]) PushFront(v T) *LinkedList[T] {
	l.locker.Lock()
	defer l.locker.Unlock()

	l.pushFrontNode(&LinkedNode[T]{Value: v})

	return l
}

func (l *LinkedList[T]) FrontNode() *LinkedNode[T] {
	return l.front
}

func (l *LinkedList[T]) Front() T {
	return l.FrontNode().Value
}

func (l *LinkedList[T]) BackNode() *LinkedNode[T] {
	return l.tail
}

func (l *LinkedList[T]) Back() T {
	if l.size == 0 {
		panic(ErrorOutOffRange)
	}
	return l.tail.Value
}

func (l *LinkedList[T]) ForEach(fn func(T) bool) {
	l.locker.RLock()
	defer l.locker.RUnlock()

	for current := l.front; current != nil; current = current.Next {
		if fn(current.Value) {
			break
		}
	}
}

func (l *LinkedList[T]) GetAt(i int) T {
	if i <= l.Size() {
		var n int
		for current := l.front; current != nil; current = current.Next {
			if n == i {
				return current.Value
			}
			n++
		}
	}

	return *new(T)
}

func (l *LinkedList[T]) Remove(n *LinkedNode[T]) {
	l.locker.Lock()
	defer l.locker.Unlock()

	if n.Next != nil {
		n.Next.Prev = n.Prev
	} else {
		l.tail = n.Prev
	}

	if n.Prev != nil {
		n.Prev.Next = n.Next
	} else {
		l.front = n.Next
	}

	n.Next = nil
	n.Prev = nil

	l.size--
}

func (l *LinkedList[T]) RemoveAt(index int) {
	l.locker.Lock()
	defer l.locker.Unlock()

	var i int
	for current := l.front; current != nil; current = current.Next {
		if i == index {

			// relink the neighbours
			current.Prev.Next = current.Next
			current.Next.Prev = current.Prev

			current.Prev = nil
			current.Next = nil

			l.size--
			break
		}

		i++
	}
}

func (l *LinkedList[T]) pushBackNode(n *LinkedNode[T]) {
	n.Next = nil
	n.Prev = l.tail

	if l.tail != nil {
		l.tail.Next = n
	} else {
		l.front = n
	}

	l.tail = n

	l.size++
}

func (l *LinkedList[T]) pushFrontNode(n *LinkedNode[T]) {
	n.Next = l.front
	n.Prev = nil
	if l.front != nil {
		l.front.Prev = n
	} else {
		l.tail = n
	}
	l.front = n

	l.size++
}
103  collections/list/linked_list_test.go  (new file)
@@ -0,0 +1,103 @@
package list_test

import (
	"testing"

	"github.com/charlienet/go-mixed/collections/list"
	"github.com/stretchr/testify/assert"
)

func TestPushBack(t *testing.T) {
	l := list.NewLinkedList[int]()
	l.PushBack(1)
	l.PushBack(2)
	l.PushBack(3)

	l.ForEach(func(i int) bool {
		t.Log(i)
		return false
	})
}

func TestPushFront(t *testing.T) {
	l := list.NewLinkedList[int]()
	l.PushFront(1)
	l.PushFront(2)
	l.PushFront(3)

	l.ForEach(func(i int) bool {
		t.Log(i)
		return false
	})
}

func TestRemoveAt(t *testing.T) {

	l := list.NewLinkedList[int]()
	l.PushBack(1)
	l.PushBack(2)
	l.PushBack(3)

	l.RemoveAt(1)

	l.ForEach(func(i int) bool {
		t.Log(i)
		return false
	})

	t.Log()

	l.RemoveAt(0)
	l.ForEach(func(i int) bool {
		t.Log(i)
		return false
	})

}

func TestSize(t *testing.T) {
	l := list.NewLinkedList[int]()
	l.PushBack(1)
	l.PushBack(2)
	l.PushBack(3)

	assert.Equal(t, 3, l.Size())

	l.RemoveAt(0)
	assert.Equal(t, 2, l.Size())
}

func TestLinkedListToSlice(t *testing.T) {
	l := list.NewLinkedList[int]()
	l.PushBack(1)
	l.PushBack(2)
	l.PushBack(3)

	s := l.ToSlice()
	t.Log(s)
}

func BenchmarkLinkedList(b *testing.B) {
	l := list.NewLinkedList[int]()
	l.Synchronize()

	for i := 0; i < b.N; i++ {
		l.PushBack(i)
	}
}

func TestRemoveNode(t *testing.T) {
	l := list.NewLinkedList(1, 2, 3, 4, 5)

	// l.ForEach(func(i int) bool {
	// 	t.Log(i)

	// 	return false
	// })

	l.RemoveAt(1)
	for currnet := l.FrontNode(); currnet != nil; currnet = currnet.Next {
		t.Logf("%p %+v", currnet, currnet)
	}

}
35  collections/list/list.go  (new file)
@@ -0,0 +1,35 @@
package list

import (
	"errors"

	"github.com/charlienet/go-mixed/locker"
)

var ErrorOutOffRange = errors.New("out of range")

type List[T any] interface {
}

type list[T any] struct {
	size   int
	locker locker.RWLocker
}

func (l *list[T]) Synchronize() {
	l.locker = locker.NewRWLocker()
}

func (l *list[T]) ForEach(fn func(T) bool) { panic("Not Implemented") }

func (l *LinkedList[T]) ToSlice() []T {
	s := make([]T, 0, l.Size())
	l.ForEach(func(t T) bool {
		s = append(s, t)
		return false
	})

	return s
}

func (l *list[T]) Size() int { return l.size }
@@ -1 +0,0 @@
package collections

@@ -1,11 +0,0 @@
package collections

import "github.com/charlienet/go-mixed/locker"

type options struct {
	mu locker.RWLocker
}

func emptyLocker() locker.RWLocker {
	return locker.EmptyLocker
}
35  collections/queue/queue.go  (new file)
@@ -0,0 +1,35 @@
package queue

import (
	"github.com/charlienet/go-mixed/collections/list"
)

type Queue[T any] struct {
	list list.LinkedList[T]
}

func NewQueue[T any]() *Queue[T] {
	return &Queue[T]{}
}

func (q *Queue[T]) Synchronize() *Queue[T] {
	q.list.Synchronize()

	return q
}

func (q *Queue[T]) Push(v T) {

}

func (q *Queue[T]) Pop(v T) {

}

func (q *Queue[T]) Size() int {
	return q.list.Size()
}

func (q *Queue[T]) IsEmpty() bool {
	return false
}
1  collections/queue/queue_test.go  (new file)
@@ -0,0 +1 @@
package queue_test

1  collections/rbtree/rbtree.go  (new file)
@@ -0,0 +1 @@
package rbtree

1  collections/rbtree/rbtree_test.go  (new file)
@@ -0,0 +1 @@
package rbtree
@@ -1,42 +0,0 @@
package collections

import "sync"

type rw_queue[T any] struct {
	q  Queue[T]
	mu sync.Mutex
}

func (q *rw_queue[T]) Push(v T) {
	q.mu.Lock()
	q.q.Put(v)
	q.mu.Unlock()
}

func (q *rw_queue[T]) Pop() T {
	q.mu.Lock()
	defer q.mu.Unlock()

	return q.q.Poll()
}

func (q *rw_queue[T]) Peek() T {
	q.mu.Lock()
	defer q.mu.Unlock()

	return q.q.Peek()
}

func (q *rw_queue[T]) Size() int {
	q.mu.Lock()
	defer q.mu.Unlock()

	return q.q.Size()
}

func (q *rw_queue[T]) IsEmpty() bool {
	q.mu.Lock()
	defer q.mu.Unlock()

	return q.q.IsEmpty()
}
@@ -1,4 +1,4 @@
package collections
package stack

import "sync"
@@ -1,13 +1,13 @@
package collections_test
package stack_test

import (
	"testing"

	"github.com/charlienet/go-mixed/collections"
	"github.com/charlienet/go-mixed/collections/stack"
)

func TestStack(t *testing.T) {
	arrayStack := collections.NewArrayStack[string]()
	arrayStack := stack.NewArrayStack[string]()
	arrayStack.Push("cat")
	arrayStack.Push("dog")
	arrayStack.Push("hen")
17  compiled_buffer/regex2_test.go  (new file)
@@ -0,0 +1,17 @@
package compiledbuffer

import (
	"testing"

	"github.com/dlclark/regexp2"
)

func TestCom(t *testing.T) {
	regex, err := regexp2.Compile(`^\d{11}[;;](?!(37|38))\d{2}\d{6}$`, regexp2.None)
	if err != nil {
		t.Fatal(err)
	}

	t.Log(regex.MatchString("14610522152;37764800"))
	t.Log(regex.MatchString("14610522152;33764800"))
}
@@ -42,7 +42,8 @@ func (z *zipPackage) Write(out *os.File) error {
	zipWriter := zip.NewWriter(out)
	defer zipWriter.Close()

	for _, f := range z.files {
	files := z.files
	for _, f := range files {
		fileWriter, err := zipWriter.Create(f.name)
		if err != nil {
			return err
@@ -1,12 +0,0 @@
package dateconv_test

import (
	"testing"

	"github.com/charlienet/go-mixed/dateconv"
)

func TestToday(t *testing.T) {
	today := dateconv.Today()
	t.Log(dateconv.TimeToString(&today))
}
@@ -1,43 +0,0 @@
package dateconv

import (
	"time"
)

const (
	layoutDate        = "2006-01-02"
	layoutTime        = "2006-01-02 15:04:05"
	layoutChineseDate = "2006年01月02日"
	layoutChineseTime = "2006年01月02日 15:04:05"
)

func Today() time.Time {
	t := time.Now()
	year, month, day := t.Date()
	return time.Date(year, month, day, 0, 0, 0, 0, t.Location())
}

// DateToInt converts a date to an integer (e.g. 20211222)
func DateToInt(date time.Time) int {
	return date.Year()*10000 + int(date.Month())*100 + date.Day()
}

// DateToString converts a date to a string
func DateToString(date *time.Time) string { return formatTime(date, layoutDate) }

// TimeToString converts a time to a string
func TimeToString(date *time.Time) string { return formatTime(date, layoutTime) }

// DateToChinese formats a date in Chinese
func DateToChinese(t *time.Time) string { return formatTime(t, layoutChineseDate) }

// TimeToChinese formats a time in Chinese
func TimeToChinese(t *time.Time) string { return formatTime(t, layoutChineseTime) }

func formatTime(t *time.Time, f string) string {
	if t == nil || t.IsZero() {
		return ""
	}

	return t.Format(f)
}
@@ -1,10 +0,0 @@
package dateconv

import (
	"testing"
)

func TestParseDuration(t *testing.T) {
	t.Log(ParseDuration(""))
	t.Log(ParseDuration("abc"))
}
3  db/readme.md  (new file)
@@ -0,0 +1,3 @@
# Data access layer

Uses gorm as the data access layer.
121  expr/expr.go
@@ -1,9 +1,128 @@
package expr

// If the condition is true, returns the first argument, otherwise the second
func If[T any](e bool, v1, v2 T) T {
func Ternary[T any](e bool, v1, v2 T) T {
	if e {
		return v1
	}
	return v2
}

func TernaryF[T any](condition bool, ifFunc func() T, elseFunc func() T) T {
	if condition {
		return ifFunc()
	}

	return elseFunc()
}

type ifElse[T any] struct {
	result T
	done   bool
}

func If[T any](condition bool, result T) *ifElse[T] {
	if condition {
		return &ifElse[T]{result, true}
	}

	var t T
	return &ifElse[T]{t, false}
}

func IfF[T any](condition bool, resultF func() T) *ifElse[T] {
	if condition {
		return &ifElse[T]{resultF(), true}
	}

	var t T
	return &ifElse[T]{t, false}
}

func (i *ifElse[T]) ElseIf(condition bool, result T) *ifElse[T] {
	if !i.done && condition {
		i.result = result
		i.done = true
	}

	return i
}

func (i *ifElse[T]) ElseIfF(condition bool, resultF func() T) *ifElse[T] {
	if !i.done && condition {
		i.result = resultF()
		i.done = true
	}

	return i
}

func (i *ifElse[T]) Else(result T) T {
	if i.done {
		return i.result
	}

	return result
}

func (i *ifElse[T]) ElseF(resultF func() T) T {
	if i.done {
		return i.result
	}

	return resultF()
}

type switchCase[T comparable, R any] struct {
	predicate T
	result    R
	done      bool
}

func Switch[T comparable, R any](predicate T) *switchCase[T, R] {
	var result R

	return &switchCase[T, R]{
		predicate,
		result,
		false,
	}
}

func SwitchF[T comparable, R any](predicate func() T) *switchCase[T, R] {
	return Switch[T, R](predicate())
}

func (s *switchCase[T, R]) Case(val T, result R) *switchCase[T, R] {
	if !s.done && s.predicate == val {
		s.result = result
		s.done = true
	}

	return s
}

func (s *switchCase[T, R]) CaseF(val T, cb func() R) *switchCase[T, R] {
	if !s.done && s.predicate == val {
		s.result = cb()
		s.done = true
	}

	return s
}

func (s *switchCase[T, R]) Default(result R) R {
	if !s.done {
		s.result = result
	}

	return s.result
}

func (s *switchCase[T, R]) DefaultF(cb func() R) R {
	if !s.done {
		s.result = cb()
	}

	return s.result
}
@@ -1,9 +1,31 @@
package expr

import "testing"
import (
	"testing"

func TestIf(t *testing.T) {
	"github.com/stretchr/testify/assert"
)

func TestTernary(t *testing.T) {
	v1 := 10
	v2 := 4
	t.Log(If(v1 > v2, v1, v2))
	t.Log(Ternary(v1 > v2, v1, v2))
}

func TestIf(t *testing.T) {
	is := assert.New(t)

	is.Equal(1, If(true, 1).ElseIf(false, 2).Else(3))
	is.Equal(1, If(true, 1).ElseIf(true, 2).Else(3))
	is.Equal(2, If(false, 1).ElseIf(true, 2).Else(3))
	is.Equal(3, If(false, 1).ElseIf(false, 2).Else(3))
}

func TestSwitch(t *testing.T) {
	is := assert.New(t)

	is.Equal(1, Switch[int, int](42).Case(42, 1).Case(1, 2).Default(3))
	is.Equal(1, Switch[int, int](42).Case(42, 1).Case(42, 2).Default(3))
	is.Equal(1, Switch[int, int](42).Case(1, 1).Case(42, 2).Default(3))
	is.Equal(1, Switch[int, int](42).Case(1, 1).Case(1, 2).Default(3))
}
30  go.mod
@@ -3,8 +3,9 @@ module github.com/charlienet/go-mixed
go 1.18

require (
	github.com/bits-and-blooms/bitset v1.2.2
	github.com/bits-and-blooms/bitset v1.3.3
	github.com/cespare/xxhash/v2 v2.1.2
	github.com/go-playground/universal-translator v0.18.0
	github.com/json-iterator/go v1.1.12
	github.com/shopspring/decimal v1.3.1
	github.com/spaolacci/murmur3 v1.1.0
@@ -12,25 +13,40 @@ require (
)

require (
	github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
	github.com/fastly/go-utils v0.0.0-20180712184237-d95a45783239 // indirect
	github.com/go-playground/locales v0.14.0 // indirect
	github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869 // indirect
	github.com/jonboulle/clockwork v0.3.0 // indirect
	github.com/lestrrat/go-envload v0.0.0-20180220120943-6ed08b54a570 // indirect
	github.com/lestrrat/go-strftime v0.0.0-20180220042222-ba3bf9c1d042 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/tebeka/strftime v0.1.5 // indirect
	github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect
	golang.org/x/text v0.3.7 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
)

require (
	github.com/alicebob/miniredis/v2 v2.23.0
	github.com/allegro/bigcache/v3 v3.0.2
	github.com/alphadose/haxmap v1.0.2
	github.com/antonfisher/nested-logrus-formatter v1.3.1
	github.com/coocood/freecache v1.2.1
	github.com/coocood/freecache v1.2.2
	github.com/dlclark/regexp2 v1.7.0
	github.com/go-redis/redis/v8 v8.11.5
	github.com/lestrrat/go-file-rotatelogs v0.0.0-20180223000712-d3151e2a480f
	github.com/pkg/errors v0.9.1
	github.com/sirupsen/logrus v1.8.1
	github.com/stretchr/testify v1.7.2
	golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
	golang.org/x/exp v0.0.0-20220608143224-64259d1afd70
	golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68 // indirect
	github.com/sirupsen/logrus v1.9.0
	github.com/stretchr/testify v1.8.0
	github.com/vmihailenco/go-tinylfu v0.2.2
	golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa
	golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e
	golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
	gopkg.in/natefinch/lumberjack.v2 v2.0.0

)
76
go.sum
76
go.sum
@ -1,28 +1,46 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
|
||||
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
|
||||
github.com/alicebob/miniredis/v2 v2.23.0 h1:+lwAJYjvvdIVg6doFHuotFjueJ/7KY10xo/vm3X3Scw=
|
||||
github.com/alicebob/miniredis/v2 v2.23.0/go.mod h1:XNqvJdQJv5mSuVMc0ynneafpnL/zv52acZ6kqeS0t88=
|
||||
github.com/allegro/bigcache/v3 v3.0.2 h1:AKZCw+5eAaVyNTBmI2fgyPVJhHkdWder3O9IrprcQfI=
|
||||
github.com/allegro/bigcache/v3 v3.0.2/go.mod h1:aPyh7jEvrog9zAwx5N7+JUQX5dZTSGpxF1LAR4dr35I=
|
||||
github.com/alphadose/haxmap v1.0.2 h1:ZZwFf15DcsAz4O+SyqrpH/xeO5Plh7mNRXDM9QIcWQQ=
|
||||
github.com/alphadose/haxmap v1.0.2/go.mod h1:Pq2IXbl9/ytYHfrIAd7rIVtZQ2ezdIhZfvdqOizDeWY=
|
||||
github.com/antonfisher/nested-logrus-formatter v1.3.1 h1:NFJIr+pzwv5QLHTPyKz9UMEoHck02Q9L0FP13b/xSbQ=
|
||||
github.com/antonfisher/nested-logrus-formatter v1.3.1/go.mod h1:6WTfyWFkBc9+zyBaKIqRrg/KwMqBbodBjgbHjDz7zjA=
|
||||
github.com/bits-and-blooms/bitset v1.2.2 h1:J5gbX05GpMdBjCvQ9MteIg2KKDExr7DrgK+Yc15FvIk=
|
||||
github.com/bits-and-blooms/bitset v1.2.2/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/bits-and-blooms/bitset v1.3.3 h1:R1XWiopGiXf66xygsiLpzLo67xEYvMkHw3w+rCOSAwg=
|
||||
github.com/bits-and-blooms/bitset v1.3.3/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/coocood/freecache v1.2.1 h1:/v1CqMq45NFH9mp/Pt142reundeBM0dVUD3osQBeu/U=
|
||||
github.com/coocood/freecache v1.2.1/go.mod h1:RBUWa/Cy+OHdfTGFEhEuE1pMCMX51Ncizj7rthiQ3vk=
|
||||
github.com/coocood/freecache v1.2.2 h1:UPkJCxhRujykq1jXuwxAPgDHnm6lKGrLZPnuHzgWRtE=
|
||||
github.com/coocood/freecache v1.2.2/go.mod h1:RBUWa/Cy+OHdfTGFEhEuE1pMCMX51Ncizj7rthiQ3vk=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo=
|
||||
github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fastly/go-utils v0.0.0-20180712184237-d95a45783239 h1:Ghm4eQYC0nEPnSJdVkTrXpu9KtoVCSo1hg7mtI7G9KU=
|
||||
github.com/fastly/go-utils v0.0.0-20180712184237-d95a45783239/go.mod h1:Gdwt2ce0yfBxPvZrHkprdPPTTS3N5rwmLE8T22KBXlw=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU=
|
||||
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
|
||||
github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho=
|
||||
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
|
||||
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
|
||||
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
@ -41,10 +59,18 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869 h1:IPJ3dvxmJ4uczJe5YQdrYB16oTJlGSC/OyZDqUk9xX4=
|
||||
github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869/go.mod h1:cJ6Cj7dQo+O6GJNiMx+Pa94qKj+TG8ONdKHgMNIyyag=
|
||||
github.com/jonboulle/clockwork v0.3.0 h1:9BSCMi8C+0qdApAp4auwX0RkLGUjs956h0EkuQymUhg=
|
||||
github.com/jonboulle/clockwork v0.3.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/lestrrat/go-envload v0.0.0-20180220120943-6ed08b54a570 h1:0iQektZGS248WXmGIYOwRXSQhD4qn3icjMpuxwO7qlo=
|
||||
github.com/lestrrat/go-envload v0.0.0-20180220120943-6ed08b54a570/go.mod h1:BLt8L9ld7wVsvEWQbuLrUZnCMnUmLZ+CGDzKtclrTlE=
|
||||
github.com/lestrrat/go-file-rotatelogs v0.0.0-20180223000712-d3151e2a480f h1:sgUSP4zdTUZYZgAGGtN5Lxk92rK+JUFOwf+FT99EEI4=
|
||||
github.com/lestrrat/go-file-rotatelogs v0.0.0-20180223000712-d3151e2a480f/go.mod h1:UGmTpUd3rjbtfIpwAPrcfmGf/Z1HS95TATB+m57TPB8=
|
||||
github.com/lestrrat/go-strftime v0.0.0-20180220042222-ba3bf9c1d042 h1:Bvq8AziQ5jFF4BHGAEDSqwPW1NJS3XshxbRCxtjFAZc=
|
||||
github.com/lestrrat/go-strftime v0.0.0-20180220042222-ba3bf9c1d042/go.mod h1:TPpsiPUEh0zFL1Snz4crhMlBe60PYxRHr5oFF3rRYg0=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
@ -60,25 +86,33 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
|
||||
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
|
||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
|
||||
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s=
|
||||
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/tebeka/strftime v0.1.5 h1:1NQKN1NiQgkqd/2moD6ySP/5CoZQsKa1d3ZhJ44Jpmg=
|
||||
github.com/tebeka/strftime v0.1.5/go.mod h1:29/OidkoWHdEKZqzyDLUyC+LmgDgdHo4WAFCDT7D/Ig=
|
||||
github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho=
|
||||
github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE=
|
||||
github.com/vmihailenco/go-tinylfu v0.2.2 h1:H1eiG6HM36iniK6+21n9LLpzx1G9R3DJa2UjUjbynsI=
|
||||
github.com/vmihailenco/go-tinylfu v0.2.2/go.mod h1:CutYi2Q9puTxfcolkliPq4npPuofg9N9t8JVrjzwa3Q=
|
||||
github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw=
|
||||
github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM=
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20220608143224-64259d1afd70 h1:8uGxpY2cLF9H/NSHUiEWUIBZqIcsMzMWIMPCCUkyYgc=
|
||||
golang.org/x/exp v0.0.0-20220608143224-64259d1afd70/go.mod h1:yh0Ynu2b5ZUe3MQfp2nM0ecK7wsgouWTDN0FNeJuIys=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
@ -94,15 +128,18 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68 h1:z8Hj/bl9cOV2grsOpEaQFUaly0JWN3i97mo3jXKJNp0=
|
||||
golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
@ -129,6 +166,7 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXL
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
16
hmac/hmac.go
16
hmac/hmac.go
@ -21,13 +21,13 @@ var _ crypto.Signer = &hashComparer{}
|
||||
type HMacFunc func(key, msg []byte) bytesconv.BytesResult
|
||||
|
||||
var hmacFuncs = map[string]HMacFunc{
|
||||
"HmacMD5": Md5,
|
||||
"HmacSHA1": Sha1,
|
||||
"HmacSHA224": Sha224,
|
||||
"HmacSHA256": Sha256,
|
||||
"HmacSHA384": Sha384,
|
||||
"HmacSHA512": Sha512,
|
||||
"HmacSM3": Sm3,
|
||||
"HMACMD5": Md5,
|
||||
"HMACSHA1": Sha1,
|
||||
"HMACSHA224": Sha224,
|
||||
"HMACSHA256": Sha256,
|
||||
"HMACSHA384": Sha384,
|
||||
"HMACSHA512": Sha512,
|
||||
"HMACSM3": Sm3,
|
||||
}
|
||||
|
||||
type hashComparer struct {
|
||||
@ -63,7 +63,7 @@ func ByName(name string) (HMacFunc, error) {
|
||||
return f, nil
|
||||
}
|
||||
|
||||
return nil, errors.New("Unsupported hash functions")
|
||||
return nil, errors.New("Unsupported hash function:" + name)
|
||||
}
|
||||
|
||||
func Md5(key, msg []byte) bytesconv.BytesResult { return sum(md5.New, key, msg) }
|
||||
|
124
ip_range/ip_range.go
Normal file
124
ip_range/ip_range.go
Normal file
@ -0,0 +1,124 @@
|
||||
package iprange
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var maskPattern = regexp.MustCompile(`\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b`)
|
||||
|
||||
type IpRange struct {
|
||||
segments []ipSegment
|
||||
}
|
||||
|
||||
type ipSegment interface {
|
||||
Contains(netip.Addr) bool
|
||||
}
|
||||
|
||||
type singleIp struct {
|
||||
ip netip.Addr
|
||||
}
|
||||
|
||||
func (i *singleIp) Contains(ip netip.Addr) bool {
|
||||
return i.ip.Compare(ip) == 0
|
||||
}
|
||||
|
||||
type prefixSegments struct {
|
||||
prefix netip.Prefix
|
||||
}
|
||||
|
||||
func (i *prefixSegments) Contains(ip netip.Addr) bool {
|
||||
return i.prefix.Contains(ip)
|
||||
}
|
||||
|
||||
type rangeSegment struct {
|
||||
start netip.Addr
|
||||
end netip.Addr
|
||||
}
|
||||
|
||||
func (r *rangeSegment) Contains(ip netip.Addr) bool {
|
||||
return ip.Compare(r.start) >= 0 && ip.Compare(r.end) <= 0
|
||||
}
|
||||
|
||||
// NewRange builds an IP range matcher. The following rules are supported:
// a single IP address, e.g. 192.168.100.2
// an IP range, e.g. 192.168.100.120-192.168.100.150
// a prefix/mask form, e.g. 192.168.2.0/24
|
||||
func NewRange(ip ...string) (*IpRange, error) {
|
||||
seg := make([]ipSegment, 0, len(ip))
|
||||
|
||||
for _, i := range ip {
|
||||
if s, err := createSegment(i); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
seg = append(seg, s)
|
||||
}
|
||||
}
|
||||
|
||||
return &IpRange{segments: seg}, nil
|
||||
}
|
||||
|
||||
func (r *IpRange) Contains(ip string) bool {
|
||||
addr, err := netip.ParseAddr(ip)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, v := range r.segments {
|
||||
if v.Contains(addr) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func createSegment(ip string) (ipSegment, error) {
|
||||
switch {
|
||||
case strings.Contains(ip, "-"):
|
||||
ips := strings.Split(ip, "-")
|
||||
if len(ips) != 2 {
|
||||
return nil, fmt.Errorf("IP范围定义错误:%s", ip)
|
||||
}
|
||||
|
||||
start, err := netip.ParseAddr(ips[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
end, err := netip.ParseAddr(ips[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &rangeSegment{
|
||||
start: start,
|
||||
end: end,
|
||||
}, nil
|
||||
|
||||
case strings.Contains(ip, "/"):
|
||||
sec := strings.Split(ip, "/")
|
||||
ip := sec[0]
|
||||
mask := sec[1]
|
||||
|
||||
if maskPattern.MatchString(mask) {
|
||||
mask = strconv.Itoa(MaskToBits(mask))
|
||||
}
|
||||
|
||||
if prefix, err := netip.ParsePrefix(ip + "/" + mask); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
return &prefixSegments{prefix: prefix}, nil
|
||||
}
|
||||
default:
|
||||
i, err := netip.ParseAddr(ip)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("格式错误, 不是有效的IP地址:%s", ip)
|
||||
}
|
||||
|
||||
return &singleIp{ip: i}, nil
|
||||
}
|
||||
}
|
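The NewRange doc comment above lists the three accepted rule formats. A minimal usage sketch, assuming the import path github.com/charlienet/go-mixed/ip_range with package name iprange as declared in the file:

```go
package main

import (
	"fmt"

	iprange "github.com/charlienet/go-mixed/ip_range"
)

func main() {
	// One rule per argument: a single IP, an explicit range, and a CIDR prefix.
	r, err := iprange.NewRange(
		"192.168.100.2",
		"192.168.100.120-192.168.100.150",
		"192.168.2.0/24",
	)
	if err != nil {
		panic(err)
	}

	fmt.Println(r.Contains("192.168.2.55"))    // true: inside 192.168.2.0/24
	fmt.Println(r.Contains("192.168.100.130")) // true: inside the explicit range
	fmt.Println(r.Contains("10.0.0.1"))        // false: matches no rule
}
```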
130
ip_range/ip_range_test.go
Normal file
130
ip_range/ip_range_test.go
Normal file
@ -0,0 +1,130 @@
|
||||
package iprange
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSingleErrorIP(t *testing.T) {
|
||||
values := []string{
|
||||
"192.168.01",
|
||||
"::",
|
||||
}
|
||||
|
||||
for _, v := range values {
|
||||
r, err := NewRange(v)
|
||||
|
||||
t.Log(r, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSingleIp(t *testing.T) {
|
||||
r, err := NewRange("192.168.0.1")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.True(t, r.Contains("192.168.0.1"))
|
||||
assert.False(t, r.Contains("192.168.0.123"))
|
||||
}
|
||||
|
||||
func TestSinglePrefix(t *testing.T) {
|
||||
r, err := NewRange("192.168.2.100/32")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.False(t, r.Contains("192.168.2.56"))
|
||||
assert.True(t, r.Contains("192.168.2.100"))
|
||||
assert.False(t, r.Contains("192.168.2.130"))
|
||||
}
|
||||
|
||||
func TestAllIp(t *testing.T) {
|
||||
r, err := NewRange("0.0.0.0/0")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.True(t, r.Contains("192.168.2.100"))
|
||||
assert.True(t, r.Contains("192.3.2.100"))
|
||||
assert.True(t, r.Contains("192.65.2.100"))
|
||||
assert.True(t, r.Contains("172.168.2.100"))
|
||||
assert.True(t, r.Contains("8.8.8.8"))
|
||||
assert.True(t, r.Contains("114.114.114.114"))
|
||||
}
|
||||
|
||||
func TestPrefix(t *testing.T) {
|
||||
r, err := NewRange("192.168.2.0/24")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.True(t, r.Contains("192.168.2.12"))
|
||||
assert.True(t, r.Contains("192.168.2.162"))
|
||||
assert.False(t, r.Contains("192.168.3.162"))
|
||||
}
|
||||
|
||||
func TestPrefix2(t *testing.T) {
|
||||
r, err := NewRange("192.168.15.0/21")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.True(t, r.Contains("192.168.8.10"))
|
||||
assert.True(t, r.Contains("192.168.14.162"))
|
||||
assert.False(t, r.Contains("192.168.3.162"))
|
||||
assert.False(t, r.Contains("192.168.2.162"))
|
||||
}
|
||||
|
||||
func TestDotMask(t *testing.T) {
|
||||
r, err := NewRange("192.168.15.0/255.255.248.0")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.True(t, r.Contains("192.168.8.10"))
|
||||
assert.True(t, r.Contains("192.168.14.162"))
|
||||
assert.False(t, r.Contains("192.168.3.162"))
|
||||
assert.False(t, r.Contains("192.168.2.162"))
|
||||
}
|
||||
|
||||
func TestRange(t *testing.T) {
|
||||
r, err := NewRange("192.168.2.20-192.168.2.30")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.True(t, r.Contains("192.168.2.20"))
|
||||
assert.True(t, r.Contains("192.168.2.21"))
|
||||
assert.True(t, r.Contains("192.168.2.30"))
|
||||
|
||||
assert.False(t, r.Contains("192.168.2.10"))
|
||||
assert.False(t, r.Contains("192.168.2.31"))
|
||||
}
|
||||
|
||||
func TestLocalhost(t *testing.T) {
|
||||
r, err := NewRange("::1")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.True(t, r.Contains("::1"))
|
||||
}
|
||||
|
||||
func TestNetIP(t *testing.T) {
|
||||
addr, err := netip.ParseAddr("192.168.2.10")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Log(netip.MustParseAddr("192.168.2.4").Compare(addr))
|
||||
t.Log(netip.MustParseAddr("192.168.2.10").Compare(addr))
|
||||
t.Log(netip.MustParseAddr("192.168.2.11").Compare(addr))
|
||||
|
||||
prefix := netip.MustParsePrefix("192.168.2.0/24")
|
||||
|
||||
t.Log(prefix.Contains(netip.MustParseAddr("192.168.2.53")))
|
||||
t.Log(prefix.Contains(netip.MustParseAddr("192.168.3.53")))
|
||||
}
|
34
ip_range/mask_bits.go
Normal file
34
ip_range/mask_bits.go
Normal file
@ -0,0 +1,34 @@
|
||||
package iprange
|
||||
|
||||
import "strings"
|
||||
|
||||
var maskBits = map[string]int{
|
||||
"255": 8,
|
||||
"254": 7,
|
||||
"252": 6,
|
||||
"248": 5,
|
||||
"240": 4,
|
||||
"224": 3,
|
||||
"192": 2,
|
||||
"128": 1,
|
||||
"0": 0,
|
||||
}
|
||||
|
||||
func MaskToBits(mask string) int {
|
||||
bits := 0
|
||||
|
||||
secs := strings.Split(mask, ".")
|
||||
if len(secs) != 4 {
|
||||
panic("the mask is incorrect")
|
||||
}
|
||||
|
||||
for _, s := range secs {
|
||||
if v, ok := maskBits[s]; ok {
|
||||
bits += v
|
||||
} else {
|
||||
panic("the mask is incorrect")
|
||||
}
|
||||
}
|
||||
|
||||
return bits
|
||||
}
|
26
ip_range/mask_bits_test.go
Normal file
26
ip_range/mask_bits_test.go
Normal file
@ -0,0 +1,26 @@
|
||||
package iprange
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestMaskToBits(t *testing.T) {
|
||||
|
||||
masks := []struct {
|
||||
mask string
|
||||
expect int
|
||||
}{
|
||||
{"255.255.255.0", 24},
|
||||
{"255.255.248.0", 21},
|
||||
{"255.255.192.0", 18},
|
||||
{"255.255.255.192", 26},
|
||||
}
|
||||
|
||||
for _, m := range masks {
|
||||
bits := MaskToBits(m.mask)
|
||||
assert.Equal(t, m.expect, bits, fmt.Sprintf("IP:%s 掩码位数错误。", m.mask))
|
||||
}
|
||||
}
|
@ -5,6 +5,9 @@ package json
|
||||
|
||||
import "encoding/json"
|
||||
|
||||
func RegisterFuzzyDecoders() {
|
||||
}
|
||||
|
||||
var (
|
||||
Marshal = json.Marshal
|
||||
Unmarshal = json.Unmarshal
|
||||
|
@ -2,12 +2,13 @@ package json
|
||||
|
||||
import "github.com/charlienet/go-mixed/bytesconv"
|
||||
|
||||
// StructToJsonIndent converts a struct to an indented JSON string
|
||||
func StructToJsonIndent(obj any) string {
|
||||
b, _ := MarshalIndent(obj, "", " ")
|
||||
return bytesconv.BytesToString(b)
|
||||
}
|
||||
|
||||
// converts a struct to a JSON string
// StructToJson converts a struct to a JSON string
|
||||
func StructToJson(obj any) string {
|
||||
b, _ := Marshal(obj)
|
||||
return bytesconv.BytesToString(b)
|
||||
|
41
locker/chan_source_locker.go
Normal file
41
locker/chan_source_locker.go
Normal file
@ -0,0 +1,41 @@
|
||||
package locker
|
||||
|
||||
type ChanLocker interface {
|
||||
Get(key string) (ch <-chan int, ok bool)
|
||||
Release(key string)
|
||||
}
|
||||
|
||||
type chanSourceLock struct {
|
||||
m RWLocker
|
||||
content map[string]chan int
|
||||
}
|
||||
|
||||
func (s *chanSourceLock) Get(key string) (ch <-chan int, ok bool) {
|
||||
s.m.RLock()
|
||||
ch, ok = s.content[key]
|
||||
s.m.RUnlock()
|
||||
if ok {
|
||||
return
|
||||
}
|
||||
s.m.Lock()
|
||||
ch, ok = s.content[key]
|
||||
if ok {
|
||||
s.m.Unlock()
|
||||
return
|
||||
}
|
||||
s.content[key] = make(chan int)
|
||||
ch = s.content[key]
|
||||
ok = true
|
||||
s.m.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (s *chanSourceLock) Release(key string) {
|
||||
s.m.Lock()
|
||||
ch, ok := s.content[key]
|
||||
if ok {
|
||||
close(ch)
|
||||
delete(s.content, key)
|
||||
}
|
||||
s.m.Unlock()
|
||||
}
|
@ -2,53 +2,93 @@ package locker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// resource lock
// lock with a usage counter
|
||||
type countLocker struct {
|
||||
Locker
|
||||
Count int32
|
||||
}
|
||||
|
||||
// SourceLocker is a keyed resource lock
|
||||
type SourceLocker struct {
|
||||
m RWLocker
|
||||
locks map[string]Locker
|
||||
locks map[string]*countLocker
|
||||
}
|
||||
|
||||
func NewSourceLocker() *SourceLocker {
|
||||
return &SourceLocker{
|
||||
m: NewRWLocker(),
|
||||
locks: make(map[string]Locker),
|
||||
locks: make(map[string]*countLocker),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SourceLocker) Lock(key string) {
|
||||
s.m.RLock()
|
||||
l, ok := s.locks[key]
|
||||
s.m.RUnlock()
|
||||
|
||||
if ok {
|
||||
s.m.RUnlock()
|
||||
|
||||
atomic.AddInt32(&l.Count, 1)
|
||||
l.Lock()
|
||||
|
||||
fmt.Println("加锁")
|
||||
} else {
|
||||
s.m.RUnlock()
|
||||
|
||||
// Take the write lock and check again whether a lock for this key already exists
|
||||
s.m.Lock()
|
||||
new := NewLocker()
|
||||
s.locks[key] = new
|
||||
s.m.Unlock()
|
||||
if l2, ok := s.locks[key]; ok {
|
||||
s.m.Unlock()
|
||||
|
||||
new.Lock()
|
||||
fmt.Println("初始加锁")
|
||||
l2.Lock()
|
||||
fmt.Println("二次检查加锁")
|
||||
} else {
|
||||
n := NewLocker()
|
||||
s.locks[key] = &countLocker{Locker: n, Count: 1}
|
||||
|
||||
s.m.Unlock()
|
||||
|
||||
fmt.Printf("新锁准备加锁:%p\n", n)
|
||||
n.Lock()
|
||||
|
||||
fmt.Println("初始加锁")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SourceLocker) Unlock(key string) {
|
||||
s.m.Lock()
|
||||
|
||||
if l, ok := s.locks[key]; ok {
|
||||
atomic.AddInt32(&l.Count, -1)
|
||||
fmt.Printf("解锁%p\n", l)
|
||||
l.Unlock()
|
||||
// delete(s.locks, key)
|
||||
fmt.Println("解锁")
|
||||
|
||||
if l.Count == 0 {
|
||||
delete(s.locks, key)
|
||||
}
|
||||
}
|
||||
s.m.Unlock()
|
||||
}
|
||||
|
||||
func (s *SourceLocker) TryLock(key string) bool {
|
||||
return false
|
||||
// take the read lock
|
||||
s.m.RLock()
|
||||
l, ok := s.locks[key]
|
||||
|
||||
if ok {
|
||||
ret := l.TryLock()
|
||||
s.m.RUnlock()
|
||||
|
||||
return ret
|
||||
} else {
|
||||
s.m.RUnlock()
|
||||
|
||||
s.m.Lock()
|
||||
n := NewLocker()
|
||||
s.locks[key] = &countLocker{Locker: n, Count: 1}
|
||||
s.m.Unlock()
|
||||
|
||||
return n.TryLock()
|
||||
}
|
||||
}
|
||||
|
@ -3,10 +3,15 @@ package locker
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var sourcekey = "u-0001"
|
||||
|
||||
func TestTryLock(t *testing.T) {
|
||||
|
||||
}
|
||||
|
||||
func TestSourceLocker(t *testing.T) {
|
||||
l := NewSourceLocker()
|
||||
|
||||
@ -27,6 +32,32 @@ func TestSourceLocker(t *testing.T) {
|
||||
|
||||
wg.Wait()
|
||||
t.Log("n:", n)
|
||||
t.Logf("%+v", l)
|
||||
}
|
||||
|
||||
func TestSourceTryLock(t *testing.T) {
|
||||
c := 5
|
||||
n := 0
|
||||
wg := new(sync.WaitGroup)
|
||||
wg.Add(c)
|
||||
|
||||
l := NewSourceLocker()
|
||||
|
||||
for i := 0; i < c; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
if l.TryLock(sourcekey) {
|
||||
n++
|
||||
time.Sleep(time.Second)
|
||||
|
||||
l.Unlock(sourcekey)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
t.Log("n:", n)
|
||||
t.Logf("%+v", l)
|
||||
}
|
||||
|
||||
func BenchmarkSourceLocker(b *testing.B) {
|
||||
|
@ -1,5 +1,7 @@
|
||||
package logx
|
||||
|
||||
import "io"
|
||||
|
||||
var std = defaultLogger()
|
||||
|
||||
func StandardLogger() Logger {
|
||||
@ -34,4 +36,5 @@ type Logger interface {
|
||||
Println(args ...any)
|
||||
Print(args ...any)
|
||||
Printf(format string, args ...any)
|
||||
Writer() io.Writer
|
||||
}
|
||||
|
@ -1,3 +1,23 @@
|
||||
package logx
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
type Rotate int
|
||||
|
||||
const (
|
||||
None Rotate = iota // no rotation
Size // rotate by size
Date // rotate by date
|
||||
)
|
||||
|
||||
type OutputOptions struct {
|
||||
LogrusOutputOptions
|
||||
}
|
||||
|
||||
func WithFile(filename string) (io.Writer, error) {
|
||||
mode := os.FileMode(0644)
|
||||
return os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_APPEND, mode)
|
||||
}
|
||||
|
1
logx/logger.go
Normal file
1
logx/logger.go
Normal file
@ -0,0 +1 @@
|
||||
package logx
|
@ -1,16 +0,0 @@
|
||||
package logx
|
||||
|
||||
type loggerBuilder struct {
|
||||
}
|
||||
|
||||
func NewBuilder() *loggerBuilder {
|
||||
return &loggerBuilder{}
|
||||
}
|
||||
|
||||
func (b *loggerBuilder) WithLogrus() *loggerBuilder {
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *loggerBuilder) WithLogger() *loggerBuilder {
|
||||
return b
|
||||
}
|
@ -2,14 +2,8 @@ package logx_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/charlienet/go-mixed/logx"
|
||||
)
|
||||
|
||||
func TestBuilder(t *testing.T) {
|
||||
logger := logx.NewBuilder().
|
||||
WithLogrus().
|
||||
WithLogger()
|
||||
|
||||
_ = logger
|
||||
}
|
||||
|
@ -1,6 +1,8 @@
|
||||
package logx
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@ -37,3 +39,7 @@ func (l *logrusWrpper) WithFields(fields Fields) Logger {
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
func (l *logrusWrpper) Writer() io.Writer {
|
||||
return l.Entry.Writer()
|
||||
}
|
||||
|
@ -6,6 +6,7 @@ import (
|
||||
"runtime"
|
||||
|
||||
nested "github.com/antonfisher/nested-logrus-formatter"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const defaultTimestampFormat = "2006-01-02 15:04:05.000"
|
||||
@ -14,6 +15,14 @@ type NestedFormatterOption struct {
|
||||
Color bool
|
||||
}
|
||||
|
||||
func NewJsonFormatter() logrus.Formatter {
|
||||
return &logrus.JSONFormatter{}
|
||||
}
|
||||
|
||||
func NewTextFOrmatter() logrus.Formatter {
|
||||
return &logrus.TextFormatter{}
|
||||
}
|
||||
|
||||
func NewNestedFormatter(option NestedFormatterOption) *nested.Formatter {
|
||||
return &nested.Formatter{
|
||||
TimestampFormat: defaultTimestampFormat,
|
@ -4,6 +4,7 @@ import (
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/charlienet/go-mixed/fs"
|
||||
"github.com/sirupsen/logrus"
|
||||
@ -32,11 +33,12 @@ type LogrusOutputOptions struct {
|
||||
}
|
||||
|
||||
type LogrusBackupOptions struct {
|
||||
MaxSize int // default size 100 MB
MaxAge int // days to keep backups
MaxBackups int // number of backups to keep
LocalTime bool // use local time
Compress bool // compress backups
BackupType Rotate // rotation type
MaxSize int // default size 100 MB
MaxAge int // days to keep backups
MaxBackups int // number of backups to keep
LocalTime bool // use local time
Compress bool // compress backups
|
||||
}
|
||||
|
||||
func (o LogrusBackupOptions) hasBackup() bool {
|
||||
@ -66,6 +68,7 @@ func WithFormatter(formatter logrus.Formatter) logrusOption {
|
||||
}
|
||||
|
||||
func WithOutput(options LogrusOutputOptions) logrusOption {
|
||||
_ = time.Now()
|
||||
return func(l *logrus.Logger) {
|
||||
var writer io.Writer
|
||||
switch {
|
||||
|
@ -39,9 +39,3 @@ func TestLevel(t *testing.T) {
|
||||
// logger.SetLevel(l)
|
||||
logger.Info("bcdefg")
|
||||
}
|
||||
|
||||
func TestMutiWriter(t *testing.T) {
|
||||
l := NewLogger().AppendLogger()
|
||||
|
||||
_ = l
|
||||
}
|
||||
|
@ -1,12 +0,0 @@
|
||||
package logx
|
||||
|
||||
type mutiLogger struct {
|
||||
}
|
||||
|
||||
func NewLogger() *mutiLogger {
|
||||
return &mutiLogger{}
|
||||
}
|
||||
|
||||
func (w *mutiLogger) AppendLogger() Logger {
|
||||
return nil
|
||||
}
|
26
logx/readme.md
Normal file
26
logx/readme.md
Normal file
@ -0,0 +1,26 @@
|
||||
# Logging component

Log rotation and backup

Logs can be rotated by date or by size; the number of history files kept is controlled by the backup count.

1. Split by day: a new log file is created each day, named file.yyyy-mm-dd.log, where file and log come from the configured log file name.
2. Split by size: the lumberjack component rotates the log file once it grows past the size limit.
3. Split by time interval: the log file is rotated at a configured interval.

Log output streams: console and file output are supported, and output components are extensible. (A runnable size-based sketch follows the planned API below.)

``` golang
logx.NewLogger(
    WithLevel("debug"),
    WithFormatter(),
    WithConsole(),
    WithRoateBySize(FileRoateSize{
        MaxSize
        MaxAge
        MaxBackups
    }),
    WithRoateByDate("filename", MaxAge, MaxBackups),
    WithFile("filename"))
```
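The WithRoateBySize/WithRoateByDate options above are a planned-API sketch rather than working code. A minimal, runnable size-based setup using the lumberjack and logrus dependencies already listed in go.mod could look like this (the file name and limits are placeholders):

```go
package main

import (
	"github.com/sirupsen/logrus"
	"gopkg.in/natefinch/lumberjack.v2"
)

func main() {
	// Size-based rotation: lumberjack rotates the file once it exceeds MaxSize (in MB),
	// keeps at most MaxBackups rotated files for MaxAge days, and compresses them.
	logrus.SetOutput(&lumberjack.Logger{
		Filename:   "logs/app.log", // placeholder path
		MaxSize:    100,            // MB
		MaxAge:     7,              // days
		MaxBackups: 3,
		LocalTime:  true,
		Compress:   true,
	})

	logrus.Info("size-based log rotation is active")
}
```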
22
logx/rotate_date_writer.go
Normal file
22
logx/rotate_date_writer.go
Normal file
@ -0,0 +1,22 @@
|
||||
package logx
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// ensure we always implement io.WriteCloser
|
||||
var _ io.WriteCloser = (*rotateDateWriter)(nil)
|
||||
|
||||
type rotateDateWriter struct {
|
||||
MaxAge int
|
||||
MaxBackups int
|
||||
}
|
||||
|
||||
func (l *rotateDateWriter) Write(p []byte) (n int, err error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (l *rotateDateWriter) Close() error {
|
||||
|
||||
return nil
|
||||
}
|
3
logx/rotate_size_writer.go
Normal file
3
logx/rotate_size_writer.go
Normal file
@ -0,0 +1,3 @@
|
||||
package logx
|
||||
|
||||
// size-based rotating log writer
|
27
logx/rotate_writer_test.go
Normal file
27
logx/rotate_writer_test.go
Normal file
@ -0,0 +1,27 @@
|
||||
package logx
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
rotatelogs "github.com/lestrrat/go-file-rotatelogs"
|
||||
)
|
||||
|
||||
func TestNewWriter(t *testing.T) {
|
||||
t.Log(filepath.Abs("logs"))
|
||||
|
||||
logf, err := rotatelogs.New("logs/aaaa.%Y%m%d.log",
|
||||
rotatelogs.WithMaxAge(24*time.Hour),
|
||||
rotatelogs.WithRotationTime(time.Hour))
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer logf.Close()
|
||||
|
||||
t.Log(logf.CurrentFileName())
|
||||
|
||||
_, err = logf.Write([]byte("abaccad"))
|
||||
t.Log(err)
|
||||
}
|
@ -7,17 +7,16 @@ import (
|
||||
|
||||
"github.com/charlienet/go-mixed/bytesconv"
|
||||
"github.com/charlienet/go-mixed/hash"
|
||||
"golang.org/x/exp/constraints"
|
||||
)
|
||||
|
||||
var defaultNumOfBuckets = runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
type concurrnetMap[K constraints.Ordered, V any] struct {
|
||||
type concurrnetMap[K hashable, V any] struct {
|
||||
buckets []Map[K, V]
|
||||
numOfBuckets uint64
|
||||
}
|
||||
|
||||
func NewConcurrentMap[K constraints.Ordered, V any](maps ...map[K]V) *concurrnetMap[K, V] {
|
||||
func NewConcurrentMap[K hashable, V any](maps ...map[K]V) *concurrnetMap[K, V] {
|
||||
num := defaultNumOfBuckets
|
||||
|
||||
buckets := make([]Map[K, V], num)
|
||||
@ -57,16 +56,16 @@ func (m *concurrnetMap[K, V]) Exist(key K) bool {
|
||||
return mm.Exist(key)
|
||||
}
|
||||
|
||||
func (m *concurrnetMap[K, V]) Iter() <-chan *Entry[K, V] {
|
||||
num := int(m.numOfBuckets)
|
||||
ch := make(chan *Entry[K, V], m.Count())
|
||||
for i := 0; i < num; i++ {
|
||||
c := m.buckets[i].Iter()
|
||||
ch <- <-c
|
||||
}
|
||||
// func (m *concurrnetMap[K, V]) Iter() <-chan *Entry[K, V] {
|
||||
// num := int(m.numOfBuckets)
|
||||
// ch := make(chan *Entry[K, V], m.Count())
|
||||
// for i := 0; i < num; i++ {
|
||||
// c := m.buckets[i].Iter()
|
||||
// ch <- <-c
|
||||
// }
|
||||
|
||||
return ch
|
||||
}
|
||||
// return ch
|
||||
// }
|
||||
|
||||
func (m *concurrnetMap[K, V]) Keys() []K {
|
||||
keys := make([]K, m.Count())
|
||||
@ -111,27 +110,6 @@ func (m *concurrnetMap[K, V]) ForEach(f func(K, V) bool) {
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func (m *concurrnetMap[K, V]) Clone() Map[K, V] {
|
||||
|
||||
num := int(m.numOfBuckets)
|
||||
|
||||
buckets := make([]Map[K, V], m.numOfBuckets)
|
||||
for i := 0; i < num; i++ {
|
||||
buckets[i] = m.buckets[i].Clone()
|
||||
}
|
||||
|
||||
return &concurrnetMap[K, V]{
|
||||
buckets: buckets,
|
||||
numOfBuckets: m.numOfBuckets,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *concurrnetMap[K, V]) Clear() {
|
||||
for i := 0; i < int(m.numOfBuckets); i++ {
|
||||
m.buckets[i].Clear()
|
||||
}
|
||||
}
|
||||
|
||||
func (m *concurrnetMap[K, V]) Count() int {
|
||||
var count int
|
||||
for i := 0; i < int(m.numOfBuckets); i++ {
|
||||
|
@ -2,15 +2,14 @@ package maps
|
||||
|
||||
import (
|
||||
"github.com/charlienet/go-mixed/locker"
|
||||
"golang.org/x/exp/constraints"
|
||||
)
|
||||
|
||||
type hashMap[K constraints.Ordered, V any] struct {
|
||||
type hashMap[K hashable, V any] struct {
|
||||
m map[K]V
|
||||
opt *options
|
||||
}
|
||||
|
||||
func NewHashMap[K constraints.Ordered, V any](maps ...map[K]V) *hashMap[K, V] {
|
||||
func NewHashMap[K hashable, V any](maps ...map[K]V) *hashMap[K, V] {
|
||||
m := make(map[K]V)
|
||||
if len(maps) > 0 {
|
||||
m = Merge(maps...)
|
||||
@ -74,7 +73,7 @@ func (m *hashMap[K, V]) Iter() <-chan *Entry[K, V] {
|
||||
return ch
|
||||
}
|
||||
|
||||
func (m *hashMap[K, V]) ForEach(f func(K, V) bool ) {
|
||||
func (m *hashMap[K, V]) ForEach(f func(K, V) bool) {
|
||||
cloned := m.ToMap()
|
||||
|
||||
for k, v := range cloned {
|
||||
|
@ -25,3 +25,8 @@ func TestForEach(t *testing.T) {
|
||||
assert.True(t, hashMap.Exist(k))
|
||||
}
|
||||
}
|
||||
|
||||
func TestSynchronize(t *testing.T) {
|
||||
mep := NewHashMap[string, string]().Synchronize()
|
||||
mep.Set("aaaa", "bbb")
|
||||
}
|
||||
|
81
maps/haxmap.go
Normal file
81
maps/haxmap.go
Normal file
@ -0,0 +1,81 @@
|
||||
package maps
|
||||
|
||||
import (
|
||||
"github.com/alphadose/haxmap"
|
||||
)
|
||||
|
||||
type haxHashable interface {
|
||||
int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 | uintptr | float32 | float64 | string | complex64 | complex128
|
||||
}
|
||||
|
||||
var _ Map[string, string] = &haxmapWrapper[string, string]{}
|
||||
|
||||
type haxmapWrapper[K haxHashable, V any] struct {
|
||||
mep *haxmap.Map[K, V]
|
||||
}
|
||||
|
||||
func NewHaxmap[K haxHashable, V any](size int) *haxmapWrapper[K, V] {
|
||||
return &haxmapWrapper[K, V]{
|
||||
mep: haxmap.New[K, V](uintptr(size)),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *haxmapWrapper[K, V]) Set(k K, v V) {
|
||||
m.mep.Set(k, v)
|
||||
}
|
||||
|
||||
func (m *haxmapWrapper[K, V]) Get(k K) (V, bool) {
|
||||
return m.mep.Get(k)
|
||||
}
|
||||
|
||||
func (m *haxmapWrapper[K, V]) Keys() []K {
|
||||
keys := make([]K, 0, m.mep.Len())
|
||||
m.mep.ForEach(func(k K, v V) bool {
|
||||
keys = append(keys, k)
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
return keys
|
||||
}
|
||||
|
||||
func (m *haxmapWrapper[K, V]) Values() []V {
|
||||
values := make([]V, 0, m.mep.Len())
|
||||
m.mep.ForEach(func(k K, v V) bool {
|
||||
values = append(values, v)
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
return values
|
||||
}
|
||||
|
||||
func (m *haxmapWrapper[K, V]) Exist(k K) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *haxmapWrapper[K, V]) Delete(key K) {
|
||||
m.mep.Del(key)
|
||||
}
|
||||
|
||||
func (m *haxmapWrapper[K, V]) ToMap() map[K]V {
|
||||
mm := make(map[K]V, m.mep.Len())
|
||||
m.mep.ForEach(func(k K, v V) bool {
|
||||
mm[k] = v
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
return mm
|
||||
}
|
||||
|
||||
func (m *haxmapWrapper[K, V]) ForEach(fn func(K, V) bool) {
|
||||
|
||||
}
|
||||
|
||||
func (m *haxmapWrapper[K, V]) Count() int {
|
||||
return int(m.mep.Len())
|
||||
}
|
||||
|
||||
func (m *haxmapWrapper[K, V]) Clear() {
|
||||
}
|
18
maps/map.go
18
maps/map.go
@ -6,7 +6,11 @@ import (
|
||||
"golang.org/x/exp/constraints"
|
||||
)
|
||||
|
||||
type Map[K constraints.Ordered, V any] interface {
|
||||
type hashable interface {
|
||||
constraints.Integer | constraints.Float | ~string
|
||||
}
|
||||
|
||||
type Map[K hashable, V any] interface {
|
||||
Set(key K, value V) // set a value
Get(key K) (value V, ok bool) // get a value
Exist(key K) bool // whether the key exists
|
||||
@ -14,19 +18,17 @@ type Map[K constraints.Ordered, V any] interface {
|
||||
Keys() []K // all keys
Values() []V // all values
ToMap() map[K]V // convert to a plain map
Clone() Map[K, V] // copy
Clear() // clear
Count() int // number of entries
Iter() <-chan *Entry[K, V] // iterator
ForEach(f func(K, V) bool) // ForEach
// Iter() <-chan *Entry[K, V] // iterator
ForEach(f func(K, V) bool) // ForEach
|
||||
}
|
||||
|
||||
type Entry[K constraints.Ordered, V any] struct {
|
||||
type Entry[K hashable, V any] struct {
|
||||
Key K
|
||||
Value V
|
||||
}
|
||||
|
||||
func Merge[K comparable, V any](mm ...map[K]V) map[K]V {
|
||||
func Merge[K hashable, V any](mm ...map[K]V) map[K]V {
|
||||
ret := make(map[K]V)
|
||||
for _, m := range mm {
|
||||
for k, v := range m {
|
||||
@ -38,7 +40,7 @@ func Merge[K comparable, V any](mm ...map[K]V) map[K]V {
|
||||
}
|
||||
|
||||
// Join builds a string from the map's keys and values
|
||||
func Join[K constraints.Ordered, V any](m Map[K, V], sep string, f func(k K, v V) string) string {
|
||||
func Join[K hashable, V any](m Map[K, V], sep string, f func(k K, v V) string) string {
|
||||
slice := make([]string, 0, m.Count())
|
||||
|
||||
m.ForEach(func(k K, v V) bool {
|
||||
|
@ -2,23 +2,21 @@ package maps
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"golang.org/x/exp/constraints"
|
||||
)
|
||||
|
||||
var _ Map[string, any] = &rw_map[string, any]{}
|
||||
|
||||
type rw_map[K constraints.Ordered, V any] struct {
|
||||
type rw_map[K hashable, V any] struct {
|
||||
m Map[K, V]
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func NewRWMap[K constraints.Ordered, V any](maps ...map[K]V) *rw_map[K, V] {
|
||||
func NewRWMap[K hashable, V any](maps ...map[K]V) *rw_map[K, V] {
|
||||
merged := Merge(maps...)
|
||||
return &rw_map[K, V]{m: NewHashMap(merged)}
|
||||
}
|
||||
|
||||
func newRWMap[K constraints.Ordered, V any](m Map[K, V]) *rw_map[K, V] {
|
||||
func newRWMap[K hashable, V any](m Map[K, V]) *rw_map[K, V] {
|
||||
return &rw_map[K, V]{m: m}
|
||||
}
|
||||
|
||||
@ -62,6 +60,10 @@ func (m *rw_map[K, V]) ToMap() map[K]V {
|
||||
return m.m.ToMap()
|
||||
}
|
||||
|
||||
func (m *rw_map[K, V]) Shrink() map[K]V {
|
||||
return m.m.ToMap()
|
||||
}
|
||||
|
||||
func (m *rw_map[K, V]) Exist(key K) bool {
|
||||
return m.m.Exist(key)
|
||||
}
|
||||
@ -70,28 +72,13 @@ func (m *rw_map[K, V]) Count() int {
|
||||
return m.m.Count()
|
||||
}
|
||||
|
||||
func (m *rw_map[K, V]) Iter() <-chan *Entry[K, V] {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
// func (m *rw_map[K, V]) Iter() <-chan *Entry[K, V] {
|
||||
// m.mu.RLock()
|
||||
// defer m.mu.RUnlock()
|
||||
|
||||
return m.m.Iter()
|
||||
}
|
||||
// return m.m.Iter()
|
||||
// }
|
||||
|
||||
func (m *rw_map[K, V]) ForEach(f func(K, V) bool) {
|
||||
|
||||
m.mu.RLock()
|
||||
cloned := m.m.Clone()
|
||||
m.mu.RUnlock()
|
||||
|
||||
cloned.ForEach(f)
|
||||
}
|
||||
|
||||
func (m *rw_map[K, V]) Clone() Map[K, V] {
|
||||
return newRWMap(m.m.Clone())
|
||||
}
|
||||
|
||||
func (m *rw_map[K, V]) Clear() {
|
||||
m.mu.Lock()
|
||||
m.m.Clear()
|
||||
m.mu.Unlock()
|
||||
}
|
||||
|
@ -3,29 +3,27 @@ package maps
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/exp/constraints"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
xmaps "golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
var (
|
||||
_ Map[string, any] = &sorted_map[string, any]{}
|
||||
_ SortedMap[string, any] = &sorted_map[string, any]{}
|
||||
)
|
||||
|
||||
type SortedMap[K constraints.Ordered, V any] interface {
|
||||
type SortedMap[K hashable, V any] interface {
|
||||
Map[K, V]
|
||||
Asc() SortedMap[K, V]
|
||||
Desc() SortedMap[K, V]
|
||||
}
|
||||
|
||||
type sorted_map[K constraints.Ordered, V any] struct {
|
||||
type sorted_map[K hashable, V any] struct {
|
||||
keys []K
|
||||
maps Map[K, V]
|
||||
}
|
||||
|
||||
func NewSortedMap[K constraints.Ordered, V any](maps ...map[K]V) *sorted_map[K, V] {
|
||||
func NewSortedMap[K hashable, V any](maps ...map[K]V) *sorted_map[K, V] {
|
||||
merged := Merge(maps...)
|
||||
return &sorted_map[K, V]{
|
||||
keys: xmaps.Keys(merged),
|
||||
@ -33,7 +31,7 @@ func NewSortedMap[K constraints.Ordered, V any](maps ...map[K]V) *sorted_map[K,
|
||||
}
|
||||
}
|
||||
|
||||
func NewSortedByMap[K constraints.Ordered, V any](m Map[K, V]) *sorted_map[K, V] {
|
||||
func NewSortedByMap[K hashable, V any](m Map[K, V]) *sorted_map[K, V] {
|
||||
return &sorted_map[K, V]{maps: m, keys: m.Keys()}
|
||||
}
|
||||
|
||||
@ -43,6 +41,8 @@ func (m *sorted_map[K, V]) Get(key K) (V, bool) {
|
||||
|
||||
func (m *sorted_map[K, V]) Set(key K, value V) {
|
||||
m.maps.Set(key, value)
|
||||
|
||||
slices.Sort(m.keys)
|
||||
m.keys = append(m.keys, key)
|
||||
}
|
||||
|
||||
@ -61,15 +61,6 @@ func (m *sorted_map[K, V]) Count() int {
|
||||
return m.maps.Count()
|
||||
}
|
||||
|
||||
func (m *sorted_map[K, V]) Clear() {
|
||||
m.keys = make([]K, 0)
|
||||
m.maps.Clear()
|
||||
}
|
||||
|
||||
func (m *sorted_map[K, V]) Clone() Map[K, V] {
|
||||
return &sorted_map[K, V]{maps: m.maps.Clone(), keys: m.Keys()}
|
||||
}
|
||||
|
||||
func (m *sorted_map[K, V]) Iter() <-chan *Entry[K, V] {
|
||||
c := make(chan *Entry[K, V], m.Count())
|
||||
go func() {
|
||||
@ -88,7 +79,8 @@ func (m *sorted_map[K, V]) Iter() <-chan *Entry[K, V] {
|
||||
}
|
||||
|
||||
func (m *sorted_map[K, V]) ForEach(f func(K, V) bool) {
|
||||
for _, k := range m.keys {
|
||||
keys := m.keys[:]
|
||||
for _, k := range keys {
|
||||
if v, ok := m.Get(k); ok {
|
||||
if f(k, v) {
|
||||
break
|
||||
|
27
mathx/int.go
27
mathx/int.go
@ -3,14 +3,33 @@ package mathx
|
||||
import (
|
||||
"github.com/charlienet/go-mixed/expr"
|
||||
"golang.org/x/exp/constraints"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// MaxInt returns the larger one of v1 and v2.
|
||||
// Max returns the larger one of v1 and v2.
|
||||
func Max[T constraints.Ordered](v1, v2 T) T {
|
||||
return expr.If(v1 > v2, v1, v2)
|
||||
return expr.Ternary(v1 > v2, v1, v2)
|
||||
}
|
||||
|
||||
// MinInt returns the smaller one of v1 and v2.
|
||||
// Min returns the smaller one of v1 and v2.
|
||||
func Min[T constraints.Ordered](v1, v2 T) T {
|
||||
return expr.If(v1 < v2, v1, v2)
|
||||
return expr.Ternary(v1 < v2, v1, v2)
|
||||
}
|
||||
|
||||
func Abs1[T constraints.Signed](n T) T {
|
||||
shift := 63
|
||||
switch unsafe.Sizeof(n) {
|
||||
case 1:
|
||||
shift = 7
|
||||
case 4:
|
||||
shift = 31
|
||||
}
|
||||
|
||||
y := n >> shift
|
||||
return T((n ^ y) - y)
|
||||
}
|
||||
|
||||
func Abs(n int64) int64 {
|
||||
y := n >> 63
|
||||
return (n ^ y) - y
|
||||
}
|
||||
|
27
mathx/int_test.go
Normal file
27
mathx/int_test.go
Normal file
@ -0,0 +1,27 @@
|
||||
package mathx_test
|
||||
|
||||
import (
|
||||
"github.com/charlienet/go-mixed/mathx"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMin(t *testing.T) {
|
||||
assert.Equal(t, 1, mathx.Min(1, 3))
|
||||
assert.Equal(t, 2, mathx.Min(66, 2))
|
||||
}
|
||||
|
||||
func TestMax(t *testing.T) {
|
||||
assert.Equal(t, 3, mathx.Max(1, 3))
|
||||
assert.Equal(t, 66, mathx.Max(66, 2))
|
||||
}
|
||||
|
||||
func TestAbs(t *testing.T) {
|
||||
assert.Equal(t, 23, mathx.Abs1(23))
|
||||
assert.Equal(t, 23, mathx.Abs1(-23))
|
||||
assert.Equal(t, 0, mathx.Abs1(0))
|
||||
|
||||
var u int8 = -127
|
||||
var exp int8 = 127
|
||||
assert.Equal(t, exp, mathx.Abs1(u))
|
||||
}
|
81
panic/panic.go
Normal file
81
panic/panic.go
Normal file
@ -0,0 +1,81 @@
|
||||
package panic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
type Panic struct {
|
||||
R any
|
||||
Stack []byte
|
||||
}
|
||||
|
||||
func (p Panic) String() string {
|
||||
return fmt.Sprintf("%v\n%s", p.R, p.Stack)
|
||||
}
|
||||
|
||||
type PanicGroup struct {
|
||||
panics chan Panic // fatal error notifications
dones chan int // goroutine completion notifications
jobs chan int // concurrency limit
jobN int32 // number of worker goroutines
|
||||
}
|
||||
|
||||
func NewPanicGroup(maxConcurrent int) *PanicGroup {
|
||||
return &PanicGroup{
|
||||
panics: make(chan Panic, 8),
|
||||
dones: make(chan int, 8),
|
||||
jobs: make(chan int, maxConcurrent),
|
||||
}
|
||||
}
|
||||
|
||||
func (g *PanicGroup) Go(f func()) *PanicGroup {
|
||||
g.jobN++
|
||||
|
||||
go func() {
|
||||
g.jobs <- 1
|
||||
defer func() {
|
||||
<-g.jobs
|
||||
// In Go a panic can only be recovered inside the goroutine that raised it;
// if it is not handled here, the whole *process* exits.
|
||||
if r := recover(); r != nil {
|
||||
g.panics <- Panic{R: r, Stack: debug.Stack()}
|
||||
// If a panic happened, do not also notify Wait() of completion;
// otherwise g.jobN could reach 0 while g.panics is non-empty, and
// Wait() would have to re-check for panics on its normal-exit branch,
// which is awkward.
|
||||
return
|
||||
}
|
||||
|
||||
g.dones <- 1
|
||||
}()
|
||||
|
||||
f()
|
||||
}()
|
||||
|
||||
return g
|
||||
}
|
||||
|
||||
func (g *PanicGroup) Wait(ctx context.Context) error {
|
||||
if g.jobN == 0 {
|
||||
panic("no job to wait")
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-g.dones: // a goroutine finished normally
|
||||
g.jobN--
|
||||
if g.jobN == 0 {
|
||||
return nil
|
||||
}
|
||||
case p := <-g.panics: // a goroutine panicked
|
||||
panic(p)
|
||||
case <-ctx.Done():
|
||||
// The whole ctx ended, either by timeout or because the caller cancelled it.
// Child goroutines should share this ctx and will receive the same signal,
// so there is no need to notify them again here (which would also be awkward to implement).
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
}
|
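The comments in Wait above describe the ctx-cancellation branch. A small sketch of that path, assuming the import path github.com/charlienet/go-mixed/panic and purely illustrative worker bodies:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/charlienet/go-mixed/panic"
)

func main() {
	g := panic.NewPanicGroup(4)

	// Two illustrative workers; neither panics, so each signals dones when it returns.
	g.Go(func() { time.Sleep(100 * time.Millisecond) })
	g.Go(func() { time.Sleep(200 * time.Millisecond) })

	// If the workers outlive the deadline, Wait returns ctx.Err() from the ctx.Done() branch.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	if err := g.Wait(ctx); err != nil {
		fmt.Println("wait ended early:", err)
		return
	}
	fmt.Println("all workers finished")
}
```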
32
panic/panic_test.go
Normal file
32
panic/panic_test.go
Normal file
@ -0,0 +1,32 @@
|
||||
package panic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestPanic(t *testing.T) {
|
||||
defer func() {
|
||||
t.Log("捕捉异常")
|
||||
if e := recover(); e != nil {
|
||||
if err, ok := e.(error); ok {
|
||||
t.Log(err.Error())
|
||||
}
|
||||
t.Log("格式化:", e)
|
||||
}
|
||||
}()
|
||||
|
||||
g := NewPanicGroup(10)
|
||||
g.Go(func() {
|
||||
panic("1243")
|
||||
})
|
||||
|
||||
if err := g.Wait(context.Background()); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
fmt.Println("这条消息可打印")
|
||||
}
|
54
redis/readme.md
Normal file
54
redis/readme.md
Normal file
@ -0,0 +1,54 @@
|
||||
# go-redis connection options

Connection string:
redis://<user>:<pass>@<hostname>:<port>/<db>

Parse it with redis.ParseURL (see the sketch after this section).

```go
gClient = redis.NewClient(&redis.Options{
    // Connection info
    Network:  "tcp",            // network type, tcp or unix, default tcp
    Addr:     "127.0.0.1:6379", // host:port, default localhost:6379
    Password: "",               // password
    DB:       0,                // redis database index

    // Pool capacity and idle connections
    PoolSize:     15, // maximum number of socket connections in the pool, default 4 * runtime.NumCPU
    MinIdleConns: 10, // create this many idle connections at startup and keep at least this many idle

    // Timeouts
    DialTimeout:  5 * time.Second, // dial timeout, default 5 seconds
    ReadTimeout:  3 * time.Second, // read timeout, default 3 seconds; -1 disables it
    WriteTimeout: 3 * time.Second, // write timeout, defaults to the read timeout
    PoolTimeout:  4 * time.Second, // max wait for a free connection when all are busy, default read timeout + 1 second

    // Idle-connection checks: IdleTimeout and MaxConnAge
    IdleCheckFrequency: 60 * time.Second, // idle check period, default 1 minute; -1 disables periodic checks (idle conns are then only handled on checkout)
    IdleTimeout:        5 * time.Minute,  // idle timeout, default 5 minutes; -1 disables the check
    MaxConnAge:         0 * time.Second,  // connection lifetime from creation; older connections are closed; default 0 (never close long-lived connections)

    // Retry policy for failed commands
    MaxRetries:      0,                      // maximum retries when a command fails, default 0 (no retry)
    MinRetryBackoff: 8 * time.Millisecond,   // lower bound of the retry backoff, default 8 ms; -1 disables it
    MaxRetryBackoff: 512 * time.Millisecond, // upper bound of the retry backoff, default 512 ms; -1 disables it

    // Custom dial function
    Dialer: func() (net.Conn, error) {
        netDialer := &net.Dialer{
            Timeout:   5 * time.Second,
            KeepAlive: 5 * time.Minute,
        }
        return netDialer.Dial("tcp", "127.0.0.1:6379")
    },

    // Hook function: called only when the client needs a connection for a command and the pool has to create a new one
    OnConnect: func(conn *redis.Conn) error {
        fmt.Printf("conn=%v\n", conn)
        return nil
    },
})
defer gClient.Close()
```
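The readme mentions redis.ParseURL for the connection-string form; a short go-redis v8 sketch (the URL and key are placeholders):

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	// Parse a redis://<user>:<pass>@<hostname>:<port>/<db> URL into client options.
	opt, err := redis.ParseURL("redis://:secret@127.0.0.1:6379/0") // placeholder URL
	if err != nil {
		panic(err)
	}

	client := redis.NewClient(opt)
	defer client.Close()

	ctx := context.Background()
	if err := client.Set(ctx, "hello", "world", 0).Err(); err != nil {
		panic(err)
	}
	fmt.Println(client.Get(ctx, "hello").Val())
}
```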
105
redis/redis.go
Normal file
105
redis/redis.go
Normal file
@ -0,0 +1,105 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/go-redis/redis/v8"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultSeparator = ":"
|
||||
|
||||
blockingQueryTimeout = 5 * time.Second
|
||||
readWriteTimeout = 2 * time.Second
|
||||
defaultSlowThreshold = time.Millisecond * 100 // slow-query threshold
|
||||
)
|
||||
|
||||
type Option func(r *Redis)
|
||||
|
||||
type Redis struct {
|
||||
addr string // server address
prefix string // key prefix
separator string // separator
|
||||
}
|
||||
|
||||
func New(addr string, opts ...Option) *Redis {
|
||||
r := &Redis{
|
||||
addr: addr,
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func (s *Redis) Set(ctx context.Context, key, value string) error {
|
||||
conn, err := s.getRedis()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return conn.Set(ctx, s.formatKey(key), value, 0).Err()
|
||||
}
|
||||
|
||||
func (s *Redis) Get(ctx context.Context, key string) (string, error) {
|
||||
conn, err := s.getRedis()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return conn.Get(ctx, s.formatKey(key)).Result()
|
||||
}
|
||||
|
||||
func (s *Redis) GetSet(ctx context.Context, key, value string) (string, error) {
|
||||
conn, err := s.getRedis()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
val, err := conn.GetSet(ctx, s.formatKey(key), value).Result()
|
||||
return val, err
|
||||
}
|
||||
|
||||
func (s *Redis) Del(ctx context.Context, key ...string) (int, error) {
|
||||
conn, err := s.getRedis()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
keys := s.formatKeys(key...)
|
||||
v, err := conn.Del(ctx, keys...).Result()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return int(v), err
|
||||
}
|
||||
|
||||
func (s *Redis) getRedis() (redis.UniversalClient, error) {
|
||||
client := redis.NewUniversalClient(&redis.UniversalOptions{
|
||||
Addrs: []string{s.addr},
|
||||
})
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (s *Redis) formatKeys(keys ...string) []string {
|
||||
// If no prefix is configured, this parameter is returned
|
||||
if s.prefix == "" {
|
||||
return keys
|
||||
}
|
||||
|
||||
ret := make([]string, 0, len(keys))
|
||||
for _, k := range keys {
|
||||
ret = append(ret, s.formatKey(k))
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func (s *Redis) formatKey(key string) string {
|
||||
if s.prefix == "" {
|
||||
return key
|
||||
}
|
||||
|
||||
return s.prefix + s.separator + key
|
||||
}
|
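Worth noting: `New` above accepts variadic `opts` but never applies them, and `separator` is left empty, so `formatKey` would concatenate prefix and key directly. The sketch below shows how the option wiring could look; `WithPrefix` and `newWithOptions` are illustrative names, not part of this commit:

```go
// WithPrefix is a hypothetical option that sets the key prefix.
func WithPrefix(prefix string) Option {
	return func(r *Redis) {
		r.prefix = prefix
	}
}

// newWithOptions sketches a constructor that applies its options and
// falls back to the default separator; illustrative only.
func newWithOptions(addr string, opts ...Option) *Redis {
	r := &Redis{
		addr:      addr,
		separator: defaultSeparator,
	}

	for _, opt := range opts {
		opt(r)
	}

	return r
}
```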
86 redis/redis_test.go Normal file
@@ -0,0 +1,86 @@
package redis

import (
	"context"
	"log"
	"testing"
	"time"

	"github.com/alicebob/miniredis/v2"
	"github.com/stretchr/testify/assert"
)

func TestGetSet(t *testing.T) {
	runOnRedis(t, func(client *Redis) {
		ctx := context.Background()

		val, err := client.GetSet(ctx, "hello", "world")
		assert.NotNil(t, err)
		assert.Equal(t, "", val)

		val, err = client.Get(ctx, "hello")
		assert.Nil(t, err)
		assert.Equal(t, "world", val)

		val, err = client.GetSet(ctx, "hello", "newworld")
		assert.Nil(t, err)
		assert.Equal(t, "world", val)

		val, err = client.Get(ctx, "hello")
		assert.Nil(t, err)
		assert.Equal(t, "newworld", val)

		ret, err := client.Del(ctx, "hello")
		assert.Nil(t, err)
		assert.Equal(t, 1, ret)
	})
}

func TestRedis_SetGetDel(t *testing.T) {
	runOnRedis(t, func(client *Redis) {
		ctx := context.Background()

		err := client.Set(ctx, "hello", "world")
		assert.Nil(t, err)

		val, err := client.Get(ctx, "hello")
		assert.Nil(t, err)
		assert.Equal(t, "world", val)

		ret, err := client.Del(ctx, "hello")
		assert.Nil(t, err)
		assert.Equal(t, 1, ret)
	})
}

func runOnRedis(t *testing.T, fn func(client *Redis)) {
	redis, clean, err := CreateMiniRedis()
	assert.Nil(t, err)

	defer clean()

	fn(redis)
}

func CreateMiniRedis() (r *Redis, clean func(), err error) {
	mr, err := miniredis.Run()
	if err != nil {
		return nil, nil, err
	}

	addr := mr.Addr()
	log.Println("mini redis run at:", addr)

	return New(addr), func() {
		ch := make(chan struct{})

		go func() {
			mr.Close()
			close(ch)
		}()

		select {
		case <-ch:
		case <-time.After(time.Second):
		}
	}, nil
}
@@ -6,37 +6,62 @@ import (
 	"fmt"
 	"strings"
 
+	"github.com/charlienet/go-mixed/locker"
 	"golang.org/x/exp/constraints"
 )
 
 var _ Set[string] = &hash_set[string]{}
 
-type hash_set[T constraints.Ordered] map[T]struct{}
+type hash_set[T constraints.Ordered] struct {
+	m    map[T]struct{}
+	lock locker.RWLocker
+}
 
 func NewHashSet[T constraints.Ordered](values ...T) *hash_set[T] {
-	set := make(hash_set[T], len(values))
+	set := hash_set[T]{
+		m:    make(map[T]struct{}, len(values)),
+		lock: locker.EmptyLocker,
+	}
+
 	set.Add(values...)
 	return &set
 }
 
+func (s *hash_set[T]) Sync() *hash_set[T] {
+	s.lock = locker.NewRWLocker()
+	return s
+}
+
 func (s hash_set[T]) Add(values ...T) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
 	for _, v := range values {
-		s[v] = struct{}{}
+		s.m[v] = struct{}{}
 	}
 }
 
 func (s hash_set[T]) Remove(v T) {
-	delete(s, v)
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	delete(s.m, v)
 }
 
 func (s hash_set[T]) Contains(value T) bool {
-	_, ok := s[value]
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+
+	_, ok := s.m[value]
 	return ok
 }
 
 func (s hash_set[T]) ContainsAny(values ...T) bool {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+
 	for _, v := range values {
-		if _, ok := s[v]; ok {
+		if _, ok := s.m[v]; ok {
 			return true
 		}
 	}
@@ -45,8 +70,11 @@ func (s hash_set[T]) ContainsAny(values ...T) bool {
 }
 
 func (s hash_set[T]) ContainsAll(values ...T) bool {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+
 	for _, v := range values {
-		if _, ok := s[v]; !ok {
+		if _, ok := s.m[v]; !ok {
 			return false
 		}
 	}
@@ -64,13 +92,17 @@ func (s hash_set[T]) Desc() Set[T] {
 
 func (s hash_set[T]) copyToSorted() Set[T] {
 	orderd := NewSortedSet[T]()
-	for k := range s {
+	for k := range s.m {
 		orderd.Add(k)
 	}
 
 	return orderd
 }
 
+func (s *hash_set[T]) Shrink() *hash_set[T] {
+	return s
+}
+
 func (s *hash_set[T]) Clone() *hash_set[T] {
 	set := NewHashSet[T]()
 	set.Add(s.ToSlice()...)
@@ -78,7 +110,7 @@ func (s *hash_set[T]) Clone() *hash_set[T] {
 }
 
 func (s hash_set[T]) Iterate(fn func(value T)) {
-	for v := range s {
+	for v := range s.m {
 		fn(v)
 	}
 }
@@ -93,17 +125,17 @@ func (s hash_set[T]) ToSlice() []T {
 }
 
 func (s hash_set[T]) IsEmpty() bool {
-	return len(s) == 0
+	return len(s.m) == 0
 }
 
 func (s hash_set[T]) Size() int {
-	return len(s)
+	return len(s.m)
 }
 
 func (s hash_set[T]) MarshalJSON() ([]byte, error) {
	items := make([]string, 0, s.Size())
 
-	for ele := range s {
+	for ele := range s.m {
 		b, err := json.Marshal(ele)
 		if err != nil {
 			return nil, err
@@ -135,8 +167,8 @@ func (s hash_set[T]) UnmarshalJSON(b []byte) error {
 }
 
 func (s hash_set[T]) String() string {
-	l := make([]string, 0, len(s))
-	for k := range s {
+	l := make([]string, 0, len(s.m))
+	for k := range s.m {
 		l = append(l, fmt.Sprint(k))
 	}
 
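A short usage sketch of the reworked set, relying only on the constructor and methods that appear in this diff and its tests: `Sync()` swaps the default no-op `locker.EmptyLocker` for a real RW lock, after which concurrent mutation is safe.

```go
package main

import (
	"fmt"
	"sync"

	"github.com/charlienet/go-mixed/sets"
)

func main() {
	// A set guarded by a real RW lock; without Sync() the zero-cost EmptyLocker is used.
	s := sets.NewHashSet[int]().Sync()

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			s.Add(n)
		}(i)
	}
	wg.Wait()

	fmt.Println(s.Size(), s.Contains(3))
}
```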
@@ -29,7 +29,7 @@ func TestContainsAll(t *testing.T) {
 }
 
 func TestContainsAny(t *testing.T) {
-
+	sets.NewHashSet("1", "2").Sync()
 }
 
 func TestMarshal(t *testing.T) {
@@ -1,20 +1,22 @@
 package snowflake
 
 import (
+	"fmt"
+	"log"
 	"sync"
 	"time"
 )
 
-// Default snowflake epoch: 2020-01-01
-const defaultStarTimestamp = 1579536000
+// Default snowflake epoch: 2022-01-01
+const defaultStarTimestamp = 1640966400000
 
 const (
-	MachineIdBits  = uint(8)                           // number of bits for the machine id
-	SequenceBits   = uint(12)                          // number of bits for the sequence
-	//MachineIdMax = int64(-1 ^ (-1 << MachineIdBits)) // maximum machine id
-	SequenceMask   = int64(-1 ^ (-1 << SequenceBits))  //
-	MachineIdShift = SequenceBits                      // machine id shift
-	TimestampShift = SequenceBits + MachineIdBits      // timestamp shift
+	MachineIdBits  = uint(8)                           // number of bits for the machine id
+	SequenceBits   = uint(12)                          // number of bits for the sequence
+	MachineIdMax   = int64(-1 ^ (-1 << MachineIdBits)) // maximum machine id
+	SequenceMask   = int64(-1 ^ (-1 << SequenceBits))  //
+	MachineIdShift = SequenceBits                      // machine id shift
+	TimestampShift = SequenceBits + MachineIdBits      // timestamp shift
 )
 
 type SnowFlake interface {
@@ -30,28 +32,42 @@ type snowflake struct {
 }
 
 func CreateSnowflake(machineId int64) SnowFlake {
+	timeBits := 63 - MachineIdBits - SequenceBits
+	maxTime := time.UnixMilli(defaultStarTimestamp + (int64(-1 ^ (-1 << timeBits))))
+	log.Println("maximum usable time:", maxTime)
+
 	return &snowflake{
 		startTimeStamp: defaultStarTimestamp,
-		machineId:      machineId,
+		machineId:      machineId & MachineIdMax,
 	}
 }
 
+// Layout: timestamp - machine id - sequence
 func (s *snowflake) GetId() int64 {
+
+	// Sequence generation rules:
+	// compare the current generation time with the previous one;
+	// if they are equal, check whether the sequence has reached its maximum,
	// and if so wait for the next time slot and reset the sequence to zero;
+	// otherwise the sequence is simply incremented.
 	s.Lock()
 	defer s.Unlock()
 
-	now := time.Now().UnixNano() / 1e6
-	if s.timestamp == now {
-		s.sequence = (s.sequence + 1) & SequenceMask
-		if s.sequence == 0 {
-			for now <= s.timestamp {
-				now = time.Now().UnixNano() / 1e6
-			}
+	now := time.Now().UnixMilli()
+	if s.timestamp == now && s.sequence == 0 {
+		fmt.Println(time.Now().Format("2006-01-02 15:04:05.000"), "waiting for the next time slot")
+		for now <= s.timestamp {
+			now = time.Now().UnixMilli()
 		}
 	} else {
 		s.sequence = 0
 	}
 
 	s.timestamp = now
+	s.sequence = (s.sequence + 1) & SequenceMask
+
+	log.Println("timestamp:", now-s.startTimeStamp)
+
+	log.Println("time since epoch:", time.Now().Sub(time.UnixMilli(defaultStarTimestamp)))
 
 	r := (now-s.startTimeStamp)<<TimestampShift | (s.machineId << MachineIdShift) | (s.sequence)
 
 	return r
 }
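After this change an ID packs, from high bits to low: 43 bits of milliseconds since the 2022-01-01 epoch, 8 bits of machine id (now masked with MachineIdMax), and 12 bits of sequence. A small sketch, assuming it sits in the same snowflake package so it can reuse the constants above, that unpacks an ID back into its parts:

```go
// Decompose splits a generated ID back into its timestamp, machine id and
// sequence parts, mirroring the shifts used in GetId. Illustrative only,
// not part of this commit.
func Decompose(id int64) (timestampMs, machineId, sequence int64) {
	sequence = id & SequenceMask
	machineId = (id >> MachineIdShift) & MachineIdMax
	timestampMs = id >> TimestampShift
	return
}

// Example: an ID generated at defaultStarTimestamp+1ms by machine 3 with
// sequence 7 would be (1 << TimestampShift) | (3 << MachineIdShift) | 7.
```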
@@ -1,6 +1,15 @@
 package snowflake
 
-import "testing"
+import (
+	"testing"
+
+	"github.com/charlienet/go-mixed/sets"
+)
+
+func TestGet(t *testing.T) {
+	s := CreateSnowflake(2)
+	t.Log(s.GetId())
+}
 
 func TestGetId(t *testing.T) {
 	s := CreateSnowflake(22)
@@ -8,3 +17,46 @@ func TestGetId(t *testing.T) {
 		t.Log(s.GetId())
 	}
 }
+
+func TestMutiGetId(t *testing.T) {
+	s := CreateSnowflake(11)
+	for i := 0; i < 100000; i++ {
+		s.GetId()
+	}
+}
+
+func TestMutiConflict(t *testing.T) {
+	set := sets.NewHashSet[int64]()
+	s := CreateSnowflake(11)
+	for i := 0; i < 10000000; i++ {
+		id := s.GetId()
+		if set.Contains(id) {
+			t.Fatal("duplicate id generated")
+		}
+
+		set.Add(id)
+	}
+}
+
+func BenchmarkGetId(b *testing.B) {
+	s := CreateSnowflake(11)
+	for i := 0; i < b.N; i++ {
+		s.GetId()
+	}
+}
+
+func BenchmarkMutiGetId(b *testing.B) {
+	s := CreateSnowflake(11)
+	set := sets.NewHashSet[int64]().Sync()
+	b.RunParallel(func(p *testing.PB) {
+		for i := 0; p.Next(); i++ {
+			id := s.GetId()
+
+			if set.Contains(id) {
+				b.Fatal("duplicate id", id)
+			}
+
+			set.Add(id)
+		}
+	})
+}
@@ -40,7 +40,9 @@ func (s *mapSorter[T]) Desc() *mapSorter[T] {
 
 func (s *mapSorter[T]) Join(sep string, f func(k string, v T) string) string {
 	slice := make([]string, 0, len(s.m))
-	for _, k := range s.keys {
+
+	keys := s.keys[:]
+	for _, k := range keys {
 		slice = append(slice, f(k, s.m[k]))
 	}
 
@@ -53,7 +55,9 @@ func (s *mapSorter[T]) Keys() []string {
 
 func (s *mapSorter[T]) Values() []T {
 	ret := make([]T, 0, len(s.m))
-	for _, k := range s.keys {
+
+	keys := s.keys[:]
+	for _, k := range keys {
 		ret = append(ret, s.m[k])
 	}
 
@@ -19,7 +19,7 @@ func parseField(fi reflect.StructField, opt option) field {
 
 	return field{
 		name:        fi.Name,
-		tagName:     expr.If(isValidTag(name), name, expr.If(opt.NameConverter != nil, opt.NameConverter(fi.Name), fi.Name)),
+		tagName:     expr.Ternary(isValidTag(name), name, expr.Ternary(opt.NameConverter != nil, opt.NameConverter(fi.Name), fi.Name)),
 		ignoreEmpty: opt.IgnoreEmpty || (opts.Contains("omitempty") && opt.Omitempty),
 		ignore:      (name == "-" && opt.Ignore) || isSkipField(fi.Name, opt.SkipFields),
 	}
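For reference, the `expr.If` to `expr.Ternary` rename above matches the helper used elsewhere in this commit. Its shape can be inferred from the call sites as a generic eager ternary; the signature below is an assumption, not taken from the expr package source:

```go
// Assumed shape, inferred from call sites such as the tagName expression above.
func Ternary[T any](cond bool, a, b T) T {
	if cond {
		return a
	}
	return b
}

// Usage in the same spirit as parseField:
//   tag := Ternary(isValid, name, fallback)
// Note that both arguments are evaluated before the call, as with any Go function.
```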
@@ -36,7 +36,8 @@ func (s *Struct) Kind() reflect.Kind {
 
 func (s *Struct) Names() []string {
 	names := make([]string, len(s.fields))
-	for i, f := range s.fields {
+	fields := s.fields[:]
+	for i, f := range fields {
 		names[i] = f.name
 	}
 
@@ -45,7 +46,9 @@ func (s *Struct) Names() []string {
 
 func (s *Struct) Values() []any {
 	values := make([]any, 0, len(s.fields))
-	for _, fi := range s.fields {
+
+	fields := s.fields[:]
+	for _, fi := range fields {
 		v := s.value.FieldByName(fi.name)
 		values = append(values, v.Interface())
 	}
@@ -54,7 +57,8 @@ func (s *Struct) Values() []any {
 }
 
 func (s *Struct) IsZero() bool {
-	for _, fi := range s.fields {
+	fields := s.fields[:]
+	for _, fi := range fields {
 		source := s.value.FieldByName(fi.name)
 		if !source.IsZero() {
 			return false
@@ -66,7 +70,9 @@ func (s *Struct) IsZero() bool {
 
 func (s *Struct) ToMap() map[string]any {
 	m := make(map[string]any, len(s.fields))
-	for _, fi := range s.fields {
+
+	fields := s.fields[:]
+	for _, fi := range fields {
 		source := s.value.FieldByName(fi.name)
 		if fi.shouldIgnore(source) {
 			continue
@@ -109,7 +115,8 @@ func (s *Struct) Copy(dest any) error {
 }
 
 func (s *Struct) getByName(name string) (field, bool) {
-	for i := range s.fields {
+	fields := s.fields[:]
+	for i := range fields {
 		f := s.fields[i]
 		if f.name == name {
 			return f, true
@@ -3,6 +3,7 @@ package tests
 import (
 	"bytes"
 	"fmt"
+	"net"
 	"testing"
 )
 
@@ -37,3 +38,16 @@ func BenchmarkStringSplice(b *testing.B) {
 		}
 	})
 }
+
+func TestIPSegment(t *testing.T) {
+	i, n, err := net.ParseCIDR("0.0.0.0/0")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	t.Log(i, n)
+
+	address := net.ParseIP("192.168.0.2")
+
+	t.Log(n.Contains(address))
+}
7 validator_translation/validator_translation.go Normal file
@@ -0,0 +1,7 @@
package validatortranslation

import (
	ut "github.com/go-playground/universal-translator"
)

var Trans ut.Translator
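The new package only declares the shared translator variable. Below is a sketch of how it could be initialized; the Init helper, the English locale, and the use of go-playground/validator/v10 translations are assumptions, not part of this commit:

```go
package validatortranslation

import (
	"github.com/go-playground/locales/en"
	ut "github.com/go-playground/universal-translator"
	"github.com/go-playground/validator/v10"
	en_translations "github.com/go-playground/validator/v10/translations/en"
)

var Trans ut.Translator

// Init wires the shared translator to a validator instance so that
// validation errors can be rendered as readable English messages.
func Init(validate *validator.Validate) error {
	enLocale := en.New()
	uni := ut.New(enLocale, enLocale)
	Trans, _ = uni.GetTranslator("en")

	return en_translations.RegisterDefaultTranslations(validate, Trans)
}
```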
1 worker_pool/worker_pool.go Normal file
@@ -0,0 +1 @@
package workerpool