mirror of https://github.com/charlienet/go-mixed.git synced 2025-07-18 16:42:41 +08:00

2 Commits

Author SHA1 Message Date
37e9cabde8 snow flake 2022-07-25 14:51:16 +08:00
886723997e Optimize range references 2022-07-04 12:01:44 +08:00
10 changed files with 230 additions and 31 deletions
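Most of the modified-file hunks below come from the "Optimize range references" commit and apply the same pattern: take a local snapshot of the slice header before ranging over it. A minimal sketch of the pattern, with illustrative names that are not taken from the repository:

package main

import "fmt"

type pipeline struct {
	steps []func(string) string
}

func (p *pipeline) run(value string) string {
	// Snapshot the slice header so the loop keeps iterating over the
	// same backing view even if p.steps is reassigned while iterating.
	steps := p.steps[:]
	for _, step := range steps {
		value = step(value)
	}
	return value
}

func main() {
	p := &pipeline{steps: []func(string) string{
		func(s string) string { return s + "!" },
	}}
	fmt.Println(p.run("hello")) // hello!
}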


@@ -26,7 +26,8 @@ func NewBloomFilter() *BloomFilter {
 }

 func (bf *BloomFilter) Add(value string) {
-	for _, f := range bf.funcs {
+	funcs := bf.funcs[:]
+	for _, f := range funcs {
 		bf.set.Set(f.hash(value))
 	}
 }
@@ -36,7 +37,9 @@ func (bf *BloomFilter) Contains(value string) bool {
 		return false
 	}
 	ret := true
-	for _, f := range bf.funcs {
+
+	funcs := bf.funcs[:]
+	for _, f := range funcs {
 		ret = ret && bf.set.Test(f.hash(value))
 	}
 	return ret


@@ -16,7 +16,9 @@ func (r BytesResult) Hex() string {
 func (r BytesResult) UppercaseHex() string {
 	dst := make([]byte, hex.EncodedLen(len(r)))
 	j := 0
-	for _, v := range r {
+
+	re := r[:]
+	for _, v := range re {
 		dst[j] = hextable[v>>4]
 		dst[j+1] = hextable[v&0x0f]
 		j += 2


@@ -42,7 +42,8 @@ func (z *zipPackage) Write(out *os.File) error {
 	zipWriter := zip.NewWriter(out)
 	defer zipWriter.Close()

-	for _, f := range z.files {
+	files := z.files
+	for _, f := range files {
 		fileWriter, err := zipWriter.Create(f.name)
 		if err != nil {
 			return err


@@ -88,7 +88,8 @@ func (m *sorted_map[K, V]) Iter() <-chan *Entry[K, V] {
 }

 func (m *sorted_map[K, V]) ForEach(f func(K, V) bool) {
-	for _, k := range m.keys {
+	keys := m.keys[:]
+	for _, k := range keys {
 		if v, ok := m.Get(k); ok {
 			if f(k, v) {
 				break

panic/panic.go (new file, +81 lines)

@@ -0,0 +1,81 @@
package panic

import (
	"context"
	"fmt"
	"runtime/debug"
)

type Panic struct {
	R     any
	Stack []byte
}

func (p Panic) String() string {
	return fmt.Sprintf("%v\n%s", p.R, p.Stack)
}

type PanicGroup struct {
	panics chan Panic // fatal error notifications
	dones  chan int   // goroutine completion notifications
	jobs   chan int   // concurrency limit
	jobN   int32      // number of worker goroutines
}

func NewPanicGroup(maxConcurrent int) *PanicGroup {
	return &PanicGroup{
		panics: make(chan Panic, 8),
		dones:  make(chan int, 8),
		jobs:   make(chan int, maxConcurrent),
	}
}

func (g *PanicGroup) Go(f func()) *PanicGroup {
	g.jobN++
	go func() {
		g.jobs <- 1
		defer func() {
			<-g.jobs

			// In Go, a panic can only be recovered inside the goroutine
			// that raised it; if it is not handled here, the whole
			// *process* exits.
			if r := recover(); r != nil {
				g.panics <- Panic{R: r, Stack: debug.Stack()}

				// When a panic occurs, do not tell Wait() that the job
				// completed. Otherwise g.jobN could reach 0 while
				// g.panics is still non-empty, and Wait() would have to
				// re-check for panics in its normal-completion branch,
				// which is awkward.
				return
			}
			g.dones <- 1
		}()
		f()
	}()
	return g
}

func (g *PanicGroup) Wait(ctx context.Context) error {
	if g.jobN == 0 {
		panic("no job to wait")
	}

	for {
		select {
		case <-g.dones: // a goroutine finished normally
			g.jobN--
			if g.jobN == 0 {
				return nil
			}
		case p := <-g.panics: // a goroutine panicked
			panic(p)
		case <-ctx.Done():
			// The whole ctx is done: timeout, or the caller cancelled.
			// Child goroutines are expected to share this ctx and will
			// receive the same signal, so there is no need to notify
			// each of them here (which would also be cumbersome).
			return ctx.Err()
		}
	}
}
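A minimal usage sketch for the PanicGroup above; the import path is an assumption based on the repository layout (panic/panic.go under go-mixed):

package main

import (
	"context"
	"fmt"
	"time"

	pnc "github.com/charlienet/go-mixed/panic" // assumed import path
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	g := pnc.NewPanicGroup(4) // at most 4 jobs run concurrently
	g.Go(func() { fmt.Println("worker 1 done") })
	g.Go(func() { fmt.Println("worker 2 done") })

	// Wait returns nil when every goroutine finished normally, returns
	// ctx.Err() on timeout or cancellation, and re-panics in the caller
	// if any worker panicked.
	if err := g.Wait(ctx); err != nil {
		fmt.Println("wait:", err)
	}
}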

panic/panic_test.go (new file, +32 lines)

@@ -0,0 +1,32 @@
package panic

import (
	"context"
	"fmt"
	"testing"
	"time"
)

func TestPanic(t *testing.T) {
	defer func() {
		t.Log("catching panic")
		if e := recover(); e != nil {
			if err, ok := e.(error); ok {
				t.Log(err.Error())
			}
			t.Log("formatted:", e)
		}
	}()

	g := NewPanicGroup(10)
	g.Go(func() {
		panic("1243")
	})

	if err := g.Wait(context.Background()); err != nil {
		panic(err)
	}

	time.Sleep(1 * time.Second)
	fmt.Println("this message can still be printed")
}


@@ -1,20 +1,22 @@
 package snowflake

 import (
+	"fmt"
+	"log"
 	"sync"
 	"time"
 )

-// Default start time for the snowflake algorithm: 2020-01-01
-const defaultStarTimestamp = 1579536000
+// Default start time for the snowflake algorithm: 2022-01-01
+const defaultStarTimestamp = 1640966400000

 const (
 	MachineIdBits  = uint(8)  // number of bits used by the machine id
 	SequenceBits   = uint(12) // number of bits used by the sequence
-	//MachineIdMax = int64(-1 ^ (-1 << MachineIdBits)) // maximum supported machine id
+	MachineIdMax   = int64(-1 ^ (-1 << MachineIdBits)) // maximum supported machine id
 	SequenceMask   = int64(-1 ^ (-1 << SequenceBits))  //
 	MachineIdShift = SequenceBits                      // machine id shift width
 	TimestampShift = SequenceBits + MachineIdBits      // timestamp shift width
 )

 type SnowFlake interface {
@@ -30,28 +32,42 @@ type snowflake struct {
 }

 func CreateSnowflake(machineId int64) SnowFlake {
+	timeBits := 63 - MachineIdBits - SequenceBits
+	maxTime := time.UnixMilli(defaultStarTimestamp + (int64(-1 ^ (-1 << timeBits))))
+	log.Println("maximum usable time:", maxTime)
+
 	return &snowflake{
 		startTimeStamp: defaultStarTimestamp,
-		machineId:      machineId,
+		machineId:      machineId & MachineIdMax,
 	}
 }

+// Layout: timestamp - machine id - sequence
 func (s *snowflake) GetId() int64 {
+	// Sequence generation rules:
+	// Compare the current time with the time of the previous ID.
+	// If they are equal, check whether the sequence has reached its maximum; if so, wait for the next time point and reset the sequence to zero.
+	// If they are not equal, simply increment the sequence.
 	s.Lock()
 	defer s.Unlock()

-	now := time.Now().UnixNano() / 1e6
-	if s.timestamp == now {
-		s.sequence = (s.sequence + 1) & SequenceMask
-		if s.sequence == 0 {
-			for now <= s.timestamp {
-				now = time.Now().UnixNano() / 1e6
-			}
-		}
-	} else {
-		s.sequence = 0
+	now := time.Now().UnixMilli()
+	if s.timestamp == now && s.sequence == 0 {
+		fmt.Println(time.Now().Format("2006-01-02 15:04:05.000"), "next time point")
+		for now <= s.timestamp {
+			now = time.Now().UnixMilli()
+		}
 	}

 	s.timestamp = now
+	s.sequence = (s.sequence + 1) & SequenceMask
+
+	log.Println("timestamp:", now-s.startTimeStamp)
+	log.Println("time difference:", time.Now().Sub(time.UnixMilli(defaultStarTimestamp)))
+
 	r := (now-s.startTimeStamp)<<TimestampShift | (s.machineId << MachineIdShift) | (s.sequence)
 	return r
 }
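For reference, GetId packs its fields as timestamp | machineId | sequence using the shift constants above. A small decoding sketch; the constants are duplicated (lower-cased) so the snippet stands alone, and the decompose helper is illustrative rather than part of the package:

package main

import "fmt"

// Constants mirroring the diff above.
const (
	machineIdBits  = uint(8)
	sequenceBits   = uint(12)
	machineIdMax   = int64(-1 ^ (-1 << machineIdBits))
	sequenceMask   = int64(-1 ^ (-1 << sequenceBits))
	machineIdShift = sequenceBits
	timestampShift = sequenceBits + machineIdBits
)

// decompose splits an ID back into its three fields: milliseconds since the
// epoch, machine id, and sequence.
func decompose(id int64) (elapsedMs, machineId, sequence int64) {
	sequence = id & sequenceMask
	machineId = (id >> machineIdShift) & machineIdMax
	elapsedMs = id >> timestampShift
	return
}

func main() {
	id := (int64(123456) << timestampShift) | (int64(2) << machineIdShift) | 7
	fmt.Println(decompose(id)) // 123456 2 7
}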


@@ -1,6 +1,15 @@
 package snowflake

-import "testing"
+import (
+	"testing"
+
+	"github.com/charlienet/go-mixed/sets"
+)
+
+func TestGet(t *testing.T) {
+	s := CreateSnowflake(2)
+	t.Log(s.GetId())
+}

 func TestGetId(t *testing.T) {
 	s := CreateSnowflake(22)
@@ -8,3 +17,46 @@ func TestGetId(t *testing.T) {
 		t.Log(s.GetId())
 	}
 }
+
+func TestMutiGetId(t *testing.T) {
+	s := CreateSnowflake(11)
+	for i := 0; i < 100000; i++ {
+		s.GetId()
+	}
+}
+
+func TestMutiConflict(t *testing.T) {
+	set := sets.NewHashSet[int64]()
+	s := CreateSnowflake(11)
+	for i := 0; i < 10000000; i++ {
+		id := s.GetId()
+		if set.Contains(id) {
+			t.Fatal("failed: duplicate id generated")
+		}
+		set.Add(id)
+	}
+}
+
+func BenchmarkGetId(b *testing.B) {
+	s := CreateSnowflake(11)
+	for i := 0; i < b.N; i++ {
+		s.GetId()
+	}
+}
+
+func BenchmarkMutiGetId(b *testing.B) {
+	s := CreateSnowflake(11)
+	set := sets.NewHashSet[int64]().WithSync()
+	b.RunParallel(func(p *testing.PB) {
+		for i := 0; p.Next(); i++ {
+			id := s.GetId()
+			if set.Contains(id) {
+				b.Fatal("duplicate id", id)
+			}
+			set.Add(id)
+		}
+	})
+}


@@ -40,7 +40,9 @@ func (s *mapSorter[T]) Desc() *mapSorter[T] {
 func (s *mapSorter[T]) Join(sep string, f func(k string, v T) string) string {
 	slice := make([]string, 0, len(s.m))
-	for _, k := range s.keys {
+
+	keys := s.keys[:]
+	for _, k := range keys {
 		slice = append(slice, f(k, s.m[k]))
 	}
@@ -53,7 +55,9 @@ func (s *mapSorter[T]) Keys() []string {
 func (s *mapSorter[T]) Values() []T {
 	ret := make([]T, 0, len(s.m))
-	for _, k := range s.keys {
+
+	keys := s.keys[:]
+	for _, k := range keys {
 		ret = append(ret, s.m[k])
 	}


@@ -36,7 +36,8 @@ func (s *Struct) Kind() reflect.Kind {
 func (s *Struct) Names() []string {
 	names := make([]string, len(s.fields))
-	for i, f := range s.fields {
+	fields := s.fields[:]
+	for i, f := range fields {
 		names[i] = f.name
 	}
@@ -45,7 +46,9 @@ func (s *Struct) Names() []string {
 func (s *Struct) Values() []any {
 	values := make([]any, 0, len(s.fields))
-	for _, fi := range s.fields {
+
+	fields := s.fields[:]
+	for _, fi := range fields {
 		v := s.value.FieldByName(fi.name)
 		values = append(values, v.Interface())
 	}
@@ -54,7 +57,8 @@ func (s *Struct) Values() []any {
 }

 func (s *Struct) IsZero() bool {
-	for _, fi := range s.fields {
+	fields := s.fields[:]
+	for _, fi := range fields {
 		source := s.value.FieldByName(fi.name)
 		if !source.IsZero() {
 			return false
@@ -66,7 +70,9 @@ func (s *Struct) IsZero() bool {
 func (s *Struct) ToMap() map[string]any {
 	m := make(map[string]any, len(s.fields))
-	for _, fi := range s.fields {
+
+	fields := s.fields[:]
+	for _, fi := range fields {
 		source := s.value.FieldByName(fi.name)
 		if fi.shouldIgnore(source) {
 			continue
@@ -109,7 +115,8 @@ func (s *Struct) Copy(dest any) error {
 }

 func (s *Struct) getByName(name string) (field, bool) {
-	for i := range s.fields {
+	fields := s.fields[:]
+	for i := range fields {
 		f := s.fields[i]
 		if f.name == name {
 			return f, true