Commit

init commit
xiaost committed Mar 29, 2017
1 parent 7c8f6c9 commit 10182ad
Showing 26 changed files with 3,383 additions and 0 deletions.
38 changes: 38 additions & 0 deletions .gitignore
@@ -0,0 +1,38 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# external packages folder
vendor/


# bin
blobcached
tools/mcstat/mcstat

# default cache path
cachedata/
38 changes: 38 additions & 0 deletions README.md
@@ -0,0 +1,38 @@
Blobcached
=====
Blobcached is a memcached-protocol-compatible cache server for blobs on SSD.

### Supported commands
| Command | Format |
| ------ | ------ |
| get | `get <key> [<key>]+\r\n` |
| set | `set <key> <flags> <expiry> <datalen> [noreply]\r\n<data>\r\n` |
| delete | `delete <key> [noreply]\r\n` |
| touch | `touch <key> <expiry> [noreply]\r\n` |
| stats | `stats\r\n` |
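
Because Blobcached speaks the memcached text protocol, any standard memcached client should work against it. Below is a minimal sketch using the third-party `github.com/bradfitz/gomemcache` client; the listen address `127.0.0.1:11211` is an assumption for illustration, not a documented default.

```go
package main

import (
	"fmt"
	"log"

	"github.com/bradfitz/gomemcache/memcache"
)

func main() {
	// The address is an assumption; point it at wherever blobcached is listening.
	mc := memcache.New("127.0.0.1:11211")

	// set <key> <flags> <expiry> <datalen>\r\n<data>\r\n
	if err := mc.Set(&memcache.Item{Key: "blob:1", Value: []byte("hello"), Expiration: 60}); err != nil {
		log.Fatal(err)
	}

	// get <key>\r\n
	it, err := mc.Get("blob:1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", it.Value)

	// touch <key> <expiry>\r\n
	if err := mc.Touch("blob:1", 120); err != nil {
		log.Fatal(err)
	}

	// delete <key>\r\n
	if err := mc.Delete("blob:1"); err != nil {
		log.Fatal(err)
	}
}
```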

### How it works
#### Concepts
| Name | Description |
| ------ | ------ |
| indexfile | an indexfile stores `items`, backed by [boltdb](https://github.com/boltdb/bolt) |
| datafile | a regular file, accessed via [mmap](https://github.com/edsrzf/mmap-go), that stores values |
| item | an item is made up of `key`, `offset`, `term`, and `size`, anchoring a value in the datafile |
| term | every time the `datafile` becomes full, its `term` is incremented (see the sketch below) |
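
To make the `offset`/`term` bookkeeping concrete, here is a hedged sketch of how a datafile write cursor could wrap around and bump its term when the file fills up. The names and the wrap-to-zero behavior are assumptions for illustration, not the actual implementation.

```go
// Hypothetical write cursor for a fixed-size datafile: values are appended
// until the next value would not fit, then the cursor wraps to offset 0 and
// the term is incremented. Items recorded under an older term become invalid.
type cursor struct {
	size   int64  // fixed size of the datafile
	offset int64  // next write position
	term   uint64 // incremented on every wrap-around
}

// reserve returns the offset and term under which a value of n bytes
// will be written, wrapping to the start of the file when it does not fit.
func (c *cursor) reserve(n int64) (offset int64, term uint64) {
	if c.offset+n > c.size {
		c.offset = 0
		c.term++
	}
	offset, term = c.offset, c.term
	c.offset += n
	return offset, term
}
```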

#### Command: Set
* get the current `offset` and `term` of the `datafile`
* write the value to the `datafile`
* write an `item` with the `key`, `offset`, and `term` to the `indexfile`

#### Command: Get
* look up the `item` by `key` in the `indexfile`
* check the item's `term` and `offset` against the `datafile`
* read the value from the `datafile` (see the combined set/get sketch below)
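
The two paths above can be condensed into the following hedged sketch. The `indexfile`/`datafile` interfaces, method names, and the `item` record are assumptions for illustration; the real implementation lives in the `cache` package and differs in detail.

```go
package sketch

import "errors"

// Hypothetical view of one shard's set/get paths.
type item struct {
	Offset, Size int64
	Term         uint64
}

type indexfile interface {
	Put(key string, it item) error
	Get(key string) (item, error)
}

type datafile interface {
	Reserve(n int64) (offset int64, term uint64) // current write position; rolls the term when full
	WriteAt(p []byte, off int64) error
	ReadAt(off, size int64) ([]byte, error)
	Valid(term uint64, off, size int64) bool // still readable under the current term?
}

var errNotFound = errors.New("key not found")

// set: reserve space in the datafile, write the value, then record an item
// (offset, term, size) under the key in the indexfile.
func set(idx indexfile, data datafile, key string, value []byte) error {
	off, term := data.Reserve(int64(len(value)))
	if err := data.WriteAt(value, off); err != nil {
		return err
	}
	return idx.Put(key, item{Offset: off, Size: int64(len(value)), Term: term})
}

// get: look up the item, check its term/offset against the datafile,
// then read the value.
func get(idx indexfile, data datafile, key string) ([]byte, error) {
	it, err := idx.Get(key)
	if err != nil {
		return nil, err
	}
	if !data.Valid(it.Term, it.Offset, it.Size) {
		return nil, errNotFound
	}
	return data.ReadAt(it.Offset, it.Size)
}
```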

#### Command: Touch
* implemented as a `get` followed by a `set`

#### GC
* Blobcached scans the `indexfile` and removes expired or invalid `items` (see the sketch below)
* by default, the scan rate is capped at 32k items/second
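
A hedged sketch of what a rate-limited index scan could look like; the batch size, callback names, and pacing scheme are assumptions for illustration, not the actual GC implementation.

```go
package sketch

import "time"

// Hypothetical rate-limited GC loop: scan the indexfile in small batches and
// pace the batches with a ticker so the overall rate stays near ratePerSec
// items/second (e.g. 32k/s). scanBatch and removeIfDead stand in for the
// real index iteration and item validation.
func gcLoop(scanBatch func(cursor string, n int) (keys []string, next string),
	removeIfDead func(key string) bool, ratePerSec int) {

	const batch = 1024
	interval := time.Duration(batch) * time.Second / time.Duration(ratePerSec)
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	cursor := ""
	for range ticker.C {
		keys, next := scanBatch(cursor, batch)
		for _, k := range keys {
			removeIfDead(k) // drops expired items and items whose term/offset are stale
		}
		cursor = next // an empty cursor restarts the scan from the beginning
	}
}
```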
214 changes: 214 additions & 0 deletions cache/cache.go
@@ -0,0 +1,214 @@
package cache

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/pkg/errors"
)

var (
	ErrNotFound  = errors.New("key not found")
	ErrValueSize = errors.New("value size exceeded")
)

const (
	MaxShards    = 128
	MaxValueSize = int64(128 << 20) // 128MB
	MinShardSize = MaxValueSize + 4096
)

type CacheMetrics struct {
	GetTotal   int64 // number of get requests
	GetHits    int64 // number of gets that found a live item
	GetMisses  int64 // number of gets that found no item
	GetExpired int64 // number of gets that found an expired item
	SetTotal   int64 // number of set requests
	DelTotal   int64 // number of delete requests
	Expired    int64 // number of items that expired
	Evicted    int64 // number of items evicted
	EvictedAge int64 // age of the last evicted item (min when aggregated)
}

func (m *CacheMetrics) Add(o CacheMetrics) {
	m.GetTotal += o.GetTotal
	m.GetHits += o.GetHits
	m.GetMisses += o.GetMisses
	m.GetExpired += o.GetExpired
	m.SetTotal += o.SetTotal
	m.DelTotal += o.DelTotal
	m.Expired += o.Expired
	m.Evicted += o.Evicted
	// use min age
	if m.EvictedAge <= 0 || (o.EvictedAge > 0 && o.EvictedAge < m.EvictedAge) {
		m.EvictedAge = o.EvictedAge
	}
}

type CacheStats struct {
	Keys       uint64 // number of keys
	Bytes      uint64 // number of bytes used by stored values
	LastUpdate int64  // stat time; stats are updated asynchronously
}

func (st *CacheStats) Add(o CacheStats) {
	st.Keys += o.Keys
	st.Bytes += o.Bytes
	// use the oldest time
	if o.LastUpdate < st.LastUpdate {
		st.LastUpdate = o.LastUpdate
	}
}

type Cache struct {
	hash   ConsistentHash
	shards []*Shard

	options CacheOptions
}

type Allocator interface {
	Malloc(n int) []byte
	Free([]byte)
}

type CacheOptions struct {
	ShardNum  int
	Size      int64
	TTL       int64
	Allocator Allocator
}

var DefualtCacheOptions = CacheOptions{
	ShardNum:  16,
	Size:      32 * MaxValueSize, // 32*128MB = 4GB
	TTL:       0,
	Allocator: NewAllocatorPool(),
}

func NewCache(path string, options *CacheOptions) (*Cache, error) {
	if err := os.MkdirAll(path, 0700); err != nil {
		return nil, errors.Wrap(err, "create cache dir")
	}
	if options == nil {
		options = &CacheOptions{}
		*options = DefualtCacheOptions
	}
	if options.Allocator == nil {
		options.Allocator = NewAllocatorPool()
	}
	if options.ShardNum > MaxShards {
		options.ShardNum = MaxShards
	}
	if options.Size/int64(options.ShardNum) < MinShardSize {
		options.ShardNum = int(options.Size / MinShardSize)
	}
	if options.ShardNum <= 0 {
		// ensure at least one shard (a Size below MinShardSize would otherwise yield zero shards)
		options.ShardNum = 1
	}
	var err error
	cache := Cache{options: *options}
	cache.shards = make([]*Shard, options.ShardNum)
	for i := 0; i < MaxShards; i++ {
		fn := filepath.Join(path, fmt.Sprintf("shard.%03d", i))
		if i >= options.ShardNum { // rm unused files
			os.Remove(fn + indexSubfix)
			os.Remove(fn + dataSubfix)
			continue
		}
		sopts := &ShardOptions{
			Size:      options.Size / int64(options.ShardNum),
			TTL:       options.TTL,
			Allocator: options.Allocator,
		}
		cache.shards[i], err = LoadCacheShard(fn, sopts)
		if err != nil {
			return nil, errors.Wrap(err, "load cache shard")
		}
	}
	cache.hash = NewConsistentHashTable(len(cache.shards))
	return &cache, nil
}

func (c *Cache) Close() error {
	var err error
	for _, s := range c.shards {
		er := s.Close()
		if er != nil {
			err = er
		}
	}
	return err
}

func (c *Cache) getshard(key string) *Shard {
	return c.shards[c.hash.Get(key)]
}

type Item struct {
	Key       string
	Value     []byte
	Timestamp int64
	TTL       uint32
	Flags     uint32

	allocator Allocator
}

func (i *Item) Free() {
	if i.Value != nil && i.allocator != nil {
		i.allocator.Free(i.Value)
		i.allocator = nil
		i.Value = nil
	}
}

func (c *Cache) Set(item Item) error {
	if int64(len(item.Value)) > MaxValueSize {
		return ErrValueSize
	}
	s := c.getshard(item.Key)
	return s.Set(item)
}

func (c *Cache) Get(key string) (Item, error) {
	s := c.getshard(key)
	return s.Get(key)
}

func (c *Cache) Del(key string) error {
	s := c.getshard(key)
	return s.Del(key)
}

func (c *Cache) GetMetrics() CacheMetrics {
	var m CacheMetrics
	for _, s := range c.GetMetricsByShards() {
		m.Add(s)
	}
	return m
}

func (c *Cache) GetStats() CacheStats {
	var st CacheStats
	for _, s := range c.GetStatsByShards() {
		st.Add(s)
	}
	return st
}

func (c *Cache) GetMetricsByShards() []CacheMetrics {
	ret := make([]CacheMetrics, len(c.shards))
	for i, s := range c.shards {
		ret[i] = s.GetMetrics()
	}
	return ret
}

func (c *Cache) GetStatsByShards() []CacheStats {
	ret := make([]CacheStats, len(c.shards))
	for i, s := range c.shards {
		ret[i] = s.GetStats()
	}
	return ret
}

func (c *Cache) GetOptions() CacheOptions {
	return c.options
}
54 changes: 54 additions & 0 deletions cache/cache_test.go
@@ -0,0 +1,54 @@
package cache

import (
	"bytes"
	"io/ioutil"
	"math/rand"
	"os"
	"strconv"
	"testing"
)

func TestCache(t *testing.T) {
	dir, err := ioutil.TempDir("", "test_cache")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)
	c, err := NewCache(dir, nil)
	if err != nil {
		t.Fatal(err)
	}
	b := make([]byte, 100)

	n := 1000
	for i := 0; i < n; i++ {
		key := strconv.FormatInt(rand.Int63(), 16)
		rand.Read(b)
		if err := c.Set(Item{Key: key, Value: b}); err != nil {
			t.Fatal(err)
		}
		item, err := c.Get(key)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(item.Value, b) {
			t.Fatal("set/get value mismatch")
		}
		if err := c.Del(key); err != nil {
			t.Fatal(err)
		}
		if _, err := c.Get(key); err != ErrNotFound {
			t.Fatal("key should not be found after delete")
		}
	}
	m := c.GetMetrics()

	k := int64(n)
	if m.GetTotal != 2*k || m.DelTotal != k || m.SetTotal != k || m.GetMisses != k || m.GetHits != k {
		t.Fatal("metrics mismatch", m)
	}
	if err := c.Close(); err != nil {
		t.Fatal(err)
	}
}