Mirror of https://github.com/foomo/neosproxy.git
feature/cache dependencies (#5)
* feat: add test for filesystem caching
* content cache dependencies
* cms content loader: context with timeout
* go builder version 1.13
This commit is contained in:
parent d36eb1837e
commit ceb8106914
@@ -1,16 +1,13 @@
# -----------------------------------------------------------------------------
# Builder Base
# -----------------------------------------------------------------------------
FROM golang:alpine as base
FROM golang:1.13-alpine as base

RUN apk add --no-cache git glide upx \
RUN apk add --no-cache git upx \
	&& rm -rf /var/cache/apk/*

WORKDIR /go/src/github.com/foomo/neosproxy

COPY glide.yaml glide.lock ./
RUN glide install

# -----------------------------------------------------------------------------
@@ -21,7 +18,7 @@ FROM base as builder
COPY . ./

# Build the binary
RUN glide install
RUN go mod vendor
RUN CGO_ENABLED=0 go build -o /go/bin/neosproxy cmd/neosproxy/main.go

# Compress the binary

cache/content/constructor.go (vendored): 33 lines changed
@@ -1,23 +1,52 @@
package content

import (
	"container/list"
	"time"

	"github.com/foomo/neosproxy/cache/content/store"
	"github.com/foomo/neosproxy/client/cms"
	"github.com/foomo/neosproxy/logging"
	"golang.org/x/sync/singleflight"
)

// New will return a newly created content cache
func New(cacheLifetime time.Duration, store store.CacheStore, loader cms.ContentLoader, observer Observer) *Cache {
func New(cacheLifetime time.Duration, store store.CacheStore, loader cms.ContentLoader, observer Observer, log logging.Entry) *Cache {
	c := &Cache{
		observer: observer,
		loader:   loader,
		store:    store,

		// invalidationChannel: make(chan InvalidationRequest),
		cacheDependencies: NewCacheDependencies(),

		invalidationRequestGroup: &singleflight.Group{},
		invalidationChannel:      make(chan InvalidationRequest),
		invalidationRetryChannel: make(chan InvalidationRequest),
		retryQueue:               &list.List{},
		lifetime:                 cacheLifetime,
		log:                      log,
	}

	// load cache dependencies
	cacheDependencies, errCacheDependencies := c.store.GetAllCacheDependencies()
	if errCacheDependencies != nil {
		c.log.WithError(errCacheDependencies).Error("unable to init cache dependencies")
	}

	// update cache dependencies
	for _, obj := range cacheDependencies {
		for _, targetID := range obj.Dependencies {
			c.cacheDependencies.Set(obj.ID, targetID, obj.Dimension, obj.Workspace)
		}
	}

	// initialize invalidation workers
	for w := 1; w <= 10; w++ {
		go c.invalidationWorker(w)
	}

	// initialize retry worker
	c.runRetryWorker()

	return c
}
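
The constructor now also restores the persisted cache dependencies, starts ten invalidation workers and the retry worker, and requires a logging.Entry. A minimal sketch of how a caller might wire it up, assuming an existing cms.ContentLoader and Observer; import paths are inferred from the file layout and the cache directory is illustrative, not taken from the repository:

package example

import (
	"time"

	"github.com/foomo/neosproxy/cache/content"
	"github.com/foomo/neosproxy/cache/content/store/fs"
	"github.com/foomo/neosproxy/client/cms"
	"github.com/foomo/neosproxy/logging"
)

// newContentCache mirrors what proxy.New does further down in this diff:
// build a store, then hand loader, observer and logger to content.New.
// It assumes fs.NewCacheStore's return value satisfies store.CacheStore.
func newContentCache(loader cms.ContentLoader, observer content.Observer, log logging.Entry) *content.Cache {
	cacheStore := fs.NewCacheStore("/var/cache/neosproxy") // illustrative path

	// a negative lifetime would mean "never re-invalidate", per the Cache VO comment
	return content.New(30*time.Minute, cacheStore, loader, observer, log)
}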

cache/content/dependencies.go (vendored, new file): 71 lines
@@ -0,0 +1,71 @@
package content

import "sync"

//-----------------------------------------------------------------------------
// ~ CACHE DEPENDENCIES for all dimensions in all workspaces
//-----------------------------------------------------------------------------

type cacheDependencies struct {
	dependencies map[string]*cacheDependency
}

func NewCacheDependencies() *cacheDependencies {
	return &cacheDependencies{
		dependencies: make(map[string]*cacheDependency, 4),
	}
}

func (c *cacheDependencies) getHash(dimension, workspace string) string {
	return workspace + "_" + dimension
}

func (c *cacheDependencies) Get(id, dimension, workspace string) []string {
	hash := c.getHash(dimension, workspace)
	if cache, ok := c.dependencies[hash]; ok {
		return cache.Get(id)
	}
	return nil
}

func (c *cacheDependencies) Set(sourceID, targetID, dimension, workspace string) {
	hash := c.getHash(dimension, workspace)
	if _, ok := c.dependencies[hash]; !ok {
		c.dependencies[hash] = &cacheDependency{}
	}
	cache := c.dependencies[hash]
	cache.Set(sourceID, targetID)
	return
}

//-----------------------------------------------------------------------------
// ~ CACHE DEPENDENCY for a dimension in a workspace
//-----------------------------------------------------------------------------

type cacheDependency struct {
	lock         sync.RWMutex
	dependencies map[string][]string
}

func (c *cacheDependency) Get(id string) []string {
	c.lock.RLock()
	if dependencies, ok := c.dependencies[id]; ok {
		c.lock.RUnlock()
		return dependencies
	}
	c.lock.RUnlock()
	return nil
}

func (c *cacheDependency) Set(sourceID, targetID string) {
	c.lock.Lock()
	if c.dependencies == nil || len(c.dependencies) == 0 {
		c.dependencies = make(map[string][]string)
	}
	if _, ok := c.dependencies[targetID]; !ok {
		c.dependencies[targetID] = []string{}
	}
	c.dependencies[targetID] = append(c.dependencies[targetID], sourceID)
	c.lock.Unlock()
	return
}

cache/content/dependencies_test.go (vendored, new file): 22 lines
@@ -0,0 +1,22 @@
package content

import (
	"fmt"
	"testing"
)

func TestDependencies(t *testing.T) {
	deps := NewCacheDependencies()
	deps.Set("abc", "123", "de", "live")
	d := deps.Get("123", "de", "live")

	fmt.Println(d)

	if len(d) != 1 {
		t.Fatal("unexpected length")
	}

	if d[0] != "abc" {
		t.Fatal("unexpected dependency")
	}
}
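
For orientation: the index added above is keyed by the target node. Set(sourceID, targetID, dimension, workspace) records that sourceID depends on targetID, and Get(targetID, dimension, workspace) answers which cached nodes must be refreshed when that target changes. The same behaviour the test asserts, phrased as a Go example; a sketch only, assuming it lives next to the test above in package content with fmt imported:

func ExampleCacheDependencies() {
	deps := NewCacheDependencies()

	// node "abc" embeds content from node "123" (dimension "de", workspace "live"),
	// i.e. "abc" depends on "123"
	deps.Set("abc", "123", "de", "live")

	// when "123" changes, Get returns the dependants that need re-rendering
	fmt.Println(deps.Get("123", "de", "live"))
	// Output: [abc]
}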

cache/content/invalidator.go (vendored): 123 lines changed
@@ -1,10 +1,12 @@
package content

import (
	"context"
	"strings"
	"time"

	"github.com/foomo/neosproxy/cache/content/store"
	"github.com/sirupsen/logrus"
)

// RemoveAll will reset whole cache by dropping all items
@@ -12,43 +14,36 @@ func (c *Cache) RemoveAll() (err error) {
	return c.store.RemoveAll()
}

func (c *Cache) invalidator(id, dimension, workspace string) (item store.CacheItem, err error) {

	// timer
	start := time.Now()

	// load item
	cmsContent, errGetContent := c.loader.GetContent(id, dimension, workspace)
	if errGetContent != nil {
		err = errGetContent
		return
// Invalidate creates an invalidation job and adds it to the queue
// several workers will take care of job execution
func (c *Cache) Invalidate(id, dimension, workspace string) {
	c.log.WithFields(logrus.Fields{
		"id":        id,
		"dimension": dimension,
		"workspace": workspace,
	}).Info("content cache invalidation request added to queue")
	req := InvalidationRequest{
		CreatedAt:        time.Now(),
		ID:               id,
		Dimension:        dimension,
		Workspace:        workspace,
		ExecutionCounter: 0,
	}

	// prepare cache item
	item = store.NewCacheItem(id, dimension, workspace, cmsContent.HTML, c.validUntil(cmsContent.ValidUntil))

	// write item to cache
	errUpsert := c.store.Upsert(item)
	if errUpsert != nil {
		err = errUpsert
		return
	}

	// notify observer
	c.observer.Notify(InvalidationResponse{
		Item:     item,
		Duration: time.Since(start),
	})

	return
	c.invalidationChannel <- req
}

// Invalidate cache item
func (c *Cache) Invalidate(id, dimension, workspace string) (item store.CacheItem, err error) {
// Load will immediately load content from NEOS and persist it as a cache item
// no retry if it fails
func (c *Cache) Load(id, dimension, workspace string) (item store.CacheItem, err error) {

	groupName := strings.Join([]string{"invalidate", id, dimension, workspace}, "-")
	itemInterfaced, errThrottled, _ := c.invalidationRequestGroup.Do(groupName, func() (i interface{}, e error) {
		return c.invalidator(id, dimension, workspace)
		return c.invalidate(InvalidationRequest{
			CreatedAt: time.Now(),
			ID:        id,
			Dimension: dimension,
			Workspace: workspace,
		})
	})

	if errThrottled != nil {
@@ -60,6 +55,72 @@ func (c *Cache) Invalidate(id, dimension, workspace string) (item store.CacheIte
	return
}

// invalidate cache item, load fresh content from NEOS
func (c *Cache) invalidate(req InvalidationRequest) (item store.CacheItem, err error) {

	// timer
	start := time.Now()

	timeout := 10 * time.Second
	if req.ExecutionCounter >= 5 {
		timeout = 30 * time.Second
	}

	// context
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	// load item
	cmsContent, errGetContent := c.loader.GetContent(req.ID, req.Dimension, req.Workspace, ctx)
	if errGetContent != nil {
		err = errGetContent
		return
	}

	// update cache dependencies
	if len(cmsContent.CacheDependencies) > 0 {
		for _, targetID := range cmsContent.CacheDependencies {
			c.cacheDependencies.Set(req.ID, targetID, req.Dimension, req.Workspace)
		}
	}

	// invalidate dependencies
	dependencies := c.cacheDependencies.Get(req.ID, req.Dimension, req.Workspace)
	if len(dependencies) > 0 {
		for _, nodeID := range dependencies {
			c.Invalidate(nodeID, req.Dimension, req.Workspace)
		}
	}

	// prepare cache item
	item = store.NewCacheItem(req.ID, req.Dimension, req.Workspace, cmsContent.HTML, cmsContent.CacheDependencies, c.validUntil(cmsContent.ValidUntil))

	// write item to cache
	errUpsert := c.store.Upsert(item)
	if errUpsert != nil {
		err = errUpsert
		return
	}

	// logging
	c.log.WithFields(logrus.Fields{
		"id":        req.ID,
		"dimension": req.Dimension,
		"workspace": req.Workspace,
		"retry":     req.ExecutionCounter,
		"createdAt": req.CreatedAt,
		"waitTime":  time.Since(req.CreatedAt).Seconds(),
	}).WithDuration(start).Info("content cache invalidated")

	// notify observer
	c.observer.Notify(InvalidationResponse{
		Item:     item,
		Duration: time.Since(start),
	})

	return
}

func (c *Cache) validUntil(validUntil int64) time.Time {

	now := time.Now()
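
Worth noting the split introduced here: Invalidate now only enqueues an InvalidationRequest (the workers in worker.go load the content and retry failures), while Load keeps the synchronous behaviour behind the singleflight group and does not retry. A rough sketch of the two call sites, mirroring how proxy/api.go uses them later in this diff; the function names are illustrative:

package example

import (
	"github.com/foomo/neosproxy/cache/content"
	"github.com/foomo/neosproxy/cache/content/store"
)

// onCMSNotification: fire and forget. A worker loads the content in the
// background and failed jobs go through the retry queue.
func onCMSNotification(cache *content.Cache, nodeID, dimension, workspace string) {
	cache.Invalidate(nodeID, dimension, workspace)
}

// onCacheMiss: the caller needs the item right now, so load it synchronously.
// Concurrent misses for the same item are collapsed by singleflight and there
// is no retry if the CMS call fails.
func onCacheMiss(cache *content.Cache, nodeID, dimension, workspace string) (store.CacheItem, error) {
	return cache.Load(nodeID, dimension, workspace)
}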

cache/content/store/cache.go (vendored): 2 lines changed
@@ -10,6 +10,8 @@ type CacheStore interface {
	GetEtag(hash string) (etag string, e error)
	GetAllEtags(workspace string) (etags map[string]string)

	GetAllCacheDependencies() ([]CacheDependencies, error)

	Count() (int, error)

	Remove(hash string) (e error)

cache/content/store/fs/filesystem.go (vendored): 38 lines changed
@@ -96,6 +96,44 @@ func (f *fsCacheStore) Upsert(item store.CacheItem) (e error) {
	return nil
}

func (f *fsCacheStore) GetAllCacheDependencies() ([]store.CacheDependencies, error) {
	start := time.Now()
	l := f.l.WithField(logging.FieldFunction, "GetAllCacheDependencies")
	files, errReadDir := ioutil.ReadDir(f.CacheDir)
	if errReadDir != nil {
		l.WithError(errReadDir).Error("failed reading cache dir")
		return nil, errReadDir
	}

	dependencies := []store.CacheDependencies{}

	counter := 0
	for _, file := range files {
		if !file.IsDir() {
			filename := file.Name()
			index := strings.Index(filename, ".")
			if index >= 0 {
				filename = filename[0:index]
			}
			item, errGet := f.Get(filename)
			if errGet != nil {
				l.WithError(errGet).Warn("could not load cache item")
				continue
			}
			counter++
			dependencies = append(dependencies, store.CacheDependencies{
				ID:           item.ID,
				Dimension:    item.Dimension,
				Workspace:    item.Workspace,
				Dependencies: item.Dependencies,
			})
		}
	}
	l.WithField("len", counter).WithDuration(start).Debug("all cache dependencies loaded")

	return dependencies, nil
}

func (f *fsCacheStore) GetAllEtags(workspace string) (etags map[string]string) {
	f.lockEtags.RLock()
	etags = make(map[string]string)

cache/content/store/fs/filesystem_test.go (vendored, new file): 33 lines
@@ -0,0 +1,33 @@
package fs

import (
	"github.com/foomo/neosproxy/cache/content/store"
	"github.com/stretchr/testify/assert"
	"io/ioutil"
	"os"
	"testing"
)

func TestNewCacheStore(t *testing.T) {
	dir, err := ioutil.TempDir("", "")
	assert.NoError(t, err)
	defer os.RemoveAll(dir)

	hash := "derp"

	s := NewCacheStore(dir)
	item := store.CacheItem{
		Hash:      hash,
		ID:        "123",
		Dimension: "de",
		Workspace: "live",
		HTML:      "<html></html>",
	}
	err = s.Upsert(item)
	assert.NoError(t, err)

	cachedItem, err := s.Get(hash)
	assert.NoError(t, err)
	assert.Equal(t, item, cachedItem)

}

cache/content/store/fs/lock.go (vendored): 3 lines changed
@@ -86,18 +86,17 @@ func (f *fsCacheStore) hashKey(key string) string {

func (f *fsCacheStore) rwLock(hashKey string) *sync.RWMutex {
	f.lock.Lock()
	defer f.lock.Unlock()

	if f.rw == nil {
		f.rw = make(map[string]*sync.RWMutex)
	}

	if result, ok := f.rw[hashKey]; ok {
		f.lock.Unlock()
		return result
	}

	var result sync.RWMutex
	f.rw[hashKey] = &result
	f.lock.Unlock()
	return &result
}

@@ -20,7 +20,9 @@ func TestCache(t *testing.T) {
	assert.NoError(t, countErr)
	assert.Equal(t, 0, count)

	item := store.NewCacheItem(id, dimension, workspace, "<h1>Test</h1>", validUntil)
	dependencies := []string{"foo", "bar"}

	item := store.NewCacheItem(id, dimension, workspace, "<h1>Test</h1>", dependencies, validUntil)
	hash := item.Hash
	errUpsert := s.Upsert(item)
	assert.NoError(t, errUpsert)
@@ -37,6 +39,8 @@ func TestCache(t *testing.T) {
	assert.NoError(t, errGet)
	assert.NotNil(t, itemCached)

	assert.Equal(t, 2, len(itemCached.Dependencies))

	errRemoveAll := s.RemoveAll()
	assert.NoError(t, errRemoveAll)

cache/content/store/vo.go (vendored): 31 lines changed
@@ -20,21 +20,30 @@ type CacheItem struct {
	created    time.Time
	validUntil time.Time

	HTML string
	Etag string // hashed fingerprint of html content
	HTML         string
	Etag         string // hashed fingerprint of html content
	Dependencies []string
}

type CacheDependencies struct {
	ID           string
	Dimension    string
	Workspace    string
	Dependencies []string
}

// NewCacheItem will create a new cache item
func NewCacheItem(id string, dimension string, workspace string, html string, validUntil time.Time) CacheItem {
func NewCacheItem(id string, dimension string, workspace string, html string, dependencies []string, validUntil time.Time) CacheItem {
	return CacheItem{
		Hash:       GetHash(id, dimension, workspace),
		ID:         id,
		Dimension:  dimension,
		Workspace:  workspace,
		created:    time.Now(),
		validUntil: validUntil,
		HTML:       html,
		Etag:       generateFingerprint(html),
		Hash:         GetHash(id, dimension, workspace),
		ID:           id,
		Dimension:    dimension,
		Workspace:    workspace,
		created:      time.Now(),
		validUntil:   validUntil,
		HTML:         html,
		Etag:         generateFingerprint(html),
		Dependencies: dependencies,
	}
}

cache/content/vo.go (vendored): 25 lines changed
@@ -1,30 +1,41 @@
package content

import (
	"container/list"
	"time"

	"github.com/foomo/neosproxy/cache/content/store"
	"github.com/foomo/neosproxy/client/cms"
	"github.com/foomo/neosproxy/logging"
	"golang.org/x/sync/singleflight"
)

// Cache workspace items
type Cache struct {
	observer                 Observer
	loader                   cms.ContentLoader
	store                    store.CacheStore
	invalidationChannel      chan InvalidationRequest
	invalidationRequestGroup *singleflight.Group
	observer Observer
	loader   cms.ContentLoader
	store    store.CacheStore

	lifetime time.Duration // time until an item must be re-invalidated (< 0 === never)
	invalidationRequestGroup *singleflight.Group
	invalidationChannel      chan InvalidationRequest
	invalidationRetryChannel chan InvalidationRequest
	retryQueue               *list.List

	cacheDependencies *cacheDependencies
	lifetime          time.Duration // time until an item must be re-invalidated (< 0 === never)

	log logging.Entry
}

// InvalidationRequest request VO
type InvalidationRequest struct {
	CreatedAt time.Time
	ID        string
	Dimension string
	Workspace string

	CreatedAt        time.Time
	LastExecutedAt   time.Time
	ExecutionCounter int
}

// InvalidationResponse response VO

cache/content/worker.go (vendored, new file): 125 lines
@@ -0,0 +1,125 @@
package content

import (
	"container/list"
	"sync"
	"time"

	"github.com/foomo/neosproxy/client/cms"
	"github.com/sirupsen/logrus"
)

var retryWorkerSingleton sync.Once

// runRetryWorker will run a singleton of a retry worker
// it will slow down a job retry and add it to the invalidation queue after some criteria are met
// but it will ensure that a job will be executed until it succeeds or has failed more than 50 times in a row
func (c *Cache) runRetryWorker() {
	retryWorkerSingleton.Do(func() {
		go func() {
			tick := time.Tick(10 * time.Second)
			for {
				select {
				case <-tick:
					before := time.Now().Add(-5 * time.Minute)

					if c.retryQueue.Len() > 0 {
						var markedForDeletion bool
						var prev *list.Element
						last := c.retryQueue.Back() // last element

						// loop over the whole queue
						for e := c.retryQueue.Front(); e != nil; e = e.Next() {

							req := e.Value.(InvalidationRequest)

							prev = e.Prev()
							if prev == nil {
								prev = c.retryQueue.Front()
							}

							// remove previous element if marked for deletion
							// we cannot remove it immediately, otherwise we would saw off the branch we are sitting on
							if prev != nil && markedForDeletion {
								c.retryQueue.Remove(prev)
								markedForDeletion = false
							}

							// fewer than 5 executions, please try it again
							if req.ExecutionCounter < 5 {
								c.invalidationChannel <- req
								markedForDeletion = true
								continue
							}

							// older than 5 minutes => slow down, but retry
							if req.LastExecutedAt.Before(before) {
								c.invalidationChannel <- req
								markedForDeletion = true
								continue
							}

							// it's the end of the world ...
							if e == last {
								break
							}
						}
						// remove last item if marked for deletion
						if markedForDeletion {
							c.retryQueue.Remove(c.retryQueue.Back())
						}
					}

				case req := <-c.invalidationRetryChannel:
					// add a new job to the end of the line (retry queue)
					c.retryQueue.PushBack(req)
				}
			}
		}()
	})
}

// invalidationWorker will take care of invalidating the jobs which are in the queue
func (c *Cache) invalidationWorker(id int) {
	for job := range c.invalidationChannel {

		// invalidate
		_, err := c.invalidate(job)

		// well done
		if err == nil {
			continue
		}

		// logger
		l := c.log.WithFields(logrus.Fields{
			"id":         job.ID,
			"dimension":  job.Dimension,
			"workspace":  job.Workspace,
			"retry":      job.ExecutionCounter,
			"createdAt":  job.CreatedAt,
			"modifiedAt": job.LastExecutedAt,
			"waitTime":   time.Since(job.CreatedAt).Seconds(),
		}).WithError(err)

		// too many executions => cancel that job
		if job.ExecutionCounter >= 10 {
			// @todo: inform in slack channel?
			l.Warn("content cache invalidation failed to often - request will be ignored")
			continue
		}

		// unresolvable error => cancel that job
		if err == cms.ErrorNotFound || err == cms.ErrorBadRequest {
			// @todo: inform in slack channel?
			l.Warn("content cache invalidation failed - request will be ignored")
			continue
		}

		// retry
		job.LastExecutedAt = time.Now()
		job.ExecutionCounter++
		c.invalidationRetryChannel <- job
		l.Warn("content cache invalidation failed, retry job added to queue")
	}
}
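
The moving parts above connect as follows: Invalidate pushes a job onto invalidationChannel, one of the ten invalidationWorker goroutines runs invalidate, and failures are handed to invalidationRetryChannel, where the singleton retry worker parks them in the list and re-enqueues them on a ticker. A stripped-down, runnable sketch of that shape with simplified types, counts and timings (none of this is the repository's code):

package main

import (
	"container/list"
	"errors"
	"fmt"
	"time"
)

type job struct {
	id      string
	retries int
}

func main() {
	work := make(chan job)      // plays the role of invalidationChannel
	retry := make(chan job, 16) // plays the role of invalidationRetryChannel (buffered to keep the sketch simple)
	queue := list.New()         // plays the role of retryQueue
	done := make(chan struct{})

	// stand-in for Cache.invalidate: fail twice, then succeed
	process := func(j job) error {
		if j.retries < 2 {
			return errors.New("upstream not ready")
		}
		return nil
	}

	// worker: run jobs from the queue, hand failures to the retry channel
	go func() {
		for j := range work {
			if err := process(j); err != nil {
				j.retries++
				retry <- j
				continue
			}
			fmt.Println("done:", j.id, "after", j.retries, "retries")
			close(done)
		}
	}()

	// retry worker: park failed jobs and re-enqueue them on a ticker
	go func() {
		tick := time.Tick(10 * time.Millisecond)
		for {
			select {
			case j := <-retry:
				queue.PushBack(j)
			case <-tick:
				for e := queue.Front(); e != nil; {
					next := e.Next()
					work <- queue.Remove(e).(job)
					e = next
				}
			}
		}
	}()

	work <- job{id: "node-1"}
	<-done
}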

cache/invalidator.go (vendored): 4 lines changed
@@ -18,10 +18,10 @@ func (c *Cache) Invalidate() bool {

	select {
	case c.invalidationChannel <- time.Now():
		log.Info("invalidation request added to queue")
		log.Info("contentserver export invalidation request added to queue")
		return true
	default:
		log.Info("invalidation request ignored, queue seems to be full")
		log.Info("contentserver export invalidation request ignored, queue seems to be full")
		return false
	}

cache/scheduler.go (vendored): 4 lines changed
@@ -30,7 +30,7 @@ func (c *Cache) scheduleInvalidation() {
		if errInvalidation := c.cacheNeosContentServerExport(); errInvalidation != nil {

			if errInvalidation == ErrorNoNewExort {
				log.WithDuration(requestTime).Info("cache invalidation request processed - but export hash matches old one")
				log.WithDuration(requestTime).Info("contentserver export cache invalidation request processed - but export hash matches old one")
				continue
			}

@@ -40,7 +40,7 @@ func (c *Cache) scheduleInvalidation() {

		// @todo: notify observers

		log.WithDuration(requestTime).Info("cache invalidation request processed")
		log.WithDuration(requestTime).Info("contentserver export cache invalidation request processed")
	}
}

@@ -2,6 +2,7 @@ package cms

import (
	"bytes"
	"context"
	"encoding/json"
	"io/ioutil"
	"net/http"
@@ -27,7 +28,7 @@ func New(endpoint string) (*Client, error) {

	endpointURL, err := url.Parse(endpoint)
	if err != nil {
		return nil, errors.Wrap(err, "could not parse the url from 'endpoint'")
		return nil, errors.Wrap(err, "could not parse url from 'endpoint'")
	}

	transport, transportErr := utils.GetDefaultTransportFor("CMS")
@@ -36,7 +37,7 @@ func New(endpoint string) (*Client, error) {
	}

	httpClient := &http.Client{
		Timeout:   time.Second * 5,
		Timeout:   time.Second * 30,
		Transport: transport,
	}

@@ -76,11 +77,10 @@ func (c *Client) NewGetRequest(path string, body interface{}) (*http.Request, er
}

// Do will send an API request and return the API response (decodes and serializes)
func (c *Client) Do(req *http.Request, v interface{}) *ClientError {

	resp, err := c.client.Do(req)
func (c *Client) Do(req *http.Request, ctx context.Context, v interface{}) *ClientError {
	resp, err := c.client.Do(req.WithContext(ctx))
	if err != nil {
		return CreateClientError(errors.Wrap(err, "could not create a new request"), req, resp, nil)
		return CreateClientError(errors.Wrap(err, "could not execute request"), req, resp, nil)
	}
	defer resp.Body.Close()

@@ -1,11 +1,13 @@
package cms

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)
@@ -43,9 +45,57 @@ func TestClient(t *testing.T) {
	client, clientErr := New(ts.URL)
	assert.NoError(t, clientErr, "client must be initialised without errors")

	content, contentErr := client.CMS.GetContent(id, dimension, workspace)
	assert.NoError(t, contentErr)
	// context
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*500)
	defer cancel()

	content, contentErr := client.CMS.GetContent(id, dimension, workspace, ctx)
	assert.NoError(t, contentErr)
	assert.NotEmpty(t, content)
	assert.Equal(t, "<h1>Test</h1>", content.HTML)
}

func TestClientTimeout(t *testing.T) {

	id := "a839f683-dc58-47aa-8000-72d5b6fdeb85"
	dimension := "de"
	workspace := "stage"

	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {

		path := fmt.Sprintf(pathContent, dimension, id, workspace)

		if r.RequestURI != path {
			w.WriteHeader(http.StatusBadRequest)
			w.Write([]byte("error: invalid request uri: " + r.RequestURI))
			return
		}

		time.Sleep(time.Second * 1)

		data := &Content{
			HTML: "<h1>Test</h1>",
		}

		encoder := json.NewEncoder(w)
		err := encoder.Encode(data)
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte("error: " + err.Error()))
			return
		}
	}))
	defer ts.Close()

	client, clientErr := New(ts.URL)
	assert.NoError(t, clientErr, "client must be initialised without errors")

	// context
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*500)
	defer cancel()

	content, contentErr := client.CMS.GetContent(id, dimension, workspace, ctx)
	assert.Error(t, contentErr)
	assert.Empty(t, content)
	assert.Equal(t, ErrorResponseTimeout, contentErr)
}

@@ -1,12 +1,14 @@
package cms

import (
	"context"
	"fmt"
	"net/http"
	"sync"

	"github.com/foomo/neosproxy/cache/content/store"
	"github.com/foomo/neosproxy/logging"
	"github.com/sirupsen/logrus"
)

//-----------------------------------------------------------------------------
@@ -72,9 +74,14 @@ func NewCMSService(defaultClient *Client) Service {
//-----------------------------------------------------------------------------

// GetContent from NEOS CMS as html string
func (s *cmsService) GetContent(id string, dimension string, workspace string) (content Content, e error) {
func (s *cmsService) GetContent(id string, dimension string, workspace string, ctx context.Context) (content Content, e error) {

	l := s.logger.WithField(logging.FieldFunction, "GetContent")
	l := s.logger.WithFields(logrus.Fields{
		logging.FieldFunction: "GetContent",
		"id":        id,
		"dimension": dimension,
		"workspace": workspace,
	})
	var clientErr error

	path := fmt.Sprintf(pathContent, dimension, id, workspace)
@@ -86,7 +93,7 @@ func (s *cmsService) GetContent(id string, dimension string, workspace string) (
	}

	content = Content{}
	e, clientErr = s.convertClientErr(s.client.Do(req, &content))
	e, clientErr = s.convertClientErr(s.client.Do(req, ctx, &content), ctx)
	if clientErr != nil {
		l.WithError(clientErr).Error("unable to load html content from cms")
		return
@@ -99,13 +106,27 @@ func (s *cmsService) GetContent(id string, dimension string, workspace string) (
// ~ PRIVATE METHODS
//-----------------------------------------------------------------------------

func (s *cmsService) convertClientErr(clientErr *ClientError) (error, error) {
func (s *cmsService) convertClientErr(clientErr *ClientError, ctx context.Context) (error, error) {
	if clientErr == nil {
		return nil, nil
	}

	if clientErr.Response.StatusCode == http.StatusServiceUnavailable {
	if ctx != nil && ctx.Err() != nil && ctx.Err() == context.DeadlineExceeded {
		return ErrorResponseTimeout, clientErr.Error
	}
	if ctx != nil && ctx.Err() != nil && ctx.Err() == context.Canceled {
		return ErrorRequest, clientErr.Error
	}

	switch clientErr.Response.StatusCode {
	case http.StatusServiceUnavailable:
		return ErrorMaintenance, clientErr.Error
	case http.StatusNotFound:
		return ErrorNotFound, clientErr.Error
	case http.StatusBadRequest:
		return ErrorBadRequest, clientErr.Error
	case http.StatusInternalServerError:
		return ErrorInternalServerError, clientErr.Error
	}

	return ErrorResponse, clientErr.Error

@@ -7,10 +7,14 @@ import (
)

var (
	ErrorUnknown     = errors.New("unknown error occured")
	ErrorRequest     = errors.New("request error")
	ErrorResponse    = errors.New("response error")
	ErrorMaintenance = errors.New("cms in maintenance mode")
	ErrorUnknown             = errors.New("unknown error occured")
	ErrorRequest             = errors.New("request error")
	ErrorResponse            = errors.New("response error")
	ErrorResponseTimeout     = errors.New("cms response timeout")
	ErrorMaintenance         = errors.New("cms in maintenance mode")
	ErrorNotFound            = errors.New("resource not found")
	ErrorBadRequest          = errors.New("bad request")
	ErrorInternalServerError = errors.New("internal server error")
)

type ClientError struct {
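
With the context plumbed through, convertClientErr maps a hit deadline to ErrorResponseTimeout and the HTTP status codes to dedicated errors. A hedged sketch of how a caller might branch on them; the function, variable names and timeout are illustrative, and svc is assumed to be a cms.Service such as the one built by NewCMSService:

package example

import (
	"context"
	"time"

	"github.com/foomo/neosproxy/client/cms"
)

func handleContent(svc cms.Service, id, dimension, workspace string) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	content, err := svc.GetContent(id, dimension, workspace, ctx)
	switch err {
	case nil:
		_ = content.HTML // the payload also carries content.CacheDependencies now
	case cms.ErrorResponseTimeout:
		// the CMS did not answer within the deadline: transient, retry later
	case cms.ErrorNotFound, cms.ErrorBadRequest:
		// unresolvable for this node: the invalidation worker drops such jobs
	default:
		// ErrorMaintenance, ErrorInternalServerError, ...: also treated as retryable
	}
}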

@@ -1,12 +1,14 @@
package cms

import "context"

//-----------------------------------------------------------------------------
// ~ INTERFACES
//-----------------------------------------------------------------------------

// Service to load
type Service interface {
	GetContent(id string, dimension string, workspace string) (content Content, e error)
	GetContent(id string, dimension string, workspace string, ctx context.Context) (content Content, e error)

	// GetRepo(id string, dimension string) (html string, e error)
	// GetImage(id string, dimension string) (html string, e error)
@@ -15,5 +17,5 @@ type Service interface {

// ContentLoader interface
type ContentLoader interface {
	GetContent(id, dimension, workspace string) (content Content, e error)
	GetContent(id, dimension, workspace string, ctx context.Context) (content Content, e error)
}

@@ -16,6 +16,7 @@ type Client struct {

// Content returned on getContent
type Content struct {
	HTML       string `json:"html"`
	ValidUntil int64  `json:"validUntil"`
	HTML              string   `json:"html"`
	ValidUntil        int64    `json:"validUntil"`
	CacheDependencies []string `json:"cacheDependencies"`
}
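
The Content VO gains a cacheDependencies field, so the JSON exchanged with NEOS (and re-emitted by the proxy's getContent handler) now carries the dependent node IDs. A small sketch of the wire format implied by the struct tags; the values are made up:

package example

import (
	"encoding/json"
	"fmt"

	"github.com/foomo/neosproxy/client/cms"
)

func printPayload() {
	payload := cms.Content{
		HTML:              "<h1>Test</h1>",
		ValidUntil:        1577836800,
		CacheDependencies: []string{"a839f683-dc58-47aa-8000-72d5b6fdeb85"},
	}
	b, _ := json.Marshal(payload)
	fmt.Println(string(b))
	// {"html":"<h1>Test</h1>","validUntil":1577836800,"cacheDependencies":["a839f683-dc58-47aa-8000-72d5b6fdeb85"]}
}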

go.mod: 13 lines changed
@@ -3,17 +3,22 @@ module github.com/foomo/neosproxy
go 1.12

require (
	code.cloudfoundry.org/bytefmt v0.0.0-20190819182555-854d396b647c // indirect
	github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7
	github.com/cloudfoundry/bytefmt v0.0.0-20180906201452-2aa6f33b730c
	github.com/dgrijalva/jwt-go v3.2.0+incompatible
	github.com/codegangsta/negroni v1.0.0 // indirect
	github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
	github.com/foomo/shop v0.0.0-20190306093145-644b0b683ba1
	github.com/gorilla/context v1.1.1 // indirect
	github.com/gorilla/mux v1.6.2
	github.com/olebedev/emitter v0.0.0-20190110104742-e8d1457e6aee
	github.com/onsi/ginkgo v1.10.2 // indirect
	github.com/onsi/gomega v1.7.0 // indirect
	github.com/pkg/errors v0.0.0-20181023235946-059132a15dd0
	github.com/sirupsen/logrus v1.2.0
	github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 // indirect
	github.com/stretchr/testify v1.2.2
	golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc
	golang.org/x/sync v0.0.0-20170517211232-f52d1811a629
	golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33
	golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f
	gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce
	gopkg.in/yaml.v2 v2.2.2
)

go.sum: 57 lines changed
@@ -1,30 +1,77 @@
code.cloudfoundry.org/bytefmt v0.0.0-20190819182555-854d396b647c h1:2RuXx1+tSNWRjxhY0Bx52kjV2odJQ0a6MTbfTPhGAkg=
code.cloudfoundry.org/bytefmt v0.0.0-20190819182555-854d396b647c/go.mod h1:wN/zk7mhREp/oviagqUXY3EwuHhWyOvAdsn5Y4CzOrc=
github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7 h1:irR1cO6eek3n5uquIVaRAsQmZnlsfPuHNz31cXo4eyk=
github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM=
github.com/cloudfoundry/bytefmt v0.0.0-20180906201452-2aa6f33b730c h1:zE9z4EZZwJTjOi9Q9WYM/81BuwOKyjhHagiNUDhDdnI=
github.com/cloudfoundry/bytefmt v0.0.0-20180906201452-2aa6f33b730c/go.mod h1:4oo6ExqTPaBVBwSm814h6UO5Fels1kN2KvpNscaCcS0=
github.com/codegangsta/negroni v1.0.0 h1:+aYywywx4bnKXWvoWtRfJ91vC59NbEhEY03sZjQhbVY=
github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/foomo/shop v0.0.0-20190306093145-644b0b683ba1 h1:BSbbitW3EfDmDVW4BOwB/xWCCFheimcBZHZPO9rSaRM=
github.com/foomo/shop v0.0.0-20190306093145-644b0b683ba1/go.mod h1:+Y2nUdyvktarq9+F2B0tvk/BM7y+jwNJI1CW/KgM7as=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/olebedev/emitter v0.0.0-20190110104742-e8d1457e6aee h1:IquUs3fIykn10zWDIyddanhpTqBvAHMaPnFhQuyYw5U=
github.com/olebedev/emitter v0.0.0-20190110104742-e8d1457e6aee/go.mod h1:eT2/Pcsim3XBjbvldGiJBvvgiqZkAFyiOJJsDKXs/ts=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.2 h1:uqH7bpe+ERSiDa34FDOF7RikN6RzXgduUF8yarlZp94=
github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pkg/errors v0.0.0-20181023235946-059132a15dd0 h1:R+lX9nKwNd1n7UE5SQAyoorREvRn3aLF6ZndXBoIWqY=
github.com/pkg/errors v0.0.0-20181023235946-059132a15dd0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8=
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc h1:F5tKCVGp+MUAHhKp5MZtGqAlGX3+oCsiL1Q629FL90M=
golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/sync v0.0.0-20170517211232-f52d1811a629 h1:wqoYUzeICxRnvJCvfHTh0OY0VQ6xern7nYq+ccc19e4=
golang.org/x/sync v0.0.0-20170517211232-f52d1811a629/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20171031081856-95c657629925/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU=
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

proxy/api.go: 21 lines changed
@@ -9,12 +9,12 @@ import (

	"github.com/foomo/neosproxy/cache/content/store"

	"github.com/sirupsen/logrus"
	"github.com/cloudfoundry/bytefmt"
	"github.com/foomo/neosproxy/cache"
	"github.com/foomo/neosproxy/client/cms"
	"github.com/foomo/neosproxy/logging"
	"github.com/gorilla/mux"
	"github.com/sirupsen/logrus"
	"gopkg.in/yaml.v2"

	content_cache "github.com/foomo/neosproxy/cache/content"
@@ -75,10 +75,10 @@ func (p *Proxy) getContent(w http.ResponseWriter, r *http.Request) {

	// invalidate content
	startInvalidation := time.Now()
	itemInvalidated, errCacheInvalidate := p.contentCache.Invalidate(id, dimension, workspace)
	itemInvalidated, errCacheInvalidate := p.contentCache.Load(id, dimension, workspace)
	if errCacheInvalidate != nil {
		w.WriteHeader(http.StatusInternalServerError)
		log.WithError(errCacheInvalidate).Error("cache invalidation failed")
		log.WithError(errCacheInvalidate).Error("serving uncached item failed")
		return
	}
	log.WithDuration(startInvalidation).WithField("len", p.contentCache.Len()).Debug("invalidated content item")
@@ -88,7 +88,8 @@ func (p *Proxy) getContent(w http.ResponseWriter, r *http.Request) {

	// prepare response data
	data := &cms.Content{
		HTML: item.HTML,
		HTML:              item.HTML,
		CacheDependencies: item.Dependencies,
	}

	w.Header().Set("ETag", item.GetEtag())
@@ -103,7 +104,8 @@ func (p *Proxy) getContent(w http.ResponseWriter, r *http.Request) {
	}

	// done
	log.WithDuration(start).Debug("content served")
	// log.WithDuration(start).Debug("content served")
	p.servedStatsChan <- true
	return
}

@@ -144,13 +146,8 @@ func (p *Proxy) invalidateCache(w http.ResponseWriter, r *http.Request) {
	for _, workspace := range workspaces {

		for _, dimension := range p.config.Neos.Dimensions {
			// @todo use channels and workers!!!
			go func(id, dimension, workspace string) {
				_, errInvalidate := p.contentCache.Invalidate(id, dimension, workspace)
				if errInvalidate != nil {
					log.WithError(errInvalidate).WithField(logging.FieldDimension, dimension).Error("invalidate content cache failed")
				}
			}(id, dimension, workspace)
			// add invalidation request / job / task
			p.contentCache.Invalidate(id, dimension, workspace)
		}

	// load workspace worker

@@ -33,11 +33,26 @@ func New(cfg *config.Config, contentLoader cms.ContentLoader, contentStore store
			ProviderReports: map[string]model.Report{},
			ConsumerReports: map[string]model.Report{},
		},
		broker: notifier.NewBroker(),
		broker:             notifier.NewBroker(),
		servedStatsChan:    make(chan bool),
		servedStatsCounter: uint(0),
	}

	go func() {
		for {
			tick := time.Tick(time.Minute * 1)
			select {
			case <-tick:
				p.log.WithField("requests", p.servedStatsCounter).Info("requests served in the last 60 seconds")
				p.servedStatsCounter = uint(0)
			case <-p.servedStatsChan:
				p.servedStatsCounter++
			}
		}
	}()

	// content cache for html from neos
	p.contentCache = content_cache.New(cacheLifetime, contentStore, contentLoader, p.broker)
	p.contentCache = content_cache.New(cacheLifetime, contentStore, contentLoader, p.broker, p.log)

	// sitemap / site structure cache for content servers
	for _, workspace := range cfg.Neos.Workspaces {

@@ -27,6 +27,9 @@ type Proxy struct {

	status *model.Status
	broker *notifier.Broker

	servedStatsChan    chan bool
	servedStatsCounter uint // served requests per minute
}

type basicAuth struct {