Refactored to support a config file instead of flag parameters

This commit is contained in:
Frederik Löffert 2017-11-08 17:04:23 +01:00
parent daedd8a7b2
commit 41662f0d31
15 changed files with 693 additions and 370 deletions

1
.gitconfig Normal file
View File

@ -0,0 +1 @@
release-notes = "!sh -c 'git log $(git rev-parse --abbrev-ref HEAD) --no-merges --not master | format_release_notes.rb' -"

4
.gitignore vendored
View File

@ -1,3 +1,5 @@
.*
!.git*
/bin
/changelog.md
/bin
/vendor

View File

@ -1,26 +1,48 @@
SHELL := /bin/bash
#TAG=`git describe --exact-match --tags $(git log -n1 --pretty='%h') 2>/dev/null || git rev-parse --abbrev-ref HEAD`
TAG=`git describe --abbrev=0 --tag`
LAST_TAG := $(shell git describe --abbrev=0 --tags)
PASSWORD ?= $(shell stty -echo; read -p "new tag: " tag; stty echo; echo $$tag)
NEW_TAG ?= $(shell read -p "new tag: " tag; stty echo; echo $$tag)
GITHUB_API_KEY ?= $(shell read -p "please enter the github api key: " key; stty echo; echo $$key)
all: build test
clean:
rm -fv bin/neosp*
build: clean
go build -o bin/neosproxy neosproxy.go
go build -o bin/neosproxy cmd/neosproxy/main.go
build-arch: clean build-linux
GOOS=darwin GOARCH=amd64 go build -o bin/neosproxy-darwin-amd64 neosproxy.go
GOOS=darwin GOARCH=amd64 go build -o bin/neosproxy-darwin-amd64 cmd/neosproxy/main.go
build-linux: clean
GOOS=linux GOARCH=amd64 go build -o bin/neosproxy-linux-amd64 neosproxy.go
GOOS=linux GOARCH=amd64 go build -o bin/neosproxy-linux-amd64 cmd/neosproxy/main.go
build-docker: clean build-arch prepare-docker
docker build -t foomo/neosproxy:latest .
prepare-docker:
curl -o files/cacert.pem https://curl.haxx.se/ca/cacert.pem
release: clean build-linux prepare-docker
git add -f files/cacert.pem
git add -f bin/neosproxy-linux-amd64
git commit -m 'build release candidate - new binary added for docker autobuild'
@echo "-------------------------"
@echo "please make sure that version number has been bumped, then tag and push the git repo"
@echo "-------------------------"
#release: clean build-linux prepare-docker
# git add -f files/cacert.pem
# git add -f bin/neosproxy-linux-amd64
# git commit -m 'build release candidate - new binary added for docker autobuild'
# @echo "-------------------------"
# @echo "please make sure that version number has been bumped, then tag and push the git repo"
# @echo "-------------------------"
test:
go test ./...
go test ./...
run: clean
API_KEY="0000" go run cmd/neosproxy/main.go
latest-tag:
@echo "last tagged version: $(LAST_TAG)"
release-notes: latest-tag
@git log $(LAST_TAG)..HEAD --no-merges --format="%h: %s" > changelog.md
@echo create new tag: $(NEW_TAG)
@echo GitHub API key is: $(GITHUB_API_KEY)
# echo The password is $(PASSWORD)
#--release-notes=FILE

Binary file not shown.

196
cache.go Normal file
View File

@ -0,0 +1,196 @@
package neosproxy
import (
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"time"
)
const DefaultWorkspace = "live"
// invalidateCache handles DELETE requests that ask the proxy to refresh the
// cached contentserver export for a workspace. The request must carry the
// proxy API key in the Authorization header. The refresh itself happens
// asynchronously in the worker behind the workspace's invalidation channel;
// when a refresh is already pending, the request is answered with 429.
func (p *Proxy) invalidateCache(w http.ResponseWriter, r *http.Request) {
	if r.Method != "DELETE" {
		p.error(w, r, http.StatusMethodNotAllowed, "cached contentserver export: invalidate cache failed - method not allowed")
		return
	}
	if r.Header.Get("Authorization") != p.APIKey {
		p.error(w, r, http.StatusUnauthorized, "cached contentserver export: invalidate cache failed - authorization required")
		return
	}
	log.Println(fmt.Sprintf("%s\t%s", r.URL, "cache invalidation request"))

	workspace := p.getRequestedWorkspace(r.URL)
	invalidations := p.addInvalidationChannel(workspace)

	// Non-blocking send: the channel is buffered with capacity 1, so a
	// pending request makes the default branch fire and this one is dropped.
	select {
	case invalidations <- time.Now():
		w.WriteHeader(http.StatusOK)
		log.Println(fmt.Sprintf("added cache invalidation request to queue for workspace %s", workspace))
	default:
		w.WriteHeader(http.StatusTooManyRequests)
		log.Println(fmt.Sprintf("ignored cache invalidation request due to pending invalidation requests for workspace %s", workspace))
	}
}
// serveCachedNeosContentServerExport answers a contentserver export request
// from the local cache, first fetching the export from Neos when no cached
// file exists yet for the requested workspace.
func (p *Proxy) serveCachedNeosContentServerExport(w http.ResponseWriter, r *http.Request) {
	workspace := p.getRequestedWorkspace(r.URL)
	log.Println(fmt.Sprintf("%s\t%s", r.URL, "serve cached neos content server export request"))

	if _, statErr := os.Stat(p.getCacheFilename(workspace)); os.IsNotExist(statErr) {
		log.Println(fmt.Sprintf("cached contentserver export: not yet cached for workspace %s", workspace))
		if cacheErr := p.cacheNeosContentServerExport(workspace); cacheErr != nil {
			log.Println(cacheErr.Error())
			p.error(w, r, http.StatusInternalServerError, "cached contentserver export: unable to load export from neos")
			return
		}
	}
	p.streamCachedNeosContentServerExport(w, r)
}
// getRequestedWorkspace extracts the workspace name from the request's
// query string, falling back to DefaultWorkspace when none is given.
func (p *Proxy) getRequestedWorkspace(url *url.URL) string {
	if workspace := url.Query().Get("workspace"); workspace != "" {
		return workspace
	}
	return DefaultWorkspace
}
// getCacheFilename returns the path of the cache file that stores the
// contentserver export for the given workspace.
func (p *Proxy) getCacheFilename(workspace string) string {
	return p.Config.Cache.Directory + "/contentserver-export-" + workspace + ".json"
}
// streamCachedNeosContentServerExport writes the cached contentserver export
// for the requested workspace to the response, with headers that disable
// client-side caching. Responds 404 when no cache file exists.
func (p *Proxy) streamCachedNeosContentServerExport(w http.ResponseWriter, r *http.Request) {
	log.Println("cached contentserver export: stream file start")
	workspace := p.getRequestedWorkspace(r.URL)
	cacheFilename := p.getCacheFilename(workspace)

	// A single Stat both detects a missing file and yields the mod time for
	// the Last-Modified header (the original code stat'ed the file twice).
	fileInfo, err := os.Stat(cacheFilename)
	if os.IsNotExist(err) {
		p.error(w, r, http.StatusNotFound, "cached contentserver export: file not found")
		return
	}
	if err != nil {
		p.error(w, r, http.StatusInternalServerError, "cached contentserver export: read file info failed")
		return
	}

	// NOTE(review): the whole export is buffered in memory; consider
	// streaming with io.Copy if exports can get very large.
	content, err := ioutil.ReadFile(cacheFilename)
	if err != nil {
		p.error(w, r, http.StatusInternalServerError, "cached contentserver export: read file failed")
		return
	}

	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.Header().Set("Last-Modified", fileInfo.ModTime().Format(http.TimeFormat))
	w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
	w.Header().Set("Pragma", "no-cache")
	w.Header().Set("Expires", "0")
	w.WriteHeader(http.StatusOK)
	w.Write(content)
	log.Println("cached contentserver export: stream file done")
}
// cacheNeosContentServerExport downloads a fresh contentserver export for
// the given workspace into a temporary ".download" file, renames it over the
// cache file, and triggers the "updated" webhooks whenever the export's
// content actually changed (detected via md5 comparison).
func (p *Proxy) cacheNeosContentServerExport(workspace string) error {
	log.Println(fmt.Sprintf("caching new neos contentserver export for workspace %s", workspace))

	target := p.getCacheFilename(workspace)
	download := target + ".download"

	if err := p.downloadNeosContentServerExport(download, workspace); err != nil {
		return err
	}

	// Hash old and new content to detect whether anything changed.
	oldHash, err := p.getMD5Hash(target)
	if err != nil {
		return err
	}
	newHash, err := p.getMD5Hash(download)
	if err != nil {
		return err
	}
	log.Println("md5 new: '" + newHash + "', md5 old: '" + oldHash + "'")

	if err := p.Rename(download, target); err != nil {
		return err
	}

	// notify webhooks only when the export content really changed
	if oldHash == newHash {
		log.Println("skipping 'updated' notifications since nothing changed")
	} else {
		p.NotifyOnUpdate(workspace)
	}

	log.Println(fmt.Sprintf("cached new contentserver export from neos for workspace %s", workspace))
	return nil
}
// downloadNeosContentServerExport fetches the contentserver export for the
// given workspace from Neos and writes it to filename.
//
// Fixes over the previous version: the HTTP request is issued before the
// target file is created, so a failed request no longer leaves an empty file
// behind; the response body is always closed (it used to leak on non-200
// responses because the defer came after the early return); and the body is
// streamed to disk with io.Copy instead of being buffered entirely in memory.
func (p *Proxy) downloadNeosContentServerExport(filename string, workspace string) error {
	response, err := http.Get(p.Config.Neos.URL.String() + "/contentserver/export?workspace=" + workspace)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	if response.StatusCode != http.StatusOK {
		return errors.New(fmt.Sprintln("unexpected status code from site contentserver export", response.StatusCode, response.Status))
	}

	file, err := os.Create(filename)
	if err != nil {
		return err
	}
	defer file.Close()

	if _, err := io.Copy(file, response.Body); err != nil {
		return err
	}
	return nil
}
// getMD5Hash returns the hex-encoded md5 hash of the named file's content.
// A missing file is not an error: it yields an empty hash, so callers can
// treat "no cache yet" as "content differs". (Comment previously referred
// to the function by the wrong name "getM5Hash".)
func (p *Proxy) getMD5Hash(filename string) (hash string, err error) {
	if _, err := os.Stat(filename); os.IsNotExist(err) {
		return "", nil
	}
	file, err := os.Open(filename)
	if err != nil {
		return hash, err
	}
	defer file.Close()
	hasher := md5.New()
	// stream the file through the hasher instead of reading it into memory
	if _, err := io.Copy(hasher, file); err != nil {
		return hash, err
	}
	return hex.EncodeToString(hasher.Sum(nil)), nil
}

64
cmd/neosproxy/main.go Normal file
View File

@ -0,0 +1,64 @@
package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"github.com/foomo/neosproxy"
)

// main wires up the neosproxy from the config file (see config-example.toml),
// optionally starts the periodic auto-update goroutine, and runs the proxy.
func main() {
	// load config; flag parameters were replaced by the config file, so the
	// leftover "flag" import and flag.Parse() call have been removed
	config, configErr := neosproxy.GetConfig()
	if configErr != nil {
		// log.Fatalln exits the process itself; the os.Exit(255) that used
		// to follow it was unreachable
		log.Fatalln("failed to read config:", configErr)
	}

	// the API key guarding cache invalidation requests still comes from the
	// environment, not from the config file
	apiKey := os.Getenv("API_KEY")
	if apiKey == "" {
		log.Fatal("missing env variable API_KEY")
	}

	// prepare proxy
	p := &neosproxy.Proxy{
		Config:                    config,
		APIKey:                    apiKey,
		CacheInvalidationChannels: make(map[string](chan time.Time)),
	}

	// auto update: periodically enqueue a cache invalidation for every
	// workspace that has an invalidation channel
	if config.Cache.AutoUpdateDuration != "" {
		autoUpdate, err := time.ParseDuration(config.Cache.AutoUpdateDuration)
		if err != nil {
			log.Fatal("invalid auto-update duration value: " + err.Error())
		}
		go func() {
			log.Println(config.Cache.AutoUpdateDuration, "auto update enabled")
			for {
				time.Sleep(autoUpdate)
				log.Println(fmt.Sprintf("auto update: updating %d cache", len(p.CacheInvalidationChannels)))
				// NOTE(review): this iterates CacheInvalidationChannels while
				// request handlers may add entries — verify map access is
				// synchronized or guard it with a mutex
				for workspace, channel := range p.CacheInvalidationChannels {
					select {
					case channel <- time.Now():
						log.Println(fmt.Sprintf("auto update: added cache invalidation request to queue for '%s' workspace", workspace))
					default:
						log.Println(fmt.Sprintf("auto update: ignored cache invalidation request due to pending invalidation requests for '%s' workspace", workspace))
					}
				}
			}
		}()
	}

	// run proxy (blocks until the server stops)
	log.Println("start proxy on", config.Proxy.Address, "for neos", config.Neos.URL.String(), "with cache in directory:", p.Config.Cache.Directory)
	if err := p.Run(); err != nil {
		log.Fatal(err)
	}
}

25
config-example.toml Normal file
View File

@ -0,0 +1,25 @@
[proxy]
address = "1.2.3.4:80"
[neos]
host = "http://cms-example-hostname/"
[cache]
# duration value on which to automatically update the proxy
autoUpdateDuration = "15m"
# cache directory
directory = "/var/cache"
[callbacks]
[[callbacks.notifyOnUpdate]]
workspace = "stage"
url = "https://host.example.com/whatever/to-call"
verify-tls = true
key = "1234"
[[callbacks.notifyOnUpdate]]
workspace = "live"
url = "https://host.example.com/whatever/to-call"
verify-tls = false
key = "765432"

122
config.go Normal file
View File

@ -0,0 +1,122 @@
package neosproxy
import (
"log"
"net/url"
"os"
"github.com/spf13/viper"
)
// Hook describes a single webhook callback configured under a
// [[callbacks.notifyOnUpdate]] table in the config file.
type Hook struct {
	// Workspace this hook listens on (defaults to DefaultWorkspace).
	Workspace string
	// URL the notification is POSTed to.
	URL *url.URL
	// VerifyTLS controls TLS certificate verification for the callback.
	VerifyTLS bool
	// APIKey is sent along as the "key" request header.
	APIKey string
}
// Config is the typed view of the neosproxy configuration file
// (see config-example.toml for the expected layout).
type Config struct {
	// Proxy holds the HTTP server settings.
	Proxy struct {
		// Address the proxy listens on, e.g. "0.0.0.0:80".
		Address string
	}
	// Neos holds the upstream CMS settings.
	Neos struct {
		// URL of the Neos backend, parsed from the "neos.host" key.
		URL *url.URL
	}
	// Cache holds the contentserver export cache settings.
	Cache struct {
		// AutoUpdateDuration is a time.ParseDuration string; "" disables
		// the auto-update goroutine.
		AutoUpdateDuration string
		// Directory where export files are cached.
		Directory string
	}
	// Callbacks holds the webhook configuration.
	Callbacks struct {
		// NotifyOnUpdateHooks are notified when an export changed.
		NotifyOnUpdateHooks []*Hook
	}
}
// setDefaultConfig registers default values for all known settings and tells
// viper where to look for the config file.
func setDefaultConfig() {
	// default flags
	viper.SetDefault("proxy.address", "0.0.0.0:80")
	viper.SetDefault("neos.host", "http://neos/")

	// update flags
	viper.SetDefault("cache.autoUpdateDuration", "")
	// BUG FIX: the key was misspelled "cache.directoy", so the os.TempDir()
	// default never applied to the "cache.directory" key that GetConfig
	// actually reads.
	viper.SetDefault("cache.directory", os.TempDir())

	// config dir setup
	viper.SetConfigName("config")          // name of config file (without extension)
	viper.AddConfigPath("/etc/neosproxy/") // path to look for the config file in
	viper.AddConfigPath(".")               // optionally look for config in the working directory
}
// readConfig installs the default settings and then loads the configuration
// file via viper, returning viper's read error (e.g. file not found) as-is.
func readConfig() error {
	setDefaultConfig()
	return viper.ReadInConfig()
}
// GetConfig reads the configuration file and assembles the typed Config
// structure including all notify-on-update hooks.
//
// Errors from reading the config file or parsing URLs are returned to the
// caller; malformed hook entries are logged and skipped.
func GetConfig() (config *Config, err error) {
	// BUG FIX: the read error used to be discarded, making GetConfig return
	// (nil, nil) on a broken or missing config file.
	if err = readConfig(); err != nil {
		return nil, err
	}

	// parse neos host from config
	neosURL, err := url.Parse(viper.GetString("neos.host"))
	if err != nil {
		return nil, err
	}

	// prepare config value object
	config = &Config{}
	config.Proxy.Address = viper.GetString("proxy.address")
	config.Neos.URL = neosURL
	config.Cache.Directory = viper.GetString("cache.directory")
	config.Cache.AutoUpdateDuration = viper.GetString("cache.autoUpdateDuration")
	config.Callbacks.NotifyOnUpdateHooks = make([]*Hook, 0)

	// BUG FIX: the type assertions below used to be unchecked and would
	// panic on a missing callbacks section or malformed hook values.
	callbacks, ok := viper.Get("callbacks").(map[string]interface{})
	if !ok {
		// no callbacks section configured
		return config, nil
	}
	for key, callback := range callbacks {
		if key != "notifyonupdate" {
			continue
		}
		hooks, ok := callback.([]interface{})
		if !ok {
			log.Println("unable to parse notify on update hooks")
			continue
		}
		for _, rawHook := range hooks {
			hookMap, ok := rawHook.(map[string]interface{})
			if !ok {
				log.Println("unable to parse notify on update hook")
				continue
			}
			hook, hookErr := parseHook(hookMap)
			if hookErr != nil {
				return nil, hookErr
			}
			config.Callbacks.NotifyOnUpdateHooks = append(config.Callbacks.NotifyOnUpdateHooks, hook)
		}
	}
	return config, nil
}

// parseHook converts one raw [[callbacks.notifyOnUpdate]] table into a Hook,
// applying defaults for the optional keys workspace, verify-tls and key.
func parseHook(hookMap map[string]interface{}) (*Hook, error) {
	workspace := DefaultWorkspace
	if v, ok := hookMap["workspace"].(string); ok {
		workspace = v
	}
	verifyTLS := true
	if v, ok := hookMap["verify-tls"].(bool); ok {
		verifyTLS = v
	}
	apiKey := ""
	if v, ok := hookMap["key"].(string); ok {
		apiKey = v
	}
	// url is the only mandatory key
	rawURL, ok := hookMap["url"].(string)
	if !ok {
		return nil, url.InvalidHostError("notify on update hook is missing a valid url")
	}
	hookURL, urlErr := url.Parse(rawURL)
	if urlErr != nil {
		return nil, urlErr
	}
	return &Hook{
		Workspace: workspace,
		APIKey:    apiKey,
		VerifyTLS: verifyTLS,
		URL:       hookURL,
	}, nil
}

61
config_test.go Normal file
View File

@ -0,0 +1,61 @@
package neosproxy
import (
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
)
// TestReadConfiguration verifies that the bundled config-example.toml can be
// read successfully.
func TestReadConfiguration(t *testing.T) {
	viper.SetConfigName("config-example")
	err := readConfig()
	if err != nil {
		t.Fatal(err)
	}
}
// TestReadNonExistingConfiguration verifies that a missing config file is
// reported as an error rather than silently ignored.
func TestReadNonExistingConfiguration(t *testing.T) {
	viper.SetConfigName("config-404")
	err := readConfig()
	if err == nil {
		t.Fatal("did not fail on reading a non existing config file")
	}
}
// loadConfig loads config-example.toml via GetConfig and fails the calling
// test on any error.
func loadConfig(t *testing.T) *Config {
	viper.SetConfigName("config-example")
	conf, err := GetConfig()
	if err != nil {
		t.Fatal(err)
	}
	return conf
}
// TestProxyAddress checks that proxy.address is mapped into Config.
func TestProxyAddress(t *testing.T) {
	c := loadConfig(t)
	assert.Equal(t, "1.2.3.4:80", c.Proxy.Address)
}
// TestNeosHost checks that neos.host is parsed into Config.Neos.URL.
func TestNeosHost(t *testing.T) {
	c := loadConfig(t)
	assert.Equal(t, "http://cms-example-hostname/", c.Neos.URL.String())
}
// TestCacheAutoUpdate checks that cache.autoUpdateDuration is mapped.
func TestCacheAutoUpdate(t *testing.T) {
	c := loadConfig(t)
	assert.Equal(t, "15m", c.Cache.AutoUpdateDuration)
}
// TestHooks checks that both [[callbacks.notifyOnUpdate]] tables from the
// example config are parsed, and that the first hook's fields round-trip.
func TestHooks(t *testing.T) {
	c := loadConfig(t)
	assert.Equal(t, 2, len(c.Callbacks.NotifyOnUpdateHooks))
	callback := c.Callbacks.NotifyOnUpdateHooks[0]
	assert.Equal(t, "host.example.com", callback.URL.Host)
	assert.Equal(t, "/whatever/to-call", callback.URL.Path)
	assert.Equal(t, "https", callback.URL.Scheme)
	assert.Equal(t, "1234", callback.APIKey)
	assert.Equal(t, true, callback.VerifyTLS)
}

7
format_release_notes.rb Normal file
View File

@ -0,0 +1,7 @@
#!/usr/bin/env ruby
# Reformat `git log` output (read from stdin or file arguments) into
# pipe-delimited rows: |<full sha>|<first subject line>|.
log = ARGF.read
# Match each commit block (sha, author, date, indented message body) and keep
# only the 40-char sha and the first message line.
formatted = log.gsub(/commit ([\da-f]{40})\nAuthor: .*\nDate: +.*\n\n {4}(.*)\n(?:\ {4}.*\n)*/, '|\1|\2|')
puts formatted

60
glide.lock generated Normal file
View File

@ -0,0 +1,60 @@
hash: 9d061b7a21a0ca2a8ea9bb7bfa706e2c94f1a78600a0c4b9c187d87bfeb6652e
updated: 2017-11-08T17:02:53.07188337+01:00
imports:
- name: github.com/fsnotify/fsnotify
version: 629574ca2a5df945712d3079857300b5e4da0236
- name: github.com/hashicorp/hcl
version: 80e628d796135357b3d2e33a985c666b9f35eee1
subpackages:
- hcl/ast
- hcl/parser
- hcl/scanner
- hcl/strconv
- hcl/token
- json/parser
- json/scanner
- json/token
- name: github.com/magiconair/properties
version: 9c47895dc1ce54302908ab8a43385d1f5df2c11c
- name: github.com/mitchellh/mapstructure
version: f3009df150dadf309fdee4a54ed65c124afad715
- name: github.com/pelletier/go-buffruneio
version: df1e16fde7fc330a0ca68167c23bf7ed6ac31d6d
- name: github.com/pelletier/go-toml
version: 439fbba1f887c286024370cb4f281ba815c4c7d7
- name: github.com/spf13/afero
version: 90dd71edc4d0a8b3511dc12ea15d617d03be09e0
subpackages:
- mem
- name: github.com/spf13/cast
version: 56a7ecbeb18dde53c6db4bd96b541fd9741b8d44
- name: github.com/spf13/jwalterweatherman
version: 33c24e77fb80341fe7130ee7c594256ff08ccc46
- name: github.com/spf13/pflag
version: 25f8b5b07aece3207895bf19f7ab517eb3b22a40
- name: github.com/spf13/viper
version: 5ed0fc31f7f453625df314d8e66b9791e8d13003
- name: golang.org/x/sys
version: b699b7032584f0953262cb2788a0ca19bb494703
subpackages:
- unix
- name: golang.org/x/text
version: a49bea13b776691cb1b49873e5d8df96ec74831a
subpackages:
- transform
- unicode/norm
- name: gopkg.in/yaml.v2
version: a5b47d31c556af34a302ce5d659e6fea44d90de0
testImports:
- name: github.com/davecgh/go-spew
version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9
subpackages:
- spew
- name: github.com/pmezard/go-difflib
version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
subpackages:
- difflib
- name: github.com/stretchr/testify
version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0
subpackages:
- assert

3
glide.yaml Normal file
View File

@ -0,0 +1,3 @@
package: github.com/foomo/neosproxy
import:
- package: github.com/spf13/viper

View File

@ -1,358 +0,0 @@
package main
import (
"bytes"
"crypto/md5"
"crypto/tls"
"encoding/hex"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
"strings"
"time"
)
const DefaultWorkspace = "live"
// error ...
func (p *Proxy) error(w http.ResponseWriter, r *http.Request, code int, msg string) {
log.Println(fmt.Sprintf("%d\t%s\t%s", code, r.URL, msg))
w.WriteHeader(code)
}
// invalidateCache ...
func (p *Proxy) invalidateCache(w http.ResponseWriter, r *http.Request) {
if r.Method != "DELETE" {
p.error(w, r, http.StatusMethodNotAllowed, "cached contentserver export: invalidate cache failed - method not allowed")
return
}
if r.Header.Get("Authorization") != p.APIKey {
p.error(w, r, http.StatusUnauthorized, "cached contentserver export: invalidate cache failed - authorization required")
return
}
log.Println(fmt.Sprintf("%s\t%s", r.URL, "cache invalidation request"))
workspace := p.getRequestedWorkspace(r.URL)
channel := p.addInvalidationChannel(workspace)
select {
case channel <- time.Now():
w.WriteHeader(http.StatusOK)
log.Println(fmt.Sprintf("added cache invalidation request to queue for workspace %s", workspace))
default:
w.WriteHeader(http.StatusTooManyRequests)
log.Println(fmt.Sprintf("ignored cache invalidation request due to pending invalidation requests for workspace %s", workspace))
}
}
// serveCachedNeosContentServerExport ...
func (p *Proxy) serveCachedNeosContentServerExport(w http.ResponseWriter, r *http.Request) {
workspace := p.getRequestedWorkspace(r.URL)
cacheFilename := p.getCacheFilename(workspace)
log.Println(fmt.Sprintf("%s\t%s", r.URL, "serve cached neos content server export request"))
if _, err := os.Stat(cacheFilename); os.IsNotExist(err) {
log.Println(fmt.Sprintf("cached contentserver export: not yet cached for workspace %s", workspace))
if err = p.cacheNeosContentServerExport(workspace); err != nil {
log.Println(err.Error())
p.error(w, r, http.StatusInternalServerError, "cached contentserver export: unable to load export from neos")
return
}
}
p.streamCachedNeosContentServerExport(w, r)
}
// getRequestedWorkspace returns the requested workspace or default
func (p *Proxy) getRequestedWorkspace(url *url.URL) string {
workspace := url.Query().Get("workspace")
if workspace == "" {
workspace = DefaultWorkspace
}
return workspace
}
// getCacheFilename returns the cache filename for the given workspace
func (p *Proxy) getCacheFilename(workspace string) string {
return fmt.Sprintf("%s/contentserver-export-%s.json", p.CacheDir, workspace)
}
// streamCachedNeosContentServerExport ...
func (p *Proxy) streamCachedNeosContentServerExport(w http.ResponseWriter, r *http.Request) {
log.Println("cached contentserver export: stream file start")
workspace := p.getRequestedWorkspace(r.URL)
cacheFilename := p.getCacheFilename(workspace)
if _, err := os.Stat(cacheFilename); os.IsNotExist(err) {
p.error(w, r, http.StatusNotFound, "cached contentserver export: file not found")
return
}
fileInfo, err := os.Stat(cacheFilename)
if err != nil {
p.error(w, r, http.StatusInternalServerError, "cached contentserver export: read file info failed")
return
}
bytes, err := ioutil.ReadFile(cacheFilename)
if err != nil {
p.error(w, r, http.StatusInternalServerError, "cached contentserver export: read file failed")
return
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.Header().Set("Last-Modified", fileInfo.ModTime().Format(http.TimeFormat))
w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
w.Header().Set("Pragma", "no-cache")
w.Header().Set("Expires", "0")
w.WriteHeader(http.StatusOK)
w.Write(bytes)
log.Println("cached contentserver export: stream file done")
return
}
// cacheNeosContentServerExport ...
func (p *Proxy) cacheNeosContentServerExport(workspace string) error {
log.Println(fmt.Sprintf("caching new neos contentserver export for workspace %s", workspace))
cacheFilename := p.getCacheFilename(workspace)
downloadFilename := cacheFilename + ".download"
if err := p.downloadNeosContentServerExport(downloadFilename, workspace); err != nil {
return err
}
cacheFileHash, err := p.getM5Hash(cacheFilename)
if err != nil {
return err
}
downloadFileHash, err := p.getM5Hash(downloadFilename)
if err != nil {
return err
}
log.Println("md5 new: '" + downloadFileHash + "', md5 old: '" + cacheFileHash + "'")
if err := os.Rename(downloadFilename, cacheFilename); err != nil {
return err
}
// Notify webhooks
if len(p.CallbackUpdated) > 0 && cacheFileHash != downloadFileHash {
p.notify("updated", p.CallbackUpdated, workspace)
} else {
log.Println("skipping 'updated' notifications since nothing changed")
}
log.Println(fmt.Sprintf("cached new contentserver export from neos for workspace %s", workspace))
return nil
}
// downloadNeosContentServerExport ...
func (p *Proxy) downloadNeosContentServerExport(filename string, workspace string) error {
file, err := os.Create(filename)
if err != nil {
return err
}
defer file.Close()
response, err := http.Get(p.Endpoint.String() + "/contentserver/export?workspace=" + workspace)
if err != nil {
return err
}
if response.StatusCode != http.StatusOK {
return errors.New(fmt.Sprintln("unexpected status code from site contentserver export", response.StatusCode, response.Status))
}
defer response.Body.Close()
bodyBytes, err := ioutil.ReadAll(response.Body)
if err != nil {
return err
}
if _, err = file.Write(bodyBytes); err != nil {
return err
}
return nil
}
// getM5Hash returns a file's md5 hash
func (p *Proxy) getM5Hash(filename string) (hash string, err error) {
if _, err := os.Stat(filename); os.IsNotExist(err) {
return "", nil
}
file, err := os.Open(filename)
if err != nil {
return hash, err
}
defer file.Close()
hasher := md5.New()
if _, err := io.Copy(hasher, file); err != nil {
return hash, err
}
return hex.EncodeToString(hasher.Sum(nil)), nil
}
// notify notifies callbacks for the given event
func (p *Proxy) notify(event string, urls []string, workspace string) {
log.Println(fmt.Sprintf("Notifying %d for '%s' event on workspace %s", len(urls), event, workspace))
data, _ := json.Marshal(map[string]string{
"type": event,
"workspace": workspace,
})
httpClient := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: !p.CallbackTLSVerify,
},
},
}
for _, value := range urls {
go func() {
// Create request
req, err := http.NewRequest(http.MethodPost, value, bytes.NewBuffer(data))
if err != nil {
log.Println(fmt.Sprintf("Failed to create callback request! Got error: %s", err.Error()))
return
}
// Add header
req.Header.Set("Content-Type", "application/json")
req.Header.Add("key", p.CallbackKey)
// Send request
resp, err := httpClient.Do(req)
if err != nil {
log.Println(fmt.Sprintf("Failed to notify a webhook! Got error: %s", err.Error()))
} else {
log.Println(fmt.Sprintf("Notified webhook with response code: %d", resp.StatusCode))
}
}()
}
}
type Proxy struct {
APIKey string
Address string
Endpoint *url.URL
CacheDir string
CacheInvalidationChannels map[string](chan time.Time)
CallbackUpdated []string
CallbackKey string
CallbackTLSVerify bool
}
func (p *Proxy) run() error {
p.addInvalidationChannel(DefaultWorkspace)
proxyHandler := httputil.NewSingleHostReverseProxy(p.Endpoint)
mux := http.NewServeMux()
mux.Handle("/contentserver/export/", proxyHandler)
mux.HandleFunc("/contentserverproxy/cache", p.invalidateCache)
mux.HandleFunc("/contentserver/export", p.serveCachedNeosContentServerExport)
mux.Handle("/", proxyHandler)
return http.ListenAndServe(p.Address, mux)
}
// addInvalidationChannel adds a new invalidation channel
func (p *Proxy) addInvalidationChannel(workspace string) chan time.Time {
if _, ok := p.CacheInvalidationChannels[workspace]; !ok {
channel := make(chan time.Time, 1)
p.CacheInvalidationChannels[workspace] = channel
go func(workspace string, channel chan time.Time) {
for {
sleepTime := 5 * time.Second
time.Sleep(sleepTime)
requestTime := <-channel
if err := p.cacheNeosContentServerExport(workspace); err != nil {
log.Println(err.Error())
} else {
log.Println(fmt.Sprintf(
"processed cache invalidation request, which has been added at %s in %.2fs for workspace %s",
requestTime.Format(time.RFC3339),
time.Since(requestTime.Add(sleepTime)).Seconds(),
workspace,
))
}
}
}(workspace, channel)
}
return p.CacheInvalidationChannels[workspace]
}
func main() {
apiKey := os.Getenv("API_KEY")
if apiKey == "" {
log.Fatal("missing env variable API_KEY")
}
flagAddress := flag.String("address", "0.0.0.0:80", "address to listen to")
flagNeosHostname := flag.String("neos", "http://neos/", "neos cms hostname")
// Callback flags
flagCallbackKey := flag.String("callback-key", "", "optinonal header key to send with each web callback")
flagCallbackTLSVerify := flag.Bool("callback-tls-verify", true, "skip TLS verification on web callbacks")
flagCallbackUpdated := flag.String("callback-updated", "", "comma seperated list of urls to notify on update event")
// Update flags
flagAutoUpdate := flag.String("auto-update", "", "duration value on which to automatically update the proxy")
flag.Parse()
neosURL, err := url.Parse(*flagNeosHostname)
if err != nil {
log.Fatal(err)
}
p := &Proxy{
APIKey: apiKey,
Address: *flagAddress,
Endpoint: neosURL,
CacheDir: os.TempDir(),
CacheInvalidationChannels: make(map[string](chan time.Time)),
CallbackKey: *flagCallbackKey,
CallbackTLSVerify: *flagCallbackTLSVerify,
CallbackUpdated: strings.Split(*flagCallbackUpdated, ","),
}
if *flagAutoUpdate != "" {
autoUpdate, err := time.ParseDuration(*flagAutoUpdate)
if err != nil {
log.Fatal("invalid auto-update duration value: " + err.Error())
}
go func() {
log.Println("starting with auto update every " + *flagAutoUpdate)
for {
time.Sleep(autoUpdate)
log.Println(fmt.Sprintf("auto update: updating %d cache", len(p.CacheInvalidationChannels)))
for workspace, channel := range p.CacheInvalidationChannels {
select {
case channel <- time.Now():
log.Println(fmt.Sprintf("auto update: added cache invalidation request to queue for '%s' workspace", workspace))
default:
log.Println(fmt.Sprintf("auto update: ignored cache invalidation request due to pending invalidation requests for '%s' workspace", workspace))
}
}
}
}()
}
fmt.Println("start proxy on ", *flagAddress, "for neos", *flagNeosHostname, "with cache dir in", p.CacheDir)
if err := p.run(); err != nil {
log.Fatal(err)
}
}

59
notifier.go Normal file
View File

@ -0,0 +1,59 @@
package neosproxy
import (
	"bytes"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"time"
)
// NotifyOnUpdate fires the "updated" event for every configured hook whose
// workspace matches, each notification running in its own goroutine.
func (p *Proxy) NotifyOnUpdate(workspace string) {
	for _, hook := range p.Config.Callbacks.NotifyOnUpdateHooks {
		if hook.Workspace != workspace {
			continue
		}
		go p.notify("updated", hook)
	}
}
// notify POSTs a JSON payload {"type": <event>, "workspace": ...} to the
// hook's URL, sending the hook's API key in the "key" request header. TLS
// certificate verification can be disabled per hook via its VerifyTLS flag.
func (p *Proxy) notify(event string, hook *Hook) {
	// logging
	log.Println(fmt.Sprintf("Notifying '%s' event on workspace %s: %s", event, hook.Workspace, hook.URL))

	// payload
	data, _ := json.Marshal(map[string]string{
		"type":      event,
		"workspace": hook.Workspace,
	})

	// setup http client
	httpClient := &http.Client{
		// BUG FIX: without a timeout a stuck webhook endpoint would hang
		// this goroutine forever.
		Timeout: 30 * time.Second,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: !hook.VerifyTLS,
			},
		},
	}

	// prepare request
	req, err := http.NewRequest(http.MethodPost, hook.URL.String(), bytes.NewBuffer(data))
	if err != nil {
		log.Println(fmt.Sprintf("Failed to create callback request! Got error: %s", err.Error()))
		return
	}

	// add header
	req.Header.Set("Content-Type", "application/json")
	req.Header.Add("key", hook.APIKey)

	// send request
	resp, err := httpClient.Do(req)
	if err != nil {
		log.Println(fmt.Sprintf("failed to notify a webhook! Got error: %s", err.Error()))
		return
	}
	// BUG FIX: the response body was never closed, leaking the underlying
	// connection on every notification.
	defer resp.Body.Close()
	log.Println(fmt.Sprintf("notified webhook with response code: %d", resp.StatusCode))
}

59
proxy.go Normal file
View File

@ -0,0 +1,59 @@
package neosproxy
import (
"fmt"
"log"
"net/http"
"net/http/httputil"
"time"
)
// Proxy is the neos contentserver export caching proxy: it reverse-proxies
// to the configured Neos backend and serves cached contentserver exports
// per workspace.
type Proxy struct {
	// Config holds the settings loaded from the config file.
	Config *Config
	// APIKey guards cache invalidation requests (Authorization header).
	APIKey string
	// CacheInvalidationChannels holds one buffered (capacity 1) channel per
	// workspace; sending on it schedules a cache refresh.
	// NOTE(review): the map is written by addInvalidationChannel and
	// iterated by the auto-update goroutine without synchronization —
	// confirm this is race-free or add a mutex.
	CacheInvalidationChannels map[string](chan time.Time)
}
// Run registers the proxy's HTTP routes and blocks serving on the configured
// address. Export requests are answered from the cache; every other path is
// reverse-proxied to the Neos backend.
func (p *Proxy) Run() error {
	p.addInvalidationChannel(DefaultWorkspace)

	backend := httputil.NewSingleHostReverseProxy(p.Config.Neos.URL)

	mux := http.NewServeMux()
	mux.Handle("/contentserver/export/", backend)
	mux.HandleFunc("/contentserverproxy/cache", p.invalidateCache)
	mux.HandleFunc("/contentserver/export", p.serveCachedNeosContentServerExport)
	mux.Handle("/", backend)

	return http.ListenAndServe(p.Config.Proxy.Address, mux)
}
// error logs the failure and replies to the request with the given HTTP
// status code (and an empty body).
func (p *Proxy) error(w http.ResponseWriter, r *http.Request, code int, msg string) {
	log.Printf("%d\t%s\t%s", code, r.URL, msg)
	w.WriteHeader(code)
}
// addInvalidationChannel lazily creates the invalidation channel for a
// workspace and starts its worker goroutine, returning the (possibly
// pre-existing) channel. The channel has capacity 1, so at most one refresh
// request can be pending at a time.
//
// The worker sleeps 5s between iterations, then blocks until a request
// arrives and re-downloads the contentserver export for its workspace.
// NOTE(review): the map write below is unsynchronized while the auto-update
// goroutine iterates the same map — potential data race, confirm.
func (p *Proxy) addInvalidationChannel(workspace string) chan time.Time {
	if _, ok := p.CacheInvalidationChannels[workspace]; !ok {
		channel := make(chan time.Time, 1)
		p.CacheInvalidationChannels[workspace] = channel
		go func(workspace string, channel chan time.Time) {
			for {
				sleepTime := 5 * time.Second
				time.Sleep(sleepTime)
				// block until an invalidation request is queued
				requestTime := <-channel
				if err := p.cacheNeosContentServerExport(workspace); err != nil {
					log.Println(err.Error())
				} else {
					log.Println(fmt.Sprintf(
						"processed cache invalidation request, which has been added at %s in %.2fs for workspace %s",
						requestTime.Format(time.RFC3339),
						time.Since(requestTime.Add(sleepTime)).Seconds(),
						workspace,
					))
				}
			}
		}(workspace, channel)
	}
	return p.CacheInvalidationChannels[workspace]
}