Commit

fixing some compile and lint errors
ldemailly committed Oct 21, 2023
1 parent 66bf671 commit 5311433
Showing 6 changed files with 6 additions and 18 deletions.
7 changes: 1 addition & 6 deletions app.go
@@ -91,7 +91,6 @@ var slackRetryErrors = map[string]string{
var doNotProcessChannels = map[string]time.Time{}

func CheckError(err string, channel string) (retryable bool, pause bool, description string) {
-
// Special case for channel_not_found, we don't want to retry this one right away.
// We are making it a 'soft failure' so that we don't keep retrying it for a period of time for any message that is sent to a channel that doesn't exist.
// We keep track of said channel in a map, and we will retry it after a period of time.
@@ -112,7 +111,6 @@ func CheckError(err string, channel string) (retryable bool, pause bool, descrip

// This should not happen, but if it does, we just try to retry it
return true, false, "Unknown error"
-
}

func (s *SlackClient) PostMessage(request SlackPostMessageRequest, url string, token string) error {
@@ -151,7 +149,6 @@ func (s *SlackClient) PostMessage(request SlackPostMessageRequest, url string, t
}

func NewApp(queueSize int, httpClient *http.Client, metrics *Metrics) *App {
-
return &App{
slackQueue: make(chan SlackPostMessageRequest, queueSize),
messenger: &SlackClient{client: httpClient},
@@ -186,7 +183,7 @@ func (app *App) processQueue(ctx context.Context, MaxRetries int, InitialBackoff
// On shutdown, it would cancel the context, even if the queue was stopped (thus no messages would even come in).
err := r.Wait(ctx)
if err != nil {
log.Fatalf("Error while waiting for rate limiter. This should not happen, provide debug info + error message to an issue if it does: %w", err)
log.Fatalf("Error while waiting for rate limiter. This should not happen, provide debug info + error message to an issue if it does: %v", err)
return
}

@@ -195,7 +192,6 @@ func (app *App) processQueue(ctx context.Context, MaxRetries int, InitialBackoff

retryCount := 0
for {
-
// Check if the channel is in the doNotProcessChannels map, if it is, check if it's been more than 15 minutes since we last tried to send a message to it.
if (doNotProcessChannels[msg.Channel] != time.Time{}) {
if time.Since(doNotProcessChannels[msg.Channel]) >= 15*time.Minute {
@@ -210,7 +206,6 @@ func (app *App) processQueue(ctx context.Context, MaxRetries int, InitialBackoff

err := app.messenger.PostMessage(msg, SlackPostMessageURL, tokenFlag)
if err != nil {
-
retryable, pause, description := CheckError(err.Error(), msg.Channel)

if pause {
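A note on the log.Fatalf change above (the deleted blank lines throughout this commit look like plain whitespace-lint fixes): the %w verb is an error-wrapping directive that only fmt.Errorf understands, so go vet flags it in ordinary Printf-style calls such as log.Fatalf, where %v is the verb to use. The following is a small standalone sketch of that distinction, not code from this repository:

package main

import (
	"errors"
	"fmt"
	"log"
)

func main() {
	base := errors.New("rate limiter closed")

	// fmt.Errorf understands %w: it wraps base so errors.Is / errors.As can find it later.
	wrapped := fmt.Errorf("waiting failed: %w", base)
	fmt.Println(errors.Is(wrapped, base)) // true

	// log.Fatalf only formats; go vet reports %w here as unsupported,
	// so %v is used to print the error as text before exiting.
	log.Fatalf("waiting failed: %v", wrapped)
}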
2 changes: 0 additions & 2 deletions app_test.go
@@ -24,7 +24,6 @@ func (m *MockSlackMessenger) PostMessage(req SlackPostMessageRequest, url string
}

func TestApp_singleBurst_Success(t *testing.T) {
-
messenger := &MockSlackMessenger{}
app := &App{
slackQueue: make(chan SlackPostMessageRequest, 2),
@@ -62,7 +61,6 @@ func TestApp_singleBurst_Success(t *testing.T) {
}

func TestApp_MultiBurst_Success(t *testing.T) {
-
messenger := &MockSlackMessenger{}
app := &App{
slackQueue: make(chan SlackPostMessageRequest, 2),
1 change: 0 additions & 1 deletion main.go
@@ -50,7 +50,6 @@ type App struct {
}

func main() {
-
var (
MaxRetries = 2
InitialBackoffMs = 1000
7 changes: 2 additions & 5 deletions metrics.go
@@ -9,13 +9,12 @@ import (
)

func NewMetrics(reg prometheus.Registerer) *Metrics {
-
m := &Metrics{
RequestsReceivedTotal: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "slackproxy",
Name: "requests_recieved_total",
Help: "The total number of requests recieved",
Name: "requests_received_total",
Help: "The total number of requests received",
},
[]string{"channel"},
),
@@ -69,11 +68,9 @@ func NewMetrics(reg prometheus.Registerer) *Metrics {
reg.MustRegister(m.QueueSize)

return m
-
}

func StartMetricServer(reg *prometheus.Registry, addr *string) {
-
http.Handle("/metrics", promhttp.HandlerFor(
reg,
promhttp.HandlerOpts{
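For context on the rename above (not part of the commit itself): client_golang joins Namespace and Name into the exposed series name, so fixing the spelling changes the series from slackproxy_requests_recieved_total to slackproxy_requests_received_total, and any dashboard or alert that queries the old name would need updating. A minimal standalone sketch of the corrected counter, with an assumed example label value:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	// Counter with the corrected name from the diff; the "channel" label mirrors the one used there.
	requestsReceived := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "slackproxy",
			Name:      "requests_received_total",
			Help:      "The total number of requests received",
		},
		[]string{"channel"},
	)

	reg := prometheus.NewRegistry()
	reg.MustRegister(requestsReceived)

	// "#alerts" is just an example label value for this sketch.
	requestsReceived.WithLabelValues("#alerts").Inc()

	// The exposed series is now slackproxy_requests_received_total{channel="#alerts"};
	// queries against the old misspelled name return nothing.
	fmt.Println(testutil.ToFloat64(requestsReceived.WithLabelValues("#alerts")))
}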
6 changes: 3 additions & 3 deletions server.go
@@ -51,8 +51,8 @@ func (app *App) handleRequest(w http.ResponseWriter, r *http.Request) {
maxQueueSize := int(float64(cap(app.slackQueue)) * 0.9)
// Reject requests if the queue is almost full
// If the channel is full, the request will block until there is space in the channel.
- // Ideally we don't reject at 90%, but initialy after some tests I got blocked. So I decided to be a bit more conservative.
- // ToDo: Fix this behaviour so we can reach 100% channel size without problems.
+ // Ideally we don't reject at 90%, but initially after some tests I got blocked. So I decided to be a bit more conservative.
+ // ToDo: Fix this behavior so we can reach 100% channel size without problems.
if len(app.slackQueue) >= maxQueueSize {
w.WriteHeader(http.StatusServiceUnavailable)

@@ -116,7 +116,7 @@ func (app *App) handleRequest(w http.ResponseWriter, r *http.Request) {

// Respond, this is not entirely accurate as we have no idea if the message will be processed successfully.
// This is the downside of having a queue which could potentially delay responses by a lot.
- // We do our due diligences on the recieved message and can make a fair assumption we will be able to process it.
+ // We do our due diligences on the received message and can make a fair assumption we will be able to process it.
// Application should utlise this applications metrics and logs to find out if there are any issues.
w.WriteHeader(http.StatusOK)
_, err = w.Write(responseData)
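The ToDo above asks for a way to reach 100% channel capacity without the handler blocking. One common pattern for that (a sketch only, with invented names, not the project's code) is a non-blocking send via select/default, which rejects immediately when the buffered channel is full instead of relying on a 90% safety margin:

package main

import (
	"fmt"
	"net/http"
)

type message struct{ channel, text string }

var queue = make(chan message, 100)

// Non-blocking enqueue: the handler never blocks, even when the buffered
// channel is completely full, so no headroom below capacity is needed.
func handle(w http.ResponseWriter, r *http.Request) {
	msg := message{channel: r.FormValue("channel"), text: r.FormValue("text")}
	select {
	case queue <- msg:
		w.WriteHeader(http.StatusOK)
		fmt.Fprintln(w, "queued")
	default:
		w.WriteHeader(http.StatusServiceUnavailable)
		fmt.Fprintln(w, "queue full, try again later")
	}
}

func main() {
	// A real service would post drained messages to Slack; here we just log them.
	go func() {
		for m := range queue {
			fmt.Printf("would deliver to %s: %s\n", m.channel, m.text)
		}
	}()
	http.HandleFunc("/message", handle)
	_ = http.ListenAndServe(":8080", nil)
}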
1 change: 0 additions & 1 deletion server_test.go
@@ -44,7 +44,6 @@ func TestHandleRequest(t *testing.T) {

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
-
r := prometheus.NewRegistry()
metrics := NewMetrics(r)

