diff --git a/app.go b/app.go
index 7724072..f8fd7d6 100644
--- a/app.go
+++ b/app.go
@@ -91,7 +91,6 @@ var slackRetryErrors = map[string]string{
 var doNotProcessChannels = map[string]time.Time{}
 
 func CheckError(err string, channel string) (retryable bool, pause bool, description string) {
-
 	// Special case for channel_not_found, we don't want to retry this one right away.
 	// We are making it a 'soft failure' so that we don't keep retrying it for a period of time for any message that is sent to a channel that doesn't exist.
 	// We keep track of said channel in a map, and we will retry it after a period of time.
@@ -112,7 +111,6 @@ func CheckError(err string, channel string) (retryable bool, pause bool, descrip
 
 	// This should not happen, but if it does, we just try to retry it
 	return true, false, "Unknown error"
-
 }
 
 func (s *SlackClient) PostMessage(request SlackPostMessageRequest, url string, token string) error {
@@ -151,7 +149,6 @@ func (s *SlackClient) PostMessage(request SlackPostMessageRequest, url string, t
 }
 
 func NewApp(queueSize int, httpClient *http.Client, metrics *Metrics) *App {
-
 	return &App{
 		slackQueue: make(chan SlackPostMessageRequest, queueSize),
 		messenger:  &SlackClient{client: httpClient},
@@ -186,7 +183,7 @@ func (app *App) processQueue(ctx context.Context, MaxRetries int, InitialBackoff
 		// On shutdown, it would cancel the context, even if the queue was stopped (thus no messages would even come in).
 		err := r.Wait(ctx)
 		if err != nil {
-			log.Fatalf("Error while waiting for rate limiter. This should not happen, provide debug info + error message to an issue if it does: %w", err)
+			log.Fatalf("Error while waiting for rate limiter. This should not happen, provide debug info + error message to an issue if it does: %v", err)
 			return
 		}
 
@@ -195,7 +192,6 @@ func (app *App) processQueue(ctx context.Context, MaxRetries int, InitialBackoff
 
 		retryCount := 0
 		for {
-
 			// Check if the channel is in the doNotProcessChannels map, if it is, check if it's been more than 15 minutes since we last tried to send a message to it.
 			if (doNotProcessChannels[msg.Channel] != time.Time{}) {
 				if time.Since(doNotProcessChannels[msg.Channel]) >= 15*time.Minute {
@@ -210,7 +206,6 @@ func (app *App) processQueue(ctx context.Context, MaxRetries int, InitialBackoff
 
 			err := app.messenger.PostMessage(msg, SlackPostMessageURL, tokenFlag)
 			if err != nil {
-
 				retryable, pause, description := CheckError(err.Error(), msg.Channel)
 
 				if pause {
diff --git a/app_test.go b/app_test.go
index 23ca0ae..331fd26 100644
--- a/app_test.go
+++ b/app_test.go
@@ -24,7 +24,6 @@ func (m *MockSlackMessenger) PostMessage(req SlackPostMessageRequest, url string
 }
 
 func TestApp_singleBurst_Success(t *testing.T) {
-
 	messenger := &MockSlackMessenger{}
 	app := &App{
 		slackQueue: make(chan SlackPostMessageRequest, 2),
@@ -62,7 +61,6 @@ func TestApp_singleBurst_Success(t *testing.T) {
 }
 
 func TestApp_MultiBurst_Success(t *testing.T) {
-
 	messenger := &MockSlackMessenger{}
 	app := &App{
 		slackQueue: make(chan SlackPostMessageRequest, 2),
diff --git a/main.go b/main.go
index 285d971..acb6353 100644
--- a/main.go
+++ b/main.go
@@ -50,7 +50,6 @@ type App struct {
 }
 
 func main() {
-
 	var (
 		MaxRetries       = 2
 		InitialBackoffMs = 1000
diff --git a/metrics.go b/metrics.go
index b64ebfa..d725cf5 100644
--- a/metrics.go
+++ b/metrics.go
@@ -9,13 +9,12 @@ import (
 )
 
 func NewMetrics(reg prometheus.Registerer) *Metrics {
-
 	m := &Metrics{
 		RequestsReceivedTotal: prometheus.NewCounterVec(
 			prometheus.CounterOpts{
 				Namespace: "slackproxy",
-				Name:      "requests_recieved_total",
-				Help:      "The total number of requests recieved",
+				Name:      "requests_received_total",
+				Help:      "The total number of requests received",
 			},
 			[]string{"channel"},
 		),
@@ -69,11 +68,9 @@ func NewMetrics(reg prometheus.Registerer) *Metrics {
 	reg.MustRegister(m.QueueSize)
 
 	return m
-
 }
 
 func StartMetricServer(reg *prometheus.Registry, addr *string) {
-
 	http.Handle("/metrics", promhttp.HandlerFor(
 		reg,
 		promhttp.HandlerOpts{
diff --git a/server.go b/server.go
index 713fb03..7d75515 100644
--- a/server.go
+++ b/server.go
@@ -51,8 +51,8 @@ func (app *App) handleRequest(w http.ResponseWriter, r *http.Request) {
 	maxQueueSize := int(float64(cap(app.slackQueue)) * 0.9)
 	// Reject requests if the queue is almost full
 	// If the channel is full, the request will block until there is space in the channel.
-	// Ideally we don't reject at 90%, but initialy after some tests I got blocked. So I decided to be a bit more conservative.
-	// ToDo: Fix this behaviour so we can reach 100% channel size without problems.
+	// Ideally we don't reject at 90%, but initially after some tests I got blocked. So I decided to be a bit more conservative.
+	// ToDo: Fix this behavior so we can reach 100% channel size without problems.
 	if len(app.slackQueue) >= maxQueueSize {
 		w.WriteHeader(http.StatusServiceUnavailable)
@@ -116,7 +116,7 @@ func (app *App) handleRequest(w http.ResponseWriter, r *http.Request) {
 
 	// Respond, this is not entirely accurate as we have no idea if the message will be processed successfully.
 	// This is the downside of having a queue which could potentially delay responses by a lot.
-	// We do our due diligences on the recieved message and can make a fair assumption we will be able to process it.
+	// We do our due diligences on the received message and can make a fair assumption we will be able to process it.
 	// Application should utlise this applications metrics and logs to find out if there are any issues.
 	w.WriteHeader(http.StatusOK)
 
 	_, err = w.Write(responseData)
diff --git a/server_test.go b/server_test.go
index a0cbf8b..50e307b 100644
--- a/server_test.go
+++ b/server_test.go
@@ -44,7 +44,6 @@ func TestHandleRequest(t *testing.T) {
 
 	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
-
 			r := prometheus.NewRegistry()
 			metrics := NewMetrics(r)
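
Note (not part of the patch): the comments in the first app.go hunk describe the channel_not_found "soft failure" cooldown that processQueue checks against doNotProcessChannels. As a reading aid, below is a minimal, self-contained sketch of that pattern, assuming the 15-minute window shown in the diff; the shouldSkipChannel helper name is hypothetical and does not exist in the repository.

```go
package main

import (
	"fmt"
	"time"
)

// Channels that recently returned channel_not_found, keyed by the time of the
// last failed attempt. Mirrors the doNotProcessChannels map from app.go.
var doNotProcessChannels = map[string]time.Time{}

// shouldSkipChannel is a hypothetical helper illustrating the cooldown check:
// a channel stays in the map for 15 minutes; after that it is removed and retried.
func shouldSkipChannel(channel string, now time.Time) bool {
	last, ok := doNotProcessChannels[channel]
	if !ok {
		return false // channel has never soft-failed, process normally
	}
	if now.Sub(last) >= 15*time.Minute {
		delete(doNotProcessChannels, channel) // cooldown expired, allow a retry
		return false
	}
	return true // still inside the cooldown window, skip the message
}

func main() {
	doNotProcessChannels["C123"] = time.Now().Add(-20 * time.Minute)
	fmt.Println(shouldSkipChannel("C123", time.Now())) // false: cooldown has expired
	fmt.Println(shouldSkipChannel("C999", time.Now())) // false: channel never failed
}
```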