forgejo/modules/queue/workerqueue.go
Victoria Nadasdi 54acfa8880
refactor: redis queue backend test cleanup
Summary:
- Move the existing tests under a `testify` Suite as `baseRedisWithServerTestSuite`
  - These tests require a real Redis server.
- Add `go.uber.org/mock/mockgen@latest` as a dependency
  - as a tool (Makefile).
  - in the `go.mod` file.
- The mock Redis client lives in a `mock` directory under the queue module.
  - That mock package also contains an extra hand-written in-memory Redis-like struct.
- Add tests that use the mock Redis client.
- Change the logic around queue provider creation.
  - `getNewQueue` now returns a queue provider directly, not an init
    function that creates it.

The whole queue module is close to impossible to test properly because
everything is private and everything goes through a single hard-wired struct
route. Because of that, we can't test, for example, which keys are used for a
given queue.

To overcome this, as a first step I removed one step from that hard
route by allowing queue providers to be created with custom calls. To achieve
this, I moved the creation logic into `getNewQueue` (previously it
was `getNewQueueFn`). That changes nothing on that side; everything works
as before, except that the `newXXX` call happens directly in that function
and not outside it.
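
For illustration, the shape of the change is roughly this (the old signature
is an approximation of the previous code, not copied from it; the new one is
the function shown in the file below):

    before: getNewQueueFn(t string) (string, func(cfg *BaseConfig, unique bool) (baseQueue, error))
    after:  getNewQueue(t string, cfg *BaseConfig, unique bool) (string, baseQueue, error)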

That made it possible to add extra provider-specific parameters to those
functions (`newXXX`), for example a Redis client. When called through
`getNewQueue`, that client argument is `nil`.

- If the provided client is not `nil`, the provider uses it instead of the
  connection string.
- If it's `nil` (the default behaviour), it creates a new Redis client as it
  did before; no changes to that. (A minimal sketch of this selection logic
  follows below.)
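
A minimal sketch of that selection logic, written inside the queue package and
assuming a go-redis `UniversalClient` and a `baseRedis` struct with these
fields; the exact client type, field names, and connection handling in the
real provider differ:

func newBaseRedisGeneric(cfg *BaseConfig, unique bool, client redis.UniversalClient) (baseQueue, error) {
	if client == nil {
		// default behaviour: build the client from the configured connection string
		client = redis.NewClient(&redis.Options{Addr: cfg.ConnStr})
	}
	// the rest of the provider keeps using `client` exactly as before
	return &baseRedis{cfg: cfg, isUnique: unique, client: client}, nil
}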

The rest of the provider code is unchanged. All these changes were
required to make it possible to generate mock clients for providers and
use them.

For the tests, the two existing test cases work fine with a Redis server,
but they need some extra helpers, for example to start a new Redis
server if required, or to wait for a Redis server to become ready to use.
These helpers are only required for test cases that use a real Redis server.

For better isolation, I moved the existing tests under a testify Suite and
into a new test file called `base_redis_with_server_test.go`
because, well, they test the code with a server. These tests do exactly the
same as before, calling the same sub-tests the same way as before; the
only change is the structure of the tests (removing repetition, scoping the
server-related helper functions). A rough skeleton of the suite is sketched
below.
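
This is a skeleton only; the helper behaviour described in the comments is
illustrative and the actual contents of `base_redis_with_server_test.go`
differ:

package queue

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

type baseRedisWithServerTestSuite struct {
	suite.Suite
}

func TestBaseRedisWithServer(t *testing.T) {
	suite.Run(t, &baseRedisWithServerTestSuite{})
}

func (suite *baseRedisWithServerTestSuite) SetupSuite() {
	// start a local redis-server if none is reachable and wait for it to
	// accept connections (helpers scoped to this suite)
}

func (suite *baseRedisWithServerTestSuite) TestBaseRedis() {
	// runs the same sub-tests as before, unchanged
}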

Finally, we can create unit tests without a Redis server. The main focus of
this group of tests is a higher-level overview of operations. With the
mock Redis client we can set up expectations about the queue names used, the
values received, and the return values, to simulate faulty states.

These new unit test functions don't cover all functionality, at least
that's not the aim for now. It's more about making that possible and
adding extra tests around parts we couldn't test before, for example the
keys.

What the new unit test group can additionally check (a small example follows
the list):
- Which key is used for a given queue, for example with a `prefix`, or
  that all the `SXxx` calls use `queue_unique` if
  it's a unique queue.
- If it's not a unique queue, no `SXxx` functions are called, because
  those sets are only used to check whether a value is unique.
- `HasItem` always returns `false` if it's a non-unique queue.
- All functions are called exactly `N` times, and the code makes no
  unexpected calls to Redis.
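
A sketch of the new style; the mock package path, the generated type name,
the `BaseConfig` values, and the exact Redis command asserted here are
assumptions, not the real test code:

package queue

import (
	"context"
	"testing"

	"github.com/redis/go-redis/v9"
	"go.uber.org/mock/gomock"

	"code.gitea.io/gitea/modules/queue/mock" // hypothetical import path of the generated mock
)

func TestBaseRedisUniqueUsesUniqueSet(t *testing.T) {
	ctrl := gomock.NewController(t)
	client := mock.NewMockUniversalClient(ctrl) // hypothetical generated mock type

	// a unique queue is expected to check membership in the "<queue>_unique"
	// set, exactly once and with the expected key
	client.EXPECT().
		SIsMember(gomock.Any(), "test_queue_unique", gomock.Any()).
		Return(redis.NewBoolResult(true, nil)).
		Times(1)

	cfg := &BaseConfig{QueueFullName: "test_queue", SetFullName: "test_queue_unique"}
	q, err := newBaseRedisGeneric(cfg, true, client)
	if err != nil {
		t.Fatal(err)
	}

	has, err := q.HasItem(context.Background(), []byte("item"))
	if err != nil {
		t.Fatal(err)
	}
	if !has {
		t.Fatal("expected the item to be reported as present")
	}
}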

Signed-off-by: Victoria Nadasdi <victoria@efertone.me>
2024-05-21 18:02:33 +02:00


// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package queue

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"code.gitea.io/gitea/modules/json"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/process"
	"code.gitea.io/gitea/modules/setting"
)

// WorkerPoolQueue is a queue that uses a pool of workers to process items
// It can use different underlying (base) queue types
type WorkerPoolQueue[T any] struct {
	ctxRun          context.Context
	ctxRunCancel    context.CancelFunc
	shutdownDone    chan struct{}
	shutdownTimeout atomic.Int64 // in case some buggy handlers (workers) would hang forever, "shutdown" should finish in predictable time

	origHandler HandlerFuncT[T]
	safeHandler HandlerFuncT[T]

	baseQueueType string
	baseConfig    *BaseConfig
	baseQueue     baseQueue

	batchChan chan []T
	flushChan chan flushType

	batchLength     int
	workerNum       int
	workerMaxNum    int
	workerActiveNum int
	workerNumMu     sync.Mutex
}

type flushType chan struct{}

var _ ManagedWorkerPoolQueue = (*WorkerPoolQueue[any])(nil)

func (q *WorkerPoolQueue[T]) GetName() string {
	return q.baseConfig.ManagedName
}

func (q *WorkerPoolQueue[T]) GetType() string {
	return q.baseQueueType
}

func (q *WorkerPoolQueue[T]) GetItemTypeName() string {
	var t T
	return fmt.Sprintf("%T", t)
}

func (q *WorkerPoolQueue[T]) GetWorkerNumber() int {
	q.workerNumMu.Lock()
	defer q.workerNumMu.Unlock()
	return q.workerNum
}

func (q *WorkerPoolQueue[T]) GetWorkerActiveNumber() int {
	q.workerNumMu.Lock()
	defer q.workerNumMu.Unlock()
	return q.workerActiveNum
}

func (q *WorkerPoolQueue[T]) GetWorkerMaxNumber() int {
	q.workerNumMu.Lock()
	defer q.workerNumMu.Unlock()
	return q.workerMaxNum
}

func (q *WorkerPoolQueue[T]) SetWorkerMaxNumber(num int) {
	q.workerNumMu.Lock()
	defer q.workerNumMu.Unlock()
	q.workerMaxNum = num
}

func (q *WorkerPoolQueue[T]) GetQueueItemNumber() int {
	cnt, err := q.baseQueue.Len(q.ctxRun)
	if err != nil {
		log.Error("Failed to get number of items in queue %q: %v", q.GetName(), err)
	}
	return cnt
}
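
// FlushWithContext sends a flush request to the queue and waits until the flush
// finishes, the given context is done, or the timeout (if any) expires.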
func (q *WorkerPoolQueue[T]) FlushWithContext(ctx context.Context, timeout time.Duration) (err error) {
	if q.isBaseQueueDummy() {
		return nil
	}

	log.Debug("Try to flush queue %q with timeout %v", q.GetName(), timeout)
	defer log.Debug("Finish flushing queue %q, err: %v", q.GetName(), err)

	var after <-chan time.Time
	after = infiniteTimerC
	if timeout > 0 {
		after = time.After(timeout)
	}
	c := make(flushType)

	// send flush request
	// if it blocks, it means that there is a flush in progress or the queue hasn't been started yet
	select {
	case q.flushChan <- c:
	case <-ctx.Done():
		return ctx.Err()
	case <-q.ctxRun.Done():
		return q.ctxRun.Err()
	case <-after:
		return context.DeadlineExceeded
	}

	// wait for flush to finish
	select {
	case <-c:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-q.ctxRun.Done():
		return q.ctxRun.Err()
	case <-after:
		return context.DeadlineExceeded
	}
}

// RemoveAllItems removes all items in the base queue
func (q *WorkerPoolQueue[T]) RemoveAllItems(ctx context.Context) error {
	return q.baseQueue.RemoveAll(ctx)
}
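
// marshal serializes an item to JSON for the base queue; on failure it logs the
// error and returns nil.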
func (q *WorkerPoolQueue[T]) marshal(data T) []byte {
	bs, err := json.Marshal(data)
	if err != nil {
		log.Error("Failed to marshal item for queue %q: %v", q.GetName(), err)
		return nil
	}
	return bs
}

func (q *WorkerPoolQueue[T]) unmarshal(data []byte) (t T, ok bool) {
	if err := json.Unmarshal(data, &t); err != nil {
		log.Error("Failed to unmarshal item from queue %q: %v", q.GetName(), err)
		return t, false
	}
	return t, true
}

func (q *WorkerPoolQueue[T]) isBaseQueueDummy() bool {
	_, isDummy := q.baseQueue.(*baseDummy)
	return isDummy
}

// Push adds an item to the queue, it may block for a while and then returns an error if the queue is full
func (q *WorkerPoolQueue[T]) Push(data T) error {
	if q.isBaseQueueDummy() && q.safeHandler != nil {
		// FIXME: the "immediate" queue is only for testing, but it really causes problems because its behavior is different from a real queue.
		// Even if tests pass, it doesn't mean that there is no bug in code.
		if data, ok := q.unmarshal(q.marshal(data)); ok {
			q.safeHandler(data)
		}
	}
	return q.baseQueue.PushItem(q.ctxRun, q.marshal(data))
}

// Has only works for unique queues. Keep in mind that this check may not be reliable (due to the lack of proper transaction support)
// There could be a small chance that duplicate items appear in the queue
func (q *WorkerPoolQueue[T]) Has(data T) (bool, error) {
	return q.baseQueue.HasItem(q.ctxRun, q.marshal(data))
}

func (q *WorkerPoolQueue[T]) Run() {
	q.doRun()
}

func (q *WorkerPoolQueue[T]) Cancel() {
	q.ctxRunCancel()
}

// ShutdownWait shuts down the queue, waits for all workers to finish their jobs, and pushes the unhandled items back to the base queue
// It waits for all workers (handlers) to finish their jobs, in case some buggy handlers would hang forever, a reasonable timeout is needed
func (q *WorkerPoolQueue[T]) ShutdownWait(timeout time.Duration) {
	q.shutdownTimeout.Store(int64(timeout))
	q.ctxRunCancel()
	<-q.shutdownDone
}
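
// getNewQueue creates the base queue for the given type and returns the resolved
// type name together with the provider; unknown types fall back to "level".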
func getNewQueue(t string, cfg *BaseConfig, unique bool) (string, baseQueue, error) {
	switch t {
	case "dummy", "immediate":
		queue, err := newBaseDummy(cfg, unique)
		return t, queue, err
	case "channel":
		queue, err := newBaseChannelGeneric(cfg, unique)
		return t, queue, err
	case "redis":
		queue, err := newBaseRedisGeneric(cfg, unique, nil)
		return t, queue, err
	default: // level(leveldb,levelqueue,persistable-channel)
		queue, err := newBaseLevelQueueGeneric(cfg, unique)
		return "level", queue, err
	}
}

func newWorkerPoolQueueForTest[T any](name string, queueSetting setting.QueueSettings, handler HandlerFuncT[T], unique bool) (*WorkerPoolQueue[T], error) {
	return NewWorkerPoolQueueWithContext(context.Background(), name, queueSetting, handler, unique)
}

func NewWorkerPoolQueueWithContext[T any](ctx context.Context, name string, queueSetting setting.QueueSettings, handler HandlerFuncT[T], unique bool) (*WorkerPoolQueue[T], error) {
	if handler == nil {
		log.Debug("Use dummy queue for %q because handler is nil and caller doesn't want to process the queue items", name)
		queueSetting.Type = "dummy"
	}

	var w WorkerPoolQueue[T]
	var err error
	w.baseConfig = toBaseConfig(name, queueSetting)

	w.baseQueueType, w.baseQueue, err = getNewQueue(queueSetting.Type, w.baseConfig, unique)
	if err != nil {
		return nil, err
	}
	log.Trace("Created queue %q of type %q", name, w.baseQueueType)

	w.ctxRun, _, w.ctxRunCancel = process.GetManager().AddTypedContext(ctx, "Queue: "+w.GetName(), process.SystemProcessType, false)
	w.batchChan = make(chan []T)
	w.flushChan = make(chan flushType)
	w.shutdownDone = make(chan struct{})
	w.shutdownTimeout.Store(int64(shutdownDefaultTimeout))

	w.workerMaxNum = queueSetting.MaxWorkers
	w.batchLength = queueSetting.BatchLength

	w.origHandler = handler
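	// safeHandler wraps the original handler and recovers from panics, so a buggy
	// handler cannot crash the worker pool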
	w.safeHandler = func(t ...T) (unhandled []T) {
		defer func() {
			err := recover()
			if err != nil {
				log.Error("Recovered from panic in queue %q handler: %v\n%s", name, err, log.Stack(2))
			}
		}()
		if w.origHandler != nil {
			return w.origHandler(t...)
		}
		return nil
	}

	return &w, nil
}