Rewrite queue (#24505)
# ⚠️ Breaking

Many deprecated queue config options are removed (actually, they should have been removed in 1.18/1.19).

If you see the fatal message when starting Gitea: "Please update your app.ini to remove deprecated config options", please follow the error messages to remove these options from your app.ini. Example:

```
2023/05/06 19:39:22 [E] Removed queue option: `[indexer].ISSUE_INDEXER_QUEUE_TYPE`. Use new options in `[queue.issue_indexer]`
2023/05/06 19:39:22 [E] Removed queue option: `[indexer].UPDATE_BUFFER_LEN`. Use new options in `[queue.issue_indexer]`
2023/05/06 19:39:22 [F] Please update your app.ini to remove deprecated config options
```

Many options in `[queue]` are dropped, including `WRAP_IF_NECESSARY`, `MAX_ATTEMPTS`, `TIMEOUT`, `WORKERS`, `BLOCK_TIMEOUT`, `BOOST_TIMEOUT`, `BOOST_WORKERS`; they can be removed from app.ini.

# The problem

The old queue package has some legacy problems:

* Complexity: I doubt that many people could tell how it works.
* Maintainability: too many channels and mutex/cond primitives are mixed together, and too many different structs/interfaces depend on each other.
* Stability: due to the complexity and maintainability problems, there are sometimes strange bugs that are difficult to debug, and some code has no tests (indeed, some code is difficult to test because so many things are mixed together).
* General applicability: although it is called a "queue", its behavior is not that of a well-known queue.
* Scalability: it doesn't seem easy to make it work with a cluster without breaking its behaviors.

It came from some very old code written to "avoid breaking" things, but its technical debt is too heavy now. It's a good time to introduce a better "queue" package.

# The new queue package

It keeps the old config options and concepts as much as possible.

* It only contains two major concepts:
    * The "base queue": channel, levelqueue, redis.
        * They share the same abstraction and the same interface, and they are tested by the same testing code.
    * The "WorkerPoolQueue": it uses a "base queue" to provide the "worker pool" function and calls the "handler" to process the data in the base queue.
* The new code doesn't do "PushBack".
    * Think about a queue with many workers: "PushBack" can't guarantee the order of re-queued unhandled items, so the new code just does a normal push.
* The new code doesn't do "pause/resume".
    * The "pause/resume" was designed to handle some handler failures, e.g. the document indexer (elasticsearch) being down.
    * If a queue is paused for a long time, either the producers block or the new items are dropped.
    * The new code doesn't do this "pause/resume" trick; it's not a common queue behavior and it doesn't help much.
    * If there are unhandled items, the "push" function just blocks for a few seconds, then re-queues them and retries.
* The new code doesn't do "worker boosting".
    * Gitea's queue handlers are light functions whose only cost is the goroutine, so it doesn't make sense to "boost" them.
    * The new code only uses a "max worker number" to limit the concurrent workers.
* The new "Push" never blocks forever.
    * Instead of creating more and more blocked goroutines, returning an error is friendlier to the server and to the end user.

There are more details in code comments: e.g. the "Flush" problem, the strange "code.index" hanging problem, and the "immediate" queue problem.

Almost ready for review.
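For illustration, a migration might look like the following app.ini sketch. The old `[indexer]` keys are the ones from the error messages above; the `TYPE` and `LENGTH` values under `[queue.issue_indexer]` are illustrative only — see the config cheat sheet for the keys your version actually supports:

```ini
; Before (now removed -- Gitea refuses to start while these exist):
;[indexer]
;ISSUE_INDEXER_QUEUE_TYPE = levelqueue
;UPDATE_BUFFER_LEN = 20

; After: each queue is configured in its own [queue.*] section, e.g.:
[queue.issue_indexer]
TYPE = level
LENGTH = 100
```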
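To illustrate the new package's shape from a consumer's point of view, here is a minimal sketch based on the API as it appears in the diff below (`CreateSimpleQueue`, a variadic handler that returns the unhandled items, and `WorkerPoolQueue`). The `MailTask` type, the `mail_task` queue name, and the `sendMail` helper are hypothetical, for illustration only:

```go
package mailer

import (
	"code.gitea.io/gitea/modules/graceful"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/queue"
)

// MailTask is a hypothetical payload type for this sketch.
type MailTask struct {
	UserID int64
}

// sendMail is a stand-in for the real work a handler would do.
func sendMail(userID int64) error { return nil }

var mailQueue *queue.WorkerPoolQueue[*MailTask]

func initMailQueue() {
	// The handler receives a batch of items; whatever it returns is
	// re-queued with a normal push (there is no "PushBack" anymore).
	handler := func(items ...*MailTask) (unhandled []*MailTask) {
		for _, task := range items {
			if err := sendMail(task.UserID); err != nil {
				unhandled = append(unhandled, task) // retried later
			}
		}
		return unhandled
	}

	// Worker number, queue length, type, etc. come from the
	// [queue.mail_task] section of app.ini.
	mailQueue = queue.CreateSimpleQueue("mail_task", handler)
	if mailQueue == nil {
		log.Fatal("Unable to create mail_task queue")
	}

	// Start processing in the background, as the diff below does for the
	// issue and code indexer queues.
	go graceful.GetManager().RunWithShutdownFns(mailQueue.Run)

	// Push doesn't block forever: if the queue stays full, it eventually
	// returns an error instead of piling up blocked goroutines.
	if err := mailQueue.Push(&MailTask{UserID: 1}); err != nil {
		log.Error("Unable to enqueue mail task: %v", err)
	}
}
```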
TODO:

* [x] add some necessary comments during review
* [x] add some more tests if necessary
* [x] update documents and config options
* [x] test max worker / active worker
* [x] re-run the CI tasks to see whether any test is flaky
* [x] improve the `handleOldLengthConfiguration` to provide more friendly messages
* [x] fine-tune default config values (eg: length?)

## Code coverage:
```diff
@@ -273,10 +273,6 @@ func (b *BleveIndexer) Close() {
     log.Info("PID: %d Repository Indexer closed", os.Getpid())
 }
 
-// SetAvailabilityChangeCallback does nothing
-func (b *BleveIndexer) SetAvailabilityChangeCallback(callback func(bool)) {
-}
-
 // Ping does nothing
 func (b *BleveIndexer) Ping() bool {
     return true
```
```diff
@@ -42,12 +42,11 @@ var _ Indexer = &ElasticSearchIndexer{}
 
 // ElasticSearchIndexer implements Indexer interface
 type ElasticSearchIndexer struct {
-    client               *elastic.Client
-    indexerAliasName     string
-    available            bool
-    availabilityCallback func(bool)
-    stopTimer            chan struct{}
-    lock                 sync.RWMutex
+    client           *elastic.Client
+    indexerAliasName string
+    available        bool
+    stopTimer        chan struct{}
+    lock             sync.RWMutex
 }
 
 type elasticLogger struct {
```
```diff
@@ -198,13 +197,6 @@ func (b *ElasticSearchIndexer) init() (bool, error) {
     return exists, nil
 }
 
-// SetAvailabilityChangeCallback sets callback that will be triggered when availability changes
-func (b *ElasticSearchIndexer) SetAvailabilityChangeCallback(callback func(bool)) {
-    b.lock.Lock()
-    defer b.lock.Unlock()
-    b.availabilityCallback = callback
-}
-
 // Ping checks if elastic is available
 func (b *ElasticSearchIndexer) Ping() bool {
     b.lock.RLock()
```
```diff
@@ -529,8 +521,4 @@ func (b *ElasticSearchIndexer) setAvailability(available bool) {
     }
 
     b.available = available
-    if b.availabilityCallback != nil {
-        // Call the callback from within the lock to ensure that the ordering remains correct
-        b.availabilityCallback(b.available)
-    }
 }
```
```diff
@@ -44,7 +44,6 @@ type SearchResultLanguages struct {
 // Indexer defines an interface to index and search code contents
 type Indexer interface {
     Ping() bool
-    SetAvailabilityChangeCallback(callback func(bool))
     Index(ctx context.Context, repo *repo_model.Repository, sha string, changes *repoChanges) error
     Delete(repoID int64) error
     Search(ctx context.Context, repoIDs []int64, language, keyword string, page, pageSize int, isMatch bool) (int64, []*SearchResult, []*SearchResultLanguages, error)
```
```diff
@@ -81,7 +80,7 @@ type IndexerData struct {
     RepoID int64
 }
 
-var indexerQueue queue.UniqueQueue
+var indexerQueue *queue.WorkerPoolQueue[*IndexerData]
 
 func index(ctx context.Context, indexer Indexer, repoID int64) error {
     repo, err := repo_model.GetRepositoryByID(ctx, repoID)
```
```diff
@@ -137,37 +136,45 @@ func Init() {
     // Create the Queue
     switch setting.Indexer.RepoType {
     case "bleve", "elasticsearch":
-        handler := func(data ...queue.Data) []queue.Data {
+        handler := func(items ...*IndexerData) (unhandled []*IndexerData) {
             idx, err := indexer.get()
             if idx == nil || err != nil {
                 log.Error("Codes indexer handler: unable to get indexer!")
-                return data
+                return items
             }
 
-            unhandled := make([]queue.Data, 0, len(data))
-            for _, datum := range data {
-                indexerData, ok := datum.(*IndexerData)
-                if !ok {
-                    log.Error("Unable to process provided datum: %v - not possible to cast to IndexerData", datum)
-                    continue
-                }
+            for _, indexerData := range items {
                 log.Trace("IndexerData Process Repo: %d", indexerData.RepoID)
 
+                // FIXME: it seems there is a bug in `CatFileBatch` or `nio.Pipe`, which will cause the process to hang forever in rare cases
+                /*
+                    sync.(*Cond).Wait(cond.go:70)
+                    github.com/djherbis/nio/v3.(*PipeReader).Read(sync.go:106)
+                    bufio.(*Reader).fill(bufio.go:106)
+                    bufio.(*Reader).ReadSlice(bufio.go:372)
+                    bufio.(*Reader).collectFragments(bufio.go:447)
+                    bufio.(*Reader).ReadString(bufio.go:494)
+                    code.gitea.io/gitea/modules/git.ReadBatchLine(batch_reader.go:149)
+                    code.gitea.io/gitea/modules/indexer/code.(*BleveIndexer).addUpdate(bleve.go:214)
+                    code.gitea.io/gitea/modules/indexer/code.(*BleveIndexer).Index(bleve.go:296)
+                    code.gitea.io/gitea/modules/indexer/code.(*wrappedIndexer).Index(wrapped.go:74)
+                    code.gitea.io/gitea/modules/indexer/code.index(indexer.go:105)
+                */
                 if err := index(ctx, indexer, indexerData.RepoID); err != nil {
-                    if !setting.IsInTesting {
-                        log.Error("indexer index error for repo %v: %v", indexerData.RepoID, err)
-                    }
-                    if indexer.Ping() {
+                    if !idx.Ping() {
+                        log.Error("Code indexer handler: indexer is unavailable.")
+                        unhandled = append(unhandled, indexerData)
                         continue
                     }
-                    // Add back to queue
-                    unhandled = append(unhandled, datum)
+                    if !setting.IsInTesting {
+                        log.Error("Codes indexer handler: index error for repo %v: %v", indexerData.RepoID, err)
+                    }
                 }
             }
             return unhandled
         }
 
-        indexerQueue = queue.CreateUniqueQueue("code_indexer", handler, &IndexerData{})
+        indexerQueue = queue.CreateUniqueQueue("code_indexer", handler)
         if indexerQueue == nil {
             log.Fatal("Unable to create codes indexer queue")
         }
```
```diff
@@ -224,18 +231,6 @@ func Init() {
 
         indexer.set(rIndexer)
 
-        if queue, ok := indexerQueue.(queue.Pausable); ok {
-            rIndexer.SetAvailabilityChangeCallback(func(available bool) {
-                if !available {
-                    log.Info("Code index queue paused")
-                    queue.Pause()
-                } else {
-                    log.Info("Code index queue resumed")
-                    queue.Resume()
-                }
-            })
-        }
-
         // Start processing the queue
         go graceful.GetManager().RunWithShutdownFns(indexerQueue.Run)
 
```
```diff
@@ -56,16 +56,6 @@ func (w *wrappedIndexer) get() (Indexer, error) {
     return w.internal, nil
 }
 
-// SetAvailabilityChangeCallback sets callback that will be triggered when availability changes
-func (w *wrappedIndexer) SetAvailabilityChangeCallback(callback func(bool)) {
-    indexer, err := w.get()
-    if err != nil {
-        log.Error("Failed to get indexer: %v", err)
-        return
-    }
-    indexer.SetAvailabilityChangeCallback(callback)
-}
-
 // Ping checks if elastic is available
 func (w *wrappedIndexer) Ping() bool {
     indexer, err := w.get()
```
```diff
@@ -187,10 +187,6 @@ func (b *BleveIndexer) Init() (bool, error) {
     return false, err
 }
 
-// SetAvailabilityChangeCallback does nothing
-func (b *BleveIndexer) SetAvailabilityChangeCallback(callback func(bool)) {
-}
-
 // Ping does nothing
 func (b *BleveIndexer) Ping() bool {
     return true
```
```diff
@@ -18,10 +18,6 @@ func (i *DBIndexer) Init() (bool, error) {
     return false, nil
 }
 
-// SetAvailabilityChangeCallback dummy function
-func (i *DBIndexer) SetAvailabilityChangeCallback(callback func(bool)) {
-}
-
 // Ping checks if database is available
 func (i *DBIndexer) Ping() bool {
     return db.GetEngine(db.DefaultContext).Ping() != nil
```
```diff
@@ -22,12 +22,11 @@ var _ Indexer = &ElasticSearchIndexer{}
 
 // ElasticSearchIndexer implements Indexer interface
 type ElasticSearchIndexer struct {
-    client               *elastic.Client
-    indexerName          string
-    available            bool
-    availabilityCallback func(bool)
-    stopTimer            chan struct{}
-    lock                 sync.RWMutex
+    client      *elastic.Client
+    indexerName string
+    available   bool
+    stopTimer   chan struct{}
+    lock        sync.RWMutex
 }
 
 type elasticLogger struct {
```
```diff
@@ -138,13 +137,6 @@ func (b *ElasticSearchIndexer) Init() (bool, error) {
     return true, nil
 }
 
-// SetAvailabilityChangeCallback sets callback that will be triggered when availability changes
-func (b *ElasticSearchIndexer) SetAvailabilityChangeCallback(callback func(bool)) {
-    b.lock.Lock()
-    defer b.lock.Unlock()
-    b.availabilityCallback = callback
-}
-
 // Ping checks if elastic is available
 func (b *ElasticSearchIndexer) Ping() bool {
     b.lock.RLock()
```
```diff
@@ -305,8 +297,4 @@ func (b *ElasticSearchIndexer) setAvailability(available bool) {
     }
 
     b.available = available
-    if b.availabilityCallback != nil {
-        // Call the callback from within the lock to ensure that the ordering remains correct
-        b.availabilityCallback(b.available)
-    }
 }
```
```diff
@@ -49,7 +49,6 @@ type SearchResult struct {
 type Indexer interface {
     Init() (bool, error)
     Ping() bool
-    SetAvailabilityChangeCallback(callback func(bool))
     Index(issue []*IndexerData) error
     Delete(ids ...int64) error
     Search(ctx context.Context, kw string, repoIDs []int64, limit, start int) (*SearchResult, error)
```
```diff
@@ -94,7 +93,7 @@ func (h *indexerHolder) get() Indexer {
 
 var (
     // issueIndexerQueue queue of issue ids to be updated
-    issueIndexerQueue queue.Queue
+    issueIndexerQueue *queue.WorkerPoolQueue[*IndexerData]
     holder            = newIndexerHolder()
 )
 
```
```diff
@@ -108,62 +107,44 @@ func InitIssueIndexer(syncReindex bool) {
     // Create the Queue
     switch setting.Indexer.IssueType {
     case "bleve", "elasticsearch", "meilisearch":
-        handler := func(data ...queue.Data) []queue.Data {
+        handler := func(items ...*IndexerData) (unhandled []*IndexerData) {
             indexer := holder.get()
             if indexer == nil {
-                log.Error("Issue indexer handler: unable to get indexer!")
-                return data
+                log.Error("Issue indexer handler: unable to get indexer.")
+                return items
             }
 
-            iData := make([]*IndexerData, 0, len(data))
-            unhandled := make([]queue.Data, 0, len(data))
-            for _, datum := range data {
-                indexerData, ok := datum.(*IndexerData)
-                if !ok {
-                    log.Error("Unable to process provided datum: %v - not possible to cast to IndexerData", datum)
-                    continue
-                }
+            toIndex := make([]*IndexerData, 0, len(items))
+            for _, indexerData := range items {
                 log.Trace("IndexerData Process: %d %v %t", indexerData.ID, indexerData.IDs, indexerData.IsDelete)
                 if indexerData.IsDelete {
                     if err := indexer.Delete(indexerData.IDs...); err != nil {
-                        log.Error("Error whilst deleting from index: %v Error: %v", indexerData.IDs, err)
-                        if indexer.Ping() {
-                            continue
+                        log.Error("Issue indexer handler: failed to from index: %v Error: %v", indexerData.IDs, err)
+                        if !indexer.Ping() {
+                            log.Error("Issue indexer handler: indexer is unavailable when deleting")
+                            unhandled = append(unhandled, indexerData)
                         }
-                        // Add back to queue
-                        unhandled = append(unhandled, datum)
                     }
                     continue
                 }
-                iData = append(iData, indexerData)
+                toIndex = append(toIndex, indexerData)
             }
-            if len(unhandled) > 0 {
-                for _, indexerData := range iData {
-                    unhandled = append(unhandled, indexerData)
+            if err := indexer.Index(toIndex); err != nil {
+                log.Error("Error whilst indexing: %v Error: %v", toIndex, err)
+                if !indexer.Ping() {
+                    log.Error("Issue indexer handler: indexer is unavailable when indexing")
+                    unhandled = append(unhandled, toIndex...)
                 }
-                return unhandled
-            }
-            if err := indexer.Index(iData); err != nil {
-                log.Error("Error whilst indexing: %v Error: %v", iData, err)
-                if indexer.Ping() {
-                    return nil
-                }
-                // Add back to queue
-                for _, indexerData := range iData {
-                    unhandled = append(unhandled, indexerData)
-                }
-                return unhandled
             }
-            return nil
+            return unhandled
         }
 
-        issueIndexerQueue = queue.CreateQueue("issue_indexer", handler, &IndexerData{})
-
+        issueIndexerQueue = queue.CreateSimpleQueue("issue_indexer", handler)
         if issueIndexerQueue == nil {
            log.Fatal("Unable to create issue indexer queue")
         }
     default:
-        issueIndexerQueue = &queue.DummyQueue{}
+        issueIndexerQueue = queue.CreateSimpleQueue[*IndexerData]("issue_indexer", nil)
     }
 
     // Create the Indexer
```
```diff
@@ -240,18 +221,6 @@ func InitIssueIndexer(syncReindex bool) {
         log.Fatal("Unknown issue indexer type: %s", setting.Indexer.IssueType)
     }
 
-    if queue, ok := issueIndexerQueue.(queue.Pausable); ok {
-        holder.get().SetAvailabilityChangeCallback(func(available bool) {
-            if !available {
-                log.Info("Issue index queue paused")
-                queue.Pause()
-            } else {
-                log.Info("Issue index queue resumed")
-                queue.Resume()
-            }
-        })
-    }
-
     // Start processing the queue
     go graceful.GetManager().RunWithShutdownFns(issueIndexerQueue.Run)
 
```
```diff
@@ -285,9 +254,7 @@ func InitIssueIndexer(syncReindex bool) {
             case <-graceful.GetManager().IsShutdown():
                 log.Warn("Shutdown occurred before issue index initialisation was complete")
             case <-time.After(timeout):
-                if shutdownable, ok := issueIndexerQueue.(queue.Shutdownable); ok {
-                    shutdownable.Terminate()
-                }
+                issueIndexerQueue.ShutdownWait(5 * time.Second)
                 log.Fatal("Issue Indexer Initialization timed-out after: %v", timeout)
             }
         }()
```
```diff
@@ -17,12 +17,11 @@ var _ Indexer = &MeilisearchIndexer{}
 
 // MeilisearchIndexer implements Indexer interface
 type MeilisearchIndexer struct {
-    client               *meilisearch.Client
-    indexerName          string
-    available            bool
-    availabilityCallback func(bool)
-    stopTimer            chan struct{}
-    lock                 sync.RWMutex
+    client      *meilisearch.Client
+    indexerName string
+    available   bool
+    stopTimer   chan struct{}
+    lock        sync.RWMutex
 }
 
 // MeilisearchIndexer creates a new meilisearch indexer
```
```diff
@@ -73,13 +72,6 @@ func (b *MeilisearchIndexer) Init() (bool, error) {
     return false, b.checkError(err)
 }
 
-// SetAvailabilityChangeCallback sets callback that will be triggered when availability changes
-func (b *MeilisearchIndexer) SetAvailabilityChangeCallback(callback func(bool)) {
-    b.lock.Lock()
-    defer b.lock.Unlock()
-    b.availabilityCallback = callback
-}
-
 // Ping checks if meilisearch is available
 func (b *MeilisearchIndexer) Ping() bool {
     b.lock.RLock()
```
```diff
@@ -178,8 +170,4 @@ func (b *MeilisearchIndexer) setAvailability(available bool) {
     }
 
     b.available = available
-    if b.availabilityCallback != nil {
-        // Call the callback from within the lock to ensure that the ordering remains correct
-        b.availabilityCallback(b.available)
-    }
 }
```
```diff
@@ -41,7 +41,7 @@ func TestRepoStatsIndex(t *testing.T) {
     err = UpdateRepoIndexer(repo)
     assert.NoError(t, err)
 
-    queue.GetManager().FlushAll(context.Background(), 5*time.Second)
+    assert.NoError(t, queue.GetManager().FlushAll(context.Background(), 5*time.Second))
 
     status, err := repo_model.GetIndexerStatus(db.DefaultContext, repo, repo_model.RepoIndexerTypeStats)
     assert.NoError(t, err)
```
```diff
@@ -14,12 +14,11 @@ import (
 )
 
 // statsQueue represents a queue to handle repository stats updates
-var statsQueue queue.UniqueQueue
+var statsQueue *queue.WorkerPoolQueue[int64]
 
 // handle passed PR IDs and test the PRs
-func handle(data ...queue.Data) []queue.Data {
-    for _, datum := range data {
-        opts := datum.(int64)
+func handler(items ...int64) []int64 {
+    for _, opts := range items {
         if err := indexer.Index(opts); err != nil {
             if !setting.IsInTesting {
                 log.Error("stats queue indexer.Index(%d) failed: %v", opts, err)
```
```diff
@@ -30,7 +29,7 @@ func handle(data ...queue.Data) []queue.Data {
 }
 
 func initStatsQueue() error {
-    statsQueue = queue.CreateUniqueQueue("repo_stats_update", handle, int64(0))
+    statsQueue = queue.CreateUniqueQueue("repo_stats_update", handler)
     if statsQueue == nil {
         return fmt.Errorf("Unable to create repo_stats_update Queue")
     }
```