mirror of
				https://github.com/go-gitea/gitea
				synced 2025-10-31 11:28:24 +00:00 
			
		
		
		
	# ⚠️ Breaking Many deprecated queue config options are removed (actually, they should have been removed in 1.18/1.19). If you see the fatal message when starting Gitea: "Please update your app.ini to remove deprecated config options", please follow the error messages to remove these options from your app.ini. Example: ``` 2023/05/06 19:39:22 [E] Removed queue option: `[indexer].ISSUE_INDEXER_QUEUE_TYPE`. Use new options in `[queue.issue_indexer]` 2023/05/06 19:39:22 [E] Removed queue option: `[indexer].UPDATE_BUFFER_LEN`. Use new options in `[queue.issue_indexer]` 2023/05/06 19:39:22 [F] Please update your app.ini to remove deprecated config options ``` Many options in `[queue]` are dropped, including: `WRAP_IF_NECESSARY`, `MAX_ATTEMPTS`, `TIMEOUT`, `WORKERS`, `BLOCK_TIMEOUT`, `BOOST_TIMEOUT`, `BOOST_WORKERS`; they can be removed from app.ini. # The problem The old queue package has some legacy problems: * complexity: few people could tell how it works. * maintainability: Too many channels and mutex/cond are mixed together, and too many different structs/interfaces depend on each other. * stability: due to the complexity & maintainability issues, sometimes there are strange bugs that are difficult to debug, and some code doesn't have tests (indeed some code is difficult to test because a lot of things are mixed together). * general applicability: although it is called "queue", its behavior is not that of a well-known queue. * scalability: it doesn't seem easy to make it work with a cluster without breaking its behaviors. It came from some very old code to "avoid breaking"; however, its technical debt is too heavy now. It's a good time to introduce a better "queue" package. # The new queue package It keeps using the old config options and concepts as much as possible. * It only contains two major kinds of concepts: * The "base queue": channel, levelqueue, redis * They have the same abstraction, the same interface, and they are tested by the same testing code. 
* The "WorkerPoolQueue": it uses the "base queue" to provide "worker pool" functionality, and calls the "handler" to process the data in the base queue. * The new code doesn't do "PushBack" * Think about a queue with many workers: the "PushBack" can't guarantee the order for re-queued unhandled items, so in the new code it just does a "normal push" * The new code doesn't do "pause/resume" * The "pause/resume" was designed to handle a handler's failure: eg: the document indexer (elasticsearch) is down * If a queue is paused for a long time, either the producers block or the new items are dropped. * The new code doesn't do such a "pause/resume" trick; it's not a common queue behavior and it doesn't help much. * If there are unhandled items, the "push" function just blocks for a few seconds and then re-queues them and retries. * The new code doesn't do "worker boosting" * Gitea's queue handlers are light functions, and the cost is only the goroutine, so it doesn't make sense to "boost" them. * The new code only uses a "max worker number" to limit the concurrent workers. * The new "Push" never blocks forever * Instead of creating more and more blocking goroutines, returning an error is more friendly to the server and to the end user. There are more details in code comments: eg: the "Flush" problem, the strange "code.index" hanging problem, the "immediate" queue problem. Almost ready for review. TODO: * [x] add some necessary comments during review * [x] add some more tests if necessary * [x] update documents and config options * [x] test max worker / active worker * [x] re-run the CI tasks to see whether any test is flaky * [x] improve the `handleOldLengthConfiguration` to provide more friendly messages * [x] fine tune default config values (eg: length?) ## Code coverage: 
		
			
				
	
	
		
			169 lines
		
	
	
		
			4.9 KiB
		
	
	
	
		
			Go
		
	
	
	
	
	
			
		
		
	
	
			169 lines
		
	
	
		
			4.9 KiB
		
	
	
	
		
			Go
		
	
	
	
	
	
| // Copyright 2022 The Gitea Authors. All rights reserved.
 | |
| // SPDX-License-Identifier: MIT
 | |
| 
 | |
| //nolint:forbidigo
 | |
| package base
 | |
| 
 | |
| import (
 | |
| 	"context"
 | |
| 	"fmt"
 | |
| 	"os"
 | |
| 	"path"
 | |
| 	"path/filepath"
 | |
| 	"runtime"
 | |
| 	"testing"
 | |
| 
 | |
| 	"code.gitea.io/gitea/models/unittest"
 | |
| 	"code.gitea.io/gitea/modules/base"
 | |
| 	"code.gitea.io/gitea/modules/git"
 | |
| 	"code.gitea.io/gitea/modules/log"
 | |
| 	"code.gitea.io/gitea/modules/setting"
 | |
| 	"code.gitea.io/gitea/modules/testlogger"
 | |
| 
 | |
| 	"github.com/stretchr/testify/assert"
 | |
| 	"xorm.io/xorm"
 | |
| )
 | |
| 
 | |
// PrepareTestEnv prepares the test environment and reset the database. The skip parameter should usually be 0.
// Provide models to be sync'd with the database - in particular any models you expect fixtures to be loaded from.
//
// fixtures in `models/migrations/fixtures/<TestName>` will be loaded automatically
//
// It returns the xorm engine (nil if the database could not be reset) and a
// cleanup function that callers must invoke (usually via defer) to finish the
// test logger, close the engine, and reset the database.
func PrepareTestEnv(t *testing.T, skip int, syncModels ...interface{}) (*xorm.Engine, func()) {
	t.Helper()
	// ourSkip is forwarded to the test logger; the base of 2 accounts for this
	// helper's own stack frames, on top of any extra frames the caller requests.
	ourSkip := 2
	ourSkip += skip
	deferFn := testlogger.PrintCurrentTest(t, ourSkip)
	// Start from a pristine copy of the meta repositories.
	assert.NoError(t, os.RemoveAll(setting.RepoRootPath))
	assert.NoError(t, unittest.CopyDir(path.Join(filepath.Dir(setting.AppPath), "tests/gitea-repositories-meta"), setting.RepoRootPath))
	ownerDirs, err := os.ReadDir(setting.RepoRootPath)
	if err != nil {
		// Record the failure but keep going; ownerDirs is nil, so the loop
		// below simply does nothing.
		assert.NoError(t, err, "unable to read the new repo root: %v\n", err)
	}
	// Recreate the standard git directory skeleton inside each copied
	// repository — presumably empty directories are not preserved by the copy
	// of the meta fixtures (TODO confirm).
	for _, ownerDir := range ownerDirs {
		if !ownerDir.Type().IsDir() {
			continue
		}
		repoDirs, err := os.ReadDir(filepath.Join(setting.RepoRootPath, ownerDir.Name()))
		if err != nil {
			assert.NoError(t, err, "unable to read the new repo root: %v\n", err)
		}
		for _, repoDir := range repoDirs {
			// Best-effort mkdirs: errors are deliberately ignored.
			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "pack"), 0o755)
			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "objects", "info"), 0o755)
			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "heads"), 0o755)
			// NOTE(review): "tag" (singular) — git itself uses "refs/tags";
			// confirm this matches the intended fixture layout.
			_ = os.MkdirAll(filepath.Join(setting.RepoRootPath, ownerDir.Name(), repoDir.Name(), "refs", "tag"), 0o755)
		}
	}

	// A failed database reset is fatal for the test: return a nil engine, but
	// still hand back the log-finishing cleanup.
	if err := deleteDB(); err != nil {
		t.Errorf("unable to reset database: %v", err)
		return nil, deferFn
	}

	x, err := newXORMEngine()
	assert.NoError(t, err)
	if x != nil {
		// Wrap the existing cleanup so that, in addition to finishing the test
		// logger, it also closes the engine and drops the database. Order
		// matters: the original cleanup runs first.
		oldDefer := deferFn
		deferFn = func() {
			oldDefer()
			if err := x.Close(); err != nil {
				t.Errorf("error during close: %v", err)
			}
			if err := deleteDB(); err != nil {
				t.Errorf("unable to reset database: %v", err)
			}
		}
	}
	if err != nil {
		return x, deferFn
	}

	// Sync the requested models so that fixtures have tables to load into.
	if len(syncModels) > 0 {
		if err := x.Sync2(syncModels...); err != nil {
			t.Errorf("error during sync: %v", err)
			return x, deferFn
		}
	}

	// Per-test fixtures are optional: they are loaded only when a directory
	// named after the running test exists.
	fixturesDir := filepath.Join(filepath.Dir(setting.AppPath), "models", "migrations", "fixtures", t.Name())

	if _, err := os.Stat(fixturesDir); err == nil {
		t.Logf("initializing fixtures from: %s", fixturesDir)
		if err := unittest.InitFixtures(
			unittest.FixturesOptions{
				Dir: fixturesDir,
			}, x); err != nil {
			t.Errorf("error whilst initializing fixtures from %s: %v", fixturesDir, err)
			return x, deferFn
		}
		if err := unittest.LoadFixtures(x); err != nil {
			t.Errorf("error whilst loading fixtures from %s: %v", fixturesDir, err)
			return x, deferFn
		}
	} else if !os.IsNotExist(err) {
		t.Errorf("unexpected error whilst checking for existence of fixtures: %v", err)
	} else {
		t.Logf("no fixtures found in: %s", fixturesDir)
	}

	return x, deferFn
}
 | |
| 
 | |
| func MainTest(m *testing.M) {
 | |
| 	log.Register("test", testlogger.NewTestLogger)
 | |
| 
 | |
| 	giteaRoot := base.SetupGiteaRoot()
 | |
| 	if giteaRoot == "" {
 | |
| 		fmt.Println("Environment variable $GITEA_ROOT not set")
 | |
| 		os.Exit(1)
 | |
| 	}
 | |
| 	giteaBinary := "gitea"
 | |
| 	if runtime.GOOS == "windows" {
 | |
| 		giteaBinary += ".exe"
 | |
| 	}
 | |
| 	setting.AppPath = path.Join(giteaRoot, giteaBinary)
 | |
| 	if _, err := os.Stat(setting.AppPath); err != nil {
 | |
| 		fmt.Printf("Could not find gitea binary at %s\n", setting.AppPath)
 | |
| 		os.Exit(1)
 | |
| 	}
 | |
| 
 | |
| 	giteaConf := os.Getenv("GITEA_CONF")
 | |
| 	if giteaConf == "" {
 | |
| 		giteaConf = path.Join(filepath.Dir(setting.AppPath), "tests/sqlite.ini")
 | |
| 		fmt.Printf("Environment variable $GITEA_CONF not set - defaulting to %s\n", giteaConf)
 | |
| 	}
 | |
| 
 | |
| 	if !path.IsAbs(giteaConf) {
 | |
| 		setting.CustomConf = path.Join(giteaRoot, giteaConf)
 | |
| 	} else {
 | |
| 		setting.CustomConf = giteaConf
 | |
| 	}
 | |
| 
 | |
| 	tmpDataPath, err := os.MkdirTemp("", "data")
 | |
| 	if err != nil {
 | |
| 		fmt.Printf("Unable to create temporary data path %v\n", err)
 | |
| 		os.Exit(1)
 | |
| 	}
 | |
| 
 | |
| 	setting.AppDataPath = tmpDataPath
 | |
| 
 | |
| 	setting.SetCustomPathAndConf("", "", "")
 | |
| 	unittest.InitSettings()
 | |
| 	if err = git.InitFull(context.Background()); err != nil {
 | |
| 		fmt.Printf("Unable to InitFull: %v\n", err)
 | |
| 		os.Exit(1)
 | |
| 	}
 | |
| 	setting.LoadDBSetting()
 | |
| 	setting.InitLogs(true)
 | |
| 
 | |
| 	exitStatus := m.Run()
 | |
| 
 | |
| 	if err := removeAllWithRetry(setting.RepoRootPath); err != nil {
 | |
| 		fmt.Fprintf(os.Stderr, "os.RemoveAll: %v\n", err)
 | |
| 	}
 | |
| 	if err := removeAllWithRetry(tmpDataPath); err != nil {
 | |
| 		fmt.Fprintf(os.Stderr, "os.RemoveAll: %v\n", err)
 | |
| 	}
 | |
| 	os.Exit(exitStatus)
 | |
| }
 |