mirror of https://github.com/go-gitea/gitea (synced 2025-12-07 13:28:25 +00:00)

Merge branch 'main' into development
@@ -15,6 +15,7 @@ import (
    "code.gitea.io/gitea/models/dbfs"
    "code.gitea.io/gitea/modules/log"
    "code.gitea.io/gitea/modules/storage"
    "code.gitea.io/gitea/modules/zstd"

    runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
    "google.golang.org/protobuf/types/known/timestamppb"
@@ -28,6 +29,9 @@ const (
    defaultBufSize = MaxLineSize
)

// WriteLogs appends logs to DBFS file for temporary storage.
// It doesn't respect the file format in the filename like ".zst", since it's difficult to reopen a closed compressed file and append new content.
// Why doesn't it store logs in object storage directly? Because it's not efficient to append content to object storage.
func WriteLogs(ctx context.Context, filename string, offset int64, rows []*runnerv1.LogRow) ([]int, error) {
    flag := os.O_WRONLY
    if offset == 0 {
@@ -106,6 +110,17 @@ func ReadLogs(ctx context.Context, inStorage bool, filename string, offset, limi
    return rows, nil
}

const (
    // logZstdBlockSize is the block size for zstd compression.
    // 128KB leads the compression ratio to be close to the regular zstd compression.
    // And it means each read from the underlying object storage will be at least 128KB*(compression ratio).
    // The compression ratio is about 30% for text files, so the actual read size is about 38KB, which should be acceptable.
    logZstdBlockSize = 128 * 1024 // 128KB
)

// TransferLogs transfers logs from DBFS to object storage.
// It happens when the file is complete and no more logs will be appended.
// It respects the file format in the filename like ".zst", and compresses the content if needed.
func TransferLogs(ctx context.Context, filename string) (func(), error) {
    name := DBFSPrefix + filename
    remove := func() {
@@ -119,7 +134,26 @@ func TransferLogs(ctx context.Context, filename string) (func(), error) {
    }
    defer f.Close()

    if _, err := storage.Actions.Save(filename, f, -1); err != nil {
    var reader io.Reader = f
    if strings.HasSuffix(filename, ".zst") {
        r, w := io.Pipe()
        reader = r
        zstdWriter, err := zstd.NewSeekableWriter(w, logZstdBlockSize)
        if err != nil {
            return nil, fmt.Errorf("zstd NewSeekableWriter: %w", err)
        }
        go func() {
            defer func() {
                _ = w.CloseWithError(zstdWriter.Close())
            }()
            if _, err := io.Copy(zstdWriter, f); err != nil {
                _ = w.CloseWithError(err)
                return
            }
        }()
    }

    if _, err := storage.Actions.Save(filename, reader, -1); err != nil {
        return nil, fmt.Errorf("storage save %q: %w", filename, err)
    }
    return remove, nil
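As context for the hunk above, a minimal sketch (not part of this commit; the filename is hypothetical) of how a caller is expected to use TransferLogs once a run's log is complete:

    // The returned cleanup deletes the DBFS copy; call it only after the
    // transfer to object storage has succeeded.
    remove, err := TransferLogs(ctx, "123/456/1.log.zst") // ".zst" => compressed on the fly
    if err != nil {
        return fmt.Errorf("transfer logs: %w", err)
    }
    remove()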
@@ -150,11 +184,22 @@ func OpenLogs(ctx context.Context, inStorage bool, filename string) (io.ReadSeek
        }
        return f, nil
    }

    f, err := storage.Actions.Open(filename)
    if err != nil {
        return nil, fmt.Errorf("storage open %q: %w", filename, err)
    }
    return f, nil

    var reader io.ReadSeekCloser = f
    if strings.HasSuffix(filename, ".zst") {
        r, err := zstd.NewSeekableReader(f)
        if err != nil {
            return nil, fmt.Errorf("zstd NewSeekableReader: %w", err)
        }
        reader = r
    }

    return reader, nil
}

func FormatLog(timestamp time.Time, content string) string {

@@ -48,13 +48,10 @@ func BasicAuthDecode(encoded string) (string, string, error) {
        return "", "", err
    }

    auth := strings.SplitN(string(s), ":", 2)

    if len(auth) != 2 {
        return "", "", errors.New("invalid basic authentication")
    if username, password, ok := strings.Cut(string(s), ":"); ok {
        return username, password, nil
    }

    return auth[0], auth[1], nil
    return "", "", errors.New("invalid basic authentication")
}

// VerifyTimeLimitCode verify time limit code

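strings.Cut (Go 1.18+) replaces the SplitN-and-length-check dance here: it splits on the first separator and reports through ok whether the separator was present at all. A self-contained illustration (not part of this commit):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        user, pass, ok := strings.Cut("alice:secret", ":")
        fmt.Println(user, pass, ok) // alice secret true

        _, _, ok = strings.Cut("alice", ":")
        fmt.Println(ok) // false: no colon, i.e. the "invalid basic authentication" branch
    }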
@@ -41,6 +41,9 @@ func TestBasicAuthDecode(t *testing.T) {

    _, _, err = BasicAuthDecode("invalid")
    assert.Error(t, err)

    _, _, err = BasicAuthDecode("YWxpY2U=") // "alice", no colon
    assert.Error(t, err)
}

func TestVerifyTimeLimitCode(t *testing.T) {

@@ -3,6 +3,8 @@

package container

import "maps"

type Set[T comparable] map[T]struct{}

// SetOf creates a set and adds the specified elements to it.
@@ -29,11 +31,15 @@ func (s Set[T]) AddMultiple(values ...T) {
    }
}

// Contains determines whether a set contains the specified element.
// Contains determines whether a set contains the specified elements.
// Returns true if the set contains the specified element; otherwise, false.
func (s Set[T]) Contains(value T) bool {
    _, has := s[value]
    return has
func (s Set[T]) Contains(values ...T) bool {
    ret := true
    for _, value := range values {
        _, has := s[value]
        ret = ret && has
    }
    return ret
}

// Remove removes the specified element.
@@ -54,3 +60,12 @@ func (s Set[T]) Values() []T {
    }
    return keys
}

// Union constructs a new set that is the union of the provided sets
func (s Set[T]) Union(sets ...Set[T]) Set[T] {
    newSet := maps.Clone(s)
    for i := range sets {
        maps.Copy(newSet, sets[i])
    }
    return newSet
}

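A minimal sketch (not part of this commit) of the widened Set API after this change:

    s := container.SetOf("a", "b")
    _ = s.Contains("a")      // true
    _ = s.Contains("a", "b") // true: every argument must be present
    _ = s.Contains("a", "z") // false
    _ = s.Contains()         // true, vacuously: the loop body never runs

    // Union copies the receiver first, so neither input set is mutated.
    u := s.Union(container.SetOf("c")) // {"a", "b", "c"}
    _ = u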
46 modules/git/batch.go Normal file
@@ -0,0 +1,46 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package git

import (
    "bufio"
    "context"
)

type Batch struct {
    cancel context.CancelFunc
    Reader *bufio.Reader
    Writer WriteCloserError
}

func (repo *Repository) NewBatch(ctx context.Context) (*Batch, error) {
    // Now because of some insanity with git cat-file not immediately failing if not run in a valid git directory we need to run git rev-parse first!
    if err := ensureValidGitRepository(ctx, repo.Path); err != nil {
        return nil, err
    }

    var batch Batch
    batch.Writer, batch.Reader, batch.cancel = catFileBatch(ctx, repo.Path)
    return &batch, nil
}

func (repo *Repository) NewBatchCheck(ctx context.Context) (*Batch, error) {
    // Now because of some insanity with git cat-file not immediately failing if not run in a valid git directory we need to run git rev-parse first!
    if err := ensureValidGitRepository(ctx, repo.Path); err != nil {
        return nil, err
    }

    var check Batch
    check.Writer, check.Reader, check.cancel = catFileBatchCheck(ctx, repo.Path)
    return &check, nil
}

func (b *Batch) Close() {
    if b.cancel != nil {
        b.cancel()
        b.Reader = nil
        b.Writer = nil
        b.cancel = nil
    }
}
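A minimal sketch (not part of this commit; commitID is hypothetical) of driving a Batch by hand, the way the updated call sites below do: write an object ID to Writer, then read cat-file's header line back from Reader:

    batch, err := repo.NewBatch(ctx)
    if err != nil {
        return err
    }
    defer batch.Close()

    if _, err := batch.Writer.Write([]byte(commitID + "\n")); err != nil {
        return err
    }
    // git cat-file --batch answers "<sha> SP <type> SP <size> LF", then the raw object.
    header, err := batch.Reader.ReadString('\n')
    if err != nil {
        return err
    }
    _ = header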
@@ -26,10 +26,10 @@ type WriteCloserError interface {
    CloseWithError(err error) error
}

// EnsureValidGitRepository runs git rev-parse in the repository path - thus ensuring that the repository is a valid repository.
// ensureValidGitRepository runs git rev-parse in the repository path - thus ensuring that the repository is a valid repository.
// Run before opening git cat-file.
// This is needed otherwise the git cat-file will hang for invalid repositories.
func EnsureValidGitRepository(ctx context.Context, repoPath string) error {
func ensureValidGitRepository(ctx context.Context, repoPath string) error {
    stderr := strings.Builder{}
    err := NewCommand(ctx, "rev-parse").
        SetDescription(fmt.Sprintf("%s rev-parse [repo_path: %s]", GitExecutable, repoPath)).
@@ -43,8 +43,8 @@ func EnsureValidGitRepository(ctx context.Context, repoPath string) error {
    return nil
}

// CatFileBatchCheck opens git cat-file --batch-check in the provided repo and returns a stdin pipe, a stdout reader and cancel function
func CatFileBatchCheck(ctx context.Context, repoPath string) (WriteCloserError, *bufio.Reader, func()) {
// catFileBatchCheck opens git cat-file --batch-check in the provided repo and returns a stdin pipe, a stdout reader and cancel function
func catFileBatchCheck(ctx context.Context, repoPath string) (WriteCloserError, *bufio.Reader, func()) {
    batchStdinReader, batchStdinWriter := io.Pipe()
    batchStdoutReader, batchStdoutWriter := io.Pipe()
    ctx, ctxCancel := context.WithCancel(ctx)
@@ -93,8 +93,8 @@ func CatFileBatchCheck(ctx context.Context, repoPath string) (WriteCloserError,
    return batchStdinWriter, batchReader, cancel
}

// CatFileBatch opens git cat-file --batch in the provided repo and returns a stdin pipe, a stdout reader and cancel function
func CatFileBatch(ctx context.Context, repoPath string) (WriteCloserError, *bufio.Reader, func()) {
// catFileBatch opens git cat-file --batch in the provided repo and returns a stdin pipe, a stdout reader and cancel function
func catFileBatch(ctx context.Context, repoPath string) (WriteCloserError, *bufio.Reader, func()) {
    // We often want to feed the commits in order into cat-file --batch, followed by their trees and sub trees as necessary.
    // so let's create a batch stdin and stdout
    batchStdinReader, batchStdinWriter := io.Pipe()

@@ -14,6 +14,11 @@ func TestReadingBlameOutputSha256(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    if isGogit {
        t.Skip("Skipping test since gogit does not support sha256")
        return
    }

    t.Run("Without .git-blame-ignore-revs", func(t *testing.T) {
        repo, err := OpenRepository(ctx, "./tests/repos/repo5_pulls_sha256")
        assert.NoError(t, err)

@@ -26,9 +26,12 @@ type Blob struct {
// DataAsync gets a ReadCloser for the contents of a blob without reading it all.
// Calling the Close function on the result will discard all unread output.
func (b *Blob) DataAsync() (io.ReadCloser, error) {
    wr, rd, cancel := b.repo.CatFileBatch(b.repo.Ctx)
    wr, rd, cancel, err := b.repo.CatFileBatch(b.repo.Ctx)
    if err != nil {
        return nil, err
    }

    _, err := wr.Write([]byte(b.ID.String() + "\n"))
    _, err = wr.Write([]byte(b.ID.String() + "\n"))
    if err != nil {
        cancel()
        return nil, err
@@ -64,9 +67,13 @@ func (b *Blob) Size() int64 {
        return b.size
    }

    wr, rd, cancel := b.repo.CatFileBatchCheck(b.repo.Ctx)
    wr, rd, cancel, err := b.repo.CatFileBatchCheck(b.repo.Ctx)
    if err != nil {
        log.Debug("error whilst reading size for %s in %s. Error: %v", b.ID.String(), b.repo.Path, err)
        return 0
    }
    defer cancel()
    _, err := wr.Write([]byte(b.ID.String() + "\n"))
    _, err = wr.Write([]byte(b.ID.String() + "\n"))
    if err != nil {
        log.Debug("error whilst reading size for %s in %s. Error: %v", b.ID.String(), b.repo.Path, err)
        return 0

@@ -124,7 +124,10 @@ func GetLastCommitForPaths(ctx context.Context, commit *Commit, treePath string,
        return nil, err
    }

    batchStdinWriter, batchReader, cancel := commit.repo.CatFileBatch(ctx)
    batchStdinWriter, batchReader, cancel, err := commit.repo.CatFileBatch(ctx)
    if err != nil {
        return nil, err
    }
    defer cancel()

    commitsMap := map[string]*Commit{}

@@ -4,6 +4,8 @@
package git

import (
    "context"
    "os"
    "path/filepath"
    "strings"
    "testing"
@@ -345,3 +347,18 @@ func TestGetCommitFileStatusMerges(t *testing.T) {
    assert.Equal(t, commitFileStatus.Removed, expected.Removed)
    assert.Equal(t, commitFileStatus.Modified, expected.Modified)
}

func Test_GetCommitBranchStart(t *testing.T) {
    bareRepo1Path := filepath.Join(testReposDir, "repo1_bare")
    repo, err := OpenRepository(context.Background(), bareRepo1Path)
    assert.NoError(t, err)
    defer repo.Close()
    commit, err := repo.GetBranchCommit("branch1")
    assert.NoError(t, err)
    assert.EqualValues(t, "2839944139e0de9737a044f78b0e4b40d989a9e3", commit.ID.String())

    startCommitID, err := repo.GetCommitBranchStart(os.Environ(), "branch1", commit.ID.String())
    assert.NoError(t, err)
    assert.NotEmpty(t, startCommitID)
    assert.EqualValues(t, "9c9aef8dd84e02bc7ec12641deb4c930a7c30185", startCommitID)
}

@@ -271,7 +271,17 @@ func CutDiffAroundLine(originalDiff io.Reader, line int64, old bool, numbersOfLi
}

// GetAffectedFiles returns the affected files between two commits
func GetAffectedFiles(repo *Repository, oldCommitID, newCommitID string, env []string) ([]string, error) {
func GetAffectedFiles(repo *Repository, branchName, oldCommitID, newCommitID string, env []string) ([]string, error) {
    if oldCommitID == emptySha1ObjectID.String() || oldCommitID == emptySha256ObjectID.String() {
        startCommitID, err := repo.GetCommitBranchStart(env, branchName, newCommitID)
        if err != nil {
            return nil, err
        }
        if startCommitID == "" {
            return nil, fmt.Errorf("cannot find the start commit of %s", newCommitID)
        }
        oldCommitID = startCommitID
    }
    stdoutReader, stdoutWriter, err := os.Pipe()
    if err != nil {
        log.Error("Unable to create os.Pipe for %s", repo.Path)

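A sketch of the new call shape (not part of this commit; repository and commit values are hypothetical). When a push creates a branch, the old commit ID is the all-zeros OID, and GetAffectedFiles now resolves a real base via GetCommitBranchStart instead of diffing against the empty object:

    oldCommitID := strings.Repeat("0", 40) // zero OID for a newly created branch (SHA-1 repos)
    files, err := git.GetAffectedFiles(gitRepo, "feature-1", oldCommitID, newCommitID, os.Environ())
    if err != nil {
        return err
    }
    // files now covers only the commits unique to "feature-1",
    // not all history reachable from newCommitID.
    _ = files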
@@ -46,7 +46,10 @@ func FindLFSFile(repo *git.Repository, objectID git.ObjectID) ([]*LFSResult, err

    // Next feed the commits in order into cat-file --batch, followed by their trees and sub trees as necessary.
    // so let's create a batch stdin and stdout
    batchStdinWriter, batchReader, cancel := repo.CatFileBatch(repo.Ctx)
    batchStdinWriter, batchReader, cancel, err := repo.CatFileBatch(repo.Ctx)
    if err != nil {
        return nil, err
    }
    defer cancel()

    // We'll use a scanner for the revList because it's simpler than a bufio.Reader

@@ -25,15 +25,11 @@ type Repository struct {

    gpgSettings *GPGSettings

    batchInUse  bool
    batchCancel context.CancelFunc
    batchReader *bufio.Reader
    batchWriter WriteCloserError
    batchInUse bool
    batch      *Batch

    checkInUse  bool
    checkCancel context.CancelFunc
    checkReader *bufio.Reader
    checkWriter WriteCloserError
    checkInUse bool
    check      *Batch

    Ctx             context.Context
    LastCommitCache *LastCommitCache
@@ -55,63 +51,75 @@ func OpenRepository(ctx context.Context, repoPath string) (*Repository, error) {
        return nil, util.NewNotExistErrorf("no such file or directory")
    }

    // Now because of some insanity with git cat-file not immediately failing if not run in a valid git directory we need to run git rev-parse first!
    if err := EnsureValidGitRepository(ctx, repoPath); err != nil {
        return nil, err
    }

    repo := &Repository{
    return &Repository{
        Path:     repoPath,
        tagCache: newObjectCache(),
        Ctx:      ctx,
    }

    repo.batchWriter, repo.batchReader, repo.batchCancel = CatFileBatch(ctx, repoPath)
    repo.checkWriter, repo.checkReader, repo.checkCancel = CatFileBatchCheck(ctx, repoPath)

    return repo, nil
    }, nil
}

// CatFileBatch obtains a CatFileBatch for this repository
func (repo *Repository) CatFileBatch(ctx context.Context) (WriteCloserError, *bufio.Reader, func()) {
    if repo.batchCancel == nil || repo.batchInUse {
        log.Debug("Opening temporary cat file batch for: %s", repo.Path)
        return CatFileBatch(ctx, repo.Path)
func (repo *Repository) CatFileBatch(ctx context.Context) (WriteCloserError, *bufio.Reader, func(), error) {
    if repo.batch == nil {
        var err error
        repo.batch, err = repo.NewBatch(ctx)
        if err != nil {
            return nil, nil, nil, err
        }
    }
    repo.batchInUse = true
    return repo.batchWriter, repo.batchReader, func() {
        repo.batchInUse = false

    if !repo.batchInUse {
        repo.batchInUse = true
        return repo.batch.Writer, repo.batch.Reader, func() {
            repo.batchInUse = false
        }, nil
    }

    log.Debug("Opening temporary cat file batch for: %s", repo.Path)
    tempBatch, err := repo.NewBatch(ctx)
    if err != nil {
        return nil, nil, nil, err
    }
    return tempBatch.Writer, tempBatch.Reader, tempBatch.Close, nil
}

// CatFileBatchCheck obtains a CatFileBatchCheck for this repository
func (repo *Repository) CatFileBatchCheck(ctx context.Context) (WriteCloserError, *bufio.Reader, func()) {
    if repo.checkCancel == nil || repo.checkInUse {
        log.Debug("Opening temporary cat file batch-check for: %s", repo.Path)
        return CatFileBatchCheck(ctx, repo.Path)
func (repo *Repository) CatFileBatchCheck(ctx context.Context) (WriteCloserError, *bufio.Reader, func(), error) {
    if repo.check == nil {
        var err error
        repo.check, err = repo.NewBatchCheck(ctx)
        if err != nil {
            return nil, nil, nil, err
        }
    }
    repo.checkInUse = true
    return repo.checkWriter, repo.checkReader, func() {
        repo.checkInUse = false

    if !repo.checkInUse {
        repo.checkInUse = true
        return repo.check.Writer, repo.check.Reader, func() {
            repo.checkInUse = false
        }, nil
    }

    log.Debug("Opening temporary cat file batch-check for: %s", repo.Path)
    tempBatchCheck, err := repo.NewBatchCheck(ctx)
    if err != nil {
        return nil, nil, nil, err
    }
    return tempBatchCheck.Writer, tempBatchCheck.Reader, tempBatchCheck.Close, nil
}

func (repo *Repository) Close() error {
    if repo == nil {
        return nil
    }
    if repo.batchCancel != nil {
        repo.batchCancel()
        repo.batchReader = nil
        repo.batchWriter = nil
        repo.batchCancel = nil
    if repo.batch != nil {
        repo.batch.Close()
        repo.batch = nil
        repo.batchInUse = false
    }
    if repo.checkCancel != nil {
        repo.checkCancel()
        repo.checkCancel = nil
        repo.checkReader = nil
        repo.checkWriter = nil
    if repo.check != nil {
        repo.check.Close()
        repo.check = nil
        repo.checkInUse = false
    }
    repo.LastCommitCache = nil

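Since CatFileBatch and CatFileBatchCheck now return an error as a fourth value, every call site in the hunks below follows the same idiom; a minimal sketch (not part of this commit):

    wr, rd, cancel, err := repo.CatFileBatch(ctx)
    if err != nil {
        return nil, err
    }
    // cancel either marks the shared batch as free again or closes a
    // temporary one; it is cheap and safe to defer.
    defer cancel()

    if _, err := wr.Write([]byte(id.String() + "\n")); err != nil {
        return nil, err
    }
    // ... parse the response from rd ...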
@@ -14,30 +14,35 @@ import (
    "github.com/go-git/go-git/v5/plumbing/storer"
)

// IsObjectExist returns true if given reference exists in the repository.
// IsObjectExist returns true if the given object exists in the repository.
// FIXME: Inconsistent behavior with nogogit edition
// Unlike the implementation of IsObjectExist in nogogit edition, it does not support short hashes here.
// For example, IsObjectExist("153f451") will return false, but it will return true in nogogit edition.
// To fix this, the solution could be adding support for short hashes in gogit edition if it's really needed.
func (repo *Repository) IsObjectExist(name string) bool {
    if name == "" {
        return false
    }

    _, err := repo.gogitRepo.Object(plumbing.AnyObject, plumbing.NewHash(name))
    return err == nil
}

// IsReferenceExist returns true if given reference exists in the repository.
// FIXME: Inconsistent behavior with nogogit edition
// Unlike the implementation of IsObjectExist in nogogit edition, it does not support blob hashes here.
// For example, IsObjectExist([existing_blob_hash]) will return false, but it will return true in nogogit edition.
// To fix this, the solution could be refusing to support blob hashes in nogogit edition since a blob hash is not a reference.
func (repo *Repository) IsReferenceExist(name string) bool {
    if name == "" {
        return false
    }

    _, err := repo.gogitRepo.ResolveRevision(plumbing.Revision(name))

    return err == nil
}

// IsReferenceExist returns true if given reference exists in the repository.
func (repo *Repository) IsReferenceExist(name string) bool {
    if name == "" {
        return false
    }

    reference, err := repo.gogitRepo.Reference(plumbing.ReferenceName(name), true)
    if err != nil {
        return false
    }
    return reference.Type() != plumbing.InvalidReference
}

// IsBranchExist returns true if given branch exists in current repository.
func (repo *Repository) IsBranchExist(name string) bool {
    if name == "" {

@@ -16,15 +16,19 @@ import (
    "code.gitea.io/gitea/modules/log"
)

// IsObjectExist returns true if given reference exists in the repository.
// IsObjectExist returns true if the given object exists in the repository.
func (repo *Repository) IsObjectExist(name string) bool {
    if name == "" {
        return false
    }

    wr, rd, cancel := repo.CatFileBatchCheck(repo.Ctx)
    wr, rd, cancel, err := repo.CatFileBatchCheck(repo.Ctx)
    if err != nil {
        log.Debug("Error writing to CatFileBatchCheck %v", err)
        return false
    }
    defer cancel()
    _, err := wr.Write([]byte(name + "\n"))
    _, err = wr.Write([]byte(name + "\n"))
    if err != nil {
        log.Debug("Error writing to CatFileBatchCheck %v", err)
        return false
@@ -39,9 +43,13 @@ func (repo *Repository) IsReferenceExist(name string) bool {
        return false
    }

    wr, rd, cancel := repo.CatFileBatchCheck(repo.Ctx)
    wr, rd, cancel, err := repo.CatFileBatchCheck(repo.Ctx)
    if err != nil {
        log.Debug("Error writing to CatFileBatchCheck %v", err)
        return false
    }
    defer cancel()
    _, err := wr.Write([]byte(name + "\n"))
    _, err = wr.Write([]byte(name + "\n"))
    if err != nil {
        log.Debug("Error writing to CatFileBatchCheck %v", err)
        return false

@@ -8,6 +8,7 @@ import (
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestRepository_GetBranches(t *testing.T) {
@@ -94,3 +95,107 @@ func BenchmarkGetRefsBySha(b *testing.B) {
    _, _ = bareRepo5.GetRefsBySha("c83380d7056593c51a699d12b9c00627bd5743e9", "")
    _, _ = bareRepo5.GetRefsBySha("58a4bcc53ac13e7ff76127e0fb518b5262bf09af", "")
}

func TestRepository_IsObjectExist(t *testing.T) {
    repo, err := openRepositoryWithDefaultContext(filepath.Join(testReposDir, "repo1_bare"))
    require.NoError(t, err)
    defer repo.Close()

    // FIXME: Inconsistent behavior between gogit and nogogit editions
    // See the comment of IsObjectExist in gogit edition for more details.
    supportShortHash := !isGogit

    tests := []struct {
        name string
        arg  string
        want bool
    }{
        {
            name: "empty",
            arg:  "",
            want: false,
        },
        {
            name: "branch",
            arg:  "master",
            want: false,
        },
        {
            name: "commit hash",
            arg:  "ce064814f4a0d337b333e646ece456cd39fab612",
            want: true,
        },
        {
            name: "short commit hash",
            arg:  "ce06481",
            want: supportShortHash,
        },
        {
            name: "blob hash",
            arg:  "153f451b9ee7fa1da317ab17a127e9fd9d384310",
            want: true,
        },
        {
            name: "short blob hash",
            arg:  "153f451",
            want: supportShortHash,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            assert.Equal(t, tt.want, repo.IsObjectExist(tt.arg))
        })
    }
}

func TestRepository_IsReferenceExist(t *testing.T) {
    repo, err := openRepositoryWithDefaultContext(filepath.Join(testReposDir, "repo1_bare"))
    require.NoError(t, err)
    defer repo.Close()

    // FIXME: Inconsistent behavior between gogit and nogogit editions
    // See the comment of IsReferenceExist in gogit edition for more details.
    supportBlobHash := !isGogit

    tests := []struct {
        name string
        arg  string
        want bool
    }{
        {
            name: "empty",
            arg:  "",
            want: false,
        },
        {
            name: "branch",
            arg:  "master",
            want: true,
        },
        {
            name: "commit hash",
            arg:  "ce064814f4a0d337b333e646ece456cd39fab612",
            want: true,
        },
        {
            name: "short commit hash",
            arg:  "ce06481",
            want: true,
        },
        {
            name: "blob hash",
            arg:  "153f451b9ee7fa1da317ab17a127e9fd9d384310",
            want: supportBlobHash,
        },
        {
            name: "short blob hash",
            arg:  "153f451",
            want: supportBlobHash,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            assert.Equal(t, tt.want, repo.IsReferenceExist(tt.arg))
        })
    }
}

@@ -7,6 +7,7 @@ package git
import (
    "bytes"
    "io"
    "os"
    "strconv"
    "strings"

@@ -414,7 +415,7 @@ func (repo *Repository) commitsBefore(id ObjectID, limit int) ([]*Commit, error)

    commits := make([]*Commit, 0, len(formattedLog))
    for _, commit := range formattedLog {
        branches, err := repo.getBranches(commit, 2)
        branches, err := repo.getBranches(os.Environ(), commit.ID.String(), 2)
        if err != nil {
            return nil, err
        }
@@ -437,12 +438,15 @@ func (repo *Repository) getCommitsBeforeLimit(id ObjectID, num int) ([]*Commit,
    return repo.commitsBefore(id, num)
}

func (repo *Repository) getBranches(commit *Commit, limit int) ([]string, error) {
func (repo *Repository) getBranches(env []string, commitID string, limit int) ([]string, error) {
    if DefaultFeatures().CheckVersionAtLeast("2.7.0") {
        stdout, _, err := NewCommand(repo.Ctx, "for-each-ref", "--format=%(refname:strip=2)").
            AddOptionFormat("--count=%d", limit).
            AddOptionValues("--contains", commit.ID.String(), BranchPrefix).
            RunStdString(&RunOpts{Dir: repo.Path})
            AddOptionValues("--contains", commitID, BranchPrefix).
            RunStdString(&RunOpts{
                Dir: repo.Path,
                Env: env,
            })
        if err != nil {
            return nil, err
        }
@@ -451,7 +455,10 @@ func (repo *Repository) getBranches(commit *Commit, limit int) ([]string, error)
        return branches, nil
    }

    stdout, _, err := NewCommand(repo.Ctx, "branch").AddOptionValues("--contains", commit.ID.String()).RunStdString(&RunOpts{Dir: repo.Path})
    stdout, _, err := NewCommand(repo.Ctx, "branch").AddOptionValues("--contains", commitID).RunStdString(&RunOpts{
        Dir: repo.Path,
        Env: env,
    })
    if err != nil {
        return nil, err
    }
@@ -513,3 +520,35 @@ func (repo *Repository) AddLastCommitCache(cacheKey, fullName, sha string) error
    }
    return nil
}

func (repo *Repository) GetCommitBranchStart(env []string, branch, endCommitID string) (string, error) {
    cmd := NewCommand(repo.Ctx, "log", prettyLogFormat)
    cmd.AddDynamicArguments(endCommitID)

    stdout, _, runErr := cmd.RunStdBytes(&RunOpts{
        Dir: repo.Path,
        Env: env,
    })
    if runErr != nil {
        return "", runErr
    }

    parts := bytes.Split(bytes.TrimSpace(stdout), []byte{'\n'})

    var startCommitID string
    for _, commitID := range parts {
        branches, err := repo.getBranches(env, string(commitID), 2)
        if err != nil {
            return "", err
        }
        for _, b := range branches {
            if b != branch {
                return startCommitID, nil
            }
        }

        startCommitID = string(commitID)
    }

    return "", nil
}

@@ -33,9 +33,12 @@ func (repo *Repository) ResolveReference(name string) (string, error) {

// GetRefCommitID returns the last commit ID string of given reference (branch or tag).
func (repo *Repository) GetRefCommitID(name string) (string, error) {
    wr, rd, cancel := repo.CatFileBatchCheck(repo.Ctx)
    wr, rd, cancel, err := repo.CatFileBatchCheck(repo.Ctx)
    if err != nil {
        return "", err
    }
    defer cancel()
    _, err := wr.Write([]byte(name + "\n"))
    _, err = wr.Write([]byte(name + "\n"))
    if err != nil {
        return "", err
    }
@@ -61,12 +64,19 @@ func (repo *Repository) RemoveReference(name string) error {

// IsCommitExist returns true if given commit exists in current repository.
func (repo *Repository) IsCommitExist(name string) bool {
    if err := ensureValidGitRepository(repo.Ctx, repo.Path); err != nil {
        log.Error("IsCommitExist: %v", err)
        return false
    }
    _, _, err := NewCommand(repo.Ctx, "cat-file", "-e").AddDynamicArguments(name).RunStdString(&RunOpts{Dir: repo.Path})
    return err == nil
}

func (repo *Repository) getCommit(id ObjectID) (*Commit, error) {
    wr, rd, cancel := repo.CatFileBatch(repo.Ctx)
    wr, rd, cancel, err := repo.CatFileBatch(repo.Ctx)
    if err != nil {
        return nil, err
    }
    defer cancel()

    _, _ = wr.Write([]byte(id.String() + "\n"))
@@ -143,7 +153,10 @@ func (repo *Repository) ConvertToGitID(commitID string) (ObjectID, error) {
        }
    }

    wr, rd, cancel := repo.CatFileBatchCheck(repo.Ctx)
    wr, rd, cancel, err := repo.CatFileBatchCheck(repo.Ctx)
    if err != nil {
        return nil, err
    }
    defer cancel()
    _, err = wr.Write([]byte(commitID + "\n"))
    if err != nil {

@@ -4,6 +4,7 @@
package git

import (
    "os"
    "path/filepath"
    "testing"

@@ -31,7 +32,7 @@ func TestRepository_GetCommitBranches(t *testing.T) {
    for _, testCase := range testCases {
        commit, err := bareRepo1.GetCommit(testCase.CommitID)
        assert.NoError(t, err)
        branches, err := bareRepo1.getBranches(commit, 2)
        branches, err := bareRepo1.getBranches(os.Environ(), commit.ID.String(), 2)
        assert.NoError(t, err)
        assert.Equal(t, testCase.ExpectedBranches, branches)
    }

@@ -20,7 +20,10 @@ import (
func (repo *Repository) GetLanguageStats(commitID string) (map[string]int64, error) {
    // We will feed the commit IDs in order into cat-file --batch, followed by blobs as necessary.
    // so let's create a batch stdin and stdout
    batchStdinWriter, batchReader, cancel := repo.CatFileBatch(repo.Ctx)
    batchStdinWriter, batchReader, cancel, err := repo.CatFileBatch(repo.Ctx)
    if err != nil {
        return nil, err
    }
    defer cancel()

    writeID := func(id string) error {

@@ -31,9 +31,12 @@ func (repo *Repository) GetTags(skip, limit int) (tags []string, err error) {

// GetTagType gets the type of the tag, either commit (simple) or tag (annotated)
func (repo *Repository) GetTagType(id ObjectID) (string, error) {
    wr, rd, cancel := repo.CatFileBatchCheck(repo.Ctx)
    wr, rd, cancel, err := repo.CatFileBatchCheck(repo.Ctx)
    if err != nil {
        return "", err
    }
    defer cancel()
    _, err := wr.Write([]byte(id.String() + "\n"))
    _, err = wr.Write([]byte(id.String() + "\n"))
    if err != nil {
        return "", err
    }
@@ -89,7 +92,10 @@ func (repo *Repository) getTag(tagID ObjectID, name string) (*Tag, error) {
    }

    // The tag is an annotated tag with a message.
    wr, rd, cancel := repo.CatFileBatch(repo.Ctx)
    wr, rd, cancel, err := repo.CatFileBatch(repo.Ctx)
    if err != nil {
        return nil, err
    }
    defer cancel()

    if _, err := wr.Write([]byte(tagID.String() + "\n")); err != nil {

@@ -6,11 +6,20 @@

package git

import "github.com/go-git/go-git/v5/plumbing"
import (
    "errors"

    "github.com/go-git/go-git/v5/plumbing"
)

func (repo *Repository) getTree(id ObjectID) (*Tree, error) {
    gogitTree, err := repo.gogitRepo.TreeObject(plumbing.Hash(id.RawValue()))
    if err != nil {
        if errors.Is(err, plumbing.ErrObjectNotFound) {
            return nil, ErrNotExist{
                ID: id.String(),
            }
        }
        return nil, err
    }

@@ -10,7 +10,10 @@ import (
)

func (repo *Repository) getTree(id ObjectID) (*Tree, error) {
    wr, rd, cancel := repo.CatFileBatch(repo.Ctx)
    wr, rd, cancel, err := repo.CatFileBatch(repo.Ctx)
    if err != nil {
        return nil, err
    }
    defer cancel()

    _, _ = wr.Write([]byte(id.String() + "\n"))

@@ -42,9 +42,13 @@ func (te *TreeEntry) Size() int64 {
        return te.size
    }

    wr, rd, cancel := te.ptree.repo.CatFileBatchCheck(te.ptree.repo.Ctx)
    wr, rd, cancel, err := te.ptree.repo.CatFileBatchCheck(te.ptree.repo.Ctx)
    if err != nil {
        log.Debug("error whilst reading size for %s in %s. Error: %v", te.ID.String(), te.ptree.repo.Path, err)
        return 0
    }
    defer cancel()
    _, err := wr.Write([]byte(te.ID.String() + "\n"))
    _, err = wr.Write([]byte(te.ID.String() + "\n"))
    if err != nil {
        log.Debug("error whilst reading size for %s in %s. Error: %v", te.ID.String(), te.ptree.repo.Path, err)
        return 0
@@ -33,7 +33,10 @@ func (t *Tree) ListEntries() (Entries, error) {
    }

    if t.repo != nil {
        wr, rd, cancel := t.repo.CatFileBatch(t.repo.Ctx)
        wr, rd, cancel, err := t.repo.CatFileBatch(t.repo.Ctx)
        if err != nil {
            return nil, err
        }
        defer cancel()

        _, _ = wr.Write([]byte(t.ID.String() + "\n"))

66 modules/globallock/globallock.go Normal file
@@ -0,0 +1,66 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package globallock

import (
    "context"
    "sync"
)

var (
    defaultLocker Locker
    initOnce      sync.Once
    initFunc      = func() {
        // TODO: read the setting and initialize the default locker.
        // Before implementing this, don't use it.
    } // define initFunc as a variable to make it possible to change it in tests
)

// DefaultLocker returns the default locker.
func DefaultLocker() Locker {
    initOnce.Do(func() {
        initFunc()
    })
    return defaultLocker
}

// Lock tries to acquire a lock for the given key, it uses the default locker.
// Read the documentation of Locker.Lock for more information about the behavior.
func Lock(ctx context.Context, key string) (ReleaseFunc, error) {
    return DefaultLocker().Lock(ctx, key)
}

// TryLock tries to acquire a lock for the given key, it uses the default locker.
// Read the documentation of Locker.TryLock for more information about the behavior.
func TryLock(ctx context.Context, key string) (bool, ReleaseFunc, error) {
    return DefaultLocker().TryLock(ctx, key)
}

// LockAndDo tries to acquire a lock for the given key and then calls the given function.
// It uses the default locker, and it will return an error if failed to acquire the lock.
func LockAndDo(ctx context.Context, key string, f func(context.Context) error) error {
    release, err := Lock(ctx, key)
    if err != nil {
        return err
    }
    defer release()

    return f(ctx)
}

// TryLockAndDo tries to acquire a lock for the given key and then calls the given function.
// It uses the default locker, and it will return false if failed to acquire the lock.
func TryLockAndDo(ctx context.Context, key string, f func(context.Context) error) (bool, error) {
    ok, release, err := TryLock(ctx, key)
    if err != nil {
        return false, err
    }
    defer release()

    if !ok {
        return false, nil
    }

    return true, f(ctx)
}
96 modules/globallock/globallock_test.go Normal file
@@ -0,0 +1,96 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package globallock

import (
    "context"
    "os"
    "sync"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestLockAndDo(t *testing.T) {
    t.Run("redis", func(t *testing.T) {
        url := "redis://127.0.0.1:6379/0"
        if os.Getenv("CI") == "" {
            // Make it possible to run tests against a local redis instance
            url = os.Getenv("TEST_REDIS_URL")
            if url == "" {
                t.Skip("TEST_REDIS_URL not set and not running in CI")
                return
            }
        }

        oldDefaultLocker := defaultLocker
        oldInitFunc := initFunc
        defer func() {
            defaultLocker = oldDefaultLocker
            initFunc = oldInitFunc
            if defaultLocker == nil {
                initOnce = sync.Once{}
            }
        }()

        initOnce = sync.Once{}
        initFunc = func() {
            defaultLocker = NewRedisLocker(url)
        }

        testLockAndDo(t)
        require.NoError(t, defaultLocker.(*redisLocker).Close())
    })
    t.Run("memory", func(t *testing.T) {
        oldDefaultLocker := defaultLocker
        oldInitFunc := initFunc
        defer func() {
            defaultLocker = oldDefaultLocker
            initFunc = oldInitFunc
            if defaultLocker == nil {
                initOnce = sync.Once{}
            }
        }()

        initOnce = sync.Once{}
        initFunc = func() {
            defaultLocker = NewMemoryLocker()
        }

        testLockAndDo(t)
    })
}

func testLockAndDo(t *testing.T) {
    const concurrency = 1000

    ctx := context.Background()
    count := 0
    wg := sync.WaitGroup{}
    wg.Add(concurrency)
    for i := 0; i < concurrency; i++ {
        go func() {
            defer wg.Done()
            err := LockAndDo(ctx, "test", func(ctx context.Context) error {
                count++

                // It's impossible to acquire the lock inside the function
                ok, err := TryLockAndDo(ctx, "test", func(ctx context.Context) error {
                    assert.Fail(t, "should not acquire the lock")
                    return nil
                })
                assert.False(t, ok)
                assert.NoError(t, err)

                return nil
            })
            require.NoError(t, err)
        }()
    }

    wg.Wait()

    assert.Equal(t, concurrency, count)
}
38 modules/globallock/locker.go Normal file
@@ -0,0 +1,38 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package globallock

import (
    "context"
)

type Locker interface {
    // Lock tries to acquire a lock for the given key, it blocks until the lock is acquired or the context is canceled.
    //
    // Lock returns a ReleaseFunc to release the lock, it cannot be nil.
    // It's always safe to call this function even if it fails to acquire the lock, and it will do nothing in that case.
    // And it's also safe to call it multiple times, but it will only release the lock once.
    // That's why it's called ReleaseFunc, not UnlockFunc.
    // But be aware that it's not safe to not call it at all; it could lead to a memory leak.
    // So a recommended pattern is to use defer to call it:
    //   release, err := locker.Lock(ctx, "key")
    //   if err != nil {
    //     return err
    //   }
    //   defer release()
    //
    // Lock returns an error if failed to acquire the lock.
    // Be aware that even the context is not canceled, it's still possible to fail to acquire the lock.
    // For example, redis is down, or it reached the maximum number of tries.
    Lock(ctx context.Context, key string) (ReleaseFunc, error)

    // TryLock tries to acquire a lock for the given key, it returns immediately.
    // It follows the same pattern as Lock, but it doesn't block.
    // And if it fails to acquire the lock because it's already locked, not other reasons like redis is down,
    // it will return false without any error.
    TryLock(ctx context.Context, key string) (bool, ReleaseFunc, error)
}

// ReleaseFunc is a function that releases a lock.
type ReleaseFunc func()
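The Lock doc comment above shows the blocking pattern; for symmetry, a minimal sketch (not part of this commit; the key is hypothetical) of the non-blocking TryLock:

    ok, release, err := locker.TryLock(ctx, "repo-42-mirror")
    if err != nil {
        return err // infrastructure failure, e.g. redis unreachable
    }
    defer release() // safe even when ok is false
    if !ok {
        return nil // someone else holds the lock; skip this round
    }
    // ... work that must not run concurrently ...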
181 modules/globallock/locker_test.go Normal file
@@ -0,0 +1,181 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package globallock

import (
    "context"
    "os"
    "sync"
    "testing"
    "time"

    "github.com/go-redsync/redsync/v4"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestLocker(t *testing.T) {
    t.Run("redis", func(t *testing.T) {
        url := "redis://127.0.0.1:6379/0"
        if os.Getenv("CI") == "" {
            // Make it possible to run tests against a local redis instance
            url = os.Getenv("TEST_REDIS_URL")
            if url == "" {
                t.Skip("TEST_REDIS_URL not set and not running in CI")
                return
            }
        }
        oldExpiry := redisLockExpiry
        redisLockExpiry = 5 * time.Second // make it shorter for testing
        defer func() {
            redisLockExpiry = oldExpiry
        }()

        locker := NewRedisLocker(url)
        testLocker(t, locker)
        testRedisLocker(t, locker.(*redisLocker))
        require.NoError(t, locker.(*redisLocker).Close())
    })
    t.Run("memory", func(t *testing.T) {
        locker := NewMemoryLocker()
        testLocker(t, locker)
        testMemoryLocker(t, locker.(*memoryLocker))
    })
}

func testLocker(t *testing.T, locker Locker) {
    t.Run("lock", func(t *testing.T) {
        parentCtx := context.Background()
        release, err := locker.Lock(parentCtx, "test")
        defer release()

        assert.NoError(t, err)

        func() {
            ctx, cancel := context.WithTimeout(context.Background(), time.Second)
            defer cancel()
            release, err := locker.Lock(ctx, "test")
            defer release()

            assert.Error(t, err)
        }()

        release()

        func() {
            release, err := locker.Lock(context.Background(), "test")
            defer release()

            assert.NoError(t, err)
        }()
    })

    t.Run("try lock", func(t *testing.T) {
        parentCtx := context.Background()
        ok, release, err := locker.TryLock(parentCtx, "test")
        defer release()

        assert.True(t, ok)
        assert.NoError(t, err)

        func() {
            ctx, cancel := context.WithTimeout(context.Background(), time.Second)
            defer cancel()
            ok, release, err := locker.TryLock(ctx, "test")
            defer release()

            assert.False(t, ok)
            assert.NoError(t, err)
        }()

        release()

        func() {
            ok, release, _ := locker.TryLock(context.Background(), "test")
            defer release()

            assert.True(t, ok)
        }()
    })

    t.Run("wait and acquired", func(t *testing.T) {
        ctx := context.Background()
        release, err := locker.Lock(ctx, "test")
        require.NoError(t, err)

        wg := &sync.WaitGroup{}
        wg.Add(1)
        go func() {
            defer wg.Done()
            started := time.Now()
            release, err := locker.Lock(context.Background(), "test") // should be blocked for seconds
            defer release()
            assert.Greater(t, time.Since(started), time.Second)
            assert.NoError(t, err)
        }()

        time.Sleep(2 * time.Second)
        release()

        wg.Wait()
    })

    t.Run("multiple release", func(t *testing.T) {
        ctx := context.Background()

        release1, err := locker.Lock(ctx, "test")
        require.NoError(t, err)

        release1()

        release2, err := locker.Lock(ctx, "test")
        defer release2()
        require.NoError(t, err)

        // Call release1 again,
        // it should not panic or block,
        // and it shouldn't affect the other lock
        release1()

        ok, release3, err := locker.TryLock(ctx, "test")
        defer release3()
        require.NoError(t, err)
        // It should NOT be able to acquire the lock;
        // otherwise, it means the lock has been released by release1
        assert.False(t, ok)
    })
}

// testMemoryLocker does specific tests for memoryLocker
func testMemoryLocker(t *testing.T, locker *memoryLocker) {
    // nothing to do
}

// testRedisLocker does specific tests for redisLocker
func testRedisLocker(t *testing.T, locker *redisLocker) {
    defer func() {
        // This case should be tested at the end.
        // Otherwise, it will affect other tests.
        t.Run("close", func(t *testing.T) {
            assert.NoError(t, locker.Close())
            _, err := locker.Lock(context.Background(), "test")
            assert.Error(t, err)
        })
    }()

    t.Run("failed extend", func(t *testing.T) {
        release, err := locker.Lock(context.Background(), "test")
        defer release()
        require.NoError(t, err)

        // It simulates that there are some problems with extending like network issues or redis server down.
        v, ok := locker.mutexM.Load("test")
        require.True(t, ok)
        m := v.(*redsync.Mutex)
        _, _ = m.Unlock() // release it to make it impossible to extend

        // In current design, callers can't know the lock can't be extended.
        // Just keep this case to improve the test coverage.
    })
}
67 modules/globallock/memory_locker.go Normal file
@@ -0,0 +1,67 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package globallock

import (
    "context"
    "sync"
    "time"
)

type memoryLocker struct {
    locks sync.Map
}

var _ Locker = &memoryLocker{}

func NewMemoryLocker() Locker {
    return &memoryLocker{}
}

func (l *memoryLocker) Lock(ctx context.Context, key string) (ReleaseFunc, error) {
    if l.tryLock(key) {
        releaseOnce := sync.Once{}
        return func() {
            releaseOnce.Do(func() {
                l.locks.Delete(key)
            })
        }, nil
    }

    ticker := time.NewTicker(time.Millisecond * 100)
    defer ticker.Stop()
    for {
        select {
        case <-ctx.Done():
            return func() {}, ctx.Err()
        case <-ticker.C:
            if l.tryLock(key) {
                releaseOnce := sync.Once{}
                return func() {
                    releaseOnce.Do(func() {
                        l.locks.Delete(key)
                    })
                }, nil
            }
        }
    }
}

func (l *memoryLocker) TryLock(_ context.Context, key string) (bool, ReleaseFunc, error) {
    if l.tryLock(key) {
        releaseOnce := sync.Once{}
        return true, func() {
            releaseOnce.Do(func() {
                l.locks.Delete(key)
            })
        }, nil
    }

    return false, func() {}, nil
}

func (l *memoryLocker) tryLock(key string) bool {
    _, loaded := l.locks.LoadOrStore(key, struct{}{})
    return !loaded
}
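memoryLocker's mutual exclusion rests entirely on sync.Map.LoadOrStore being atomic: exactly one caller observes loaded == false for a given key. A self-contained illustration (not part of this commit):

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var m sync.Map

        _, loaded := m.LoadOrStore("k", struct{}{})
        fmt.Println(loaded) // false: "k" was absent, this caller owns the lock

        // true: already held, so Lock keeps polling its 100ms ticker
        // and TryLock returns false immediately.
        _, loaded = m.LoadOrStore("k", struct{}{})
        fmt.Println(loaded)
    }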
137 modules/globallock/redis_locker.go Normal file
@@ -0,0 +1,137 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package globallock

import (
    "context"
    "errors"
    "fmt"
    "sync"
    "sync/atomic"
    "time"

    "code.gitea.io/gitea/modules/nosql"

    "github.com/go-redsync/redsync/v4"
    "github.com/go-redsync/redsync/v4/redis/goredis/v9"
)

const redisLockKeyPrefix = "gitea:globallock:"

// redisLockExpiry is the default expiry time for a lock.
// Define it as a variable to make it possible to change it in tests.
var redisLockExpiry = 30 * time.Second

type redisLocker struct {
    rs *redsync.Redsync

    mutexM   sync.Map
    closed   atomic.Bool
    extendWg sync.WaitGroup
}

var _ Locker = &redisLocker{}

func NewRedisLocker(connection string) Locker {
    l := &redisLocker{
        rs: redsync.New(
            goredis.NewPool(
                nosql.GetManager().GetRedisClient(connection),
            ),
        ),
    }

    l.extendWg.Add(1)
    l.startExtend()

    return l
}

func (l *redisLocker) Lock(ctx context.Context, key string) (ReleaseFunc, error) {
    return l.lock(ctx, key, 0)
}

func (l *redisLocker) TryLock(ctx context.Context, key string) (bool, ReleaseFunc, error) {
    f, err := l.lock(ctx, key, 1)

    var (
        errTaken     *redsync.ErrTaken
        errNodeTaken *redsync.ErrNodeTaken
    )
    if errors.As(err, &errTaken) || errors.As(err, &errNodeTaken) {
        return false, f, nil
    }
    return err == nil, f, err
}

// Close closes the locker.
// It will stop extending the locks and refuse to acquire new locks.
// In actual use, it is not necessary to call this function.
// But it's useful in tests to release resources.
// It could take some time since it waits for the extending goroutine to finish.
func (l *redisLocker) Close() error {
    l.closed.Store(true)
    l.extendWg.Wait()
    return nil
}

func (l *redisLocker) lock(ctx context.Context, key string, tries int) (ReleaseFunc, error) {
    if l.closed.Load() {
        return func() {}, fmt.Errorf("locker is closed")
    }

    options := []redsync.Option{
        redsync.WithExpiry(redisLockExpiry),
    }
    if tries > 0 {
        options = append(options, redsync.WithTries(tries))
    }
    mutex := l.rs.NewMutex(redisLockKeyPrefix+key, options...)
    if err := mutex.LockContext(ctx); err != nil {
        return func() {}, err
    }

    l.mutexM.Store(key, mutex)

    releaseOnce := sync.Once{}
    return func() {
        releaseOnce.Do(func() {
            l.mutexM.Delete(key)

            // It's safe to ignore the error here,
            // if it failed to unlock, it will be released automatically after the lock expires.
            // Do not call mutex.UnlockContext(ctx) here, or it will fail to release when ctx has timed out.
            _, _ = mutex.Unlock()
        })
    }, nil
}

func (l *redisLocker) startExtend() {
    if l.closed.Load() {
        l.extendWg.Done()
        return
    }

    toExtend := make([]*redsync.Mutex, 0)
    l.mutexM.Range(func(_, value any) bool {
        m := value.(*redsync.Mutex)

        // Extend the lock if it is not expired.
        // Although the mutex will be removed from the map before it is released,
        // it still can be expired because of a failed extension.
        // If it happens, it does not need to be extended anymore.
        if time.Now().After(m.Until()) {
            return true
        }

        toExtend = append(toExtend, m)
        return true
    })
    for _, v := range toExtend {
        // If it failed to extend, it will be released automatically after the lock expires.
        _, _ = v.Extend()
    }

    time.AfterFunc(redisLockExpiry/2, l.startExtend)
}
@@ -16,10 +16,10 @@ import (
    "code.gitea.io/gitea/modules/analyze"
    "code.gitea.io/gitea/modules/charset"
    "code.gitea.io/gitea/modules/git"
    "code.gitea.io/gitea/modules/gitrepo"
    "code.gitea.io/gitea/modules/indexer/code/internal"
    indexer_internal "code.gitea.io/gitea/modules/indexer/internal"
    inner_bleve "code.gitea.io/gitea/modules/indexer/internal/bleve"
    "code.gitea.io/gitea/modules/log"
    "code.gitea.io/gitea/modules/setting"
    "code.gitea.io/gitea/modules/timeutil"
    "code.gitea.io/gitea/modules/typesniffer"
@@ -189,21 +189,23 @@ func (b *Indexer) addDelete(filename string, repo *repo_model.Repository, batch
func (b *Indexer) Index(ctx context.Context, repo *repo_model.Repository, sha string, changes *internal.RepoChanges) error {
    batch := inner_bleve.NewFlushingBatch(b.inner.Indexer, maxBatchSize)
    if len(changes.Updates) > 0 {
        // Now because of some insanity with git cat-file not immediately failing if not run in a valid git directory we need to run git rev-parse first!
        if err := git.EnsureValidGitRepository(ctx, repo.RepoPath()); err != nil {
            log.Error("Unable to open git repo: %s for %-v: %v", repo.RepoPath(), repo, err)
        r, err := gitrepo.OpenRepository(ctx, repo)
        if err != nil {
            return err
        }

        batchWriter, batchReader, cancel := git.CatFileBatch(ctx, repo.RepoPath())
        defer cancel()
        defer r.Close()
        gitBatch, err := r.NewBatch(ctx)
        if err != nil {
            return err
        }
        defer gitBatch.Close()

        for _, update := range changes.Updates {
            if err := b.addUpdate(ctx, batchWriter, batchReader, sha, update, repo, batch); err != nil {
            if err := b.addUpdate(ctx, gitBatch.Writer, gitBatch.Reader, sha, update, repo, batch); err != nil {
                return err
            }
        }
        cancel()
        gitBatch.Close()
    }
    for _, filename := range changes.RemovedFilenames {
        if err := b.addDelete(filename, repo, batch); err != nil {

@@ -15,11 +15,11 @@ import (
    "code.gitea.io/gitea/modules/analyze"
    "code.gitea.io/gitea/modules/charset"
    "code.gitea.io/gitea/modules/git"
    "code.gitea.io/gitea/modules/gitrepo"
    "code.gitea.io/gitea/modules/indexer/code/internal"
    indexer_internal "code.gitea.io/gitea/modules/indexer/internal"
    inner_elasticsearch "code.gitea.io/gitea/modules/indexer/internal/elasticsearch"
    "code.gitea.io/gitea/modules/json"
    "code.gitea.io/gitea/modules/log"
    "code.gitea.io/gitea/modules/setting"
    "code.gitea.io/gitea/modules/timeutil"
    "code.gitea.io/gitea/modules/typesniffer"
@@ -154,17 +154,19 @@ func (b *Indexer) addDelete(filename string, repo *repo_model.Repository) elasti
func (b *Indexer) Index(ctx context.Context, repo *repo_model.Repository, sha string, changes *internal.RepoChanges) error {
    reqs := make([]elastic.BulkableRequest, 0)
    if len(changes.Updates) > 0 {
        // Now because of some insanity with git cat-file not immediately failing if not run in a valid git directory we need to run git rev-parse first!
        if err := git.EnsureValidGitRepository(ctx, repo.RepoPath()); err != nil {
            log.Error("Unable to open git repo: %s for %-v: %v", repo.RepoPath(), repo, err)
        r, err := gitrepo.OpenRepository(ctx, repo)
        if err != nil {
            return err
        }

        batchWriter, batchReader, cancel := git.CatFileBatch(ctx, repo.RepoPath())
        defer cancel()
        defer r.Close()
        batch, err := r.NewBatch(ctx)
        if err != nil {
            return err
        }
        defer batch.Close()

        for _, update := range changes.Updates {
            updateReqs, err := b.addUpdate(ctx, batchWriter, batchReader, sha, update, repo)
            updateReqs, err := b.addUpdate(ctx, batch.Writer, batch.Reader, sha, update, repo)
            if err != nil {
                return err
            }
@@ -172,7 +174,7 @@ func (b *Indexer) Index(ctx context.Context, repo *repo_model.Repository, sha st
                reqs = append(reqs, updateReqs...)
            }
        }
        cancel()
        batch.Close()
    }

    for _, filename := range changes.RemovedFilenames {

@@ -113,7 +113,24 @@ func nonGenesisChanges(ctx context.Context, repo *repo_model.Repository, revisio
|
||||
var changes internal.RepoChanges
|
||||
var err error
|
||||
updatedFilenames := make([]string, 0, 10)
|
||||
for _, line := range strings.Split(stdout, "\n") {
|
||||
|
||||
updateChanges := func() error {
|
||||
cmd := git.NewCommand(ctx, "ls-tree", "--full-tree", "-l").AddDynamicArguments(revision).
|
||||
AddDashesAndList(updatedFilenames...)
|
||||
lsTreeStdout, _, err := cmd.RunStdBytes(&git.RunOpts{Dir: repo.RepoPath()})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
updates, err1 := parseGitLsTreeOutput(lsTreeStdout)
|
||||
if err1 != nil {
|
||||
return err1
|
||||
}
|
||||
changes.Updates = append(changes.Updates, updates...)
|
||||
return nil
|
||||
}
|
||||
lines := strings.Split(stdout, "\n")
|
||||
for _, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
if len(line) == 0 {
|
||||
continue
|
||||
@@ -161,15 +178,22 @@ func nonGenesisChanges(ctx context.Context, repo *repo_model.Repository, revisio
|
||||
default:
|
||||
log.Warn("Unrecognized status: %c (line=%s)", status, line)
|
||||
}
|
||||
|
||||
// According to https://learn.microsoft.com/en-us/troubleshoot/windows-client/shell-experience/command-line-string-limitation#more-information
|
||||
// the command line length should less than 8191 characters, assume filepath is 256, then 8191/256 = 31, so we use 30
|
||||
if len(updatedFilenames) >= 30 {
|
||||
if err := updateChanges(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
updatedFilenames = updatedFilenames[0:0]
|
||||
}
|
||||
}
|
||||
|
||||
cmd := git.NewCommand(ctx, "ls-tree", "--full-tree", "-l").AddDynamicArguments(revision).
|
||||
AddDashesAndList(updatedFilenames...)
|
||||
lsTreeStdout, _, err := cmd.RunStdBytes(&git.RunOpts{Dir: repo.RepoPath()})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if len(updatedFilenames) > 0 {
|
||||
if err := updateChanges(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
changes.Updates, err = parseGitLsTreeOutput(lsTreeStdout)
|
||||
return &changes, err
|
||||
}
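
The batching above guards against the command-line length limit cited in the comment. A hedged, self-contained sketch of the same chunking idea (a hypothetical helper, not code from this change; names are illustrative):

package main

import "fmt"

// chunkBy splits names into groups of at most n items so that each group
// fits into one command invocation: Windows caps a command line at 8191
// characters, and with ~256-character paths, 8191/256 ≈ 31, hence n = 30.
func chunkBy(names []string, n int) [][]string {
	var groups [][]string
	for len(names) > n {
		groups = append(groups, names[:n:n])
		names = names[n:]
	}
	if len(names) > 0 {
		groups = append(groups, names)
	}
	return groups
}

func main() {
	names := make([]string, 65)
	for i := range names {
		names[i] = fmt.Sprintf("file%d.go", i)
	}
	for _, group := range chunkBy(names, 30) {
		fmt.Println(len(group)) // 30, 30, 5
	}
}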

@@ -71,6 +71,12 @@ func (i *Indexer) Search(ctx context.Context, options *internal.SearchOptions) (
)),
),
)

if options.IsKeywordNumeric() {
cond = cond.Or(
builder.Eq{"`index`": options.Keyword},
)
}
}

opt, err := ToDBOptions(ctx, options)

@@ -44,6 +44,12 @@ func ToSearchOptions(keyword string, opts *issues_model.IssuesOptions) *SearchOp
searchOpt.ProjectID = optional.Some[int64](0) // Those issues with no project(projectid==0)
}

if opts.AssigneeID > 0 {
searchOpt.AssigneeID = optional.Some(opts.AssigneeID)
} else if opts.AssigneeID == -1 { // FIXME: this is inconsistent with other places
searchOpt.AssigneeID = optional.Some[int64](0)
}

// See the comment of issues_model.SearchOptions for the reason why we need to convert
convertID := func(id int64) optional.Option[int64] {
if id > 0 {
@@ -57,7 +63,6 @@ func ToSearchOptions(keyword string, opts *issues_model.IssuesOptions) *SearchOp

searchOpt.ProjectColumnID = convertID(opts.ProjectColumnID)
searchOpt.PosterID = convertID(opts.PosterID)
searchOpt.AssigneeID = convertID(opts.AssigneeID)
searchOpt.MentionID = convertID(opts.MentionedID)
searchOpt.ReviewedID = convertID(opts.ReviewedID)
searchOpt.ReviewRequestedID = convertID(opts.ReviewRequestedID)

@@ -283,9 +283,9 @@ const (
func SearchIssues(ctx context.Context, opts *SearchOptions) ([]int64, int64, error) {
indexer := *globalIndexer.Load()

if opts.Keyword == "" {
if opts.Keyword == "" || opts.IsKeywordNumeric() {
// This is a conservative shortcut.
// If the keyword is empty, the db has better (at least not worse) performance for filtering issues.
// If the keyword is empty or an integer, the db has better (at least not worse) performance for filtering issues.
// When the keyword is empty, it tends to be listing rather than searching issues.
// So if the user creates an issue and lists issues immediately, the issue may not be listed because the indexer needs time to index the issue.
// Even worse, an external indexer like Elasticsearch may not be available for a while,

@@ -8,6 +8,7 @@ import (
"testing"

"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/models/issues"
"code.gitea.io/gitea/models/unittest"
"code.gitea.io/gitea/modules/indexer/issues/internal"
"code.gitea.io/gitea/modules/optional"
@@ -31,6 +32,7 @@ func TestDBSearchIssues(t *testing.T) {
InitIssueIndexer(true)

t.Run("search issues with keyword", searchIssueWithKeyword)
t.Run("search issues by index", searchIssueByIndex)
t.Run("search issues in repo", searchIssueInRepo)
t.Run("search issues by ID", searchIssueByID)
t.Run("search issues is pr", searchIssueIsPull)
@@ -87,6 +89,43 @@ func searchIssueWithKeyword(t *testing.T) {
}
}

func searchIssueByIndex(t *testing.T) {
tests := []struct {
opts SearchOptions
expectedIDs []int64
}{
{
SearchOptions{
Keyword: "1000",
RepoIDs: []int64{1},
},
[]int64{},
},
{
SearchOptions{
Keyword: "2",
RepoIDs: []int64{1, 2, 3, 32},
},
[]int64{17, 12, 7, 2},
},
{
SearchOptions{
Keyword: "1",
RepoIDs: []int64{58},
},
[]int64{19},
},
}

for _, test := range tests {
issueIDs, _, err := SearchIssues(context.TODO(), &test.opts)
if !assert.NoError(t, err) {
return
}
assert.Equal(t, test.expectedIDs, issueIDs)
}
}

func searchIssueInRepo(t *testing.T) {
tests := []struct {
opts SearchOptions
@@ -150,6 +189,11 @@ func searchIssueByID(t *testing.T) {
},
expectedIDs: []int64{6, 1},
},
{
// NOTE: This tests no assignees filtering and also ToSearchOptions() to ensure it will set AssigneeID to 0 when it is passed as -1.
opts: *ToSearchOptions("", &issues.IssuesOptions{AssigneeID: -1}),
expectedIDs: []int64{22, 21, 16, 15, 14, 13, 12, 11, 20, 5, 19, 18, 10, 7, 4, 9, 8, 3, 2},
},
{
opts: SearchOptions{
MentionID: optional.Some(int64(4)),

@@ -4,6 +4,8 @@
package internal

import (
"strconv"

"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/optional"
"code.gitea.io/gitea/modules/timeutil"
@@ -124,6 +126,12 @@ func (o *SearchOptions) Copy(edit ...func(options *SearchOptions)) *SearchOption
return &v
}

// used for the optimized issue-index-based search
func (o *SearchOptions) IsKeywordNumeric() bool {
_, err := strconv.Atoi(o.Keyword)
return err == nil
}

type SortBy string

const (

@@ -88,6 +88,9 @@ func validateYaml(template *api.IssueTemplate) error {
if err := validateBoolItem(position, field.Attributes, "multiple"); err != nil {
return err
}
if err := validateBoolItem(position, field.Attributes, "list"); err != nil {
return err
}
if err := validateOptions(field, idx); err != nil {
return err
}
@@ -340,7 +343,13 @@ func (f *valuedField) WriteTo(builder *strings.Builder) {
}
}
if len(checkeds) > 0 {
_, _ = fmt.Fprintf(builder, "%s\n", strings.Join(checkeds, ", "))
if list, ok := f.Attributes["list"].(bool); ok && list {
for _, check := range checkeds {
_, _ = fmt.Fprintf(builder, "- %s\n", check)
}
} else {
_, _ = fmt.Fprintf(builder, "%s\n", strings.Join(checkeds, ", "))
}
} else {
_, _ = fmt.Fprint(builder, blankPlaceholder)
}

@@ -216,6 +216,20 @@ body:
`,
wantErr: "body[0](dropdown): 'multiple' should be a bool",
},
{
name: "dropdown invalid list",
content: `
name: "test"
about: "this is about"
body:
- type: "dropdown"
id: "1"
attributes:
label: "a"
list: "on"
`,
wantErr: "body[0](dropdown): 'list' should be a bool",
},
{
name: "checkboxes invalid description",
content: `
@@ -452,6 +466,7 @@ name: Name
title: Title
about: About
labels: ["label1", "label2"]
assignees: ["user1", "user2"]
ref: Ref
body:
- type: markdown
@@ -509,11 +524,12 @@ body:
visible: [form]
`,
want: &api.IssueTemplate{
Name: "Name",
Title: "Title",
About: "About",
Labels: []string{"label1", "label2"},
Ref: "Ref",
Name: "Name",
Title: "Title",
About: "About",
Labels: []string{"label1", "label2"},
Assignees: []string{"user1", "user2"},
Ref: "Ref",
Fields: []*api.IssueFormField{
{
Type: "markdown",
@@ -807,7 +823,7 @@ body:
- type: dropdown
id: id5
attributes:
label: Label of dropdown
label: Label of dropdown (one line)
description: Description of dropdown
multiple: true
options:
@@ -816,8 +832,21 @@ body:
- Option 3 of dropdown
validations:
required: true
- type: checkboxes
- type: dropdown
id: id6
attributes:
label: Label of dropdown (list)
description: Description of dropdown
multiple: true
list: true
options:
- Option 1 of dropdown
- Option 2 of dropdown
- Option 3 of dropdown
validations:
required: true
- type: checkboxes
id: id7
attributes:
label: Label of checkboxes
description: Description of checkboxes
@@ -836,8 +865,9 @@ body:
"form-field-id3": {"Value of id3"},
"form-field-id4": {"Value of id4"},
"form-field-id5": {"0,1"},
"form-field-id6-0": {"on"},
"form-field-id6-2": {"on"},
"form-field-id6": {"1,2"},
"form-field-id7-0": {"on"},
"form-field-id7-2": {"on"},
},
},

@@ -849,10 +879,15 @@ body:

Value of id4

### Label of dropdown
### Label of dropdown (one line)

Option 1 of dropdown, Option 2 of dropdown

### Label of dropdown (list)

- Option 2 of dropdown
- Option 3 of dropdown

### Label of checkboxes

- [x] Option 1 of checkboxes

@@ -136,14 +136,13 @@ func (c *HTTPClient) performOperation(ctx context.Context, objects []Pointer, dc

for _, object := range result.Objects {
if object.Error != nil {
objectError := errors.New(object.Error.Message)
log.Trace("Error on object %v: %v", object.Pointer, objectError)
log.Trace("Error on object %v: %v", object.Pointer, object.Error)
if uc != nil {
if _, err := uc(object.Pointer, objectError); err != nil {
if _, err := uc(object.Pointer, object.Error); err != nil {
return err
}
} else {
if err := dc(object.Pointer, nil, objectError); err != nil {
if err := dc(object.Pointer, nil, object.Error); err != nil {
return err
}
}

@@ -4,7 +4,11 @@
package lfs

import (
"errors"
"fmt"
"time"

"code.gitea.io/gitea/modules/util"
)

const (
@@ -64,6 +68,39 @@ type ObjectError struct {
Message string `json:"message"`
}

var (
// See https://github.com/git-lfs/git-lfs/blob/main/docs/api/batch.md#successful-responses
// LFS object error codes should match HTTP status codes where possible:
// 404 - The object does not exist on the server.
// 409 - The specified hash algorithm disagrees with the server's acceptable options.
// 410 - The object was removed by the owner.
// 422 - Validation error.

ErrObjectNotExist = util.ErrNotExist // the object does not exist on the server
ErrObjectHashMismatch = errors.New("the specified hash algorithm disagrees with the server's acceptable options")
ErrObjectRemoved = errors.New("the object was removed by the owner")
ErrObjectValidation = errors.New("validation error")
)

func (e *ObjectError) Error() string {
return fmt.Sprintf("[%d] %s", e.Code, e.Message)
}

func (e *ObjectError) Unwrap() error {
switch e.Code {
case 404:
return ErrObjectNotExist
case 409:
return ErrObjectHashMismatch
case 410:
return ErrObjectRemoved
case 422:
return ErrObjectValidation
default:
return errors.New(e.Message)
}
}

// PointerBlob associates a Git blob with a Pointer.
type PointerBlob struct {
Hash string

@@ -1144,7 +1144,8 @@ func hashCurrentPatternProcessor(ctx *RenderContext, node *html.Node) {
})
}

exist = ctx.GitRepo.IsObjectExist(hash)
// Don't use IsObjectExist since it doesn't support short hashes with the gogit edition.
exist = ctx.GitRepo.IsReferenceExist(hash)
ctx.ShaExistCache[hash] = exist
}

@@ -4,8 +4,6 @@
package markup

import (
"path"

"code.gitea.io/gitea/modules/util"
)

@@ -14,13 +12,9 @@ func ResolveLink(ctx *RenderContext, link, userContentAnchorPrefix string) (resu
if !isAnchorFragment && !IsFullURLString(link) {
linkBase := ctx.Links.Base
if ctx.IsWiki {
if ext := path.Ext(link); ext == "" || ext == ".-" {
linkBase = ctx.Links.WikiLink() // the link is for a wiki page
} else if DetectMarkupTypeByFileName(link) != "" {
linkBase = ctx.Links.WikiLink() // the link is renderable as a wiki page
} else {
linkBase = ctx.Links.WikiRawLink() // otherwise, use a raw link instead to view & download media
}
// no need to check whether the link should be resolved as a wiki link or a wiki raw link
// just use the wiki link here; it will be redirected to a wiki raw link if necessary
linkBase = ctx.Links.WikiLink()
} else if ctx.Links.BranchPath != "" || ctx.Links.TreePath != "" {
// if there is no BranchPath, then the link will be something like "/owner/repo/src/{the-file-path}"
// and then this link will be handled by the "legacy-ref" code and be redirected to the default branch like "/owner/repo/src/branch/main/{the-file-path}"

@@ -437,7 +437,7 @@ func TestRender_ShortLinks(t *testing.T) {
renderableFileURL := util.URLJoin(tree, "markdown_file.md")
renderableFileURLWiki := util.URLJoin(markup.TestRepoURL, "wiki", "markdown_file.md")
unrenderableFileURL := util.URLJoin(tree, "file.zip")
unrenderableFileURLWiki := util.URLJoin(markup.TestRepoURL, "wiki", "raw", "file.zip")
unrenderableFileURLWiki := util.URLJoin(markup.TestRepoURL, "wiki", "file.zip")
favicon := "http://google.com/favicon.ico"

test(

@@ -672,9 +672,9 @@ space</p>
Expected: `<p>space @mention-user<br/>
/just/a/path.bin<br/>
<a href="https://example.com/file.bin" rel="nofollow">https://example.com/file.bin</a><br/>
<a href="/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/wiki/raw/image.jpg" target="_blank" rel="nofollow noopener"><img src="/wiki/raw/image.jpg" alt="local image"/></a><br/>
<a href="/wiki/raw/path/file" target="_blank" rel="nofollow noopener"><img src="/wiki/raw/path/file" alt="local image"/></a><br/>
@@ -730,9 +730,9 @@ space</p>
Expected: `<p>space @mention-user<br/>
/just/a/path.bin<br/>
<a href="https://example.com/file.bin" rel="nofollow">https://example.com/file.bin</a><br/>
<a href="https://gitea.io/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="https://gitea.io/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="https://gitea.io/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="https://gitea.io/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="https://gitea.io/wiki/raw/image.jpg" target="_blank" rel="nofollow noopener"><img src="https://gitea.io/wiki/raw/image.jpg" alt="local image"/></a><br/>
<a href="https://gitea.io/wiki/raw/path/file" target="_blank" rel="nofollow noopener"><img src="https://gitea.io/wiki/raw/path/file" alt="local image"/></a><br/>
@@ -788,9 +788,9 @@ space</p>
Expected: `<p>space @mention-user<br/>
/just/a/path.bin<br/>
<a href="https://example.com/file.bin" rel="nofollow">https://example.com/file.bin</a><br/>
<a href="/relative/path/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/relative/path/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/relative/path/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/relative/path/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/relative/path/wiki/raw/image.jpg" target="_blank" rel="nofollow noopener"><img src="/relative/path/wiki/raw/image.jpg" alt="local image"/></a><br/>
<a href="/relative/path/wiki/raw/path/file" target="_blank" rel="nofollow noopener"><img src="/relative/path/wiki/raw/path/file" alt="local image"/></a><br/>
@@ -848,9 +848,9 @@ space</p>
Expected: `<p>space @mention-user<br/>
/just/a/path.bin<br/>
<a href="https://example.com/file.bin" rel="nofollow">https://example.com/file.bin</a><br/>
<a href="/relative/path/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/relative/path/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/relative/path/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/relative/path/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/relative/path/wiki/raw/image.jpg" target="_blank" rel="nofollow noopener"><img src="/relative/path/wiki/raw/image.jpg" alt="local image"/></a><br/>
<a href="/relative/path/wiki/raw/path/file" target="_blank" rel="nofollow noopener"><img src="/relative/path/wiki/raw/path/file" alt="local image"/></a><br/>
@@ -908,9 +908,9 @@ space</p>
Expected: `<p>space @mention-user<br/>
/just/a/path.bin<br/>
<a href="https://example.com/file.bin" rel="nofollow">https://example.com/file.bin</a><br/>
<a href="/relative/path/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/relative/path/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/relative/path/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/relative/path/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/relative/path/wiki/raw/image.jpg" target="_blank" rel="nofollow noopener"><img src="/relative/path/wiki/raw/image.jpg" alt="local image"/></a><br/>
<a href="/relative/path/wiki/raw/path/file" target="_blank" rel="nofollow noopener"><img src="/relative/path/wiki/raw/path/file" alt="local image"/></a><br/>
@@ -970,9 +970,9 @@ space</p>
Expected: `<p>space @mention-user<br/>
/just/a/path.bin<br/>
<a href="https://example.com/file.bin" rel="nofollow">https://example.com/file.bin</a><br/>
<a href="/relative/path/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/relative/path/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/relative/path/wiki/raw/file.bin" rel="nofollow">local link</a><br/>
<a href="/relative/path/wiki/file.bin" rel="nofollow">local link</a><br/>
<a href="https://example.com" rel="nofollow">remote link</a><br/>
<a href="/relative/path/wiki/raw/image.jpg" target="_blank" rel="nofollow noopener"><img src="/relative/path/wiki/raw/image.jpg" alt="local image"/></a><br/>
<a href="/relative/path/wiki/raw/path/file" target="_blank" rel="nofollow noopener"><img src="/relative/path/wiki/raw/path/file" alt="local image"/></a><br/>

@@ -45,7 +45,7 @@ func (p *PullRequest) GetContext() DownloaderContext { return p.Context }

// IsForkPullRequest returns true if the pull request is from a forked repository but not the same repository
func (p *PullRequest) IsForkPullRequest() bool {
return p.Head.RepoPath() != p.Base.RepoPath()
return p.Head.RepoFullName() != p.Base.RepoFullName()
}

// GetGitRefName returns pull request relative path to head
@@ -62,8 +62,8 @@ type PullRequestBranch struct {
OwnerName string `yaml:"owner_name"`
}

// RepoPath returns pull request repo path
func (p PullRequestBranch) RepoPath() string {
// RepoFullName returns pull request repo full name
func (p PullRequestBranch) RepoFullName() string {
return fmt.Sprintf("%s/%s", p.OwnerName, p.RepoName)
}

@@ -13,8 +13,7 @@ import (
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/util"
"code.gitea.io/gitea/modules/validation"

"github.com/klauspost/compress/zstd"
"code.gitea.io/gitea/modules/zstd"
)

var (

@@ -10,8 +10,9 @@ import (
"io"
"testing"

"code.gitea.io/gitea/modules/zstd"

"github.com/dsnet/compress/bzip2"
"github.com/klauspost/compress/zstd"
"github.com/stretchr/testify/assert"
)

@@ -14,9 +14,9 @@ import (

"code.gitea.io/gitea/modules/util"
"code.gitea.io/gitea/modules/validation"
"code.gitea.io/gitea/modules/zstd"

"github.com/blakesmith/ar"
"github.com/klauspost/compress/zstd"
"github.com/ulikunitz/xz"
)

@@ -10,8 +10,9 @@ import (
"io"
"testing"

"code.gitea.io/gitea/modules/zstd"

"github.com/blakesmith/ar"
"github.com/klauspost/compress/zstd"
"github.com/stretchr/testify/assert"
"github.com/ulikunitz/xz"
)

@@ -5,6 +5,7 @@ package repository

import (
"context"
"errors"
"fmt"
"io"
"strings"
@@ -181,6 +182,10 @@ func StoreMissingLfsObjectsInRepository(ctx context.Context, repo *repo_model.Re
downloadObjects := func(pointers []lfs.Pointer) error {
err := lfsClient.Download(ctx, pointers, func(p lfs.Pointer, content io.ReadCloser, objectError error) error {
if objectError != nil {
if errors.Is(objectError, lfs.ErrObjectNotExist) {
log.Warn("Repo[%-v]: Ignore missing LFS object %-v: %v", repo, p, objectError)
return nil
}
return objectError
}

@@ -14,10 +14,12 @@ import (
// Actions settings
var (
Actions = struct {
LogStorage *Storage // how the created logs should be stored
ArtifactStorage *Storage // how the created artifacts should be stored
ArtifactRetentionDays int64 `ini:"ARTIFACT_RETENTION_DAYS"`
Enabled bool
LogStorage *Storage // how the created logs should be stored
LogRetentionDays int64 `ini:"LOG_RETENTION_DAYS"`
LogCompression logCompression `ini:"LOG_COMPRESSION"`
ArtifactStorage *Storage // how the created artifacts should be stored
ArtifactRetentionDays int64 `ini:"ARTIFACT_RETENTION_DAYS"`
DefaultActionsURL defaultActionsURL `ini:"DEFAULT_ACTIONS_URL"`
ZombieTaskTimeout time.Duration `ini:"ZOMBIE_TASK_TIMEOUT"`
EndlessTaskTimeout time.Duration `ini:"ENDLESS_TASK_TIMEOUT"`
@@ -53,6 +55,20 @@ const (
// please consider using `uses: https://the_url_you_want_to_use/username/action_name@version` instead.
)

type logCompression string

func (c logCompression) IsValid() bool {
return c.IsNone() || c.IsZstd()
}

func (c logCompression) IsNone() bool {
return c == "" || strings.ToLower(string(c)) == "none"
}

func (c logCompression) IsZstd() bool {
return strings.ToLower(string(c)) == "zstd"
}

func loadActionsFrom(rootCfg ConfigProvider) error {
sec := rootCfg.Section("actions")
err := sec.MapTo(&Actions)
@@ -78,10 +94,17 @@ func loadActionsFrom(rootCfg ConfigProvider) error {
if err != nil {
return err
}
// default to 1 year
if Actions.LogRetentionDays <= 0 {
Actions.LogRetentionDays = 365
}

actionsSec, _ := rootCfg.GetSection("actions.artifacts")

Actions.ArtifactStorage, err = getStorage(rootCfg, "actions_artifacts", "", actionsSec)
if err != nil {
return err
}

// default to 90 days as in GitHub Actions
if Actions.ArtifactRetentionDays <= 0 {
@@ -92,5 +115,9 @@ func loadActionsFrom(rootCfg ConfigProvider) error {
Actions.EndlessTaskTimeout = sec.Key("ENDLESS_TASK_TIMEOUT").MustDuration(3 * time.Hour)
Actions.AbandonedJobTimeout = sec.Key("ABANDONED_JOB_TIMEOUT").MustDuration(24 * time.Hour)

return err
if !Actions.LogCompression.IsValid() {
return fmt.Errorf("invalid [actions] LOG_COMPRESSION: %q", Actions.LogCompression)
}

return nil
}

@@ -20,11 +20,13 @@ func loadAdminFrom(rootCfg ConfigProvider) {
Admin.DisableRegularOrgCreation = sec.Key("DISABLE_REGULAR_ORG_CREATION").MustBool(false)
Admin.DefaultEmailNotification = sec.Key("DEFAULT_EMAIL_NOTIFICATIONS").MustString("enabled")
Admin.UserDisabledFeatures = container.SetOf(sec.Key("USER_DISABLED_FEATURES").Strings(",")...)
Admin.ExternalUserDisableFeatures = container.SetOf(sec.Key("EXTERNAL_USER_DISABLE_FEATURES").Strings(",")...)
Admin.ExternalUserDisableFeatures = container.SetOf(sec.Key("EXTERNAL_USER_DISABLE_FEATURES").Strings(",")...).Union(Admin.UserDisabledFeatures)
}

const (
UserFeatureDeletion = "deletion"
UserFeatureManageSSHKeys = "manage_ssh_keys"
UserFeatureManageGPGKeys = "manage_gpg_keys"
UserFeatureDeletion = "deletion"
UserFeatureManageSSHKeys = "manage_ssh_keys"
UserFeatureManageGPGKeys = "manage_gpg_keys"
UserFeatureManageMFA = "manage_mfa"
UserFeatureManageCredentials = "manage_credentials"
)

@@ -8,6 +8,7 @@ import (
"net"
"net/mail"
"strings"
"text/template"
"time"

"code.gitea.io/gitea/modules/log"
@@ -46,6 +47,10 @@ type Mailer struct {
SendmailArgs []string `ini:"-"`
SendmailTimeout time.Duration `ini:"SENDMAIL_TIMEOUT"`
SendmailConvertCRLF bool `ini:"SENDMAIL_CONVERT_CRLF"`

// Customization
FromDisplayNameFormat string `ini:"FROM_DISPLAY_NAME_FORMAT"`
FromDisplayNameFormatTemplate *template.Template `ini:"-"`
}

// MailService the global mailer
@@ -226,6 +231,16 @@ func loadMailerFrom(rootCfg ConfigProvider) {
log.Error("no mailer.FROM provided, email system may not work.")
}

MailService.FromDisplayNameFormatTemplate, _ = template.New("mailFrom").Parse("{{ .DisplayName }}")
if MailService.FromDisplayNameFormat != "" {
template, err := template.New("mailFrom").Parse(MailService.FromDisplayNameFormat)
if err != nil {
log.Error("mailer.FROM_DISPLAY_NAME_FORMAT is not a valid template: %v", err)
} else {
MailService.FromDisplayNameFormatTemplate = template
}
}

switch MailService.EnvelopeFrom {
case "":
MailService.OverrideEnvelopeFrom = false

@@ -42,6 +42,8 @@ var (
LimitSizeRubyGems int64
LimitSizeSwift int64
LimitSizeVagrant int64

DefaultRPMSignEnabled bool
}{
Enabled: true,
LimitTotalOwnerCount: -1,
@@ -97,6 +99,7 @@ func loadPackagesFrom(rootCfg ConfigProvider) (err error) {
Packages.LimitSizeRubyGems = mustBytes(sec, "LIMIT_SIZE_RUBYGEMS")
Packages.LimitSizeSwift = mustBytes(sec, "LIMIT_SIZE_SWIFT")
Packages.LimitSizeVagrant = mustBytes(sec, "LIMIT_SIZE_VAGRANT")
Packages.DefaultRPMSignEnabled = sec.Key("DEFAULT_RPM_SIGN_ENABLED").MustBool(false)
return nil
}

@@ -494,3 +494,17 @@ type PackagePayload struct {
func (p *PackagePayload) JSONPayload() ([]byte, error) {
return json.MarshalIndent(p, "", " ")
}

// WorkflowDispatchPayload represents a workflow dispatch payload
type WorkflowDispatchPayload struct {
Workflow string `json:"workflow"`
Ref string `json:"ref"`
Inputs map[string]any `json:"inputs"`
Repository *Repository `json:"repository"`
Sender *User `json:"sender"`
}

// JSONPayload implements Payload
func (p *WorkflowDispatchPayload) JSONPayload() ([]byte, error) {
return json.MarshalIndent(p, "", " ")
}

@@ -191,19 +191,20 @@ const (
// IssueTemplate represents an issue template for a repository
// swagger:model
type IssueTemplate struct {
Name string `json:"name" yaml:"name"`
Title string `json:"title" yaml:"title"`
About string `json:"about" yaml:"about"` // Using "description" in a template file is compatible
Labels IssueTemplateLabels `json:"labels" yaml:"labels"`
Ref string `json:"ref" yaml:"ref"`
Content string `json:"content" yaml:"-"`
Fields []*IssueFormField `json:"body" yaml:"body"`
FileName string `json:"file_name" yaml:"-"`
Name string `json:"name" yaml:"name"`
Title string `json:"title" yaml:"title"`
About string `json:"about" yaml:"about"` // Using "description" in a template file is compatible
Labels IssueTemplateStringSlice `json:"labels" yaml:"labels"`
Assignees IssueTemplateStringSlice `json:"assignees" yaml:"assignees"`
Ref string `json:"ref" yaml:"ref"`
Content string `json:"content" yaml:"-"`
Fields []*IssueFormField `json:"body" yaml:"body"`
FileName string `json:"file_name" yaml:"-"`
}

type IssueTemplateLabels []string
type IssueTemplateStringSlice []string

func (l *IssueTemplateLabels) UnmarshalYAML(value *yaml.Node) error {
func (l *IssueTemplateStringSlice) UnmarshalYAML(value *yaml.Node) error {
var labels []string
if value.IsZero() {
*l = labels
@@ -231,7 +232,7 @@ func (l *IssueTemplateLabels) UnmarshalYAML(value *yaml.Node) error {
*l = labels
return nil
}
return fmt.Errorf("line %d: cannot unmarshal %s into IssueTemplateLabels", value.Line, value.ShortTag())
return fmt.Errorf("line %d: cannot unmarshal %s into IssueTemplateStringSlice", value.Line, value.ShortTag())
}

type IssueConfigContactLink struct {

@@ -42,7 +42,7 @@ func TestIssueTemplate_Type(t *testing.T) {
}
}

func TestIssueTemplateLabels_UnmarshalYAML(t *testing.T) {
func TestIssueTemplateStringSlice_UnmarshalYAML(t *testing.T) {
tests := []struct {
name string
content string
@@ -88,7 +88,7 @@ labels:
b: bb
`,
tmpl: &IssueTemplate{},
wantErr: "line 3: cannot unmarshal !!map into IssueTemplateLabels",
wantErr: "line 3: cannot unmarshal !!map into IssueTemplateStringSlice",
},
}
for _, tt := range tests {

@@ -9,21 +9,22 @@ import (

// PullRequest represents a pull request
type PullRequest struct {
ID int64 `json:"id"`
URL string `json:"url"`
Index int64 `json:"number"`
Poster *User `json:"user"`
Title string `json:"title"`
Body string `json:"body"`
Labels []*Label `json:"labels"`
Milestone *Milestone `json:"milestone"`
Assignee *User `json:"assignee"`
Assignees []*User `json:"assignees"`
RequestedReviewers []*User `json:"requested_reviewers"`
State StateType `json:"state"`
Draft bool `json:"draft"`
IsLocked bool `json:"is_locked"`
Comments int `json:"comments"`
ID int64 `json:"id"`
URL string `json:"url"`
Index int64 `json:"number"`
Poster *User `json:"user"`
Title string `json:"title"`
Body string `json:"body"`
Labels []*Label `json:"labels"`
Milestone *Milestone `json:"milestone"`
Assignee *User `json:"assignee"`
Assignees []*User `json:"assignees"`
RequestedReviewers []*User `json:"requested_reviewers"`
RequestedReviewersTeams []*Team `json:"requested_reviewers_teams"`
State StateType `json:"state"`
Draft bool `json:"draft"`
IsLocked bool `json:"is_locked"`
Comments int `json:"comments"`
// number of review comments made on the diff of a PR review (not including comments on commits or issues in a PR)
ReviewComments int `json:"review_comments"`
Additions int `json:"additions"`

@@ -5,6 +5,7 @@ package structs

// AddCollaboratorOption options when adding a user as a collaborator of a repository
type AddCollaboratorOption struct {
// enum: read,write,admin
Permission *string `json:"permission"`
}

@@ -31,21 +31,23 @@ type CreateAccessTokenOption struct {

// CreateOAuth2ApplicationOptions holds options to create an oauth2 application
type CreateOAuth2ApplicationOptions struct {
Name string `json:"name" binding:"Required"`
ConfidentialClient bool `json:"confidential_client"`
RedirectURIs []string `json:"redirect_uris" binding:"Required"`
Name string `json:"name" binding:"Required"`
ConfidentialClient bool `json:"confidential_client"`
SkipSecondaryAuthorization bool `json:"skip_secondary_authorization"`
RedirectURIs []string `json:"redirect_uris" binding:"Required"`
}

// OAuth2Application represents an OAuth2 application.
// swagger:response OAuth2Application
type OAuth2Application struct {
ID int64 `json:"id"`
Name string `json:"name"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
ConfidentialClient bool `json:"confidential_client"`
RedirectURIs []string `json:"redirect_uris"`
Created time.Time `json:"created"`
ID int64 `json:"id"`
Name string `json:"name"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
ConfidentialClient bool `json:"confidential_client"`
SkipSecondaryAuthorization bool `json:"skip_secondary_authorization"`
RedirectURIs []string `json:"redirect_uris"`
Created time.Time `json:"created"`
}

// OAuth2ApplicationList represents a list of OAuth2 applications.

modules/zstd/option.go (new file, +46)
@@ -0,0 +1,46 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package zstd

import "github.com/klauspost/compress/zstd"

type WriterOption = zstd.EOption

var (
WithEncoderCRC = zstd.WithEncoderCRC
WithEncoderConcurrency = zstd.WithEncoderConcurrency
WithWindowSize = zstd.WithWindowSize
WithEncoderPadding = zstd.WithEncoderPadding
WithEncoderLevel = zstd.WithEncoderLevel
WithZeroFrames = zstd.WithZeroFrames
WithAllLitEntropyCompression = zstd.WithAllLitEntropyCompression
WithNoEntropyCompression = zstd.WithNoEntropyCompression
WithSingleSegment = zstd.WithSingleSegment
WithLowerEncoderMem = zstd.WithLowerEncoderMem
WithEncoderDict = zstd.WithEncoderDict
WithEncoderDictRaw = zstd.WithEncoderDictRaw
)

type EncoderLevel = zstd.EncoderLevel

const (
SpeedFastest EncoderLevel = zstd.SpeedFastest
SpeedDefault EncoderLevel = zstd.SpeedDefault
SpeedBetterCompression EncoderLevel = zstd.SpeedBetterCompression
SpeedBestCompression EncoderLevel = zstd.SpeedBestCompression
)

type ReaderOption = zstd.DOption

var (
WithDecoderLowmem = zstd.WithDecoderLowmem
WithDecoderConcurrency = zstd.WithDecoderConcurrency
WithDecoderMaxMemory = zstd.WithDecoderMaxMemory
WithDecoderDicts = zstd.WithDecoderDicts
WithDecoderDictRaw = zstd.WithDecoderDictRaw
WithDecoderMaxWindow = zstd.WithDecoderMaxWindow
WithDecodeAllCapLimit = zstd.WithDecodeAllCapLimit
WithDecodeBuffersBelow = zstd.WithDecodeBuffersBelow
IgnoreChecksum = zstd.IgnoreChecksum
)

modules/zstd/zstd.go (new file, +163)
@@ -0,0 +1,163 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

// Package zstd provides a high-level API for reading and writing zstd-compressed data.
// It supports both regular and seekable zstd streams.
// It doesn't reinvent the wheel: it is a wrapper around the zstd and zstd-seekable-format-go packages.
package zstd

import (
"errors"
"io"

seekable "github.com/SaveTheRbtz/zstd-seekable-format-go/pkg"
"github.com/klauspost/compress/zstd"
)

type Writer zstd.Encoder

var _ io.WriteCloser = (*Writer)(nil)

// NewWriter returns a new zstd writer.
func NewWriter(w io.Writer, opts ...WriterOption) (*Writer, error) {
zstdW, err := zstd.NewWriter(w, opts...)
if err != nil {
return nil, err
}
return (*Writer)(zstdW), nil
}

func (w *Writer) Write(p []byte) (int, error) {
return (*zstd.Encoder)(w).Write(p)
}

func (w *Writer) Close() error {
return (*zstd.Encoder)(w).Close()
}

type Reader zstd.Decoder

var _ io.ReadCloser = (*Reader)(nil)

// NewReader returns a new zstd reader.
func NewReader(r io.Reader, opts ...ReaderOption) (*Reader, error) {
zstdR, err := zstd.NewReader(r, opts...)
if err != nil {
return nil, err
}
return (*Reader)(zstdR), nil
}

func (r *Reader) Read(p []byte) (int, error) {
return (*zstd.Decoder)(r).Read(p)
}

func (r *Reader) Close() error {
(*zstd.Decoder)(r).Close() // no error returned
return nil
}

type SeekableWriter struct {
buf []byte
n int
w seekable.Writer
}

var _ io.WriteCloser = (*SeekableWriter)(nil)

// NewSeekableWriter returns a zstd writer to compress data to seekable format.
// blockSize is an important parameter; it should be chosen according to the actual business requirements.
// If it's too small, the compression ratio could be very bad, possibly no compression at all.
// If it's too large, reading the data partially from the underlying storage could cost more traffic.
func NewSeekableWriter(w io.Writer, blockSize int, opts ...WriterOption) (*SeekableWriter, error) {
zstdW, err := zstd.NewWriter(nil, opts...)
if err != nil {
return nil, err
}

seekableW, err := seekable.NewWriter(w, zstdW)
if err != nil {
return nil, err
}

return &SeekableWriter{
buf: make([]byte, blockSize),
w: seekableW,
}, nil
}

func (w *SeekableWriter) Write(p []byte) (int, error) {
written := 0
for len(p) > 0 {
n := copy(w.buf[w.n:], p)
w.n += n
written += n
p = p[n:]

if w.n == len(w.buf) {
if _, err := w.w.Write(w.buf); err != nil {
return written, err
}
w.n = 0
}
}
return written, nil
}

func (w *SeekableWriter) Close() error {
if w.n > 0 {
if _, err := w.w.Write(w.buf[:w.n]); err != nil {
return err
}
}
return w.w.Close()
}

type SeekableReader struct {
r seekable.Reader
c func() error
}

var _ io.ReadSeekCloser = (*SeekableReader)(nil)

// NewSeekableReader returns a zstd reader to decompress data from seekable format.
func NewSeekableReader(r io.ReadSeeker, opts ...ReaderOption) (*SeekableReader, error) {
zstdR, err := zstd.NewReader(nil, opts...)
if err != nil {
return nil, err
}

seekableR, err := seekable.NewReader(r, zstdR)
if err != nil {
return nil, err
}

ret := &SeekableReader{
r: seekableR,
}
if closer, ok := r.(io.Closer); ok {
ret.c = closer.Close
}

return ret, nil
}

func (r *SeekableReader) Read(p []byte) (int, error) {
return r.r.Read(p)
}

func (r *SeekableReader) Seek(offset int64, whence int) (int64, error) {
return r.r.Seek(offset, whence)
}

func (r *SeekableReader) Close() error {
return errors.Join(
func() error {
if r.c != nil {
return r.c()
}
return nil
}(),
r.r.Close(),
)
}
modules/zstd/zstd_test.go (new file, +304)
@@ -0,0 +1,304 @@
// Copyright 2024 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package zstd

import (
"bytes"
"io"
"os"
"path/filepath"
"strings"
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestWriterReader(t *testing.T) {
testData := prepareTestData(t, 20_000_000)

result := bytes.NewBuffer(nil)

t.Run("regular", func(t *testing.T) {
result.Reset()
writer, err := NewWriter(result)
require.NoError(t, err)

_, err = io.Copy(writer, bytes.NewReader(testData))
require.NoError(t, err)
require.NoError(t, writer.Close())

t.Logf("original size: %d, compressed size: %d, rate: %.2f%%", len(testData), result.Len(), float64(result.Len())/float64(len(testData))*100)

reader, err := NewReader(result)
require.NoError(t, err)

data, err := io.ReadAll(reader)
require.NoError(t, err)
require.NoError(t, reader.Close())

assert.Equal(t, testData, data)
})

t.Run("with options", func(t *testing.T) {
result.Reset()
writer, err := NewWriter(result, WithEncoderLevel(SpeedBestCompression))
require.NoError(t, err)

_, err = io.Copy(writer, bytes.NewReader(testData))
require.NoError(t, err)
require.NoError(t, writer.Close())

t.Logf("original size: %d, compressed size: %d, rate: %.2f%%", len(testData), result.Len(), float64(result.Len())/float64(len(testData))*100)

reader, err := NewReader(result, WithDecoderLowmem(true))
require.NoError(t, err)

data, err := io.ReadAll(reader)
require.NoError(t, err)
require.NoError(t, reader.Close())

assert.Equal(t, testData, data)
})
}

func TestSeekableWriterReader(t *testing.T) {
testData := prepareTestData(t, 20_000_000)

result := bytes.NewBuffer(nil)

t.Run("regular", func(t *testing.T) {
result.Reset()
blockSize := 100_000

writer, err := NewSeekableWriter(result, blockSize)
require.NoError(t, err)

_, err = io.Copy(writer, bytes.NewReader(testData))
require.NoError(t, err)
require.NoError(t, writer.Close())

t.Logf("original size: %d, compressed size: %d, rate: %.2f%%", len(testData), result.Len(), float64(result.Len())/float64(len(testData))*100)

reader, err := NewSeekableReader(bytes.NewReader(result.Bytes()))
require.NoError(t, err)

data, err := io.ReadAll(reader)
require.NoError(t, err)
require.NoError(t, reader.Close())

assert.Equal(t, testData, data)
})

t.Run("seek read", func(t *testing.T) {
result.Reset()
blockSize := 100_000

writer, err := NewSeekableWriter(result, blockSize)
require.NoError(t, err)

_, err = io.Copy(writer, bytes.NewReader(testData))
require.NoError(t, err)
require.NoError(t, writer.Close())

t.Logf("original size: %d, compressed size: %d, rate: %.2f%%", len(testData), result.Len(), float64(result.Len())/float64(len(testData))*100)

assertReader := &assertReadSeeker{r: bytes.NewReader(result.Bytes())}

reader, err := NewSeekableReader(assertReader)
require.NoError(t, err)

_, err = reader.Seek(10_000_000, io.SeekStart)
require.NoError(t, err)

data := make([]byte, 1000)
_, err = io.ReadFull(reader, data)
require.NoError(t, err)
require.NoError(t, reader.Close())

assert.Equal(t, testData[10_000_000:10_000_000+1000], data)

// Should seek 3 times,
// the first two times are for getting the index,
// and the third time is for reading the data.
assert.Equal(t, 3, assertReader.SeekTimes)
// Should read less than 2 blocks,
// even if the compression ratio is not good and the data is not in the same block.
assert.Less(t, assertReader.ReadBytes, blockSize*2)
// Should close the underlying reader if it is a Closer.
assert.True(t, assertReader.Closed)
})

t.Run("tidy data", func(t *testing.T) {
testData := prepareTestData(t, 1000) // data size is less than a block

result.Reset()
blockSize := 100_000

writer, err := NewSeekableWriter(result, blockSize)
require.NoError(t, err)

_, err = io.Copy(writer, bytes.NewReader(testData))
require.NoError(t, err)
require.NoError(t, writer.Close())

t.Logf("original size: %d, compressed size: %d, rate: %.2f%%", len(testData), result.Len(), float64(result.Len())/float64(len(testData))*100)

reader, err := NewSeekableReader(bytes.NewReader(result.Bytes()))
require.NoError(t, err)

data, err := io.ReadAll(reader)
require.NoError(t, err)
require.NoError(t, reader.Close())

assert.Equal(t, testData, data)
})

t.Run("tidy block", func(t *testing.T) {
result.Reset()
blockSize := 100

writer, err := NewSeekableWriter(result, blockSize)
require.NoError(t, err)

_, err = io.Copy(writer, bytes.NewReader(testData))
require.NoError(t, err)
require.NoError(t, writer.Close())

t.Logf("original size: %d, compressed size: %d, rate: %.2f%%", len(testData), result.Len(), float64(result.Len())/float64(len(testData))*100)
// A too-small block size causes a bad compression rate,
// possibly making the compressed data larger than the original data.
assert.Greater(t, result.Len(), len(testData))

reader, err := NewSeekableReader(bytes.NewReader(result.Bytes()))
require.NoError(t, err)

data, err := io.ReadAll(reader)
require.NoError(t, err)
require.NoError(t, reader.Close())

assert.Equal(t, testData, data)
})

t.Run("compatible reader", func(t *testing.T) {
result.Reset()
blockSize := 100_000

writer, err := NewSeekableWriter(result, blockSize)
require.NoError(t, err)

_, err = io.Copy(writer, bytes.NewReader(testData))
require.NoError(t, err)
require.NoError(t, writer.Close())

t.Logf("original size: %d, compressed size: %d, rate: %.2f%%", len(testData), result.Len(), float64(result.Len())/float64(len(testData))*100)

// It should be able to read the data with a regular reader.
reader, err := NewReader(bytes.NewReader(result.Bytes()))
require.NoError(t, err)

data, err := io.ReadAll(reader)
require.NoError(t, err)
require.NoError(t, reader.Close())

assert.Equal(t, testData, data)
})

t.Run("wrong reader", func(t *testing.T) {
result.Reset()

// Use a regular writer to compress the data.
writer, err := NewWriter(result)
require.NoError(t, err)

_, err = io.Copy(writer, bytes.NewReader(testData))
require.NoError(t, err)
require.NoError(t, writer.Close())

t.Logf("original size: %d, compressed size: %d, rate: %.2f%%", len(testData), result.Len(), float64(result.Len())/float64(len(testData))*100)

// But using a seekable reader to read the data should fail.
_, err = NewSeekableReader(bytes.NewReader(result.Bytes()))
require.Error(t, err)
})
}

// prepareTestData prepares test data for compression tests.
// Random data is not suitable for testing compression,
// so it collects code files from the project to get enough data.
func prepareTestData(t *testing.T, size int) []byte {
// .../gitea/modules/zstd
dir, err := os.Getwd()
require.NoError(t, err)
// .../gitea/
dir = filepath.Join(dir, "../../")

textExt := []string{".go", ".tmpl", ".ts", ".yml", ".css"} // add more if not enough data collected
isText := func(info os.FileInfo) bool {
if info.Size() == 0 {
return false
}
for _, ext := range textExt {
if strings.HasSuffix(info.Name(), ext) {
return true
}
}
return false
}

ret := make([]byte, size)
n := 0
count := 0

queue := []string{dir}
for len(queue) > 0 && n < size {
file := queue[0]
queue = queue[1:]
info, err := os.Stat(file)
require.NoError(t, err)
if info.IsDir() {
entries, err := os.ReadDir(file)
require.NoError(t, err)
for _, entry := range entries {
queue = append(queue, filepath.Join(file, entry.Name()))
}
continue
}
if !isText(info) { // text files only
continue
}
data, err := os.ReadFile(file)
require.NoError(t, err)
n += copy(ret[n:], data)
count++
}

if n < size {
require.Failf(t, "Not enough data", "Only %d bytes collected from %d files", n, count)
}
return ret
}

type assertReadSeeker struct {
r io.ReadSeeker
SeekTimes int
ReadBytes int
Closed bool
}

func (a *assertReadSeeker) Read(p []byte) (int, error) {
n, err := a.r.Read(p)
a.ReadBytes += n
return n, err
}

func (a *assertReadSeeker) Seek(offset int64, whence int) (int64, error) {
a.SeekTimes++
return a.r.Seek(offset, whence)
}

func (a *assertReadSeeker) Close() error {
a.Closed = true
return nil
}