
Merge branch 'main' into api-repo-actions

Authored by Chester on 2023-10-20 14:56:35 -04:00, committed by GitHub.
1440 changed files with 19571 additions and 17167 deletions
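
Most of the diffs below follow a single pattern: functions that previously reached for db.DefaultContext (or a global engine) now take a context.Context as their first argument, and callers thread the request, background, or test context through. A minimal sketch of that before/after shape (hypothetical model type and helper, not code from this commit):

package main

import (
	"context"
	"fmt"
)

// Thing is a placeholder model used only for this sketch.
type Thing struct{ ID int64 }

// Old shape: GetThing(id int64) reached for a package-level default context.
// New shape: the context is explicit, so web handlers pass the request context,
// background jobs pass their own context, and tests pass db.DefaultContext.
func GetThing(ctx context.Context, id int64) (*Thing, error) {
	// a real implementation would use ctx for the database call,
	// e.g. db.GetEngine(ctx) as in the doctor check added below
	return &Thing{ID: id}, nil
}

func main() {
	thing, err := GetThing(context.Background(), 1)
	fmt.Println(thing.ID, err)
}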

View File

@@ -5,6 +5,7 @@ package activitypub
import (
"bytes"
"context"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
@@ -61,14 +62,14 @@ type Client struct {
}
// NewClient function
func NewClient(user *user_model.User, pubID string) (c *Client, err error) {
func NewClient(ctx context.Context, user *user_model.User, pubID string) (c *Client, err error) {
if err = containsRequiredHTTPHeaders(http.MethodGet, setting.Federation.GetHeaders); err != nil {
return nil, err
} else if err = containsRequiredHTTPHeaders(http.MethodPost, setting.Federation.PostHeaders); err != nil {
return nil, err
}
priv, err := GetPrivateKey(user)
priv, err := GetPrivateKey(ctx, user)
if err != nil {
return nil, err
}

View File

@@ -11,6 +11,7 @@ import (
"regexp"
"testing"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/models/unittest"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/setting"
@@ -22,7 +23,7 @@ func TestActivityPubSignedPost(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
pubID := "https://example.com/pubID"
c, err := NewClient(user, pubID)
c, err := NewClient(db.DefaultContext, user, pubID)
assert.NoError(t, err)
expected := "BODY"

View File

@@ -4,7 +4,6 @@
package activitypub
import (
"path/filepath"
"testing"
"code.gitea.io/gitea/models/unittest"
@@ -15,7 +14,5 @@ import (
)
func TestMain(m *testing.M) {
unittest.MainTest(m, &unittest.TestOptions{
GiteaRootPath: filepath.Join("..", ".."),
})
unittest.MainTest(m)
}

View File

@@ -4,6 +4,8 @@
package activitypub
import (
"context"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/util"
)
@@ -11,19 +13,19 @@ import (
const rsaBits = 3072
// GetKeyPair function returns a user's private and public keys
func GetKeyPair(user *user_model.User) (pub, priv string, err error) {
func GetKeyPair(ctx context.Context, user *user_model.User) (pub, priv string, err error) {
var settings map[string]*user_model.Setting
settings, err = user_model.GetSettings(user.ID, []string{user_model.UserActivityPubPrivPem, user_model.UserActivityPubPubPem})
settings, err = user_model.GetSettings(ctx, user.ID, []string{user_model.UserActivityPubPrivPem, user_model.UserActivityPubPubPem})
if err != nil {
return pub, priv, err
} else if len(settings) == 0 {
if priv, pub, err = util.GenerateKeyPair(rsaBits); err != nil {
return pub, priv, err
}
if err = user_model.SetUserSetting(user.ID, user_model.UserActivityPubPrivPem, priv); err != nil {
if err = user_model.SetUserSetting(ctx, user.ID, user_model.UserActivityPubPrivPem, priv); err != nil {
return pub, priv, err
}
if err = user_model.SetUserSetting(user.ID, user_model.UserActivityPubPubPem, pub); err != nil {
if err = user_model.SetUserSetting(ctx, user.ID, user_model.UserActivityPubPubPem, pub); err != nil {
return pub, priv, err
}
return pub, priv, err
@@ -35,13 +37,13 @@ func GetKeyPair(user *user_model.User) (pub, priv string, err error) {
}
// GetPublicKey function returns a user's public key
func GetPublicKey(user *user_model.User) (pub string, err error) {
pub, _, err = GetKeyPair(user)
func GetPublicKey(ctx context.Context, user *user_model.User) (pub string, err error) {
pub, _, err = GetKeyPair(ctx, user)
return pub, err
}
// GetPrivateKey function returns a user's private key
func GetPrivateKey(user *user_model.User) (priv string, err error) {
_, priv, err = GetKeyPair(user)
func GetPrivateKey(ctx context.Context, user *user_model.User) (priv string, err error) {
_, priv, err = GetKeyPair(ctx, user)
return priv, err
}

View File

@@ -6,6 +6,7 @@ package activitypub
import (
"testing"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/models/unittest"
user_model "code.gitea.io/gitea/models/user"
@@ -17,12 +18,12 @@ import (
func TestUserSettings(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
user1 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1})
pub, priv, err := GetKeyPair(user1)
pub, priv, err := GetKeyPair(db.DefaultContext, user1)
assert.NoError(t, err)
pub1, err := GetPublicKey(user1)
pub1, err := GetPublicKey(db.DefaultContext, user1)
assert.NoError(t, err)
assert.Equal(t, pub, pub1)
priv1, err := GetPrivateKey(user1)
priv1, err := GetPrivateKey(db.DefaultContext, user1)
assert.NoError(t, err)
assert.Equal(t, priv, priv1)
}

View File

@@ -68,7 +68,7 @@ func (u *User) WebAuthnIcon() string {
// WebAuthnCredentials implements the webauthn.User interface
func (u *User) WebAuthnCredentials() []webauthn.Credential {
dbCreds, err := auth.GetWebAuthnCredentialsByUID(u.ID)
dbCreds, err := auth.GetWebAuthnCredentialsByUID(db.DefaultContext, u.ID)
if err != nil {
return nil
}

View File

@@ -101,6 +101,12 @@ type APIRedirect struct{}
// swagger:response string
type APIString string
// APIRepoArchivedError is an error that is raised when an attempt is made to modify an archived repo
// swagger:response repoArchivedError
type APIRepoArchivedError struct {
APIError
}
// ServerError responds with error message, status is 500
func (ctx *APIContext) ServerError(title string, err error) {
ctx.Error(http.StatusInternalServerError, title, err)
@@ -212,7 +218,7 @@ func (ctx *APIContext) CheckForOTP() {
}
otpHeader := ctx.Req.Header.Get("X-Gitea-OTP")
twofa, err := auth.GetTwoFactorByUID(ctx.Doer.ID)
twofa, err := auth.GetTwoFactorByUID(ctx, ctx.Doer.ID)
if err != nil {
if auth.IsErrTwoFactorNotEnrolled(err) {
return // No 2FA enrollment for this user

View File

@@ -4,16 +4,11 @@
package context
import (
"encoding/hex"
"net/http"
"strings"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util"
"code.gitea.io/gitea/modules/web/middleware"
"github.com/minio/sha256-simd"
"golang.org/x/crypto/pbkdf2"
)
const CookieNameFlash = "gitea_flash"
@@ -45,42 +40,3 @@ func (ctx *Context) DeleteSiteCookie(name string) {
func (ctx *Context) GetSiteCookie(name string) string {
return middleware.GetSiteCookie(ctx.Req, name)
}
// GetSuperSecureCookie returns given cookie value from request header with secret string.
func (ctx *Context) GetSuperSecureCookie(secret, name string) (string, bool) {
val := ctx.GetSiteCookie(name)
return ctx.CookieDecrypt(secret, val)
}
// CookieDecrypt decrypts the given value using the provided secret string.
func (ctx *Context) CookieDecrypt(secret, val string) (string, bool) {
if val == "" {
return "", false
}
text, err := hex.DecodeString(val)
if err != nil {
return "", false
}
key := pbkdf2.Key([]byte(secret), []byte(secret), 1000, 16, sha256.New)
text, err = util.AESGCMDecrypt(key, text)
return string(text), err == nil
}
// SetSuperSecureCookie sets given cookie value to response header with secret string.
func (ctx *Context) SetSuperSecureCookie(secret, name, value string, maxAge int) {
text := ctx.CookieEncrypt(secret, value)
ctx.SetSiteCookie(name, text, maxAge)
}
// CookieEncrypt encrypts a given value using the provided secret
func (ctx *Context) CookieEncrypt(secret, value string) string {
key := pbkdf2.Key([]byte(secret), []byte(secret), 1000, 16, sha256.New)
text, err := util.AESGCMEncrypt(key, []byte(value))
if err != nil {
panic("error encrypting cookie: " + err.Error())
}
return hex.EncodeToString(text)
}

View File

@@ -46,7 +46,7 @@ func GetOrganizationByParams(ctx *Context) {
ctx.Org.Organization, err = organization.GetOrgByName(ctx, orgName)
if err != nil {
if organization.IsErrOrgNotExist(err) {
redirectUserID, err := user_model.LookupUserRedirect(orgName)
redirectUserID, err := user_model.LookupUserRedirect(ctx, orgName)
if err == nil {
RedirectToUser(ctx.Base, orgName, redirectUserID)
} else if user_model.IsErrUserRedirectNotExist(err) {
@@ -128,7 +128,7 @@ func HandleOrgAssignment(ctx *Context, args ...bool) {
ctx.Org.IsTeamAdmin = true
ctx.Org.CanCreateOrgRepo = true
} else if ctx.IsSigned {
ctx.Org.IsOwner, err = org.IsOwnedBy(ctx.Doer.ID)
ctx.Org.IsOwner, err = org.IsOwnedBy(ctx, ctx.Doer.ID)
if err != nil {
ctx.ServerError("IsOwnedBy", err)
return
@@ -140,12 +140,12 @@ func HandleOrgAssignment(ctx *Context, args ...bool) {
ctx.Org.IsTeamAdmin = true
ctx.Org.CanCreateOrgRepo = true
} else {
ctx.Org.IsMember, err = org.IsOrgMember(ctx.Doer.ID)
ctx.Org.IsMember, err = org.IsOrgMember(ctx, ctx.Doer.ID)
if err != nil {
ctx.ServerError("IsOrgMember", err)
return
}
ctx.Org.CanCreateOrgRepo, err = org.CanCreateOrgRepo(ctx.Doer.ID)
ctx.Org.CanCreateOrgRepo, err = org.CanCreateOrgRepo(ctx, ctx.Doer.ID)
if err != nil {
ctx.ServerError("CanCreateOrgRepo", err)
return
@@ -165,7 +165,7 @@ func HandleOrgAssignment(ctx *Context, args ...bool) {
ctx.Data["IsPackageEnabled"] = setting.Packages.Enabled
ctx.Data["IsRepoIndexerEnabled"] = setting.Indexer.RepoIndexerEnabled
ctx.Data["IsPublicMember"] = func(uid int64) bool {
is, _ := organization.IsPublicMembership(ctx.Org.Organization.ID, uid)
is, _ := organization.IsPublicMembership(ctx, ctx.Org.Organization.ID, uid)
return is
}
ctx.Data["CanCreateOrgRepo"] = ctx.Org.CanCreateOrgRepo
@@ -179,7 +179,7 @@ func HandleOrgAssignment(ctx *Context, args ...bool) {
OrgID: org.ID,
PublicOnly: ctx.Org.PublicMemberOnly,
}
ctx.Data["NumMembers"], err = organization.CountOrgMembers(opts)
ctx.Data["NumMembers"], err = organization.CountOrgMembers(ctx, opts)
if err != nil {
ctx.ServerError("CountOrgMembers", err)
return
@@ -191,7 +191,7 @@ func HandleOrgAssignment(ctx *Context, args ...bool) {
if ctx.Org.IsOwner {
shouldSeeAllTeams = true
} else {
teams, err := org.GetUserTeams(ctx.Doer.ID)
teams, err := org.GetUserTeams(ctx, ctx.Doer.ID)
if err != nil {
ctx.ServerError("GetUserTeams", err)
return
@@ -204,13 +204,13 @@ func HandleOrgAssignment(ctx *Context, args ...bool) {
}
}
if shouldSeeAllTeams {
ctx.Org.Teams, err = org.LoadTeams()
ctx.Org.Teams, err = org.LoadTeams(ctx)
if err != nil {
ctx.ServerError("LoadTeams", err)
return
}
} else {
ctx.Org.Teams, err = org.GetUserTeams(ctx.Doer.ID)
ctx.Org.Teams, err = org.GetUserTeams(ctx, ctx.Doer.ID)
if err != nil {
ctx.ServerError("GetUserTeams", err)
return

View File

@@ -109,7 +109,7 @@ func determineAccessMode(ctx *Base, pkg *Package, doer *user_model.User) (perm.A
if doer != nil && !doer.IsGhost() {
// 1. If user is logged in, check all team packages permissions
var err error
accessMode, err = org.GetOrgUserMaxAuthorizeLevel(doer.ID)
accessMode, err = org.GetOrgUserMaxAuthorizeLevel(ctx, doer.ID)
if err != nil {
return accessMode, err
}

View File

@@ -144,18 +144,18 @@ func (r *Repository) CanCommitToBranch(ctx context.Context, doer *user_model.Use
}
// CanUseTimetracker returns whether or not a user can use the timetracker.
func (r *Repository) CanUseTimetracker(issue *issues_model.Issue, user *user_model.User) bool {
func (r *Repository) CanUseTimetracker(ctx context.Context, issue *issues_model.Issue, user *user_model.User) bool {
// Checking for following:
// 1. Is timetracker enabled
// 2. Is the user a contributor, admin, poster or assignee and do the repository policies require this?
isAssigned, _ := issues_model.IsUserAssignedToIssue(db.DefaultContext, issue, user)
return r.Repository.IsTimetrackerEnabled(db.DefaultContext) && (!r.Repository.AllowOnlyContributorsToTrackTime(db.DefaultContext) ||
isAssigned, _ := issues_model.IsUserAssignedToIssue(ctx, issue, user)
return r.Repository.IsTimetrackerEnabled(ctx) && (!r.Repository.AllowOnlyContributorsToTrackTime(ctx) ||
r.Permission.CanWriteIssuesOrPulls(issue.IsPull) || issue.IsPoster(user.ID) || isAssigned)
}
// CanCreateIssueDependencies returns whether or not a user can create dependencies.
func (r *Repository) CanCreateIssueDependencies(user *user_model.User, isPull bool) bool {
return r.Repository.IsDependenciesEnabled(db.DefaultContext) && r.Permission.CanWriteIssuesOrPulls(isPull)
func (r *Repository) CanCreateIssueDependencies(ctx context.Context, user *user_model.User, isPull bool) bool {
return r.Repository.IsDependenciesEnabled(ctx) && r.Permission.CanWriteIssuesOrPulls(isPull)
}
// GetCommitsCount returns cached commit count for current view
@@ -456,7 +456,7 @@ func RepoAssignment(ctx *Context) context.CancelFunc {
return nil
}
if redirectUserID, err := user_model.LookupUserRedirect(userName); err == nil {
if redirectUserID, err := user_model.LookupUserRedirect(ctx, userName); err == nil {
RedirectToUser(ctx.Base, userName, redirectUserID)
} else if user_model.IsErrUserRedirectNotExist(err) {
ctx.NotFound("GetUserByName", nil)
@@ -495,10 +495,10 @@ func RepoAssignment(ctx *Context) context.CancelFunc {
}
// Get repository.
repo, err := repo_model.GetRepositoryByName(owner.ID, repoName)
repo, err := repo_model.GetRepositoryByName(ctx, owner.ID, repoName)
if err != nil {
if repo_model.IsErrRepoNotExist(err) {
redirectRepoID, err := repo_model.LookupRedirect(owner.ID, repoName)
redirectRepoID, err := repo_model.LookupRedirect(ctx, owner.ID, repoName)
if err == nil {
RedirectToRepo(ctx.Base, redirectRepoID)
} else if repo_model.IsErrRedirectNotExist(err) {
@@ -545,7 +545,10 @@ func RepoAssignment(ctx *Context) context.CancelFunc {
ctx.ServerError("GetReleaseCountByRepoID", err)
return nil
}
ctx.Data["NumReleases"], err = repo_model.GetReleaseCountByRepoID(ctx, ctx.Repo.Repository.ID, repo_model.FindReleasesOptions{})
ctx.Data["NumReleases"], err = repo_model.GetReleaseCountByRepoID(ctx, ctx.Repo.Repository.ID, repo_model.FindReleasesOptions{
// only show draft releases to users who can write; read-only users shouldn't see draft releases.
IncludeDrafts: ctx.Repo.CanWrite(unit_model.TypeReleases),
})
if err != nil {
ctx.ServerError("GetReleaseCountByRepoID", err)
return nil
@@ -561,7 +564,7 @@ func RepoAssignment(ctx *Context) context.CancelFunc {
ctx.Data["CanWriteIssues"] = ctx.Repo.CanWrite(unit_model.TypeIssues)
ctx.Data["CanWritePulls"] = ctx.Repo.CanWrite(unit_model.TypePullRequests)
canSignedUserFork, err := repo_module.CanUserForkRepo(ctx.Doer, ctx.Repo.Repository)
canSignedUserFork, err := repo_module.CanUserForkRepo(ctx, ctx.Doer, ctx.Repo.Repository)
if err != nil {
ctx.ServerError("CanUserForkRepo", err)
return nil
@@ -598,7 +601,7 @@ func RepoAssignment(ctx *Context) context.CancelFunc {
}
if ctx.IsSigned {
ctx.Data["IsWatchingRepo"] = repo_model.IsWatching(ctx.Doer.ID, repo.ID)
ctx.Data["IsWatchingRepo"] = repo_model.IsWatching(ctx, ctx.Doer.ID, repo.ID)
ctx.Data["IsStaringRepo"] = repo_model.IsStaring(ctx, ctx.Doer.ID, repo.ID)
}
@@ -703,18 +706,18 @@ func RepoAssignment(ctx *Context) context.CancelFunc {
// People who have push access or have forked the repository can propose a new pull request.
canPush := ctx.Repo.CanWrite(unit_model.TypeCode) ||
(ctx.IsSigned && repo_model.HasForkedRepo(ctx.Doer.ID, ctx.Repo.Repository.ID))
(ctx.IsSigned && repo_model.HasForkedRepo(ctx, ctx.Doer.ID, ctx.Repo.Repository.ID))
canCompare := false
// Pull request is allowed if this is a fork repository
// and base repository accepts pull requests.
if repo.BaseRepo != nil && repo.BaseRepo.AllowsPulls() {
if repo.BaseRepo != nil && repo.BaseRepo.AllowsPulls(ctx) {
canCompare = true
ctx.Data["BaseRepo"] = repo.BaseRepo
ctx.Repo.PullRequest.BaseRepo = repo.BaseRepo
ctx.Repo.PullRequest.Allowed = canPush
ctx.Repo.PullRequest.HeadInfoSubURL = url.PathEscape(ctx.Repo.Owner.Name) + ":" + util.PathEscapeSegments(ctx.Repo.BranchName)
} else if repo.AllowsPulls() {
} else if repo.AllowsPulls(ctx) {
// Or, this repository accepts pull requests between branches.
canCompare = true
ctx.Data["BaseRepo"] = repo
@@ -740,7 +743,7 @@ func RepoAssignment(ctx *Context) context.CancelFunc {
ctx.Data["RepoTransfer"] = repoTransfer
if ctx.Doer != nil {
ctx.Data["CanUserAcceptTransfer"] = repoTransfer.CanUserAcceptTransfer(ctx.Doer)
ctx.Data["CanUserAcceptTransfer"] = repoTransfer.CanUserAcceptTransfer(ctx, ctx.Doer)
}
}
@@ -773,6 +776,8 @@ const (
RepoRefBlob
)
const headRefName = "HEAD"
// RepoRef handles repository reference names when the ref name is not
// explicitly given
func RepoRef() func(*Context) context.CancelFunc {
@@ -833,6 +838,14 @@ func getRefName(ctx *Base, repo *Repository, pathType RepoRefType) string {
case RepoRefBranch:
ref := getRefNameFromPath(ctx, repo, path, repo.GitRepo.IsBranchExist)
if len(ref) == 0 {
// check if ref is HEAD
parts := strings.Split(path, "/")
if parts[0] == headRefName {
repo.TreePath = strings.Join(parts[1:], "/")
return repo.Repository.DefaultBranch
}
// maybe it's a renamed branch
return getRefNameFromPath(ctx, repo, path, func(s string) bool {
b, exist, err := git_model.FindRenamedBranch(ctx, repo.Repository.ID, s)
@@ -861,6 +874,16 @@ func getRefName(ctx *Base, repo *Repository, pathType RepoRefType) string {
repo.TreePath = strings.Join(parts[1:], "/")
return parts[0]
}
if len(parts) > 0 && parts[0] == headRefName {
// HEAD ref points to last default branch commit
commit, err := repo.GitRepo.GetBranchCommit(repo.Repository.DefaultBranch)
if err != nil {
return ""
}
repo.TreePath = strings.Join(parts[1:], "/")
return commit.ID.String()
}
case RepoRefBlob:
_, err := repo.GitRepo.GetBlob(path)
if err != nil {
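
The HEAD handling added above maps a path such as "HEAD/docs/README.md" to the default branch (or its tip commit) plus the remaining tree path. A standalone sketch of that rule (hypothetical helper, not Gitea code):

package main

import (
	"fmt"
	"strings"
)

// resolveHeadPath mirrors the rule above: when the first path segment is "HEAD",
// the ref becomes the repository's default branch and the rest of the path is
// treated as the tree path.
func resolveHeadPath(path, defaultBranch string) (ref, treePath string, ok bool) {
	parts := strings.Split(path, "/")
	if parts[0] != "HEAD" {
		return "", "", false
	}
	return defaultBranch, strings.Join(parts[1:], "/"), true
}

func main() {
	ref, tree, ok := resolveHeadPath("HEAD/docs/README.md", "main")
	fmt.Println(ref, tree, ok) // main docs/README.md true
}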

View File

@@ -83,8 +83,7 @@ func LoadRepo(t *testing.T, ctx gocontext.Context, repoID int64) {
ctx.Repo = repo
doer = ctx.Doer
default:
assert.Fail(t, "context is not *context.Context or *context.APIContext")
return
assert.FailNow(t, "context is not *context.Context or *context.APIContext")
}
repo.Repository = unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: repoID})
@@ -105,8 +104,7 @@ func LoadRepoCommit(t *testing.T, ctx gocontext.Context) {
case *context.APIContext:
repo = ctx.Repo
default:
assert.Fail(t, "context is not *context.Context or *context.APIContext")
return
assert.FailNow(t, "context is not *context.Context or *context.APIContext")
}
gitRepo, err := git.OpenRepository(ctx, repo.Repository.RepoPath())
@@ -130,8 +128,7 @@ func LoadUser(t *testing.T, ctx gocontext.Context, userID int64) {
case *context.APIContext:
ctx.Doer = doer
default:
assert.Fail(t, "context is not *context.Context or *context.APIContext")
return
assert.FailNow(t, "context is not *context.Context or *context.APIContext")
}
}

View File

@@ -33,7 +33,7 @@ func checkAuthorizedKeys(ctx context.Context, logger log.Logger, autofix bool) e
return fmt.Errorf("Unable to open authorized_keys file. ERROR: %w", err)
}
logger.Warn("Unable to open authorized_keys. (ERROR: %v). Attempting to rewrite...", err)
if err = asymkey_model.RewriteAllPublicKeys(); err != nil {
if err = asymkey_model.RewriteAllPublicKeys(ctx); err != nil {
logger.Critical("Unable to rewrite authorized_keys file. ERROR: %v", err)
return fmt.Errorf("Unable to rewrite authorized_keys file. ERROR: %w", err)
}
@@ -76,7 +76,7 @@ func checkAuthorizedKeys(ctx context.Context, logger log.Logger, autofix bool) e
return fmt.Errorf(`authorized_keys is out of date and should be regenerated with "gitea admin regenerate keys" or "gitea doctor --run authorized-keys --fix"`)
}
logger.Warn("authorized_keys is out of date. Attempting rewrite...")
err = asymkey_model.RewriteAllPublicKeys()
err = asymkey_model.RewriteAllPublicKeys(ctx)
if err != nil {
logger.Critical("Unable to rewrite authorized_keys file. ERROR: %v", err)
return fmt.Errorf("Unable to rewrite authorized_keys file. ERROR: %w", err)

View File

@@ -101,7 +101,7 @@ func checkDBConsistency(ctx context.Context, logger log.Logger, autofix bool) er
},
// find releases without existing repository
genericOrphanCheck("Orphaned Releases without existing repository",
"release", "repository", "release.repo_id=repository.id"),
"release", "repository", "`release`.repo_id=repository.id"),
// find pulls without existing issues
genericOrphanCheck("Orphaned PullRequests without existing issue",
"pull_request", "issue", "pull_request.issue_id=issue.id"),
@@ -168,9 +168,9 @@ func checkDBConsistency(ctx context.Context, logger log.Logger, autofix bool) er
// find protected branches without existing repository
genericOrphanCheck("Protected Branches without existing repository",
"protected_branch", "repository", "protected_branch.repo_id=repository.id"),
// find deleted branches without existing repository
genericOrphanCheck("Deleted Branches without existing repository",
"deleted_branch", "repository", "deleted_branch.repo_id=repository.id"),
// find branches without existing repository
genericOrphanCheck("Branches without existing repository",
"branch", "repository", "branch.repo_id=repository.id"),
// find LFS locks without existing repository
genericOrphanCheck("LFS locks without existing repository",
"lfs_lock", "repository", "lfs_lock.repo_id=repository.id"),
@@ -189,6 +189,9 @@ func checkDBConsistency(ctx context.Context, logger log.Logger, autofix bool) er
// find action without repository
genericOrphanCheck("Action entries without existing repository",
"action", "repository", "action.repo_id=repository.id"),
// find action without user
genericOrphanCheck("Action entries without existing user",
"action", "user", "action.act_user_id=`user`.id"),
// find OAuth2Grant without existing user
genericOrphanCheck("Orphaned OAuth2Grant without existing User",
"oauth2_grant", "user", "oauth2_grant.user_id=`user`.id"),

View File

@@ -290,7 +290,7 @@ func fixBrokenRepoUnits16961(ctx context.Context, logger log.Logger, autofix boo
return nil
}
return repo_model.UpdateRepoUnit(repoUnit)
return repo_model.UpdateRepoUnit(ctx, repoUnit)
},
)
if err != nil {

View File

@@ -29,7 +29,7 @@ func fixOwnerTeamCreateOrgRepo(ctx context.Context, logger log.Logger, autofix b
return nil
}
return models.UpdateTeam(team, false, false)
return models.UpdateTeam(ctx, team, false, false)
},
)
if err != nil {

View File

@@ -74,7 +74,7 @@ func checkPRMergeBase(ctx context.Context, logger log.Logger, autofix bool) erro
pr.MergeBase = strings.TrimSpace(pr.MergeBase)
if pr.MergeBase != oldMergeBase {
if autofix {
if err := pr.UpdateCols("merge_base"); err != nil {
if err := pr.UpdateCols(ctx, "merge_base"); err != nil {
logger.Critical("Failed to update merge_base. ERROR: %v", err)
return fmt.Errorf("Failed to update merge_base. ERROR: %w", err)
}

View File

@@ -74,7 +74,7 @@ func checkHooks(ctx context.Context, logger log.Logger, autofix bool) error {
func checkUserStarNum(ctx context.Context, logger log.Logger, autofix bool) error {
if autofix {
if err := models.DoctorUserStarNum(); err != nil {
if err := models.DoctorUserStarNum(ctx); err != nil {
logger.Critical("Unable update User Stars numbers")
return err
}

View File

@@ -0,0 +1,70 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package doctor
import (
"context"
"code.gitea.io/gitea/models/db"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/log"
repo_service "code.gitea.io/gitea/services/repository"
"xorm.io/builder"
)
func handleDeleteOrphanedRepos(ctx context.Context, logger log.Logger, autofix bool) error {
test := &consistencyCheck{
Name: "Repos with no existing owner",
Counter: countOrphanedRepos,
Fixer: deleteOrphanedRepos,
FixedMessage: "Deleted all content related to orphaned repos",
}
return test.Run(ctx, logger, autofix)
}
// countOrphanedRepos counts repositories whose owner (owner_id) no longer exists
func countOrphanedRepos(ctx context.Context) (int64, error) {
return db.CountOrphanedObjects(ctx, "repository", "user", "repository.owner_id=user.id")
}
// deleteOrphanedRepos deletes repositories whose owner (owner_id) no longer exists
func deleteOrphanedRepos(ctx context.Context) (int64, error) {
batchSize := db.MaxBatchInsertSize("repository")
e := db.GetEngine(ctx)
var deleted int64
adminUser := &user_model.User{IsAdmin: true}
for {
var ids []int64
if err := e.Table("`repository`").
Join("LEFT", "`user`", "repository.owner_id=user.id").
Where(builder.IsNull{"`user`.id"}).
Select("`repository`.id").Limit(batchSize).Find(&ids); err != nil {
return deleted, err
}
// if we don't get ids we have deleted them all
if len(ids) == 0 {
return deleted, nil
}
for _, id := range ids {
if err := repo_service.DeleteRepositoryDirectly(ctx, adminUser, id, true); err != nil {
return deleted, err
}
deleted++
}
}
}
func init() {
Register(&Check{
Title: "Deleted all content related to orphaned repos",
Name: "delete-orphaned-repos",
IsDefault: false,
Run: handleDeleteOrphanedRepos,
Priority: 4,
})
}

View File

@@ -11,14 +11,14 @@ import (
)
func checkUserType(ctx context.Context, logger log.Logger, autofix bool) error {
count, err := user_model.CountWrongUserType()
count, err := user_model.CountWrongUserType(ctx)
if err != nil {
logger.Critical("Error: %v whilst counting wrong user types")
return err
}
if count > 0 {
if autofix {
if count, err = user_model.FixWrongUserType(); err != nil {
if count, err = user_model.FixWrongUserType(ctx); err != nil {
logger.Critical("Error: %v whilst fixing wrong user types")
return err
}

View File

@@ -71,7 +71,7 @@ loop:
now := timeutil.TimeStampNow().Add(-2)
uidCounts, err := activities_model.GetUIDsAndNotificationCounts(then, now)
uidCounts, err := activities_model.GetUIDsAndNotificationCounts(ctx, then, now)
if err != nil {
log.Error("Unable to get UIDcounts: %v", err)
}
@@ -84,14 +84,14 @@ loop:
then = now
if setting.Service.EnableTimetracking {
usersStopwatches, err := issues_model.GetUIDsAndStopwatch()
usersStopwatches, err := issues_model.GetUIDsAndStopwatch(ctx)
if err != nil {
log.Error("Unable to get GetUIDsAndStopwatch: %v", err)
return
}
for _, userStopwatches := range usersStopwatches {
apiSWs, err := convert.ToStopWatches(userStopwatches.StopWatches)
apiSWs, err := convert.ToStopWatches(ctx, userStopwatches.StopWatches)
if err != nil {
if !issues_model.IsErrIssueNotExist(err) {
log.Error("Unable to APIFormat stopwatches: %v", err)

View File

@@ -13,6 +13,7 @@ import (
"regexp"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/util"
)
// BlamePart represents a block of blame - continuous lines with one sha
@@ -23,12 +24,16 @@ type BlamePart struct {
// BlameReader returns parts of a file's blame one by one
type BlameReader struct {
cmd *Command
output io.WriteCloser
reader io.ReadCloser
bufferedReader *bufio.Reader
done chan error
lastSha *string
ignoreRevsFile *string
}
func (r *BlameReader) UsesIgnoreRevs() bool {
return r.ignoreRevsFile != nil
}
var shaLineRegex = regexp.MustCompile("^([a-z0-9]{40})")
@@ -101,28 +106,44 @@ func (r *BlameReader) Close() error {
r.bufferedReader = nil
_ = r.reader.Close()
_ = r.output.Close()
if r.ignoreRevsFile != nil {
_ = util.Remove(*r.ignoreRevsFile)
}
return err
}
// CreateBlameReader creates reader for given repository, commit and file
func CreateBlameReader(ctx context.Context, repoPath, commitID, file string) (*BlameReader, error) {
cmd := NewCommandContextNoGlobals(ctx, "blame", "--porcelain").
AddDynamicArguments(commitID).
func CreateBlameReader(ctx context.Context, repoPath string, commit *Commit, file string, bypassBlameIgnore bool) (*BlameReader, error) {
var ignoreRevsFile *string
if CheckGitVersionAtLeast("2.23") == nil && !bypassBlameIgnore {
ignoreRevsFile = tryCreateBlameIgnoreRevsFile(commit)
}
cmd := NewCommandContextNoGlobals(ctx, "blame", "--porcelain")
if ignoreRevsFile != nil {
// Possible improvement: use --ignore-revs-file /dev/stdin on unix
// There is no equivalent on Windows. May be implemented if Gitea uses an external git backend.
cmd.AddOptionValues("--ignore-revs-file", *ignoreRevsFile)
}
cmd.AddDynamicArguments(commit.ID.String()).
AddDashesAndList(file).
SetDescription(fmt.Sprintf("GetBlame [repo_path: %s]", repoPath))
reader, stdout, err := os.Pipe()
if err != nil {
if ignoreRevsFile != nil {
_ = util.Remove(*ignoreRevsFile)
}
return nil, err
}
done := make(chan error, 1)
go func(cmd *Command, dir string, stdout io.WriteCloser, done chan error) {
go func() {
stderr := bytes.Buffer{}
// TODO: it doesn't work for directories (the directories shouldn't be "blamed"), and the "err" should be returned by "Read" but not by "Close"
err := cmd.Run(&RunOpts{
UseContextTimeout: true,
Dir: dir,
Dir: repoPath,
Stdout: stdout,
Stderr: &stderr,
})
@@ -131,15 +152,42 @@ func CreateBlameReader(ctx context.Context, repoPath, commitID, file string) (*B
if err != nil {
log.Error("Error running git blame (dir: %v): %v, stderr: %v", repoPath, err, stderr.String())
}
}(cmd, repoPath, stdout, done)
}()
bufferedReader := bufio.NewReader(reader)
return &BlameReader{
cmd: cmd,
output: stdout,
reader: reader,
bufferedReader: bufferedReader,
done: done,
ignoreRevsFile: ignoreRevsFile,
}, nil
}
func tryCreateBlameIgnoreRevsFile(commit *Commit) *string {
entry, err := commit.GetTreeEntryByPath(".git-blame-ignore-revs")
if err != nil {
return nil
}
r, err := entry.Blob().DataAsync()
if err != nil {
return nil
}
defer r.Close()
f, err := os.CreateTemp("", "gitea_git-blame-ignore-revs")
if err != nil {
return nil
}
_, err = io.Copy(f, r)
_ = f.Close()
if err != nil {
_ = util.Remove(f.Name())
return nil
}
return util.ToPointer(f.Name())
}
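
A hedged usage sketch of the new CreateBlameReader signature (the repository path and commit ID are reused from the tests below; error handling is abbreviated):

package main

import (
	"context"
	"fmt"
	"log"

	"code.gitea.io/gitea/modules/git"
)

func main() {
	ctx := context.Background()

	repo, err := git.OpenRepository(ctx, "./tests/repos/repo5_pulls")
	if err != nil {
		log.Fatal(err)
	}
	defer repo.Close()

	commit, err := repo.GetCommit("f32b0a9dfd09a60f616f29158f772cedd89942d2")
	if err != nil {
		log.Fatal(err)
	}

	// bypassBlameIgnore=false lets a .git-blame-ignore-revs file in the commit
	// tree be honoured (when git >= 2.23), per the logic above.
	blameReader, err := git.CreateBlameReader(ctx, "./tests/repos/repo5_pulls", commit, "README.md", false)
	if err != nil {
		log.Fatal(err)
	}
	defer blameReader.Close()

	fmt.Println("uses ignore revs:", blameReader.UsesIgnoreRevs())
	for {
		part, err := blameReader.NextPart()
		if err != nil {
			log.Fatal(err)
		}
		if part == nil {
			break // all parts have been read
		}
		fmt.Printf("%+v\n", part)
	}
}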

View File

@@ -14,27 +14,127 @@ func TestReadingBlameOutput(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
blameReader, err := CreateBlameReader(ctx, "./tests/repos/repo5_pulls", "f32b0a9dfd09a60f616f29158f772cedd89942d2", "README.md")
assert.NoError(t, err)
defer blameReader.Close()
parts := []*BlamePart{
{
"72866af952e98d02a73003501836074b286a78f6",
[]string{
"# test_repo",
"Test repository for testing migration from github to gitea",
},
},
{
"f32b0a9dfd09a60f616f29158f772cedd89942d2",
[]string{"", "Do not make any changes to this repo it is used for unit testing"},
},
}
for _, part := range parts {
actualPart, err := blameReader.NextPart()
t.Run("Without .git-blame-ignore-revs", func(t *testing.T) {
repo, err := OpenRepository(ctx, "./tests/repos/repo5_pulls")
assert.NoError(t, err)
assert.Equal(t, part, actualPart)
}
defer repo.Close()
commit, err := repo.GetCommit("f32b0a9dfd09a60f616f29158f772cedd89942d2")
assert.NoError(t, err)
parts := []*BlamePart{
{
"72866af952e98d02a73003501836074b286a78f6",
[]string{
"# test_repo",
"Test repository for testing migration from github to gitea",
},
},
{
"f32b0a9dfd09a60f616f29158f772cedd89942d2",
[]string{"", "Do not make any changes to this repo it is used for unit testing"},
},
}
for _, bypass := range []bool{false, true} {
blameReader, err := CreateBlameReader(ctx, "./tests/repos/repo5_pulls", commit, "README.md", bypass)
assert.NoError(t, err)
assert.NotNil(t, blameReader)
defer blameReader.Close()
assert.False(t, blameReader.UsesIgnoreRevs())
for _, part := range parts {
actualPart, err := blameReader.NextPart()
assert.NoError(t, err)
assert.Equal(t, part, actualPart)
}
// make sure all parts have been read
actualPart, err := blameReader.NextPart()
assert.Nil(t, actualPart)
assert.NoError(t, err)
}
})
t.Run("With .git-blame-ignore-revs", func(t *testing.T) {
repo, err := OpenRepository(ctx, "./tests/repos/repo6_blame")
assert.NoError(t, err)
defer repo.Close()
full := []*BlamePart{
{
"af7486bd54cfc39eea97207ca666aa69c9d6df93",
[]string{"line", "line"},
},
{
"45fb6cbc12f970b04eacd5cd4165edd11c8d7376",
[]string{"changed line"},
},
{
"af7486bd54cfc39eea97207ca666aa69c9d6df93",
[]string{"line", "line", ""},
},
}
cases := []struct {
CommitID string
UsesIgnoreRevs bool
Bypass bool
Parts []*BlamePart
}{
{
CommitID: "544d8f7a3b15927cddf2299b4b562d6ebd71b6a7",
UsesIgnoreRevs: true,
Bypass: false,
Parts: []*BlamePart{
{
"af7486bd54cfc39eea97207ca666aa69c9d6df93",
[]string{"line", "line", "changed line", "line", "line", ""},
},
},
},
{
CommitID: "544d8f7a3b15927cddf2299b4b562d6ebd71b6a7",
UsesIgnoreRevs: false,
Bypass: true,
Parts: full,
},
{
CommitID: "45fb6cbc12f970b04eacd5cd4165edd11c8d7376",
UsesIgnoreRevs: false,
Bypass: false,
Parts: full,
},
{
CommitID: "45fb6cbc12f970b04eacd5cd4165edd11c8d7376",
UsesIgnoreRevs: false,
Bypass: false,
Parts: full,
},
}
for _, c := range cases {
commit, err := repo.GetCommit(c.CommitID)
assert.NoError(t, err)
blameReader, err := CreateBlameReader(ctx, "./tests/repos/repo6_blame", commit, "blame.txt", c.Bypass)
assert.NoError(t, err)
assert.NotNil(t, blameReader)
defer blameReader.Close()
assert.Equal(t, c.UsesIgnoreRevs, blameReader.UsesIgnoreRevs())
for _, part := range c.Parts {
actualPart, err := blameReader.NextPart()
assert.NoError(t, err)
assert.Equal(t, part, actualPart)
}
// make sure all parts have been read
actualPart, err := blameReader.NextPart()
assert.Nil(t, actualPart)
assert.NoError(t, err)
}
})
}

View File

@@ -86,7 +86,8 @@ func (repo *Repository) IsEmpty() (bool, error) {
Stdout: &output,
Stderr: &errbuf,
}); err != nil {
if err.Error() == "exit status 1" && errbuf.String() == "" {
if (err.Error() == "exit status 1" && strings.TrimSpace(errbuf.String()) == "") || err.Error() == "exit status 129" {
// git 2.11 exits with 129 if the repo is empty
return true, nil
}
return true, fmt.Errorf("check empty: %w - %s", err, errbuf.String())

View File

@@ -27,7 +27,7 @@ func Test_nulSeparatedAttributeWriter_ReadAttribute(t *testing.T) {
assert.Equal(t, "linguist-vendored", attr.Attribute)
assert.Equal(t, "unspecified", attr.Value)
case <-time.After(100 * time.Millisecond):
assert.Fail(t, "took too long to read an attribute from the list")
assert.FailNow(t, "took too long to read an attribute from the list")
}
// Write a second attribute again
n, err = wr.Write([]byte(testStr))
@@ -41,7 +41,7 @@ func Test_nulSeparatedAttributeWriter_ReadAttribute(t *testing.T) {
assert.Equal(t, "linguist-vendored", attr.Attribute)
assert.Equal(t, "unspecified", attr.Value)
case <-time.After(100 * time.Millisecond):
assert.Fail(t, "took too long to read an attribute from the list")
assert.FailNow(t, "took too long to read an attribute from the list")
}
// Write a partial attribute
@@ -52,14 +52,14 @@ func Test_nulSeparatedAttributeWriter_ReadAttribute(t *testing.T) {
select {
case <-wr.ReadAttribute():
assert.Fail(t, "There should not be an attribute ready to read")
assert.FailNow(t, "There should not be an attribute ready to read")
case <-time.After(100 * time.Millisecond):
}
_, err = wr.Write([]byte("attribute\x00"))
assert.NoError(t, err)
select {
case <-wr.ReadAttribute():
assert.Fail(t, "There should not be an attribute ready to read")
assert.FailNow(t, "There should not be an attribute ready to read")
case <-time.After(100 * time.Millisecond):
}

View File

@@ -116,17 +116,13 @@ func (repo *Repository) searchCommits(id SHA1, opts SearchCommitsOptions) ([]*Co
c.AddArguments("-i")
// add authors if present in search query
if len(opts.Authors) > 0 {
for _, v := range opts.Authors {
c.AddOptionFormat("--author=%s", v)
}
for _, v := range opts.Authors {
c.AddOptionFormat("--author=%s", v)
}
// add committers if present in search query
if len(opts.Committers) > 0 {
for _, v := range opts.Committers {
c.AddOptionFormat("--committer=%s", v)
}
for _, v := range opts.Committers {
c.AddOptionFormat("--committer=%s", v)
}
// add time constraints if present in search query
@@ -150,10 +146,8 @@ func (repo *Repository) searchCommits(id SHA1, opts SearchCommitsOptions) ([]*Co
// add remaining keywords from search string
// note this is done only for command created above
if len(opts.Keywords) > 0 {
for _, v := range opts.Keywords {
cmd.AddOptionFormat("--grep=%s", v)
}
for _, v := range opts.Keywords {
cmd.AddOptionFormat("--grep=%s", v)
}
// search for commits matching given constraints and keywords in commit msg
@@ -168,25 +162,23 @@ func (repo *Repository) searchCommits(id SHA1, opts SearchCommitsOptions) ([]*Co
// if there are any keywords (ie not committer:, author:, time:)
// then let's iterate over them
if len(opts.Keywords) > 0 {
for _, v := range opts.Keywords {
// ignore anything not matching a valid sha pattern
if IsValidSHAPattern(v) {
// create new git log command with 1 commit limit
hashCmd := NewCommand(repo.Ctx, "log", "-1", prettyLogFormat)
// add previous arguments except for --grep and --all
addCommonSearchArgs(hashCmd)
// add keyword as <commit>
hashCmd.AddDynamicArguments(v)
for _, v := range opts.Keywords {
// ignore anything not matching a valid sha pattern
if IsValidSHAPattern(v) {
// create new git log command with 1 commit limit
hashCmd := NewCommand(repo.Ctx, "log", "-1", prettyLogFormat)
// add previous arguments except for --grep and --all
addCommonSearchArgs(hashCmd)
// add keyword as <commit>
hashCmd.AddDynamicArguments(v)
// search with given constraints for commit matching sha hash of v
hashMatching, _, err := hashCmd.RunStdBytes(&RunOpts{Dir: repo.Path})
if err != nil || bytes.Contains(stdout, hashMatching) {
continue
}
stdout = append(stdout, hashMatching...)
stdout = append(stdout, '\n')
// search with given constraints for commit matching sha hash of v
hashMatching, _, err := hashCmd.RunStdBytes(&RunOpts{Dir: repo.Path})
if err != nil || bytes.Contains(stdout, hashMatching) {
continue
}
stdout = append(stdout, hashMatching...)
stdout = append(stdout, '\n')
}
}

View File

@@ -71,7 +71,6 @@ func TestRepository_GetTag(t *testing.T) {
if lTag == nil {
assert.NotNil(t, lTag)
assert.FailNow(t, "nil lTag: %s", lTagName)
return
}
assert.EqualValues(t, lTagName, lTag.Name)
assert.EqualValues(t, lTagCommitID, lTag.ID.String())
@@ -105,7 +104,6 @@ func TestRepository_GetTag(t *testing.T) {
if aTag == nil {
assert.NotNil(t, aTag)
assert.FailNow(t, "nil aTag: %s", aTagName)
return
}
assert.EqualValues(t, aTagName, aTag.Name)
assert.EqualValues(t, aTagID, aTag.ID.String())

View File

@@ -0,0 +1 @@
ref: refs/heads/master

View File

@@ -0,0 +1,4 @@
[core]
repositoryformatversion = 0
filemode = true
bare = true

View File

@@ -0,0 +1 @@
544d8f7a3b15927cddf2299b4b562d6ebd71b6a7

View File

@@ -116,10 +116,10 @@ func (graph *Graph) LoadAndProcessCommits(ctx context.Context, repository *repo_
c.Verification = asymkey_model.ParseCommitWithSignature(ctx, c.Commit)
_ = asymkey_model.CalculateTrustStatus(c.Verification, repository.GetTrustModel(), func(user *user_model.User) (bool, error) {
return repo_model.IsOwnerMemberCollaborator(repository, user.ID)
return repo_model.IsOwnerMemberCollaborator(ctx, repository, user.ID)
}, &keyMap)
statuses, _, err := git_model.GetLatestCommitStatus(db.DefaultContext, repository.ID, c.Commit.ID.String(), db.ListOptions{})
statuses, _, err := git_model.GetLatestCommitStatus(ctx, repository.ID, c.Commit.ID.String(), db.ListOptions{})
if err != nil {
log.Error("GetLatestCommitStatus: %v", err)
} else {

View File

@@ -7,12 +7,17 @@ import (
"context"
"fmt"
"net"
"net/url"
"syscall"
"time"
)
// NewDialContext returns a DialContext for Transport; the DialContext performs allow/block list checks
func NewDialContext(usage string, allowList, blockList *HostMatchList) func(ctx context.Context, network, addr string) (net.Conn, error) {
return NewDialContextWithProxy(usage, allowList, blockList, nil)
}
func NewDialContextWithProxy(usage string, allowList, blockList *HostMatchList, proxy *url.URL) func(ctx context.Context, network, addr string) (net.Conn, error) {
// How Go HTTP Client works with redirection:
// transport.RoundTrip URL=http://domain.com, Host=domain.com
// transport.DialContext addrOrHost=domain.com:80
@@ -26,11 +31,18 @@ func NewDialContext(usage string, allowList, blockList *HostMatchList) func(ctx
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
Control: func(network, ipAddr string, c syscall.RawConn) (err error) {
var host string
if host, _, err = net.SplitHostPort(addrOrHost); err != nil {
Control: func(network, ipAddr string, c syscall.RawConn) error {
host, port, err := net.SplitHostPort(addrOrHost)
if err != nil {
return err
}
if proxy != nil {
// Always allow the host of the proxy, but only on the specified port.
if host == proxy.Hostname() && port == proxy.Port() {
return nil
}
}
// in Control func, the addr was already resolved to IP:PORT format, there is no cost to do ResolveTCPAddr here
tcpAddr, err := net.ResolveTCPAddr(network, ipAddr)
if err != nil {
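
A hedged sketch of how the new proxy-aware dialer could be wired into an http.Transport (the import path, usage label, and proxy URL here are illustrative assumptions, not values from this commit):

package example

import (
	"net/http"
	"net/url"

	"code.gitea.io/gitea/modules/hostmatcher"
)

func newMigrationTransport(allowList, blockList *hostmatcher.HostMatchList) *http.Transport {
	proxyURL, _ := url.Parse("http://proxy.internal:3128") // placeholder proxy
	return &http.Transport{
		Proxy: http.ProxyURL(proxyURL),
		// The Control hook inside the returned DialContext allows the proxy
		// host itself on its configured port, even if the allow/block lists
		// would otherwise reject it.
		DialContext: hostmatcher.NewDialContextWithProxy("migration", allowList, blockList, proxyURL),
	}
}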

View File

@@ -30,7 +30,14 @@ func getRepoChanges(ctx context.Context, repo *repo_model.Repository, revision s
return nil, err
}
if len(status.CommitSha) == 0 {
needGenesis := len(status.CommitSha) == 0
if !needGenesis {
hasAncestorCmd := git.NewCommand(ctx, "merge-base").AddDynamicArguments(repo.CodeIndexerStatus.CommitSha, revision)
stdout, _, _ := hasAncestorCmd.RunStdString(&git.RunOpts{Dir: repo.RepoPath()})
needGenesis = len(stdout) == 0
}
if needGenesis {
return genesisChanges(ctx, repo, revision)
}
return nonGenesisChanges(ctx, repo, revision)

View File

@@ -288,7 +288,7 @@ func populateRepoIndexer(ctx context.Context) {
return
default:
}
ids, err := repo_model.GetUnindexedRepos(repo_model.RepoIndexerTypeCode, maxRepoID, 0, 50)
ids, err := repo_model.GetUnindexedRepos(ctx, repo_model.RepoIndexerTypeCode, maxRepoID, 0, 50)
if err != nil {
log.Error("populateRepoIndexer: %v", err)
return

View File

@@ -6,7 +6,6 @@ package code
import (
"context"
"os"
"path/filepath"
"testing"
"code.gitea.io/gitea/models/unittest"
@@ -23,9 +22,7 @@ import (
)
func TestMain(m *testing.M) {
unittest.MainTest(m, &unittest.TestOptions{
GiteaRootPath: filepath.Join("..", "..", ".."),
})
unittest.MainTest(m)
}
func testIndexer(name string, t *testing.T, indexer internal.Indexer) {
@@ -99,11 +96,10 @@ func TestBleveIndexAndSearch(t *testing.T) {
idx := bleve.NewIndexer(dir)
_, err := idx.Init(context.Background())
if err != nil {
assert.Fail(t, "Unable to create bleve indexer Error: %v", err)
if idx != nil {
idx.Close()
}
return
assert.FailNow(t, "Unable to create bleve indexer Error: %v", err)
}
defer idx.Close()
@@ -121,11 +117,10 @@ func TestESIndexAndSearch(t *testing.T) {
indexer := elasticsearch.NewIndexer(u, "gitea_codes")
if _, err := indexer.Init(context.Background()); err != nil {
assert.Fail(t, "Unable to init ES indexer Error: %v", err)
if indexer != nil {
indexer.Close()
}
return
assert.FailNow(t, "Unable to init ES indexer Error: %v", err)
}
defer indexer.Close()

View File

@@ -96,8 +96,7 @@ func ToDBOptions(ctx context.Context, options *internal.SearchOptions) (*issue_m
}
if len(options.IncludedLabelIDs) == 0 && len(options.IncludedAnyLabelIDs) > 0 {
_ = ctx // issue_model.GetLabelsByIDs should be called with ctx, this line can be removed when it's done.
labels, err := issue_model.GetLabelsByIDs(options.IncludedAnyLabelIDs, "name")
labels, err := issue_model.GetLabelsByIDs(ctx, options.IncludedAnyLabelIDs, "name")
if err != nil {
return nil, fmt.Errorf("GetLabelsByIDs: %v", err)
}

View File

@@ -204,12 +204,13 @@ func getIssueIndexerQueueHandler(ctx context.Context) func(items ...*IndexerMeta
func populateIssueIndexer(ctx context.Context) {
ctx, _, finished := process.GetManager().AddTypedContext(ctx, "Service: PopulateIssueIndexer", process.SystemProcessType, true)
defer finished()
if err := PopulateIssueIndexer(ctx, true); err != nil {
ctx = contextWithKeepRetry(ctx) // keep retrying since it's a background task
if err := PopulateIssueIndexer(ctx); err != nil {
log.Error("Issue indexer population failed: %v", err)
}
}
func PopulateIssueIndexer(ctx context.Context, keepRetrying bool) error {
func PopulateIssueIndexer(ctx context.Context) error {
for page := 1; ; page++ {
select {
case <-ctx.Done():
@@ -232,20 +233,8 @@ func PopulateIssueIndexer(ctx context.Context, keepRetrying bool) error {
}
for _, repo := range repos {
for {
select {
case <-ctx.Done():
return fmt.Errorf("shutdown before completion: %w", ctx.Err())
default:
}
if err := updateRepoIndexer(ctx, repo.ID); err != nil {
if keepRetrying && ctx.Err() == nil {
log.Warn("Retry to populate issue indexer for repo %d: %v", repo.ID, err)
continue
}
return fmt.Errorf("populate issue indexer for repo %d: %v", repo.ID, err)
}
break
if err := updateRepoIndexer(ctx, repo.ID); err != nil {
return fmt.Errorf("populate issue indexer for repo %d: %v", repo.ID, err)
}
}
}
@@ -259,8 +248,8 @@ func UpdateRepoIndexer(ctx context.Context, repoID int64) {
}
// UpdateIssueIndexer add/update an issue to the issue indexer
func UpdateIssueIndexer(issueID int64) {
if err := updateIssueIndexer(issueID); err != nil {
func UpdateIssueIndexer(ctx context.Context, issueID int64) {
if err := updateIssueIndexer(ctx, issueID); err != nil {
log.Error("Unable to push issue %d to issue indexer: %v", issueID, err)
}
}

View File

@@ -5,14 +5,13 @@ package issues
import (
"context"
"path"
"path/filepath"
"testing"
"time"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/models/unittest"
"code.gitea.io/gitea/modules/indexer/issues/bleve"
"code.gitea.io/gitea/modules/indexer/issues/internal"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util"
_ "code.gitea.io/gitea/models"
_ "code.gitea.io/gitea/models/actions"
@@ -22,71 +21,7 @@ import (
)
func TestMain(m *testing.M) {
unittest.MainTest(m, &unittest.TestOptions{
GiteaRootPath: filepath.Join("..", "..", ".."),
})
}
func TestBleveSearchIssues(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
setting.CfgProvider, _ = setting.NewConfigProviderFromData("")
tmpIndexerDir := t.TempDir()
setting.CfgProvider.Section("queue.issue_indexer").Key("DATADIR").MustString(path.Join(tmpIndexerDir, "issues.queue"))
oldIssuePath := setting.Indexer.IssuePath
setting.Indexer.IssuePath = path.Join(tmpIndexerDir, "issues.queue")
defer func() {
setting.Indexer.IssuePath = oldIssuePath
}()
setting.Indexer.IssueType = "bleve"
setting.LoadQueueSettings()
InitIssueIndexer(true)
defer func() {
if bleveIndexer, ok := (*globalIndexer.Load()).(*bleve.Indexer); ok {
bleveIndexer.Close()
}
}()
time.Sleep(5 * time.Second)
t.Run("issue2", func(t *testing.T) {
ids, _, err := SearchIssues(context.TODO(), &SearchOptions{
Keyword: "issue2",
RepoIDs: []int64{1},
})
assert.NoError(t, err)
assert.EqualValues(t, []int64{2}, ids)
})
t.Run("first", func(t *testing.T) {
ids, _, err := SearchIssues(context.TODO(), &SearchOptions{
Keyword: "first",
RepoIDs: []int64{1},
})
assert.NoError(t, err)
assert.EqualValues(t, []int64{1}, ids)
})
t.Run("for", func(t *testing.T) {
ids, _, err := SearchIssues(context.TODO(), &SearchOptions{
Keyword: "for",
RepoIDs: []int64{1},
})
assert.NoError(t, err)
assert.ElementsMatch(t, []int64{1, 2, 3, 5, 11}, ids)
})
t.Run("good", func(t *testing.T) {
ids, _, err := SearchIssues(context.TODO(), &SearchOptions{
Keyword: "good",
RepoIDs: []int64{1},
})
assert.NoError(t, err)
assert.EqualValues(t, []int64{1}, ids)
})
unittest.MainTest(m)
}
func TestDBSearchIssues(t *testing.T) {
@@ -95,39 +30,390 @@ func TestDBSearchIssues(t *testing.T) {
setting.Indexer.IssueType = "db"
InitIssueIndexer(true)
t.Run("issue2", func(t *testing.T) {
ids, _, err := SearchIssues(context.TODO(), &SearchOptions{
Keyword: "issue2",
RepoIDs: []int64{1},
})
assert.NoError(t, err)
assert.EqualValues(t, []int64{2}, ids)
})
t.Run("first", func(t *testing.T) {
ids, _, err := SearchIssues(context.TODO(), &SearchOptions{
Keyword: "first",
RepoIDs: []int64{1},
})
assert.NoError(t, err)
assert.EqualValues(t, []int64{1}, ids)
})
t.Run("for", func(t *testing.T) {
ids, _, err := SearchIssues(context.TODO(), &SearchOptions{
Keyword: "for",
RepoIDs: []int64{1},
})
assert.NoError(t, err)
assert.ElementsMatch(t, []int64{1, 2, 3, 5, 11}, ids)
})
t.Run("good", func(t *testing.T) {
ids, _, err := SearchIssues(context.TODO(), &SearchOptions{
Keyword: "good",
RepoIDs: []int64{1},
})
assert.NoError(t, err)
assert.EqualValues(t, []int64{1}, ids)
})
t.Run("search issues with keyword", searchIssueWithKeyword)
t.Run("search issues in repo", searchIssueInRepo)
t.Run("search issues by ID", searchIssueByID)
t.Run("search issues is pr", searchIssueIsPull)
t.Run("search issues is closed", searchIssueIsClosed)
t.Run("search issues by milestone", searchIssueByMilestoneID)
t.Run("search issues by label", searchIssueByLabelID)
t.Run("search issues by time", searchIssueByTime)
t.Run("search issues with order", searchIssueWithOrder)
t.Run("search issues in project", searchIssueInProject)
t.Run("search issues with paginator", searchIssueWithPaginator)
}
func searchIssueWithKeyword(t *testing.T) {
tests := []struct {
opts SearchOptions
expectedIDs []int64
}{
{
SearchOptions{
Keyword: "issue2",
RepoIDs: []int64{1},
},
[]int64{2},
},
{
SearchOptions{
Keyword: "first",
RepoIDs: []int64{1},
},
[]int64{1},
},
{
SearchOptions{
Keyword: "for",
RepoIDs: []int64{1},
},
[]int64{11, 5, 3, 2, 1},
},
{
SearchOptions{
Keyword: "good",
RepoIDs: []int64{1},
},
[]int64{1},
},
}
for _, test := range tests {
issueIDs, _, err := SearchIssues(context.TODO(), &test.opts)
if !assert.NoError(t, err) {
return
}
assert.Equal(t, test.expectedIDs, issueIDs)
}
}
func searchIssueInRepo(t *testing.T) {
tests := []struct {
opts SearchOptions
expectedIDs []int64
}{
{
SearchOptions{
RepoIDs: []int64{1},
},
[]int64{11, 5, 3, 2, 1},
},
{
SearchOptions{
RepoIDs: []int64{2},
},
[]int64{7, 4},
},
{
SearchOptions{
RepoIDs: []int64{3},
},
[]int64{12, 6},
},
{
SearchOptions{
RepoIDs: []int64{4},
},
[]int64{},
},
{
SearchOptions{
RepoIDs: []int64{5},
},
[]int64{15},
},
}
for _, test := range tests {
issueIDs, _, err := SearchIssues(context.TODO(), &test.opts)
if !assert.NoError(t, err) {
return
}
assert.Equal(t, test.expectedIDs, issueIDs)
}
}
func searchIssueByID(t *testing.T) {
int64Pointer := func(x int64) *int64 {
return &x
}
tests := []struct {
opts SearchOptions
expectedIDs []int64
}{
{
SearchOptions{
PosterID: int64Pointer(1),
},
[]int64{11, 6, 3, 2, 1},
},
{
SearchOptions{
AssigneeID: int64Pointer(1),
},
[]int64{6, 1},
},
{
SearchOptions{
MentionID: int64Pointer(4),
},
[]int64{1},
},
{
SearchOptions{
ReviewedID: int64Pointer(1),
},
[]int64{},
},
{
SearchOptions{
ReviewRequestedID: int64Pointer(1),
},
[]int64{12},
},
{
SearchOptions{
SubscriberID: int64Pointer(1),
},
[]int64{11, 6, 5, 3, 2, 1},
},
{
// issue 20 requests review from user 15 and from team 5, which user 15 belongs to,
// so the review request number for issue 20 should be 1
SearchOptions{
ReviewRequestedID: int64Pointer(15),
},
[]int64{12, 20},
},
{
// user 20 approved issue 20, so nothing is returned
SearchOptions{
ReviewRequestedID: int64Pointer(20),
},
[]int64{},
},
}
for _, test := range tests {
issueIDs, _, err := SearchIssues(context.TODO(), &test.opts)
if !assert.NoError(t, err) {
return
}
assert.Equal(t, test.expectedIDs, issueIDs)
}
}
func searchIssueIsPull(t *testing.T) {
tests := []struct {
opts SearchOptions
expectedIDs []int64
}{
{
SearchOptions{
IsPull: util.OptionalBoolFalse,
},
[]int64{17, 16, 15, 14, 13, 6, 5, 18, 10, 7, 4, 1},
},
{
SearchOptions{
IsPull: util.OptionalBoolTrue,
},
[]int64{12, 11, 20, 19, 9, 8, 3, 2},
},
}
for _, test := range tests {
issueIDs, _, err := SearchIssues(context.TODO(), &test.opts)
if !assert.NoError(t, err) {
return
}
assert.Equal(t, test.expectedIDs, issueIDs)
}
}
func searchIssueIsClosed(t *testing.T) {
tests := []struct {
opts SearchOptions
expectedIDs []int64
}{
{
SearchOptions{
IsClosed: util.OptionalBoolFalse,
},
[]int64{17, 16, 15, 14, 13, 12, 11, 20, 6, 19, 18, 10, 7, 9, 8, 3, 2, 1},
},
{
SearchOptions{
IsClosed: util.OptionalBoolTrue,
},
[]int64{5, 4},
},
}
for _, test := range tests {
issueIDs, _, err := SearchIssues(context.TODO(), &test.opts)
if !assert.NoError(t, err) {
return
}
assert.Equal(t, test.expectedIDs, issueIDs)
}
}
func searchIssueByMilestoneID(t *testing.T) {
tests := []struct {
opts SearchOptions
expectedIDs []int64
}{
{
SearchOptions{
MilestoneIDs: []int64{1},
},
[]int64{2},
},
{
SearchOptions{
MilestoneIDs: []int64{3},
},
[]int64{3},
},
}
for _, test := range tests {
issueIDs, _, err := SearchIssues(context.TODO(), &test.opts)
if !assert.NoError(t, err) {
return
}
assert.Equal(t, test.expectedIDs, issueIDs)
}
}
func searchIssueByLabelID(t *testing.T) {
tests := []struct {
opts SearchOptions
expectedIDs []int64
}{
{
SearchOptions{
IncludedLabelIDs: []int64{1},
},
[]int64{2, 1},
},
{
SearchOptions{
IncludedLabelIDs: []int64{4},
},
[]int64{2},
},
{
SearchOptions{
ExcludedLabelIDs: []int64{1},
},
[]int64{17, 16, 15, 14, 13, 12, 11, 20, 6, 5, 19, 18, 10, 7, 4, 9, 8, 3},
},
}
for _, test := range tests {
issueIDs, _, err := SearchIssues(context.TODO(), &test.opts)
if !assert.NoError(t, err) {
return
}
assert.Equal(t, test.expectedIDs, issueIDs)
}
}
func searchIssueByTime(t *testing.T) {
int64Pointer := func(i int64) *int64 {
return &i
}
tests := []struct {
opts SearchOptions
expectedIDs []int64
}{
{
SearchOptions{
UpdatedAfterUnix: int64Pointer(0),
},
[]int64{17, 16, 15, 14, 13, 12, 11, 20, 6, 5, 19, 18, 10, 7, 4, 9, 8, 3, 2, 1},
},
}
for _, test := range tests {
issueIDs, _, err := SearchIssues(context.TODO(), &test.opts)
if !assert.NoError(t, err) {
return
}
assert.Equal(t, test.expectedIDs, issueIDs)
}
}
func searchIssueWithOrder(t *testing.T) {
tests := []struct {
opts SearchOptions
expectedIDs []int64
}{
{
SearchOptions{
SortBy: internal.SortByCreatedAsc,
},
[]int64{1, 2, 3, 8, 9, 4, 7, 10, 18, 19, 5, 6, 20, 11, 12, 13, 14, 15, 16, 17},
},
}
for _, test := range tests {
issueIDs, _, err := SearchIssues(context.TODO(), &test.opts)
if !assert.NoError(t, err) {
return
}
assert.Equal(t, test.expectedIDs, issueIDs)
}
}
func searchIssueInProject(t *testing.T) {
int64Pointer := func(i int64) *int64 {
return &i
}
tests := []struct {
opts SearchOptions
expectedIDs []int64
}{
{
SearchOptions{
ProjectID: int64Pointer(1),
},
[]int64{5, 3, 2, 1},
},
{
SearchOptions{
ProjectBoardID: int64Pointer(1),
},
[]int64{1},
},
}
for _, test := range tests {
issueIDs, _, err := SearchIssues(context.TODO(), &test.opts)
if !assert.NoError(t, err) {
return
}
assert.Equal(t, test.expectedIDs, issueIDs)
}
}
func searchIssueWithPaginator(t *testing.T) {
tests := []struct {
opts SearchOptions
expectedIDs []int64
expectedTotal int64
}{
{
SearchOptions{
Paginator: &db.ListOptions{
PageSize: 5,
},
},
[]int64{17, 16, 15, 14, 13},
20,
},
}
for _, test := range tests {
issueIDs, total, err := SearchIssues(context.TODO(), &test.opts)
if !assert.NoError(t, err) {
return
}
assert.Equal(t, test.expectedIDs, issueIDs)
assert.Equal(t, test.expectedTotal, total)
}
}

View File

@@ -107,7 +107,7 @@ func getIssueIndexerData(ctx context.Context, issueID int64) (*internal.IndexerD
NoLabel: len(labels) == 0,
MilestoneID: issue.MilestoneID,
ProjectID: projectID,
ProjectBoardID: issue.ProjectBoardID(),
ProjectBoardID: issue.ProjectBoardID(ctx),
PosterID: issue.PosterID,
AssigneeID: issue.AssigneeID,
MentionIDs: mentionIDs,
@@ -127,15 +127,15 @@ func updateRepoIndexer(ctx context.Context, repoID int64) error {
return fmt.Errorf("issue_model.GetIssueIDsByRepoID: %w", err)
}
for _, id := range ids {
if err := updateIssueIndexer(id); err != nil {
if err := updateIssueIndexer(ctx, id); err != nil {
return err
}
}
return nil
}
func updateIssueIndexer(issueID int64) error {
return pushIssueIndexerQueue(&IndexerMetadata{ID: issueID})
func updateIssueIndexer(ctx context.Context, issueID int64) error {
return pushIssueIndexerQueue(ctx, &IndexerMetadata{ID: issueID})
}
func deleteRepoIssueIndexer(ctx context.Context, repoID int64) error {
@@ -148,13 +148,21 @@ func deleteRepoIssueIndexer(ctx context.Context, repoID int64) error {
if len(ids) == 0 {
return nil
}
return pushIssueIndexerQueue(&IndexerMetadata{
return pushIssueIndexerQueue(ctx, &IndexerMetadata{
IDs: ids,
IsDelete: true,
})
}
func pushIssueIndexerQueue(data *IndexerMetadata) error {
type keepRetryKey struct{}
// contextWithKeepRetry returns a context with a key indicating that the indexer should keep retrying.
// Please note that it's for background tasks only, and it should not be used for user requests, or it may cause blocking.
func contextWithKeepRetry(ctx context.Context) context.Context {
return context.WithValue(ctx, keepRetryKey{}, true)
}
func pushIssueIndexerQueue(ctx context.Context, data *IndexerMetadata) error {
if issueIndexerQueue == nil {
// Some unit tests will trigger indexing, but the queue is not initialized.
// It's OK to ignore it, but log a warning message in case it's not a unit test.
@@ -162,12 +170,26 @@ func pushIssueIndexerQueue(data *IndexerMetadata) error {
return nil
}
err := issueIndexerQueue.Push(data)
if errors.Is(err, queue.ErrAlreadyInQueue) {
return nil
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
err := issueIndexerQueue.Push(data)
if errors.Is(err, queue.ErrAlreadyInQueue) {
return nil
}
if errors.Is(err, context.DeadlineExceeded) { // the queue is full
log.Warn("It seems that issue indexer is slow and the queue is full. Please check the issue indexer or increase the queue size.")
if ctx.Value(keepRetryKey{}) == nil {
return err
}
// It would be better to increase the queue size instead of retrying, but users may ignore the previous warning message.
// However, even if it retries, it may still cause index loss when there's a deadline in the context.
log.Debug("Retry to push %+v to issue indexer queue", data)
continue
}
return err
}
if errors.Is(err, context.DeadlineExceeded) {
log.Warn("It seems that issue indexer is slow and the queue is full. Please check the issue indexer or increase the queue size.")
}
return err
}
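For orientation, a minimal sketch of how a background maintenance job could opt into the retry behaviour above; only contextWithKeepRetry and updateRepoIndexer come from this diff, the wrapper function and its name are illustrative.
// Hypothetical background re-index helper: wrapping the context with
// contextWithKeepRetry makes pushIssueIndexerQueue keep retrying when the
// queue is full (context.DeadlineExceeded) instead of dropping the update.
func reindexRepoInBackground(ctx context.Context, repoID int64) error {
    return updateRepoIndexer(contextWithKeepRetry(ctx), repoID)
}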

View File

@@ -68,7 +68,7 @@ func (db *DBIndexer) Index(id int64) error {
}
return err
}
err = repo_model.UpdateLanguageStats(repo, commitID, stats)
err = repo_model.UpdateLanguageStats(ctx, repo, commitID, stats)
if err != nil {
log.Error("Unable to update language stats for ID %s for default branch %s in %s. Error: %v", commitID, repo.DefaultBranch, repo.RepoPath(), err)
return err

View File

@@ -4,6 +4,8 @@
package stats
import (
"context"
"code.gitea.io/gitea/models/db"
repo_model "code.gitea.io/gitea/models/repo"
"code.gitea.io/gitea/modules/graceful"
@@ -28,14 +30,14 @@ func Init() error {
return err
}
go populateRepoIndexer()
go populateRepoIndexer(db.DefaultContext)
return nil
}
// populateRepoIndexer populates the repo indexer with pre-existing data. This
// should only be run when the indexer is created for the first time.
func populateRepoIndexer() {
func populateRepoIndexer(ctx context.Context) {
log.Info("Populating the repo stats indexer with existing repositories")
isShutdown := graceful.GetManager().IsShutdown()
@@ -62,7 +64,7 @@ func populateRepoIndexer() {
return
default:
}
ids, err := repo_model.GetUnindexedRepos(repo_model.RepoIndexerTypeStats, maxRepoID, 0, 50)
ids, err := repo_model.GetUnindexedRepos(ctx, repo_model.RepoIndexerTypeStats, maxRepoID, 0, 50)
if err != nil {
log.Error("populateRepoIndexer: %v", err)
return

View File

@@ -5,7 +5,6 @@ package stats
import (
"context"
"path/filepath"
"testing"
"time"
@@ -23,9 +22,7 @@ import (
)
func TestMain(m *testing.M) {
unittest.MainTest(m, &unittest.TestOptions{
GiteaRootPath: filepath.Join("..", "..", ".."),
})
unittest.MainTest(m)
}
func TestRepoStatsIndex(t *testing.T) {
@@ -48,7 +45,7 @@ func TestRepoStatsIndex(t *testing.T) {
status, err := repo_model.GetIndexerStatus(db.DefaultContext, repo, repo_model.RepoIndexerTypeStats)
assert.NoError(t, err)
assert.Equal(t, "65f1bf27bc3bf70f64657658635e66094edbcb4d", status.CommitSha)
langs, err := repo_model.GetTopLanguageStats(repo, 5)
langs, err := repo_model.GetTopLanguageStats(db.DefaultContext, repo, 5)
assert.NoError(t, err)
assert.Empty(t, langs)
}

View File

@@ -15,7 +15,7 @@ import (
// FilesystemClient is used to read LFS data from a filesystem path
type FilesystemClient struct {
lfsdir string
lfsDir string
}
// BatchSize returns the preferred size of batches to process
@@ -25,16 +25,12 @@ func (c *FilesystemClient) BatchSize() int {
func newFilesystemClient(endpoint *url.URL) *FilesystemClient {
path, _ := util.FileURLToPath(endpoint)
lfsdir := filepath.Join(path, "lfs", "objects")
client := &FilesystemClient{lfsdir}
return client
lfsDir := filepath.Join(path, "lfs", "objects")
return &FilesystemClient{lfsDir}
}
func (c *FilesystemClient) objectPath(oid string) string {
return filepath.Join(c.lfsdir, oid[0:2], oid[2:4], oid)
return filepath.Join(c.lfsDir, oid[0:2], oid[2:4], oid)
}
// Download reads the specific LFS object from the target path
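As a quick illustration of the fan-out layout produced by objectPath (OID and base directory invented, Unix-style paths assumed):
// Illustrative only: LFS objects are stored two levels deep, keyed by the
// first two byte pairs of the OID, mirroring the standard Git LFS layout.
func exampleObjectPath() string {
    c := &FilesystemClient{lfsDir: "/data/lfs/objects"}
    return c.objectPath("0a1b2c3d4e5f") // "/data/lfs/objects/0a/1b/0a1b2c3d4e5f"
}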

View File

@@ -8,6 +8,7 @@ import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
@@ -17,7 +18,7 @@ import (
"code.gitea.io/gitea/modules/proxy"
)
const batchSize = 20
const httpBatchSize = 20
// HTTPClient is used to communicate with the LFS server
// https://github.com/git-lfs/git-lfs/blob/main/docs/api/batch.md
@@ -29,7 +30,7 @@ type HTTPClient struct {
// BatchSize returns the preferred size of batches to process
func (c *HTTPClient) BatchSize() int {
return batchSize
return httpBatchSize
}
func newHTTPClient(endpoint *url.URL, httpTransport *http.Transport) *HTTPClient {
@@ -43,28 +44,25 @@ func newHTTPClient(endpoint *url.URL, httpTransport *http.Transport) *HTTPClient
Transport: httpTransport,
}
client := &HTTPClient{
client: hc,
endpoint: strings.TrimSuffix(endpoint.String(), "/"),
transfers: make(map[string]TransferAdapter),
}
basic := &BasicTransferAdapter{hc}
client.transfers[basic.Name()] = basic
client := &HTTPClient{
client: hc,
endpoint: strings.TrimSuffix(endpoint.String(), "/"),
transfers: map[string]TransferAdapter{
basic.Name(): basic,
},
}
return client
}
func (c *HTTPClient) transferNames() []string {
keys := make([]string, len(c.transfers))
i := 0
for k := range c.transfers {
keys[i] = k
i++
}
return keys
}
@@ -74,7 +72,6 @@ func (c *HTTPClient) batch(ctx context.Context, operation string, objects []Poin
url := fmt.Sprintf("%s/objects/batch", c.endpoint)
request := &BatchRequest{operation, c.transferNames(), nil, objects}
payload := new(bytes.Buffer)
err := json.NewEncoder(payload).Encode(request)
if err != nil {
@@ -82,32 +79,17 @@ func (c *HTTPClient) batch(ctx context.Context, operation string, objects []Poin
return nil, err
}
log.Trace("Calling: %s", url)
req, err := http.NewRequestWithContext(ctx, "POST", url, payload)
req, err := createRequest(ctx, http.MethodPost, url, map[string]string{"Content-Type": MediaType}, payload)
if err != nil {
log.Error("Error creating request: %v", err)
return nil, err
}
req.Header.Set("Content-type", MediaType)
req.Header.Set("Accept", MediaType)
res, err := c.client.Do(req)
res, err := performRequest(ctx, c.client, req)
if err != nil {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
log.Error("Error while processing request: %v", err)
return nil, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, fmt.Errorf("Unexpected server response: %s", res.Status)
}
var response BatchResponse
err = json.NewDecoder(res.Body).Decode(&response)
if err != nil {
@@ -177,7 +159,7 @@ func (c *HTTPClient) performOperation(ctx context.Context, objects []Pointer, dc
link, ok := object.Actions["upload"]
if !ok {
log.Debug("%+v", object)
return errors.New("Missing action 'upload'")
return errors.New("missing action 'upload'")
}
content, err := uc(object.Pointer, nil)
@@ -187,8 +169,6 @@ func (c *HTTPClient) performOperation(ctx context.Context, objects []Pointer, dc
err = transferAdapter.Upload(ctx, link, object.Pointer, content)
content.Close()
if err != nil {
return err
}
@@ -203,7 +183,7 @@ func (c *HTTPClient) performOperation(ctx context.Context, objects []Pointer, dc
link, ok := object.Actions["download"]
if !ok {
log.Debug("%+v", object)
return errors.New("Missing action 'download'")
return errors.New("missing action 'download'")
}
content, err := transferAdapter.Download(ctx, link)
@@ -219,3 +199,59 @@ func (c *HTTPClient) performOperation(ctx context.Context, objects []Pointer, dc
return nil
}
// createRequest creates a new request, and sets the headers.
func createRequest(ctx context.Context, method, url string, headers map[string]string, body io.Reader) (*http.Request, error) {
log.Trace("createRequest: %s", url)
req, err := http.NewRequestWithContext(ctx, method, url, body)
if err != nil {
log.Error("Error creating request: %v", err)
return nil, err
}
for key, value := range headers {
req.Header.Set(key, value)
}
req.Header.Set("Accept", MediaType)
return req, nil
}
// performRequest sends a request, optionally performs a callback on the request and returns the response.
// If the status code is 200, the response is returned, and it will contain a non-nil Body.
// Otherwise, it will return an error, and the Body will be nil or closed.
func performRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
log.Trace("performRequest: %s", req.URL)
res, err := client.Do(req)
if err != nil {
select {
case <-ctx.Done():
return res, ctx.Err()
default:
}
log.Error("Error while processing request: %v", err)
return res, err
}
if res.StatusCode != http.StatusOK {
defer res.Body.Close()
return res, handleErrorResponse(res)
}
return res, nil
}
func handleErrorResponse(resp *http.Response) error {
var er ErrorResponse
err := json.NewDecoder(resp.Body).Decode(&er)
if err != nil {
if err == io.EOF {
return io.ErrUnexpectedEOF
}
log.Error("Error decoding json: %v", err)
return err
}
log.Trace("ErrorResponse: %v", er)
return errors.New(er.Message)
}
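A hedged sketch of how the two helpers compose for callers; the function name, URL and output parameter are placeholders, while createRequest, performRequest and MediaType come from the code above.
// Illustrative only: POST a JSON payload through the shared helpers and decode
// the response body. Any non-200 status has already been turned into an error
// (including the server-provided ErrorResponse message) by performRequest.
func postJSON(ctx context.Context, client *http.Client, url string, body io.Reader, out any) error {
    req, err := createRequest(ctx, http.MethodPost, url, map[string]string{"Content-Type": MediaType}, body)
    if err != nil {
        return err
    }
    res, err := performRequest(ctx, client, req)
    if err != nil {
        return err
    }
    defer res.Body.Close()
    return json.NewDecoder(res.Body).Decode(out)
}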

View File

@@ -177,7 +177,7 @@ func TestHTTPClientDownload(t *testing.T) {
// case 0
{
endpoint: "https://status-not-ok.io",
expectederror: "Unexpected server response: ",
expectederror: io.ErrUnexpectedEOF.Error(),
},
// case 1
{
@@ -207,7 +207,7 @@ func TestHTTPClientDownload(t *testing.T) {
// case 6
{
endpoint: "https://empty-actions-map.io",
expectederror: "Missing action 'download'",
expectederror: "missing action 'download'",
},
// case 7
{
@@ -217,27 +217,28 @@ func TestHTTPClientDownload(t *testing.T) {
// case 8
{
endpoint: "https://upload-actions-map.io",
expectederror: "Missing action 'download'",
expectederror: "missing action 'download'",
},
// case 9
{
endpoint: "https://verify-actions-map.io",
expectederror: "Missing action 'download'",
expectederror: "missing action 'download'",
},
// case 10
{
endpoint: "https://unknown-actions-map.io",
expectederror: "Missing action 'download'",
expectederror: "missing action 'download'",
},
}
for n, c := range cases {
client := &HTTPClient{
client: hc,
endpoint: c.endpoint,
transfers: make(map[string]TransferAdapter),
client: hc,
endpoint: c.endpoint,
transfers: map[string]TransferAdapter{
"dummy": dummy,
},
}
client.transfers["dummy"] = dummy
err := client.Download(context.Background(), []Pointer{p}, func(p Pointer, content io.ReadCloser, objectError error) error {
if objectError != nil {
@@ -284,7 +285,7 @@ func TestHTTPClientUpload(t *testing.T) {
// case 0
{
endpoint: "https://status-not-ok.io",
expectederror: "Unexpected server response: ",
expectederror: io.ErrUnexpectedEOF.Error(),
},
// case 1
{
@@ -319,7 +320,7 @@ func TestHTTPClientUpload(t *testing.T) {
// case 7
{
endpoint: "https://download-actions-map.io",
expectederror: "Missing action 'upload'",
expectederror: "missing action 'upload'",
},
// case 8
{
@@ -329,22 +330,23 @@ func TestHTTPClientUpload(t *testing.T) {
// case 9
{
endpoint: "https://verify-actions-map.io",
expectederror: "Missing action 'upload'",
expectederror: "missing action 'upload'",
},
// case 10
{
endpoint: "https://unknown-actions-map.io",
expectederror: "Missing action 'upload'",
expectederror: "missing action 'upload'",
},
}
for n, c := range cases {
client := &HTTPClient{
client: hc,
endpoint: c.endpoint,
transfers: make(map[string]TransferAdapter),
client: hc,
endpoint: c.endpoint,
transfers: map[string]TransferAdapter{
"dummy": dummy,
},
}
client.transfers["dummy"] = dummy
err := client.Upload(context.Background(), []Pointer{p}, func(p Pointer, objectError error) (io.ReadCloser, error) {
return io.NopCloser(new(bytes.Buffer)), objectError

View File

@@ -29,10 +29,10 @@ const (
var (
// ErrMissingPrefix occurs if the content lacks the LFS prefix
ErrMissingPrefix = errors.New("Content lacks the LFS prefix")
ErrMissingPrefix = errors.New("content lacks the LFS prefix")
// ErrInvalidStructure occurs if the content has an invalid structure
ErrInvalidStructure = errors.New("Content has an invalid structure")
ErrInvalidStructure = errors.New("content has an invalid structure")
// ErrInvalidOIDFormat occurs if the oid has an invalid format
ErrInvalidOIDFormat = errors.New("OID has an invalid format")

View File

@@ -6,8 +6,6 @@ package lfs
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"net/http"
@@ -15,7 +13,7 @@ import (
"code.gitea.io/gitea/modules/log"
)
// TransferAdapter represents an adapter for downloading/uploading LFS objects
// TransferAdapter represents an adapter for downloading/uploading LFS objects.
type TransferAdapter interface {
Name() string
Download(ctx context.Context, l *Link) (io.ReadCloser, error)
@@ -23,41 +21,48 @@ type TransferAdapter interface {
Verify(ctx context.Context, l *Link, p Pointer) error
}
// BasicTransferAdapter implements the "basic" adapter
// BasicTransferAdapter implements the "basic" adapter.
type BasicTransferAdapter struct {
client *http.Client
}
// Name returns the name of the adapter
// Name returns the name of the adapter.
func (a *BasicTransferAdapter) Name() string {
return "basic"
}
// Download reads the download location and downloads the data
// Download reads the download location and downloads the data.
func (a *BasicTransferAdapter) Download(ctx context.Context, l *Link) (io.ReadCloser, error) {
resp, err := a.performRequest(ctx, "GET", l, nil, nil)
req, err := createRequest(ctx, http.MethodGet, l.Href, l.Header, nil)
if err != nil {
return nil, err
}
resp, err := performRequest(ctx, a.client, req)
if err != nil {
return nil, err
}
return resp.Body, nil
}
// Upload sends the content to the LFS server
// Upload sends the content to the LFS server.
func (a *BasicTransferAdapter) Upload(ctx context.Context, l *Link, p Pointer, r io.Reader) error {
_, err := a.performRequest(ctx, "PUT", l, r, func(req *http.Request) {
if len(req.Header.Get("Content-Type")) == 0 {
req.Header.Set("Content-Type", "application/octet-stream")
}
if req.Header.Get("Transfer-Encoding") == "chunked" {
req.TransferEncoding = []string{"chunked"}
}
req.ContentLength = p.Size
})
req, err := createRequest(ctx, http.MethodPut, l.Href, l.Header, r)
if err != nil {
return err
}
if req.Header.Get("Content-Type") == "" {
req.Header.Set("Content-Type", "application/octet-stream")
}
if req.Header.Get("Transfer-Encoding") == "chunked" {
req.TransferEncoding = []string{"chunked"}
}
req.ContentLength = p.Size
res, err := performRequest(ctx, a.client, req)
if err != nil {
return err
}
defer res.Body.Close()
return nil
}
@@ -69,66 +74,15 @@ func (a *BasicTransferAdapter) Verify(ctx context.Context, l *Link, p Pointer) e
return err
}
_, err = a.performRequest(ctx, "POST", l, bytes.NewReader(b), func(req *http.Request) {
req.Header.Set("Content-Type", MediaType)
})
req, err := createRequest(ctx, http.MethodPost, l.Href, l.Header, bytes.NewReader(b))
if err != nil {
return err
}
req.Header.Set("Content-Type", MediaType)
res, err := performRequest(ctx, a.client, req)
if err != nil {
return err
}
defer res.Body.Close()
return nil
}
func (a *BasicTransferAdapter) performRequest(ctx context.Context, method string, l *Link, body io.Reader, callback func(*http.Request)) (*http.Response, error) {
log.Trace("Calling: %s %s", method, l.Href)
req, err := http.NewRequestWithContext(ctx, method, l.Href, body)
if err != nil {
log.Error("Error creating request: %v", err)
return nil, err
}
for key, value := range l.Header {
req.Header.Set(key, value)
}
req.Header.Set("Accept", MediaType)
if callback != nil {
callback(req)
}
res, err := a.client.Do(req)
if err != nil {
select {
case <-ctx.Done():
return res, ctx.Err()
default:
}
log.Error("Error while processing request: %v", err)
return res, err
}
if res.StatusCode != http.StatusOK {
return res, handleErrorResponse(res)
}
return res, nil
}
func handleErrorResponse(resp *http.Response) error {
defer resp.Body.Close()
er, err := decodeResponseError(resp.Body)
if err != nil {
return fmt.Errorf("Request failed with status %s", resp.Status)
}
log.Trace("ErrorRespone: %v", er)
return errors.New(er.Message)
}
func decodeResponseError(r io.Reader) (ErrorResponse, error) {
var er ErrorResponse
err := json.NewDecoder(r).Decode(&er)
if err != nil {
log.Error("Error decoding json: %v", err)
}
return er, err
}

View File

@@ -153,18 +153,31 @@ func (r *Writer) WriteRegularLink(l org.RegularLink) {
link = []byte(util.URLJoin(r.URLPrefix, lnk))
}
description := string(link)
if l.Description != nil {
description = r.WriteNodesAsString(l.Description...)
}
// Inspired by https://github.com/niklasfasching/go-org/blob/6eb20dbda93cb88c3503f7508dc78cbbc639378f/org/html_writer.go#L406-L427
switch l.Kind() {
case "image":
imageSrc := getMediaURL(link)
fmt.Fprintf(r, `<img src="%s" alt="%s" title="%s" />`, imageSrc, description, description)
if l.Description == nil {
imageSrc := getMediaURL(link)
fmt.Fprintf(r, `<img src="%s" alt="%s" title="%s" />`, imageSrc, link, link)
} else {
description := strings.TrimPrefix(org.String(l.Description...), "file:")
imageSrc := getMediaURL([]byte(description))
fmt.Fprintf(r, `<a href="%s"><img src="%s" alt="%s" /></a>`, link, imageSrc, imageSrc)
}
case "video":
videoSrc := getMediaURL(link)
fmt.Fprintf(r, `<video src="%s" title="%s">%s</video>`, videoSrc, description, description)
if l.Description == nil {
imageSrc := getMediaURL(link)
fmt.Fprintf(r, `<video src="%s" title="%s">%s</video>`, imageSrc, link, link)
} else {
description := strings.TrimPrefix(org.String(l.Description...), "file:")
videoSrc := getMediaURL([]byte(description))
fmt.Fprintf(r, `<a href="%s"><video src="%s" title="%s"></video></a>`, link, videoSrc, videoSrc)
}
default:
description := string(link)
if l.Description != nil {
description = r.WriteNodesAsString(l.Description...)
}
fmt.Fprintf(r, `<a href="%s" title="%s">%s</a>`, link, description, description)
}
}

View File

@@ -42,7 +42,7 @@ func TestRender_StandardLinks(t *testing.T) {
"<p><a href=\""+lnk+"\" title=\"WikiPage\">WikiPage</a></p>")
}
func TestRender_Images(t *testing.T) {
func TestRender_Media(t *testing.T) {
setting.AppURL = AppURL
setting.AppSubURL = AppSubURL
@@ -60,6 +60,18 @@ func TestRender_Images(t *testing.T) {
test("[[file:"+url+"]]",
"<p><img src=\""+result+"\" alt=\""+result+"\" title=\""+result+"\" /></p>")
// With description.
test("[[https://example.com][https://example.com/example.svg]]",
`<p><a href="https://example.com"><img src="https://example.com/example.svg" alt="https://example.com/example.svg" /></a></p>`)
test("[[https://example.com][https://example.com/example.mp4]]",
`<p><a href="https://example.com"><video src="https://example.com/example.mp4" title="https://example.com/example.mp4"></video></a></p>`)
// Without description.
test("[[https://example.com/example.svg]]",
`<p><img src="https://example.com/example.svg" alt="https://example.com/example.svg" title="https://example.com/example.svg" /></p>`)
test("[[https://example.com/example.mp4]]",
`<p><video src="https://example.com/example.mp4" title="https://example.com/example.mp4">https://example.com/example.mp4</video></p>`)
}
func TestRender_Source(t *testing.T) {

View File

@@ -7,6 +7,7 @@ import (
"runtime"
activities_model "code.gitea.io/gitea/models/activities"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/setting"
"github.com/prometheus/client_golang/prometheus"
@@ -232,7 +233,7 @@ func (c Collector) Describe(ch chan<- *prometheus.Desc) {
// Collect returns the metrics with values
func (c Collector) Collect(ch chan<- prometheus.Metric) {
stats := activities_model.GetStatistic()
stats := activities_model.GetStatistic(db.DefaultContext)
ch <- prometheus.MustNewConstMetric(
c.Accesses,

View File

@@ -50,7 +50,7 @@ func TestManager_Cancel(t *testing.T) {
select {
case <-ctx.Done():
default:
assert.Fail(t, "Cancel should cancel the provided context")
assert.FailNow(t, "Cancel should cancel the provided context")
}
finished()
@@ -62,7 +62,7 @@ func TestManager_Cancel(t *testing.T) {
select {
case <-ctx.Done():
default:
assert.Fail(t, "Cancel should cancel the provided context")
assert.FailNow(t, "Cancel should cancel the provided context")
}
finished()
}

View File

@@ -46,7 +46,7 @@ CONN_STR = redis://
assert.Equal(t, "default", q.GetName())
assert.Equal(t, "level", q.GetType())
assert.Equal(t, filepath.Join(setting.AppDataPath, "queues/common"), q.baseConfig.DataFullDir)
assert.Equal(t, 100, q.baseConfig.Length)
assert.Equal(t, 100000, q.baseConfig.Length)
assert.Equal(t, 20, q.batchLength)
assert.Equal(t, "", q.baseConfig.ConnStr)
assert.Equal(t, "default_queue", q.baseConfig.QueueFullName)

View File

@@ -9,6 +9,8 @@
// - An item can be a simple value, such as an integer, or a more complex structure that has multiple fields.
// Usually an item serves as a task or a message. Sets of items will be sent to a queue handler to be processed.
// - It's represented as a JSON-marshaled binary slice in the queue
// - Since the item is marshaled as JSON, and JSON doesn't have stable key-order/type support,
// the decoded handler item may not be the same as the original "pushed" one if you use map/any types
// (see the sketch after this hunk).
//
// 2. Batch:
// - A collection of items that are grouped together for processing. Each worker receives a batch of items.
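A small hedged illustration of the key-order/type caveat above (the item type is invented): a map-typed field survives the round trip, but its numbers come back as float64 and key order is not preserved, so concrete struct fields are the safer choice for queue items.
// Hypothetical queue item, used only to show the JSON round-trip caveat.
type exampleItem struct {
    ID    int64          `json:"id"`
    Extra map[string]any `json:"extra"`
}

func roundTrip(in exampleItem) (exampleItem, error) {
    bs, err := json.Marshal(in) // what gets pushed to the queue
    if err != nil {
        return exampleItem{}, err
    }
    var out exampleItem
    err = json.Unmarshal(bs, &out) // what the handler receives
    // out.Extra["n"] is now float64 even if it was pushed as an int.
    return out, err
}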

View File

@@ -78,15 +78,15 @@ func TestFindAllIssueReferences(t *testing.T) {
[]testResult{},
},
{
"This user3/repo4#200 yes.",
"This org3/repo4#200 yes.",
[]testResult{
{200, "user3", "repo4", "200", false, XRefActionNone, &RefSpan{Start: 5, End: 20}, nil, ""},
{200, "org3", "repo4", "200", false, XRefActionNone, &RefSpan{Start: 5, End: 19}, nil, ""},
},
},
{
"This user3/repo4!200 yes.",
"This org3/repo4!200 yes.",
[]testResult{
{200, "user3", "repo4", "200", true, XRefActionNone, &RefSpan{Start: 5, End: 20}, nil, ""},
{200, "org3", "repo4", "200", true, XRefActionNone, &RefSpan{Start: 5, End: 19}, nil, ""},
},
},
{
@@ -106,13 +106,13 @@ func TestFindAllIssueReferences(t *testing.T) {
},
},
{
"This [four](http://gitea.com:3000/user3/repo4/issues/203) yes.",
"This [four](http://gitea.com:3000/org3/repo4/issues/203) yes.",
[]testResult{
{203, "user3", "repo4", "203", false, XRefActionNone, nil, nil, ""},
{203, "org3", "repo4", "203", false, XRefActionNone, nil, nil, ""},
},
},
{
"This [five](http://github.com/user3/repo4/issues/204) no.",
"This [five](http://github.com/org3/repo4/issues/204) no.",
[]testResult{},
},
{
@@ -151,9 +151,9 @@ func TestFindAllIssueReferences(t *testing.T) {
},
},
{
"Do you fix user6/repo6#300 ? yes",
"Do you fix org6/repo6#300 ? yes",
[]testResult{
{300, "user6", "repo6", "300", false, XRefActionCloses, &RefSpan{Start: 11, End: 26}, &RefSpan{Start: 7, End: 10}, ""},
{300, "org6", "repo6", "300", false, XRefActionCloses, &RefSpan{Start: 11, End: 25}, &RefSpan{Start: 7, End: 10}, ""},
},
},
{
@@ -190,9 +190,9 @@ func TestFindAllIssueReferences(t *testing.T) {
},
},
{
"This user3/repo4#200, yes.",
"This org3/repo4#200, yes.",
[]testResult{
{200, "user3", "repo4", "200", false, XRefActionNone, &RefSpan{Start: 5, End: 20}, nil, ""},
{200, "org3", "repo4", "200", false, XRefActionNone, &RefSpan{Start: 5, End: 19}, nil, ""},
},
},
{
@@ -498,15 +498,15 @@ func TestCustomizeCloseKeywords(t *testing.T) {
},
},
{
"Cerró user6/repo6#300 yes",
"Cerró org6/repo6#300 yes",
[]testResult{
{300, "user6", "repo6", "300", false, XRefActionCloses, &RefSpan{Start: 7, End: 22}, &RefSpan{Start: 0, End: 6}, ""},
{300, "org6", "repo6", "300", false, XRefActionCloses, &RefSpan{Start: 7, End: 21}, &RefSpan{Start: 0, End: 6}, ""},
},
},
{
"Reabre user3/repo4#200 yes",
"Reabre org3/repo4#200 yes",
[]testResult{
{200, "user3", "repo4", "200", false, XRefActionReopens, &RefSpan{Start: 7, End: 22}, &RefSpan{Start: 0, End: 6}, ""},
{200, "org3", "repo4", "200", false, XRefActionReopens, &RefSpan{Start: 7, End: 21}, &RefSpan{Start: 0, End: 6}, ""},
},
},
}

View File

@@ -244,7 +244,7 @@ func TestRepoPermissionPrivateOrgRepo(t *testing.T) {
// update team information and then check permission
team := unittest.AssertExistsAndLoadBean(t, &organization.Team{ID: 5})
err = organization.UpdateTeamUnits(team, nil)
err = organization.UpdateTeamUnits(db.DefaultContext, team, nil)
assert.NoError(t, err)
perm, err = access_model.GetUserRepoPermission(db.DefaultContext, repo, owner)
assert.NoError(t, err)

View File

@@ -12,7 +12,6 @@ import (
"code.gitea.io/gitea/models/db"
repo_model "code.gitea.io/gitea/models/repo"
system_model "code.gitea.io/gitea/models/system"
"code.gitea.io/gitea/models/unittest"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/setting"
@@ -103,12 +102,6 @@ func TestPushCommits_ToAPIPayloadCommits(t *testing.T) {
assert.EqualValues(t, []string{"readme.md"}, headCommit.Modified)
}
func initGravatarSource(t *testing.T) {
setting.GravatarSource = "https://secure.gravatar.com/avatar"
err := system_model.Init(db.DefaultContext)
assert.NoError(t, err)
}
func TestPushCommits_AvatarLink(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
@@ -132,7 +125,7 @@ func TestPushCommits_AvatarLink(t *testing.T) {
},
}
initGravatarSource(t)
setting.GravatarSource = "https://secure.gravatar.com/avatar"
assert.Equal(t,
"https://secure.gravatar.com/avatar/ab53a2911ddf9b4817ac01ddcd3d975f?d=identicon&s="+strconv.Itoa(28*setting.Avatar.RenderedSizeFactor),

View File

@@ -4,24 +4,25 @@
package repository
import (
"code.gitea.io/gitea/models/db"
"context"
"code.gitea.io/gitea/models/organization"
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
)
// CanUserDelete returns true if user could delete the repository
func CanUserDelete(repo *repo_model.Repository, user *user_model.User) (bool, error) {
func CanUserDelete(ctx context.Context, repo *repo_model.Repository, user *user_model.User) (bool, error) {
if user.IsAdmin || user.ID == repo.OwnerID {
return true, nil
}
if err := repo.LoadOwner(db.DefaultContext); err != nil {
if err := repo.LoadOwner(ctx); err != nil {
return false, err
}
if repo.Owner.IsOrganization() {
isAdmin, err := organization.OrgFromUser(repo.Owner).IsOrgAdmin(user.ID)
isAdmin, err := organization.OrgFromUser(repo.Owner).IsOrgAdmin(ctx, user.ID)
if err != nil {
return false, err
}

View File

@@ -4,25 +4,27 @@
package repository
import (
"context"
"code.gitea.io/gitea/models/organization"
repo_model "code.gitea.io/gitea/models/repo"
user_model "code.gitea.io/gitea/models/user"
)
// CanUserForkRepo returns true if specified user can fork repository.
func CanUserForkRepo(user *user_model.User, repo *repo_model.Repository) (bool, error) {
func CanUserForkRepo(ctx context.Context, user *user_model.User, repo *repo_model.Repository) (bool, error) {
if user == nil {
return false, nil
}
if repo.OwnerID != user.ID && !repo_model.HasForkedRepo(user.ID, repo.ID) {
if repo.OwnerID != user.ID && !repo_model.HasForkedRepo(ctx, user.ID, repo.ID) {
return true, nil
}
ownedOrgs, err := organization.GetOrgsCanCreateRepoByUserID(user.ID)
ownedOrgs, err := organization.GetOrgsCanCreateRepoByUserID(ctx, user.ID)
if err != nil {
return false, err
}
for _, org := range ownedOrgs {
if repo.OwnerID != org.ID && !repo_model.HasForkedRepo(org.ID, repo.ID) {
if repo.OwnerID != org.ID && !repo_model.HasForkedRepo(ctx, org.ID, repo.ID) {
return true, nil
}
}

View File

@@ -4,7 +4,6 @@
package repository
import (
"path/filepath"
"testing"
"code.gitea.io/gitea/models/unittest"
@@ -13,7 +12,5 @@ import (
)
func TestMain(m *testing.M) {
unittest.MainTest(m, &unittest.TestOptions{
GiteaRootPath: filepath.Join("..", ".."),
})
unittest.MainTest(m)
}

View File

@@ -159,7 +159,7 @@ func MigrateRepositoryGitData(ctx context.Context, u *user_model.User,
// note: this will greatly improve release (tag) sync
// for pull-mirrors with many tags
repo.IsMirror = opts.Mirror
if err = SyncReleasesWithTags(repo, gitRepo); err != nil {
if err = SyncReleasesWithTags(ctx, repo, gitRepo); err != nil {
log.Error("Failed to synchronize tags to releases for repository: %v", err)
}
}
@@ -180,12 +180,17 @@ func MigrateRepositoryGitData(ctx context.Context, u *user_model.User,
defer committer.Close()
if opts.Mirror {
remoteAddress, err := util.SanitizeURL(opts.CloneAddr)
if err != nil {
return repo, err
}
mirrorModel := repo_model.Mirror{
RepoID: repo.ID,
Interval: setting.Mirror.DefaultInterval,
EnablePrune: true,
NextUpdateUnix: timeutil.TimeStampNow().AddDuration(setting.Mirror.DefaultInterval),
LFS: opts.LFS,
RemoteAddress: remoteAddress,
}
if opts.LFS {
mirrorModel.LFSEndpoint = opts.LFSEndpoint
@@ -280,13 +285,13 @@ func CleanUpMigrateInfo(ctx context.Context, repo *repo_model.Repository) (*repo
}
// SyncReleasesWithTags synchronizes release table with repository tags
func SyncReleasesWithTags(repo *repo_model.Repository, gitRepo *git.Repository) error {
func SyncReleasesWithTags(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository) error {
log.Debug("SyncReleasesWithTags: in Repo[%d:%s/%s]", repo.ID, repo.OwnerName, repo.Name)
// optimized procedure for pull-mirrors which saves a lot of time (in
// particular for repos with many tags).
if repo.IsMirror {
return pullMirrorReleaseSync(repo, gitRepo)
return pullMirrorReleaseSync(ctx, repo, gitRepo)
}
existingRelTags := make(container.Set[string])
@@ -313,7 +318,7 @@ func SyncReleasesWithTags(repo *repo_model.Repository, gitRepo *git.Repository)
return fmt.Errorf("unable to GetTagCommitID for %q in Repo[%d:%s/%s]: %w", rel.TagName, repo.ID, repo.OwnerName, repo.Name, err)
}
if git.IsErrNotExist(err) || commitID != rel.Sha1 {
if err := repo_model.PushUpdateDeleteTag(repo, rel.TagName); err != nil {
if err := repo_model.PushUpdateDeleteTag(ctx, repo, rel.TagName); err != nil {
return fmt.Errorf("unable to PushUpdateDeleteTag: %q in Repo[%d:%s/%s]: %w", rel.TagName, repo.ID, repo.OwnerName, repo.Name, err)
}
} else {
@@ -328,7 +333,7 @@ func SyncReleasesWithTags(repo *repo_model.Repository, gitRepo *git.Repository)
return nil
}
if err := PushUpdateAddTag(db.DefaultContext, repo, gitRepo, tagName, sha1, refname); err != nil {
if err := PushUpdateAddTag(ctx, repo, gitRepo, tagName, sha1, refname); err != nil {
return fmt.Errorf("unable to PushUpdateAddTag: %q to Repo[%d:%s/%s]: %w", tagName, repo.ID, repo.OwnerName, repo.Name, err)
}
@@ -385,7 +390,7 @@ func PushUpdateAddTag(ctx context.Context, repo *repo_model.Repository, gitRepo
rel.PublisherID = author.ID
}
return repo_model.SaveOrUpdateTag(repo, &rel)
return repo_model.SaveOrUpdateTag(ctx, repo, &rel)
}
// StoreMissingLfsObjectsInRepository downloads missing LFS objects
@@ -492,13 +497,13 @@ func StoreMissingLfsObjectsInRepository(ctx context.Context, repo *repo_model.Re
// upstream. Hence, after each sync we want the pull-mirror release set to be
// identical to the upstream tag set. This is much more efficient for
// repositories like https://github.com/vim/vim (with over 13000 tags).
func pullMirrorReleaseSync(repo *repo_model.Repository, gitRepo *git.Repository) error {
func pullMirrorReleaseSync(ctx context.Context, repo *repo_model.Repository, gitRepo *git.Repository) error {
log.Trace("pullMirrorReleaseSync: rebuilding releases for pull-mirror Repo[%d:%s/%s]", repo.ID, repo.OwnerName, repo.Name)
tags, numTags, err := gitRepo.GetTagInfos(0, 0)
if err != nil {
return fmt.Errorf("unable to GetTagInfos in pull-mirror Repo[%d:%s/%s]: %w", repo.ID, repo.OwnerName, repo.Name, err)
}
err = db.WithTx(db.DefaultContext, func(ctx context.Context) error {
err = db.WithTx(ctx, func(ctx context.Context) error {
//
// clear out existing releases
//

View File

@@ -8,6 +8,7 @@ import (
"sync"
"code.gitea.io/gitea/models/auth"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/timeutil"
"gitea.com/go-chi/session"
@@ -71,7 +72,7 @@ func (s *DBStore) Release() error {
return err
}
return auth.UpdateSession(s.sid, data)
return auth.UpdateSession(db.DefaultContext, s.sid, data)
}
// Flush deletes all session data.
@@ -97,7 +98,7 @@ func (p *DBProvider) Init(maxLifetime int64, connStr string) error {
// Read returns raw session store by session ID.
func (p *DBProvider) Read(sid string) (session.RawStore, error) {
s, err := auth.ReadSession(sid)
s, err := auth.ReadSession(db.DefaultContext, sid)
if err != nil {
return nil, err
}
@@ -117,7 +118,7 @@ func (p *DBProvider) Read(sid string) (session.RawStore, error) {
// Exist returns true if session with given ID exists.
func (p *DBProvider) Exist(sid string) bool {
has, err := auth.ExistSession(sid)
has, err := auth.ExistSession(db.DefaultContext, sid)
if err != nil {
panic("session/DB: error checking existence: " + err.Error())
}
@@ -126,12 +127,12 @@ func (p *DBProvider) Exist(sid string) bool {
// Destroy deletes a session by session ID.
func (p *DBProvider) Destroy(sid string) error {
return auth.DestroySession(sid)
return auth.DestroySession(db.DefaultContext, sid)
}
// Regenerate regenerates a session store from old session ID to new one.
func (p *DBProvider) Regenerate(oldsid, sid string) (_ session.RawStore, err error) {
s, err := auth.RegenerateSession(oldsid, sid)
s, err := auth.RegenerateSession(db.DefaultContext, oldsid, sid)
if err != nil {
return nil, err
}
@@ -151,7 +152,7 @@ func (p *DBProvider) Regenerate(oldsid, sid string) (_ session.RawStore, err err
// Count counts and returns number of sessions.
func (p *DBProvider) Count() int {
total, err := auth.CountSessions()
total, err := auth.CountSessions(db.DefaultContext)
if err != nil {
panic("session/DB: error counting records: " + err.Error())
}
@@ -160,7 +161,7 @@ func (p *DBProvider) Count() int {
// GC calls GC to clean expired sessions.
func (p *DBProvider) GC() {
if err := auth.CleanupSessions(p.maxLifetime); err != nil {
if err := auth.CleanupSessions(db.DefaultContext, p.maxLifetime); err != nil {
log.Printf("session/DB: error garbage collecting: %v", err)
}
}

View File

@@ -6,6 +6,7 @@ package setting
import (
"fmt"
"strings"
"time"
"code.gitea.io/gitea/modules/log"
)
@@ -18,8 +19,11 @@ var (
ArtifactRetentionDays int64 `ini:"ARTIFACT_RETENTION_DAYS"`
Enabled bool
DefaultActionsURL defaultActionsURL `ini:"DEFAULT_ACTIONS_URL"`
ZombieTaskTimeout time.Duration `ini:"ZOMBIE_TASK_TIMEOUT"`
EndlessTaskTimeout time.Duration `ini:"ENDLESS_TASK_TIMEOUT"`
AbandonedJobTimeout time.Duration `ini:"ABANDONED_JOB_TIMEOUT"`
}{
Enabled: false,
Enabled: true,
DefaultActionsURL: defaultActionsURLGitHub,
}
)
@@ -82,5 +86,9 @@ func loadActionsFrom(rootCfg ConfigProvider) error {
Actions.ArtifactRetentionDays = 90
}
Actions.ZombieTaskTimeout = sec.Key("ZOMBIE_TASK_TIMEOUT").MustDuration(10 * time.Minute)
Actions.EndlessTaskTimeout = sec.Key("ENDLESS_TASK_TIMEOUT").MustDuration(3 * time.Hour)
Actions.AbandonedJobTimeout = sec.Key("ABANDONED_JOB_TIMEOUT").MustDuration(24 * time.Hour)
return err
}

modules/setting/config.go (new file, 55 lines)
View File

@@ -0,0 +1,55 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package setting
import (
"sync"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting/config"
)
type PictureStruct struct {
DisableGravatar *config.Value[bool]
EnableFederatedAvatar *config.Value[bool]
}
type ConfigStruct struct {
Picture *PictureStruct
}
var (
defaultConfig *ConfigStruct
defaultConfigOnce sync.Once
)
func initDefaultConfig() {
config.SetCfgSecKeyGetter(&cfgSecKeyGetter{})
defaultConfig = &ConfigStruct{
Picture: &PictureStruct{
DisableGravatar: config.Bool(false, config.CfgSecKey{Sec: "picture", Key: "DISABLE_GRAVATAR"}, "picture.disable_gravatar"),
EnableFederatedAvatar: config.Bool(false, config.CfgSecKey{Sec: "picture", Key: "ENABLE_FEDERATED_AVATAR"}, "picture.enable_federated_avatar"),
},
}
}
func Config() *ConfigStruct {
defaultConfigOnce.Do(initDefaultConfig)
return defaultConfig
}
type cfgSecKeyGetter struct{}
func (c cfgSecKeyGetter) GetValue(sec, key string) (v string, has bool) {
cfgSec, err := CfgProvider.GetSection(sec)
if err != nil {
log.Error("Unable to get config section: %q", sec)
return "", false
}
cfgKey := ConfigSectionKey(cfgSec, key)
if cfgKey == nil {
return "", false
}
return cfgKey.Value(), true
}

View File

@@ -0,0 +1,49 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package config
import (
"context"
"sync"
)
var getterMu sync.RWMutex
type CfgSecKeyGetter interface {
GetValue(sec, key string) (v string, has bool)
}
var cfgSecKeyGetterInternal CfgSecKeyGetter
func SetCfgSecKeyGetter(p CfgSecKeyGetter) {
getterMu.Lock()
cfgSecKeyGetterInternal = p
getterMu.Unlock()
}
func GetCfgSecKeyGetter() CfgSecKeyGetter {
getterMu.RLock()
defer getterMu.RUnlock()
return cfgSecKeyGetterInternal
}
type DynKeyGetter interface {
GetValue(ctx context.Context, key string) (v string, has bool)
GetRevision(ctx context.Context) int
InvalidateCache()
}
var dynKeyGetterInternal DynKeyGetter
func SetDynGetter(p DynKeyGetter) {
getterMu.Lock()
dynKeyGetterInternal = p
getterMu.Unlock()
}
func GetDynGetter() DynKeyGetter {
getterMu.RLock()
defer getterMu.RUnlock()
return dynKeyGetterInternal
}

View File

@@ -0,0 +1,81 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package config
import (
"context"
"strconv"
"sync"
)
type CfgSecKey struct {
Sec, Key string
}
type Value[T any] struct {
mu sync.RWMutex
cfgSecKey CfgSecKey
dynKey string
def, value T
revision int
}
func (value *Value[T]) parse(s string) (v T) {
switch any(v).(type) {
case bool:
b, _ := strconv.ParseBool(s)
return any(b).(T)
default:
panic("unsupported config type, please complete the code")
}
}
func (value *Value[T]) Value(ctx context.Context) (v T) {
dg := GetDynGetter()
if dg == nil {
// this is an edge case: the database is not initialized but the system setting is going to be used
// it should panic to avoid inconsistent config values (from config / system setting), and the calling code needs to be fixed
panic("no config dyn value getter")
}
rev := dg.GetRevision(ctx)
// if the revision in database doesn't change, use the last value
value.mu.RLock()
if rev == value.revision {
v = value.value
value.mu.RUnlock()
return v
}
value.mu.RUnlock()
// try to parse the config and cache it
var valStr *string
if dynVal, has := dg.GetValue(ctx, value.dynKey); has {
valStr = &dynVal
} else if cfgVal, has := GetCfgSecKeyGetter().GetValue(value.cfgSecKey.Sec, value.cfgSecKey.Key); has {
valStr = &cfgVal
}
if valStr == nil {
v = value.def
} else {
v = value.parse(*valStr)
}
value.mu.Lock()
value.value = v
value.revision = rev
value.mu.Unlock()
return v
}
func (value *Value[T]) DynKey() string {
return value.dynKey
}
func Bool(def bool, cfgSecKey CfgSecKey, dynKey string) *Value[bool] {
return &Value[bool]{def: def, cfgSecKey: cfgSecKey, dynKey: dynKey}
}
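A hedged usage sketch tying the new config.go and value.go pieces together; the wrapper function is illustrative, the accessor chain comes from the code above.
// Illustrative read of a dynamic setting: the cached value is reused until the
// revision reported by the dyn getter changes, then the lookup falls back from
// the database value to app.ini ([picture] DISABLE_GRAVATAR) to the default.
func gravatarDisabled(ctx context.Context) bool {
    return setting.Config().Picture.DisableGravatar.Value(ctx)
}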

View File

@@ -149,8 +149,9 @@ func EnvironmentToConfig(cfg ConfigProvider, envs []string) (changed bool) {
continue
}
}
key := section.Key(keyName)
key := ConfigSectionKey(section, keyName)
if key == nil {
changed = true
key, err = section.NewKey(keyName, keyValue)
if err != nil {
log.Error("Error creating key: %s in section: %s with value: %s : %v", keyName, sectionName, keyValue, err)

View File

@@ -115,3 +115,29 @@ key = old
EnvironmentToConfig(cfg, []string{"GITEA__sec__key__FILE=" + tmpFile})
assert.Equal(t, "value-from-file\n", cfg.Section("sec").Key("key").String())
}
func TestEnvironmentToConfigSubSecKey(t *testing.T) {
// the INI package has a quirk: by default, the keys are inherited.
// when maintaining the keys, the newly added sub key should not be affected by the parent key.
cfg, err := NewConfigProviderFromData(`
[sec]
key = some
`)
assert.NoError(t, err)
changed := EnvironmentToConfig(cfg, []string{"GITEA__sec_0X2E_sub__key=some"})
assert.True(t, changed)
tmpFile := t.TempDir() + "/test-sub-sec-key.ini"
defer os.Remove(tmpFile)
err = cfg.SaveTo(tmpFile)
assert.NoError(t, err)
bs, err := os.ReadFile(tmpFile)
assert.NoError(t, err)
assert.Equal(t, `[sec]
key = some
[sec.sub]
key = some
`, string(bs))
}

View File

@@ -213,11 +213,9 @@ func NewConfigProviderFromFile(file string, extraConfigs ...string) (ConfigProvi
}
}
if len(extraConfigs) > 0 {
for _, s := range extraConfigs {
if err := cfg.Append([]byte(s)); err != nil {
return nil, fmt.Errorf("unable to append more config: %v", err)
}
for _, s := range extraConfigs {
if err := cfg.Append([]byte(s)); err != nil {
return nil, fmt.Errorf("unable to append more config: %v", err)
}
}

View File

@@ -110,7 +110,7 @@ var OAuth2 = struct {
JWTSigningAlgorithm: "RS256",
JWTSigningPrivateKeyFile: "jwt/private.pem",
MaxTokenLength: math.MaxInt16,
DefaultApplications: []string{"git-credential-oauth", "git-credential-manager"},
DefaultApplications: []string{"git-credential-oauth", "git-credential-manager", "tea"},
}
func loadOAuth2From(rootCfg ConfigProvider) {

View File

@@ -30,7 +30,7 @@ func GetQueueSettings(rootCfg ConfigProvider, name string) (QueueSettings, error
queueSettingsDefault := QueueSettings{
Type: "level", // dummy, channel, level, redis
Datadir: "queues/common", // relative to AppDataPath
Length: 100, // queue length before a channel queue will block
Length: 100000, // queue length before a channel queue will block
QueueName: "_queue",
SetName: "_unique",

View File

@@ -19,7 +19,6 @@ var (
SecretKey string
InternalToken string // internal access token
LogInRememberDays int
CookieUserName string
CookieRememberName string
ReverseProxyAuthUser string
ReverseProxyAuthEmail string
@@ -104,7 +103,6 @@ func loadSecurityFrom(rootCfg ConfigProvider) {
sec := rootCfg.Section("security")
InstallLock = HasInstallLock(rootCfg)
LogInRememberDays = sec.Key("LOGIN_REMEMBER_DAYS").MustInt(7)
CookieUserName = sec.Key("COOKIE_USERNAME").MustString("gitea_awesome")
SecretKey = loadSecret(sec, "SECRET_KEY_URI", "SECRET_KEY")
if SecretKey == "" {
// FIXME: https://github.com/go-gitea/gitea/issues/16832

View File

@@ -81,7 +81,6 @@ var (
StaticCacheTime time.Duration
EnableGzip bool
LandingPageURL LandingPage
LandingPageCustom string
UnixSocketPermission uint32
EnablePprof bool
PprofDataPath string
@@ -103,7 +102,6 @@ var (
StaticURLPrefix string
AbsoluteAssetURL string
HasRobotsTxt bool
ManifestData string
)

View File

@@ -76,8 +76,8 @@ var UI = struct {
CodeCommentLines: 4,
ReactionMaxUserNum: 10,
MaxDisplayFileSize: 8388608,
DefaultTheme: `auto`,
Themes: []string{`auto`, `gitea`, `arc-green`},
DefaultTheme: `gitea-auto`,
Themes: []string{`gitea-auto`, `gitea-light`, `gitea-dark`},
Reactions: []string{`+1`, `-1`, `laugh`, `hooray`, `confused`, `heart`, `rocket`, `eyes`},
CustomEmojis: []string{`git`, `gitea`, `codeberg`, `gitlab`, `github`, `gogs`},
CustomEmojisMap: map[string]string{"git": ":git:", "gitea": ":gitea:", "codeberg": ":codeberg:", "gitlab": ":gitlab:", "github": ":github:", "gogs": ":gogs:"},

View File

@@ -71,6 +71,11 @@ func convertMinioErr(err error) error {
return err
}
var getBucketVersioning = func(ctx context.Context, minioClient *minio.Client, bucket string) error {
_, err := minioClient.GetBucketVersioning(ctx, bucket)
return err
}
// NewMinioStorage returns a minio storage
func NewMinioStorage(ctx context.Context, cfg *setting.Storage) (ObjectStorage, error) {
config := cfg.MinioConfig
@@ -90,6 +95,23 @@ func NewMinioStorage(ctx context.Context, cfg *setting.Storage) (ObjectStorage,
return nil, convertMinioErr(err)
}
// The GetBucketVersioning is only used for checking whether the Object Storage parameters are generally good. It doesn't need to succeed.
// The assumption is that if the API returns the HTTP code 400, then the parameters could be incorrect.
// Otherwise even if the request itself fails (403, 404, etc), the code should still continue because the parameters seem "good" enough.
// Keep in mind that GetBucketVersioning requires "owner" to really succeed, so it can't be used to check for existence.
// Not using "BucketExists (HeadBucket)" because it doesn't include detailed failure reasons.
err = getBucketVersioning(ctx, minioClient, config.Bucket)
if err != nil {
errResp, ok := err.(minio.ErrorResponse)
if !ok {
return nil, err
}
if errResp.StatusCode == http.StatusBadRequest {
log.Error("S3 storage connection failure at %s:%s with base path %s and region: %s", config.Endpoint, config.Bucket, config.Location, errResp.Message)
return nil, err
}
}
// Check to see if we already own this bucket
exists, err := minioClient.BucketExists(ctx, config.Bucket)
if err != nil {
@@ -114,9 +136,18 @@ func NewMinioStorage(ctx context.Context, cfg *setting.Storage) (ObjectStorage,
}
func (m *MinioStorage) buildMinioPath(p string) string {
p = util.PathJoinRelX(m.basePath, p)
p = strings.TrimPrefix(util.PathJoinRelX(m.basePath, p), "/") // object store doesn't use slash for root path
if p == "." {
p = "" // minio doesn't use dot as relative path
p = "" // object store doesn't use dot as relative path
}
return p
}
func (m *MinioStorage) buildMinioDirPrefix(p string) string {
// the ending slash is required, otherwise a prefix like "foo" would match both "foo/" and "foobar/"
p = m.buildMinioPath(p) + "/"
if p == "/" {
p = "" // object store doesn't use slash for root path
}
return p
}
@@ -215,20 +246,11 @@ func (m *MinioStorage) URL(path, name string) (*url.URL, error) {
// IterateObjects iterates across the objects in the miniostorage
func (m *MinioStorage) IterateObjects(dirName string, fn func(path string, obj Object) error) error {
opts := minio.GetObjectOptions{}
lobjectCtx, cancel := context.WithCancel(m.ctx)
defer cancel()
basePath := m.basePath
if dirName != "" {
// ending slash is required for avoiding matching like "foo/" and "foobar/" with prefix "foo"
basePath = m.buildMinioPath(dirName) + "/"
}
for mObjInfo := range m.client.ListObjects(lobjectCtx, m.bucket, minio.ListObjectsOptions{
Prefix: basePath,
for mObjInfo := range m.client.ListObjects(m.ctx, m.bucket, minio.ListObjectsOptions{
Prefix: m.buildMinioDirPrefix(dirName),
Recursive: true,
}) {
object, err := m.client.GetObject(lobjectCtx, m.bucket, mObjInfo.Key, opts)
object, err := m.client.GetObject(m.ctx, m.bucket, mObjInfo.Key, opts)
if err != nil {
return convertMinioErr(err)
}

View File

@@ -4,10 +4,15 @@
package storage
import (
"context"
"net/http"
"os"
"testing"
"code.gitea.io/gitea/modules/setting"
"github.com/minio/minio-go/v7"
"github.com/stretchr/testify/assert"
)
func TestMinioStorageIterator(t *testing.T) {
@@ -25,3 +30,65 @@ func TestMinioStorageIterator(t *testing.T) {
},
})
}
func TestMinioStoragePath(t *testing.T) {
m := &MinioStorage{basePath: ""}
assert.Equal(t, "", m.buildMinioPath("/"))
assert.Equal(t, "", m.buildMinioPath("."))
assert.Equal(t, "a", m.buildMinioPath("/a"))
assert.Equal(t, "a/b", m.buildMinioPath("/a/b/"))
assert.Equal(t, "", m.buildMinioDirPrefix(""))
assert.Equal(t, "a/", m.buildMinioDirPrefix("/a/"))
m = &MinioStorage{basePath: "/"}
assert.Equal(t, "", m.buildMinioPath("/"))
assert.Equal(t, "", m.buildMinioPath("."))
assert.Equal(t, "a", m.buildMinioPath("/a"))
assert.Equal(t, "a/b", m.buildMinioPath("/a/b/"))
assert.Equal(t, "", m.buildMinioDirPrefix(""))
assert.Equal(t, "a/", m.buildMinioDirPrefix("/a/"))
m = &MinioStorage{basePath: "/base"}
assert.Equal(t, "base", m.buildMinioPath("/"))
assert.Equal(t, "base", m.buildMinioPath("."))
assert.Equal(t, "base/a", m.buildMinioPath("/a"))
assert.Equal(t, "base/a/b", m.buildMinioPath("/a/b/"))
assert.Equal(t, "base/", m.buildMinioDirPrefix(""))
assert.Equal(t, "base/a/", m.buildMinioDirPrefix("/a/"))
m = &MinioStorage{basePath: "/base/"}
assert.Equal(t, "base", m.buildMinioPath("/"))
assert.Equal(t, "base", m.buildMinioPath("."))
assert.Equal(t, "base/a", m.buildMinioPath("/a"))
assert.Equal(t, "base/a/b", m.buildMinioPath("/a/b/"))
assert.Equal(t, "base/", m.buildMinioDirPrefix(""))
assert.Equal(t, "base/a/", m.buildMinioDirPrefix("/a/"))
}
func TestS3StorageBadRequest(t *testing.T) {
if os.Getenv("CI") == "" {
t.Skip("S3Storage not present outside of CI")
return
}
cfg := &setting.Storage{
MinioConfig: setting.MinioStorageConfig{
Endpoint: "minio:9000",
AccessKeyID: "123456",
SecretAccessKey: "12345678",
Bucket: "bucket",
Location: "us-east-1",
},
}
message := "ERROR"
old := getBucketVersioning
defer func() { getBucketVersioning = old }()
getBucketVersioning = func(ctx context.Context, minioClient *minio.Client, bucket string) error {
return minio.ErrorResponse{
StatusCode: http.StatusBadRequest,
Code: "FixtureError",
Message: message,
}
}
_, err := NewStorage(setting.MinioStorageType, cfg)
assert.ErrorContains(t, err, message)
}

View File

@@ -16,13 +16,16 @@ const (
CommitStatusError CommitStatusState = "error"
// CommitStatusFailure is for when the CommitStatus is Failure
CommitStatusFailure CommitStatusState = "failure"
// CommitStatusWarning is for when the CommitStatus is Warning
CommitStatusWarning CommitStatusState = "warning"
)
var commitStatusPriorities = map[CommitStatusState]int{
CommitStatusError: 0,
CommitStatusFailure: 1,
CommitStatusPending: 2,
CommitStatusSuccess: 3,
CommitStatusWarning: 2,
CommitStatusPending: 3,
CommitStatusSuccess: 4,
}
func (css CommitStatusState) String() string {
@@ -32,7 +35,7 @@ func (css CommitStatusState) String() string {
// NoBetterThan returns true if this State is no better than the given State
// This function only handles the states defined in CommitStatusPriorities
func (css CommitStatusState) NoBetterThan(css2 CommitStatusState) bool {
// NoBetterThan only handles the 4 states above
// NoBetterThan only handles the 5 states above
if _, exist := commitStatusPriorities[css]; !exist {
return false
}
@@ -63,3 +66,8 @@ func (css CommitStatusState) IsError() bool {
func (css CommitStatusState) IsFailure() bool {
return css == CommitStatusFailure
}
// IsWarning represents if commit status state is warning
func (css CommitStatusState) IsWarning() bool {
return css == CommitStatusWarning
}
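A short hedged example of how the new warning state fits the priority table above (fmt import assumed; the results follow from the priority map shown):
func exampleWarningPriority() {
    // error < failure < warning < pending < success
    fmt.Println(CommitStatusWarning.NoBetterThan(CommitStatusSuccess)) // true: warning is no better than success
    fmt.Println(CommitStatusWarning.NoBetterThan(CommitStatusFailure)) // false: warning ranks above failure
    fmt.Println(CommitStatusWarning.IsWarning())                       // true
}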

View File

@@ -3,6 +3,8 @@
package structs
import "time"
// CreatePushMirrorOption represents the needed information to create a push mirror of a repository.
type CreatePushMirrorOption struct {
RemoteAddress string `json:"remote_address"`
@@ -15,12 +17,14 @@ type CreatePushMirrorOption struct {
// PushMirror represents information of a push mirror
// swagger:model
type PushMirror struct {
RepoName string `json:"repo_name"`
RemoteName string `json:"remote_name"`
RemoteAddress string `json:"remote_address"`
CreatedUnix string `json:"created"`
LastUpdateUnix string `json:"last_update"`
LastError string `json:"last_error"`
Interval string `json:"interval"`
SyncOnCommit bool `json:"sync_on_commit"`
RepoName string `json:"repo_name"`
RemoteName string `json:"remote_name"`
RemoteAddress string `json:"remote_address"`
// swagger:strfmt date-time
CreatedUnix time.Time `json:"created"`
// swagger:strfmt date-time
LastUpdateUnix *time.Time `json:"last_update"`
LastError string `json:"last_error"`
Interval string `json:"interval"`
SyncOnCommit bool `json:"sync_on_commit"`
}
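Because time.Time marshals to RFC 3339 by default, the API payload changes from opaque strings to real timestamps; a hedged sketch of the resulting shape (field values invented, fmt/json/time imports assumed):
func examplePushMirrorJSON() {
    m := PushMirror{
        RepoName:      "owner/repo",
        RemoteName:    "origin",
        RemoteAddress: "https://example.com/owner/repo.git",
        CreatedUnix:   time.Date(2023, 10, 20, 12, 0, 0, 0, time.UTC),
        Interval:      "8h0m0s",
    }
    bs, _ := json.Marshal(m)
    fmt.Println(string(bs))
    // ..."created":"2023-10-20T12:00:00Z","last_update":null... a *time.Time that
    // was never set now serializes as null rather than an empty string.
}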

View File

@@ -63,6 +63,7 @@ type Repository struct {
Language string `json:"language"`
LanguagesURL string `json:"languages_url"`
HTMLURL string `json:"html_url"`
URL string `json:"url"`
Link string `json:"link"`
SSHURL string `json:"ssh_url"`
CloneURL string `json:"clone_url"`

View File

@@ -3,10 +3,12 @@
package system
import "context"
// StateStore is the interface to get/set app state items
type StateStore interface {
Get(item StateItem) error
Set(item StateItem) error
Get(ctx context.Context, item StateItem) error
Set(ctx context.Context, item StateItem) error
}
// StateItem provides the name for a state item. The name will be used to generate filenames, etc.

View File

@@ -4,9 +4,9 @@
package system
import (
"path/filepath"
"testing"
"code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/models/unittest"
"github.com/stretchr/testify/assert"
@@ -14,8 +14,7 @@ import (
func TestMain(m *testing.M) {
unittest.MainTest(m, &unittest.TestOptions{
GiteaRootPath: filepath.Join("..", ".."),
FixtureFiles: []string{""}, // load nothing
FixtureFiles: []string{""}, // load nothing
})
}
@@ -42,25 +41,25 @@ func TestAppStateDB(t *testing.T) {
as := &DBStore{}
item1 := new(testItem1)
assert.NoError(t, as.Get(item1))
assert.NoError(t, as.Get(db.DefaultContext, item1))
assert.Equal(t, "", item1.Val1)
assert.EqualValues(t, 0, item1.Val2)
item1 = new(testItem1)
item1.Val1 = "a"
item1.Val2 = 2
assert.NoError(t, as.Set(item1))
assert.NoError(t, as.Set(db.DefaultContext, item1))
item2 := new(testItem2)
item2.K = "V"
assert.NoError(t, as.Set(item2))
assert.NoError(t, as.Set(db.DefaultContext, item2))
item1 = new(testItem1)
assert.NoError(t, as.Get(item1))
assert.NoError(t, as.Get(db.DefaultContext, item1))
assert.Equal(t, "a", item1.Val1)
assert.EqualValues(t, 2, item1.Val2)
item2 = new(testItem2)
assert.NoError(t, as.Get(item2))
assert.NoError(t, as.Get(db.DefaultContext, item2))
assert.Equal(t, "V", item2.K)
}

View File

@@ -4,6 +4,8 @@
package system
import (
"context"
"code.gitea.io/gitea/models/system"
"code.gitea.io/gitea/modules/json"
@@ -14,8 +16,8 @@ import (
type DBStore struct{}
// Get reads the state item
func (f *DBStore) Get(item StateItem) error {
content, err := system.GetAppStateContent(item.Name())
func (f *DBStore) Get(ctx context.Context, item StateItem) error {
content, err := system.GetAppStateContent(ctx, item.Name())
if err != nil {
return err
}
@@ -26,10 +28,10 @@ func (f *DBStore) Get(item StateItem) error {
}
// Set saves the state item
func (f *DBStore) Set(item StateItem) error {
func (f *DBStore) Set(ctx context.Context, item StateItem) error {
b, err := json.Marshal(item)
if err != nil {
return err
}
return system.SaveAppStateContent(item.Name(), util.BytesToReadOnlyString(b))
return system.SaveAppStateContent(ctx, item.Name(), util.BytesToReadOnlyString(b))
}

View File

@@ -12,6 +12,7 @@ import (
"strings"
"time"
user_model "code.gitea.io/gitea/models/user"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/emoji"
"code.gitea.io/gitea/modules/markup"
@@ -131,8 +132,11 @@ func NewFuncMap() template.FuncMap {
"DisableImportLocal": func() bool {
return !setting.ImportLocalPaths
},
"DefaultTheme": func() string {
return setting.UI.DefaultTheme
"ThemeName": func(user *user_model.User) string {
if user == nil || user.Theme == "" {
return setting.UI.DefaultTheme
}
return user.Theme
},
"NotificationSettings": func() map[string]any {
return map[string]any{

View File

@@ -58,11 +58,11 @@ func IsMultilineCommitMessage(msg string) bool {
// Actioner describes an action
type Actioner interface {
GetOpType() activities_model.ActionType
GetActUserName() string
GetRepoUserName() string
GetRepoName() string
GetRepoPath() string
GetRepoLink() string
GetActUserName(ctx context.Context) string
GetRepoUserName(ctx context.Context) string
GetRepoName(ctx context.Context) string
GetRepoPath(ctx context.Context) string
GetRepoLink(ctx context.Context) string
GetBranch() string
GetContent() string
GetCreate() time.Time
@@ -74,27 +74,31 @@ func ActionIcon(opType activities_model.ActionType) string {
switch opType {
case activities_model.ActionCreateRepo, activities_model.ActionTransferRepo, activities_model.ActionRenameRepo:
return "repo"
case activities_model.ActionCommitRepo, activities_model.ActionPushTag, activities_model.ActionDeleteTag, activities_model.ActionDeleteBranch:
case activities_model.ActionCommitRepo:
return "git-commit"
case activities_model.ActionCreateIssue:
return "issue-opened"
case activities_model.ActionCreatePullRequest:
return "git-pull-request"
case activities_model.ActionCommentIssue, activities_model.ActionCommentPull:
return "comment-discussion"
case activities_model.ActionDeleteBranch:
return "git-branch"
case activities_model.ActionMergePullRequest, activities_model.ActionAutoMergePullRequest:
return "git-merge"
case activities_model.ActionCloseIssue, activities_model.ActionClosePullRequest:
case activities_model.ActionCreatePullRequest:
return "git-pull-request"
case activities_model.ActionClosePullRequest:
return "git-pull-request-closed"
case activities_model.ActionCreateIssue:
return "issue-opened"
case activities_model.ActionCloseIssue:
return "issue-closed"
case activities_model.ActionReopenIssue, activities_model.ActionReopenPullRequest:
return "issue-reopened"
case activities_model.ActionCommentIssue, activities_model.ActionCommentPull:
return "comment-discussion"
case activities_model.ActionMirrorSyncPush, activities_model.ActionMirrorSyncCreate, activities_model.ActionMirrorSyncDelete:
return "mirror"
case activities_model.ActionApprovePullRequest:
return "check"
case activities_model.ActionRejectPullRequest:
return "diff"
case activities_model.ActionPublishRelease:
return "file-diff"
case activities_model.ActionPublishRelease, activities_model.ActionPushTag, activities_model.ActionDeleteTag:
return "tag"
case activities_model.ActionPullReviewDismissed:
return "x"

View File

@@ -4,6 +4,7 @@
package updatechecker
import (
"context"
"io"
"net/http"
@@ -58,31 +59,31 @@ func GiteaUpdateChecker(httpEndpoint string) error {
return err
}
return UpdateRemoteVersion(respData.Latest.Version)
return UpdateRemoteVersion(req.Context(), respData.Latest.Version)
}
// UpdateRemoteVersion updates the latest available version of Gitea
func UpdateRemoteVersion(version string) (err error) {
return system.AppState.Set(&CheckerState{LatestVersion: version})
func UpdateRemoteVersion(ctx context.Context, version string) (err error) {
return system.AppState.Set(ctx, &CheckerState{LatestVersion: version})
}
// GetRemoteVersion returns the current remote version (or currently installed version if fail to fetch from DB)
func GetRemoteVersion() string {
func GetRemoteVersion(ctx context.Context) string {
item := new(CheckerState)
if err := system.AppState.Get(item); err != nil {
if err := system.AppState.Get(ctx, item); err != nil {
return ""
}
return item.LatestVersion
}
// GetNeedUpdate returns true whether a newer version of Gitea is available
func GetNeedUpdate() bool {
func GetNeedUpdate(ctx context.Context) bool {
curVer, err := version.NewVersion(setting.AppVer)
if err != nil {
// return false to fail silently
return false
}
remoteVerStr := GetRemoteVersion()
remoteVerStr := GetRemoteVersion(ctx)
if remoteVerStr == "" {
// no remote version is known
return false
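The remainder of GetNeedUpdate is not shown in this hunk; presumably it compares the two semantic versions. A hedged sketch of that comparison, assuming the same version package (github.com/hashicorp/go-version) used by the lines above:
func exampleNeedUpdate(cur, remote string) bool {
    // Hedged sketch only: parse both versions and report whether the remote one is newer.
    curVer, err := version.NewVersion(cur)
    if err != nil {
        return false
    }
    remoteVer, err := version.NewVersion(remote)
    if err != nil {
        return false
    }
    return curVer.LessThan(remoteVer) // e.g. ("1.20.0", "1.21.1") => true
}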

View File

@@ -225,6 +225,7 @@ func isOSWindows() bool {
var driveLetterRegexp = regexp.MustCompile("/[A-Za-z]:/")
// FileURLToPath extracts the path information from a file://... url.
// It returns an error only if the URL is not a file URL.
func FileURLToPath(u *url.URL) (string, error) {
if u.Scheme != "file" {
return "", errors.New("URL scheme is not 'file': " + u.String())

View File

@@ -39,3 +39,12 @@ func URLJoin(base string, elems ...string) string {
}
return joinedURL
}
func SanitizeURL(s string) (string, error) {
u, err := url.Parse(s)
if err != nil {
return "", err
}
u.User = nil
return u.String(), nil
}
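A quick example of what the new helper does (URL invented): userinfo embedded in a clone address is dropped before the address is stored, e.g. as the mirror RemoteAddress added earlier in this diff.
func exampleSanitizeURL() {
    // Credentials in a remote address never reach the database (fmt import assumed).
    clean, err := SanitizeURL("https://user:secret@example.com/owner/repo.git")
    fmt.Println(clean, err) // https://example.com/owner/repo.git <nil>
}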

View File

@@ -47,8 +47,6 @@ func GetContextData(c context.Context) ContextData {
func CommonTemplateContextData() ContextData {
return ContextData{
"IsLandingPageHome": setting.LandingPageURL == setting.LandingPageHome,
"IsLandingPageExplore": setting.LandingPageURL == setting.LandingPageExplore,
"IsLandingPageOrganizations": setting.LandingPageURL == setting.LandingPageOrganizations,
"ShowRegistrationButton": setting.Service.ShowRegistrationButton,