
update go-git to v4.12.0 - fixes #7248 (#7249)

techknowlogick
2019-06-18 22:14:15 -04:00
committed by Lunny Xiao
parent b209531959
commit 33ad554800
270 changed files with 71049 additions and 14434 deletions
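
The go-git side of this bump (visible in the object storage hunks below) adds an in-memory cache of open packfiles to ObjectStorage, controlled by two storage options: KeepDescriptors, which keeps every packfile open until Close is called, and the new MaxOpenDescriptors, which keeps at most N packfiles open and closes the oldest entry once the limit is reached. The sketch that follows is not part of this commit; it only illustrates, assuming the gopkg.in/src-d/go-git.v4 import paths and the filesystem.NewStorageWithOptions constructor, how a caller might opt into the bounded mode. The repository path is hypothetical.

package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4/osfs"
	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/cache"
	"gopkg.in/src-d/go-git.v4/storage/filesystem"
)

func main() {
	// Hypothetical bare repository path; adjust as needed.
	dotgit := osfs.New("/srv/repos/example.git")

	// Keep at most two packfiles open at a time; repeated object reads
	// reuse the cached *packfile.Packfile instead of reopening the pack
	// on every lookup.
	storer := filesystem.NewStorageWithOptions(
		dotgit,
		cache.NewObjectLRUDefault(),
		filesystem.Options{MaxOpenDescriptors: 2},
	)

	repo, err := git.Open(storer, nil)
	if err != nil {
		panic(err)
	}

	head, err := repo.Head()
	if err != nil {
		panic(err)
	}
	fmt.Println("HEAD is at", head.Hash())

	// With KeepDescriptors or MaxOpenDescriptors set, the caller is
	// responsible for releasing the cached descriptors (see the new
	// ObjectStorage.Close in the diff below).
	if err := storer.Close(); err != nil {
		panic(err)
	}
}

The trade-off, as the hunks below show, is a bounded number of open file handles in exchange for dropping the open-and-close-per-call pattern (the removed dir.ObjectPack calls); leaving both options at their zero values keeps the previous behaviour, where each packfile is closed again as soon as the read completes.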


@@ -26,6 +26,10 @@ type ObjectStorage struct {
dir *dotgit.DotGit
index map[plumbing.Hash]idxfile.Index
packList []plumbing.Hash
packListIdx int
packfiles map[plumbing.Hash]*packfile.Packfile
}
// NewObjectStorage creates a new ObjectStorage with the given .git directory and cache.
@@ -187,6 +191,73 @@ func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) (
return size, err
}
func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfile.Packfile, error) {
if p := s.packfileFromCache(pack); p != nil {
return p, nil
}
f, err := s.dir.ObjectPack(pack)
if err != nil {
return nil, err
}
var p *packfile.Packfile
if s.objectCache != nil {
p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
} else {
p = packfile.NewPackfile(idx, s.dir.Fs(), f)
}
return p, s.storePackfileInCache(pack, p)
}
func (s *ObjectStorage) packfileFromCache(hash plumbing.Hash) *packfile.Packfile {
if s.packfiles == nil {
if s.options.KeepDescriptors {
s.packfiles = make(map[plumbing.Hash]*packfile.Packfile)
} else if s.options.MaxOpenDescriptors > 0 {
s.packList = make([]plumbing.Hash, s.options.MaxOpenDescriptors)
s.packfiles = make(map[plumbing.Hash]*packfile.Packfile, s.options.MaxOpenDescriptors)
}
}
return s.packfiles[hash]
}
func (s *ObjectStorage) storePackfileInCache(hash plumbing.Hash, p *packfile.Packfile) error {
if s.options.KeepDescriptors {
s.packfiles[hash] = p
return nil
}
if s.options.MaxOpenDescriptors <= 0 {
return nil
}
// start over as the limit of packList is hit
if s.packListIdx >= len(s.packList) {
s.packListIdx = 0
}
// close the existing packfile if open
if next := s.packList[s.packListIdx]; !next.IsZero() {
open := s.packfiles[next]
delete(s.packfiles, next)
if open != nil {
if err := open.Close(); err != nil {
return err
}
}
}
// cache newly open packfile
s.packList[s.packListIdx] = hash
s.packfiles[hash] = p
s.packListIdx++
return nil
}
func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
size int64, err error) {
if err := s.requireIndex(); err != nil {
@@ -198,12 +269,6 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
return 0, plumbing.ErrObjectNotFound
}
f, err := s.dir.ObjectPack(pack)
if err != nil {
return 0, err
}
defer ioutil.CheckClose(f, &err)
idx := s.index[pack]
hash, err := idx.FindHash(offset)
if err == nil {
@@ -215,11 +280,13 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
return 0, err
}
var p *packfile.Packfile
if s.objectCache != nil {
p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
} else {
p = packfile.NewPackfile(idx, s.dir.Fs(), f)
p, err := s.packfile(idx, pack)
if err != nil {
return 0, err
}
if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
defer ioutil.CheckClose(p, &err)
}
return p.GetSizeByOffset(offset)
@@ -361,29 +428,28 @@ func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
return nil, plumbing.ErrObjectNotFound
}
f, err := s.dir.ObjectPack(pack)
idx := s.index[pack]
p, err := s.packfile(idx, pack)
if err != nil {
return nil, err
}
if !s.options.KeepDescriptors {
defer ioutil.CheckClose(f, &err)
if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
defer ioutil.CheckClose(p, &err)
}
idx := s.index[pack]
if canBeDelta {
return s.decodeDeltaObjectAt(f, idx, offset, hash)
return s.decodeDeltaObjectAt(p, offset, hash)
}
return s.decodeObjectAt(f, idx, offset)
return s.decodeObjectAt(p, offset)
}
func (s *ObjectStorage) decodeObjectAt(
f billy.File,
idx idxfile.Index,
p *packfile.Packfile,
offset int64,
) (plumbing.EncodedObject, error) {
hash, err := idx.FindHash(offset)
hash, err := p.FindHash(offset)
if err == nil {
obj, ok := s.objectCache.Get(hash)
if ok {
@@ -395,28 +461,16 @@ func (s *ObjectStorage) decodeObjectAt(
return nil, err
}
var p *packfile.Packfile
if s.objectCache != nil {
p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
} else {
p = packfile.NewPackfile(idx, s.dir.Fs(), f)
}
return p.GetByOffset(offset)
}
func (s *ObjectStorage) decodeDeltaObjectAt(
f billy.File,
idx idxfile.Index,
p *packfile.Packfile,
offset int64,
hash plumbing.Hash,
) (plumbing.EncodedObject, error) {
if _, err := f.Seek(0, io.SeekStart); err != nil {
return nil, err
}
p := packfile.NewScanner(f)
header, err := p.SeekObjectHeader(offset)
scan := p.Scanner()
header, err := scan.SeekObjectHeader(offset)
if err != nil {
return nil, err
}
@@ -429,12 +483,12 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
case plumbing.REFDeltaObject:
base = header.Reference
case plumbing.OFSDeltaObject:
base, err = idx.FindHash(header.OffsetReference)
base, err = p.FindHash(header.OffsetReference)
if err != nil {
return nil, err
}
default:
return s.decodeObjectAt(f, idx, offset)
return s.decodeObjectAt(p, offset)
}
obj := &plumbing.MemoryObject{}
@@ -444,7 +498,7 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
return nil, err
}
if _, _, err := p.NextObject(w); err != nil {
if _, _, err := scan.NextObject(w); err != nil {
return nil, err
}
@@ -515,7 +569,20 @@ func (s *ObjectStorage) buildPackfileIters(
// Close closes all opened files.
func (s *ObjectStorage) Close() error {
return s.dir.Close()
var firstError error
if s.options.KeepDescriptors || s.options.MaxOpenDescriptors > 0 {
for _, packfile := range s.packfiles {
err := packfile.Close()
if firstError == nil && err != nil {
firstError = err
}
}
}
s.packfiles = nil
s.dir.Close()
return firstError
}
type lazyPackfilesIter struct {