mirror of
https://github.com/go-gitea/gitea
synced 2025-07-18 00:08:35 +00:00
Refactor embedded assets and drop unnecessary dependencies (#34692)
Benefits: 1. smaller binary size (reduces it by more than 1MB) 2. better control of the asset details 3. fewer unmaintained dependencies 4. faster startup if the assets are not needed 5. won't hang editors when "bindata.go" is opened by accident
This commit is contained in:
375
modules/assetfs/embed.go
Normal file
375
modules/assetfs/embed.go
Normal file
@@ -0,0 +1,375 @@
|
||||
// Copyright 2025 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package assetfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"code.gitea.io/gitea/modules/json"
|
||||
"code.gitea.io/gitea/modules/util"
|
||||
)
|
||||
|
||||
// EmbeddedFile is the interface of an opened file from the embedded assets.
// It extends fs.ReadDirFile with io.ReadSeeker so the file can be served over HTTP.
type EmbeddedFile interface {
	io.ReadSeeker
	fs.ReadDirFile
	ReadDir(n int) ([]fs.DirEntry, error)
}

// EmbeddedFileInfo is the file info of an embedded file. GetGzipContent
// returns the pre-compressed (gzip) bytes and true when the entry is stored
// compressed, so callers may serve the gzip bytes directly to clients.
type EmbeddedFileInfo interface {
	fs.FileInfo
	fs.DirEntry
	GetGzipContent() ([]byte, bool)
}

// decompressor abstracts a resettable decompressing reader (e.g. *gzip.Reader).
type decompressor interface {
	io.Reader
	Close() error
	Reset(io.Reader) error
}
|
||||
|
||||
// embeddedFileInfo describes one entry (file or directory) of the embedded
// asset tree. The JSON-tagged fields are serialized into the trailing metadata
// line of the bindata blob; the short tag names keep the metadata small.
type embeddedFileInfo struct {
	fs       *embeddedFS // owning filesystem, bound lazily by getFileInfo
	fullName string      // full slash-separated path within the embedded FS
	data     []byte      // stored bytes (gzip-compressed unless DataLen == OriginSize)

	BaseName   string              `json:"n"`
	OriginSize int64               `json:"s,omitempty"`
	DataBegin  int64               `json:"b,omitempty"`
	DataLen    int64               `json:"l,omitempty"`
	Children   []*embeddedFileInfo `json:"c,omitempty"`
}

// GetGzipContent returns the stored gzip bytes and true if this entry is
// stored compressed; otherwise it returns (nil, false).
func (fi *embeddedFileInfo) GetGzipContent() ([]byte, bool) {
	// when generating the bindata, if the compressed data equals or is larger than the original data, we store the original data
	if fi.DataLen == fi.OriginSize {
		return nil, false
	}
	return fi.data, true
}
|
||||
|
||||
type EmbeddedFileBase struct {
|
||||
info *embeddedFileInfo
|
||||
dataReader io.ReadSeeker
|
||||
seekPos int64
|
||||
}
|
||||
|
||||
func (f *EmbeddedFileBase) ReadDir(n int) ([]fs.DirEntry, error) {
|
||||
// this method is used to satisfy the "func (f ioFile) ReadDir(...)" in httpfs
|
||||
l, err := f.info.fs.ReadDir(f.info.fullName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if n < 0 || n > len(l) {
|
||||
return l, nil
|
||||
}
|
||||
return l[:n], nil
|
||||
}
|
||||
|
||||
// EmbeddedOriginFile is an opened file whose content is stored uncompressed.
type EmbeddedOriginFile struct {
	EmbeddedFileBase
}

// EmbeddedCompressedFile is an opened file whose content is stored gzip-compressed;
// reads go through the decompressor, which tracks how far it has decoded.
type EmbeddedCompressedFile struct {
	EmbeddedFileBase
	decompressor    decompressor
	decompressorPos int64
}

// embeddedFS is a read-only fs.ReadDirFS over a single bindata blob.
type embeddedFS struct {
	meta func() *EmbeddedMeta // lazily parsed metadata trailer

	files   map[string]*embeddedFileInfo // cache of resolved paths
	filesMu sync.RWMutex                 // guards files

	data []byte // the whole bindata blob (file contents + '\n' + JSON trailer)
}

// EmbeddedMeta is the JSON metadata trailer appended to the bindata blob.
type EmbeddedMeta struct {
	Root *embeddedFileInfo
}
|
||||
|
||||
func NewEmbeddedFS(data []byte) fs.ReadDirFS {
|
||||
efs := &embeddedFS{data: data, files: make(map[string]*embeddedFileInfo)}
|
||||
efs.meta = sync.OnceValue(func() *EmbeddedMeta {
|
||||
var meta EmbeddedMeta
|
||||
p := bytes.LastIndexByte(data, '\n')
|
||||
if p < 0 {
|
||||
return &meta
|
||||
}
|
||||
if err := json.Unmarshal(data[p+1:], &meta); err != nil {
|
||||
panic("embedded file is not valid")
|
||||
}
|
||||
return &meta
|
||||
})
|
||||
return efs
|
||||
}
|
||||
|
||||
var _ fs.ReadDirFS = (*embeddedFS)(nil)
|
||||
|
||||
func (e *embeddedFS) ReadDir(name string) (l []fs.DirEntry, err error) {
|
||||
fi, err := e.getFileInfo(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !fi.IsDir() {
|
||||
return nil, fs.ErrNotExist
|
||||
}
|
||||
l = make([]fs.DirEntry, len(fi.Children))
|
||||
for i, child := range fi.Children {
|
||||
l[i], err = e.getFileInfo(name + "/" + child.BaseName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return l, nil
|
||||
}
|
||||
|
||||
func (e *embeddedFS) getFileInfo(fullName string) (*embeddedFileInfo, error) {
|
||||
// no need to do heavy "path.Clean()" because we don't want to support "foo/../bar" or absolute paths
|
||||
fullName = strings.TrimPrefix(fullName, "./")
|
||||
if fullName == "" {
|
||||
fullName = "."
|
||||
}
|
||||
|
||||
e.filesMu.RLock()
|
||||
fi := e.files[fullName]
|
||||
e.filesMu.RUnlock()
|
||||
if fi != nil {
|
||||
return fi, nil
|
||||
}
|
||||
|
||||
fields := strings.Split(fullName, "/")
|
||||
fi = e.meta().Root
|
||||
if fullName != "." {
|
||||
found := true
|
||||
for _, field := range fields {
|
||||
for _, child := range fi.Children {
|
||||
if found = child.BaseName == field; found {
|
||||
fi = child
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return nil, fs.ErrNotExist
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
e.filesMu.Lock()
|
||||
defer e.filesMu.Unlock()
|
||||
if fi != nil {
|
||||
fi.fs = e
|
||||
fi.fullName = fullName
|
||||
fi.data = e.data[fi.DataBegin : fi.DataBegin+fi.DataLen]
|
||||
e.files[fullName] = fi // do not cache nil, otherwise keeping accessing random non-existing file will cause OOM
|
||||
return fi, nil
|
||||
}
|
||||
return nil, fs.ErrNotExist
|
||||
}
|
||||
|
||||
func (e *embeddedFS) Open(name string) (fs.File, error) {
|
||||
info, err := e.getFileInfo(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
base := EmbeddedFileBase{info: info}
|
||||
base.dataReader = bytes.NewReader(base.info.data)
|
||||
if info.DataLen != info.OriginSize {
|
||||
decomp, err := gzip.NewReader(base.dataReader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &EmbeddedCompressedFile{EmbeddedFileBase: base, decompressor: decomp}, nil
|
||||
}
|
||||
return &EmbeddedOriginFile{base}, nil
|
||||
}
|
||||
|
||||
var (
	_ EmbeddedFileInfo = (*embeddedFileInfo)(nil)
	_ EmbeddedFile     = (*EmbeddedOriginFile)(nil)
	_ EmbeddedFile     = (*EmbeddedCompressedFile)(nil)
)

// Read reads directly from the stored bytes: for uncompressed entries the
// backing reader already contains the original content.
func (f *EmbeddedOriginFile) Read(p []byte) (n int, err error) {
	return f.dataReader.Read(p)
}
|
||||
|
||||
// Read decompresses from the current logical position. gzip streams cannot
// seek, so a backward seek (seekPos behind the decompressor) restarts
// decompression from the beginning, and a forward gap is skipped by discarding
// decompressed bytes until the target position is reached.
func (f *EmbeddedCompressedFile) Read(p []byte) (n int, err error) {
	if f.decompressorPos > f.seekPos {
		// seeked backwards: restart from the beginning of the stored data
		if err = f.decompressor.Reset(bytes.NewReader(f.info.data)); err != nil {
			return 0, err
		}
		f.decompressorPos = 0
	}
	if f.decompressorPos < f.seekPos {
		// seeked forwards: discard until we reach the requested position
		if _, err = io.CopyN(io.Discard, f.decompressor, f.seekPos-f.decompressorPos); err != nil {
			return 0, err
		}
		f.decompressorPos = f.seekPos
	}
	n, err = f.decompressor.Read(p)
	f.decompressorPos += int64(n)
	f.seekPos = f.decompressorPos
	return n, err
}
|
||||
|
||||
func (f *EmbeddedFileBase) Seek(offset int64, whence int) (int64, error) {
|
||||
switch whence {
|
||||
case io.SeekStart:
|
||||
f.seekPos = offset
|
||||
case io.SeekCurrent:
|
||||
f.seekPos += offset
|
||||
case io.SeekEnd:
|
||||
f.seekPos = f.info.OriginSize + offset
|
||||
}
|
||||
return f.seekPos, nil
|
||||
}
|
||||
|
||||
// Stat returns the file's metadata entry (which implements fs.FileInfo).
func (f *EmbeddedFileBase) Stat() (fs.FileInfo, error) {
	return f.info, nil
}

// Close is a no-op: the data is a sub-slice of the embedded blob.
func (f *EmbeddedOriginFile) Close() error {
	return nil
}

// Close releases the gzip decompressor.
func (f *EmbeddedCompressedFile) Close() error {
	return f.decompressor.Close()
}
|
||||
|
||||
// Name implements fs.FileInfo/fs.DirEntry: the base name of the entry.
func (fi *embeddedFileInfo) Name() string {
	return fi.BaseName
}

// Size reports the ORIGINAL (decompressed) size, not the stored size.
func (fi *embeddedFileInfo) Size() int64 {
	return fi.OriginSize
}

// Mode: embedded assets are read-only (0555 for dirs, 0444 for files).
func (fi *embeddedFileInfo) Mode() fs.FileMode {
	return util.Iif(fi.IsDir(), fs.ModeDir|0o555, 0o444)
}

// ModTime reports the executable's mod time for every entry (reproducible builds).
func (fi *embeddedFileInfo) ModTime() time.Time {
	return getExecutableModTime()
}

// IsDir: directories are marked by a non-nil Children slice (see the generator).
func (fi *embeddedFileInfo) IsDir() bool {
	return fi.Children != nil
}

func (fi *embeddedFileInfo) Sys() any {
	return nil
}

// Type implements fs.DirEntry.
func (fi *embeddedFileInfo) Type() fs.FileMode {
	return util.Iif(fi.IsDir(), fs.ModeDir, 0)
}

// Info implements fs.DirEntry: the entry is its own FileInfo.
func (fi *embeddedFileInfo) Info() (fs.FileInfo, error) {
	return fi, nil
}
|
||||
|
||||
// getExecutableModTime returns the modification time of the executable file.
// In bindata, we can't use the ModTime of the files because we need to make the build reproducible.
// Any failure along the way yields the zero time.
var getExecutableModTime = sync.OnceValue(func() (modTime time.Time) {
	exePath, err := os.Executable()
	if err != nil {
		return modTime
	}
	if exePath, err = filepath.Abs(exePath); err != nil {
		return modTime
	}
	if exePath, err = filepath.EvalSymlinks(exePath); err != nil {
		return modTime
	}
	st, err := os.Stat(exePath)
	if err != nil {
		return modTime
	}
	return st.ModTime()
})
|
||||
|
||||
func GenerateEmbedBindata(fsRootPath, outputFile string) error {
|
||||
output, err := os.OpenFile(outputFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer output.Close()
|
||||
|
||||
meta := &EmbeddedMeta{}
|
||||
meta.Root = &embeddedFileInfo{}
|
||||
var outputOffset int64
|
||||
var embedFiles func(parent *embeddedFileInfo, fsPath, embedPath string) error
|
||||
embedFiles = func(parent *embeddedFileInfo, fsPath, embedPath string) error {
|
||||
dirEntries, err := os.ReadDir(fsPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, dirEntry := range dirEntries {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if dirEntry.IsDir() {
|
||||
child := &embeddedFileInfo{
|
||||
BaseName: dirEntry.Name(),
|
||||
Children: []*embeddedFileInfo{}, // non-nil means it's a directory
|
||||
}
|
||||
parent.Children = append(parent.Children, child)
|
||||
if err = embedFiles(child, filepath.Join(fsPath, dirEntry.Name()), path.Join(embedPath, dirEntry.Name())); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
data, err := os.ReadFile(filepath.Join(fsPath, dirEntry.Name()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var compressed bytes.Buffer
|
||||
gz, _ := gzip.NewWriterLevel(&compressed, gzip.BestCompression)
|
||||
if _, err = gz.Write(data); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = gz.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// only use the compressed data if it is smaller than the original data
|
||||
outputBytes := util.Iif(len(compressed.Bytes()) < len(data), compressed.Bytes(), data)
|
||||
child := &embeddedFileInfo{
|
||||
BaseName: dirEntry.Name(),
|
||||
OriginSize: int64(len(data)),
|
||||
DataBegin: outputOffset,
|
||||
DataLen: int64(len(outputBytes)),
|
||||
}
|
||||
if _, err = output.Write(outputBytes); err != nil {
|
||||
return err
|
||||
}
|
||||
outputOffset += child.DataLen
|
||||
parent.Children = append(parent.Children, child)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if err = embedFiles(meta.Root, fsRootPath, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
jsonBuf, err := json.Marshal(meta) // can't use json.NewEncoder here because it writes extra EOL
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, _ = output.Write([]byte{'\n'})
|
||||
_, err = output.Write(jsonBuf)
|
||||
return err
|
||||
}
|
98
modules/assetfs/embed_test.go
Normal file
98
modules/assetfs/embed_test.go
Normal file
@@ -0,0 +1,98 @@
|
||||
// Copyright 2025 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package assetfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestEmbed exercises the full round trip: generate a bindata blob from a
// temporary directory tree, reopen it as a filesystem, and verify file reads,
// compression selection, directory listing, modes, and http.FileSystem usage.
func TestEmbed(t *testing.T) {
	tmpDir := t.TempDir()
	tmpDataDir := tmpDir + "/data"
	// a.txt ("a") stays uncompressed; b.txt (1000 x "a") compresses well
	_ = os.MkdirAll(tmpDataDir+"/foo/bar", 0o755)
	_ = os.WriteFile(tmpDataDir+"/a.txt", []byte("a"), 0o644)
	_ = os.WriteFile(tmpDataDir+"/foo/bar/b.txt", bytes.Repeat([]byte("a"), 1000), 0o644)
	_ = os.WriteFile(tmpDataDir+"/foo/c.txt", []byte("c"), 0o644)
	require.NoError(t, GenerateEmbedBindata(tmpDataDir, tmpDir+"/out.dat"))

	data, err := os.ReadFile(tmpDir + "/out.dat")
	require.NoError(t, err)
	efs := NewEmbeddedFS(data)

	// test a non-existing file
	_, err = fs.ReadFile(efs, "not exist")
	assert.ErrorIs(t, err, fs.ErrNotExist)

	// test a normal file (no compression)
	content, err := fs.ReadFile(efs, "a.txt")
	require.NoError(t, err)
	assert.Equal(t, "a", string(content))
	fi, err := fs.Stat(efs, "a.txt")
	require.NoError(t, err)
	_, ok := fi.(EmbeddedFileInfo).GetGzipContent()
	assert.False(t, ok)

	// test a compressed file
	content, err = fs.ReadFile(efs, "foo/bar/b.txt")
	require.NoError(t, err)
	assert.Equal(t, bytes.Repeat([]byte("a"), 1000), content)
	fi, err = fs.Stat(efs, "foo/bar/b.txt")
	require.NoError(t, err)
	assert.False(t, fi.Mode().IsDir())
	assert.True(t, fi.Mode().IsRegular())
	gzipContent, ok := fi.(EmbeddedFileInfo).GetGzipContent()
	assert.True(t, ok)
	assert.Greater(t, len(gzipContent), 1)
	assert.Less(t, len(gzipContent), 1000)

	// test list root directory
	entries, err := fs.ReadDir(efs, ".")
	require.NoError(t, err)
	assert.Len(t, entries, 2)
	assert.Equal(t, "a.txt", entries[0].Name())
	assert.False(t, entries[0].IsDir())

	// test list subdirectory
	entries, err = fs.ReadDir(efs, "foo")
	require.NoError(t, err)
	require.Len(t, entries, 2)
	assert.Equal(t, "bar", entries[0].Name())
	assert.True(t, entries[0].IsDir())
	assert.Equal(t, "c.txt", entries[1].Name())
	assert.False(t, entries[1].IsDir())

	// test directory mode
	fi, err = fs.Stat(efs, "foo")
	require.NoError(t, err)
	assert.True(t, fi.IsDir())
	assert.True(t, fi.Mode().IsDir())
	assert.False(t, fi.Mode().IsRegular())

	// test httpfs
	hfs := http.FS(efs)
	hf, err := hfs.Open("foo/bar/b.txt")
	require.NoError(t, err)
	hi, err := hf.Stat()
	require.NoError(t, err)
	fiEmbedded, ok := hi.(EmbeddedFileInfo)
	require.True(t, ok)
	gzipContent, ok = fiEmbedded.GetGzipContent()
	assert.True(t, ok)
	assert.Greater(t, len(gzipContent), 1)
	assert.Less(t, len(gzipContent), 1000)

	// test httpfs directory listing
	hf, err = hfs.Open("foo")
	require.NoError(t, err)
	dirs, err := hf.Readdir(1)
	require.NoError(t, err)
	assert.Len(t, dirs, 1)
}
|
@@ -52,8 +52,8 @@ func Local(name, base string, sub ...string) *Layer {
|
||||
}
|
||||
|
||||
// Bindata returns a new Layer with the given name, it serves files from the given bindata asset.
|
||||
func Bindata(name string, fs http.FileSystem) *Layer {
|
||||
return &Layer{name: name, fs: fs}
|
||||
func Bindata(name string, fs fs.FS) *Layer {
|
||||
return &Layer{name: name, fs: http.FS(fs)}
|
||||
}
|
||||
|
||||
// LayeredFS is a layered asset file-system. It works like http.FileSystem, but it can have multiple layers.
|
||||
|
@@ -3,6 +3,28 @@
|
||||
|
||||
//go:build bindata
|
||||
|
||||
//go:generate go run ../../build/generate-bindata.go ../../modules/migration/schemas bindata.dat
|
||||
|
||||
package migration
|
||||
|
||||
//go:generate go run ../../build/generate-bindata.go ../../modules/migration/schemas migration bindata.go
|
||||
import (
|
||||
"io"
|
||||
"io/fs"
|
||||
"path"
|
||||
"sync"
|
||||
|
||||
_ "embed"
|
||||
|
||||
"code.gitea.io/gitea/modules/assetfs"
|
||||
)
|
||||
|
||||
//go:embed bindata.dat
|
||||
var bindata []byte
|
||||
|
||||
var BuiltinAssets = sync.OnceValue(func() fs.FS {
|
||||
return assetfs.NewEmbeddedFS(bindata)
|
||||
})
|
||||
|
||||
func openSchema(filename string) (io.ReadCloser, error) {
|
||||
return BuiltinAssets().Open(path.Base(filename))
|
||||
}
|
||||
|
@@ -1,15 +0,0 @@
|
||||
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
//go:build bindata
|
||||
|
||||
package migration
|
||||
|
||||
import (
|
||||
"io"
|
||||
"path"
|
||||
)
|
||||
|
||||
func openSchema(filename string) (io.ReadCloser, error) {
|
||||
return Assets.Open(path.Base(filename))
|
||||
}
|
@@ -3,6 +3,21 @@
|
||||
|
||||
//go:build bindata
|
||||
|
||||
//go:generate go run ../../build/generate-bindata.go ../../options bindata.dat
|
||||
|
||||
package options
|
||||
|
||||
//go:generate go run ../../build/generate-bindata.go ../../options options bindata.go
|
||||
import (
|
||||
"sync"
|
||||
|
||||
_ "embed"
|
||||
|
||||
"code.gitea.io/gitea/modules/assetfs"
|
||||
)
|
||||
|
||||
//go:embed bindata.dat
|
||||
var bindata []byte
|
||||
|
||||
var BuiltinAssets = sync.OnceValue(func() *assetfs.Layer {
|
||||
return assetfs.Bindata("builtin(bindata)", assetfs.NewEmbeddedFS(bindata))
|
||||
})
|
||||
|
@@ -1,14 +0,0 @@
|
||||
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
//go:build bindata
|
||||
|
||||
package options
|
||||
|
||||
import (
|
||||
"code.gitea.io/gitea/modules/assetfs"
|
||||
)
|
||||
|
||||
func BuiltinAssets() *assetfs.Layer {
|
||||
return assetfs.Bindata("builtin(bindata)", Assets)
|
||||
}
|
@@ -89,19 +89,16 @@ func handleRequest(w http.ResponseWriter, req *http.Request, fs http.FileSystem,
|
||||
servePublicAsset(w, req, fi, fi.ModTime(), f)
|
||||
}
|
||||
|
||||
type GzipBytesProvider interface {
|
||||
GzipBytes() []byte
|
||||
}
|
||||
|
||||
// servePublicAsset serve http content
|
||||
func servePublicAsset(w http.ResponseWriter, req *http.Request, fi os.FileInfo, modtime time.Time, content io.ReadSeeker) {
|
||||
setWellKnownContentType(w, fi.Name())
|
||||
httpcache.SetCacheControlInHeader(w.Header(), httpcache.CacheControlForPublicStatic())
|
||||
encodings := parseAcceptEncoding(req.Header.Get("Accept-Encoding"))
|
||||
if encodings.Contains("gzip") {
|
||||
// try to provide gzip content directly from bindata (provided by vfsgen۰CompressedFileInfo)
|
||||
if compressed, ok := fi.(GzipBytesProvider); ok {
|
||||
rdGzip := bytes.NewReader(compressed.GzipBytes())
|
||||
fiEmbedded, _ := fi.(assetfs.EmbeddedFileInfo)
|
||||
if encodings.Contains("gzip") && fiEmbedded != nil {
|
||||
// try to provide gzip content directly from bindata
|
||||
if gzipBytes, ok := fiEmbedded.GetGzipContent(); ok {
|
||||
rdGzip := bytes.NewReader(gzipBytes)
|
||||
// all gzipped static files (from bindata) are managed by Gitea, so we can make sure every file has the correct ext name
|
||||
// then we can get the correct Content-Type, we do not need to do http.DetectContentType on the decompressed data
|
||||
if w.Header().Get("Content-Type") == "" {
|
||||
|
@@ -5,4 +5,19 @@
|
||||
|
||||
package public
|
||||
|
||||
//go:generate go run ../../build/generate-bindata.go ../../public public bindata.go true
|
||||
//go:generate go run ../../build/generate-bindata.go ../../public bindata.dat
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
_ "embed"
|
||||
|
||||
"code.gitea.io/gitea/modules/assetfs"
|
||||
)
|
||||
|
||||
//go:embed bindata.dat
|
||||
var bindata []byte
|
||||
|
||||
var BuiltinAssets = sync.OnceValue(func() *assetfs.Layer {
|
||||
return assetfs.Bindata("builtin(bindata)", assetfs.NewEmbeddedFS(bindata))
|
||||
})
|
||||
|
@@ -1,24 +0,0 @@
|
||||
// Copyright 2016 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
//go:build bindata
|
||||
|
||||
package public
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"code.gitea.io/gitea/modules/assetfs"
|
||||
"code.gitea.io/gitea/modules/timeutil"
|
||||
)
|
||||
|
||||
var _ GzipBytesProvider = (*vfsgen۰CompressedFileInfo)(nil)
|
||||
|
||||
// GlobalModTime provide a global mod time for embedded asset files
|
||||
func GlobalModTime(filename string) time.Time {
|
||||
return timeutil.GetExecutableModTime()
|
||||
}
|
||||
|
||||
func BuiltinAssets() *assetfs.Layer {
|
||||
return assetfs.Bindata("builtin(bindata)", Assets)
|
||||
}
|
@@ -1,22 +0,0 @@
|
||||
// Copyright 2016 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
//go:build bindata
|
||||
|
||||
package templates
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"code.gitea.io/gitea/modules/assetfs"
|
||||
"code.gitea.io/gitea/modules/timeutil"
|
||||
)
|
||||
|
||||
// GlobalModTime provide a global mod time for embedded asset files
|
||||
func GlobalModTime(filename string) time.Time {
|
||||
return timeutil.GetExecutableModTime()
|
||||
}
|
||||
|
||||
func BuiltinAssets() *assetfs.Layer {
|
||||
return assetfs.Bindata("builtin(bindata)", Assets)
|
||||
}
|
@@ -3,6 +3,21 @@
|
||||
|
||||
//go:build bindata
|
||||
|
||||
//go:generate go run ../../build/generate-bindata.go ../../templates bindata.dat
|
||||
|
||||
package templates
|
||||
|
||||
//go:generate go run ../../build/generate-bindata.go ../../templates templates bindata.go true
|
||||
import (
|
||||
"sync"
|
||||
|
||||
_ "embed"
|
||||
|
||||
"code.gitea.io/gitea/modules/assetfs"
|
||||
)
|
||||
|
||||
//go:embed bindata.dat
|
||||
var bindata []byte
|
||||
|
||||
var BuiltinAssets = sync.OnceValue(func() *assetfs.Layer {
|
||||
return assetfs.Bindata("builtin(bindata)", assetfs.NewEmbeddedFS(bindata))
|
||||
})
|
||||
|
@@ -1,50 +0,0 @@
|
||||
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package timeutil
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
)
|
||||
|
||||
var (
|
||||
executablModTime = time.Now()
|
||||
executablModTimeOnce sync.Once
|
||||
)
|
||||
|
||||
// GetExecutableModTime get executable file modified time of current process.
|
||||
func GetExecutableModTime() time.Time {
|
||||
executablModTimeOnce.Do(func() {
|
||||
exePath, err := os.Executable()
|
||||
if err != nil {
|
||||
log.Error("os.Executable: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
exePath, err = filepath.Abs(exePath)
|
||||
if err != nil {
|
||||
log.Error("filepath.Abs: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
exePath, err = filepath.EvalSymlinks(exePath)
|
||||
if err != nil {
|
||||
log.Error("filepath.EvalSymlinks: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
st, err := os.Stat(exePath)
|
||||
if err != nil {
|
||||
log.Error("os.Stat: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
executablModTime = st.ModTime()
|
||||
})
|
||||
return executablModTime
|
||||
}
|
@@ -16,7 +16,7 @@ import (
|
||||
)
|
||||
|
||||
func TestWriterReader(t *testing.T) {
|
||||
testData := prepareTestData(t, 20_000_000)
|
||||
testData := prepareTestData(t, 1_000_000)
|
||||
|
||||
result := bytes.NewBuffer(nil)
|
||||
|
||||
@@ -64,7 +64,7 @@ func TestWriterReader(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSeekableWriterReader(t *testing.T) {
|
||||
testData := prepareTestData(t, 20_000_000)
|
||||
testData := prepareTestData(t, 2_000_000)
|
||||
|
||||
result := bytes.NewBuffer(nil)
|
||||
|
||||
@@ -109,7 +109,7 @@ func TestSeekableWriterReader(t *testing.T) {
|
||||
reader, err := NewSeekableReader(assertReader)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = reader.Seek(10_000_000, io.SeekStart)
|
||||
_, err = reader.Seek(1_000_000, io.SeekStart)
|
||||
require.NoError(t, err)
|
||||
|
||||
data := make([]byte, 1000)
|
||||
@@ -117,7 +117,7 @@ func TestSeekableWriterReader(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, reader.Close())
|
||||
|
||||
assert.Equal(t, testData[10_000_000:10_000_000+1000], data)
|
||||
assert.Equal(t, testData[1_000_000:1_000_000+1000], data)
|
||||
|
||||
// Should seek 3 times,
|
||||
// the first two times are for getting the index,
|
||||
|
Reference in New Issue
Block a user