* Convert files to utf-8 for indexing
* Move utf8 functions to modules/base
* Bump repoIndexerLatestVersion to 3
* Add tests for base/encoding.go
* Changes to pass gosimple
* Move UTF8 funcs into new modules/charset package

tags/v1.10.0-rc1
"code.gitea.io/gitea/models" | "code.gitea.io/gitea/models" | ||||
"code.gitea.io/gitea/models/migrations" | "code.gitea.io/gitea/models/migrations" | ||||
"code.gitea.io/gitea/modules/base" | "code.gitea.io/gitea/modules/base" | ||||
"code.gitea.io/gitea/modules/charset" | |||||
"code.gitea.io/gitea/modules/setting" | "code.gitea.io/gitea/modules/setting" | ||||
"github.com/go-xorm/xorm" | "github.com/go-xorm/xorm" | ||||
if err != nil { | if err != nil { | ||||
return "", err | return "", err | ||||
} | } | ||||
return string(base.RemoveBOMIfPresent(bytes)), nil | |||||
return string(charset.RemoveBOMIfPresent(bytes)), nil | |||||
} | } | ||||
func restoreOldDB(t *testing.T, version string) bool { | func restoreOldDB(t *testing.T, version string) bool { |
"strconv" | "strconv" | ||||
"strings" | "strings" | ||||
"code.gitea.io/gitea/modules/base" | |||||
"code.gitea.io/gitea/modules/charset" | |||||
"code.gitea.io/gitea/modules/git" | "code.gitea.io/gitea/modules/git" | ||||
"code.gitea.io/gitea/modules/highlight" | "code.gitea.io/gitea/modules/highlight" | ||||
"code.gitea.io/gitea/modules/log" | "code.gitea.io/gitea/modules/log" | ||||
"code.gitea.io/gitea/modules/setting" | "code.gitea.io/gitea/modules/setting" | ||||
"github.com/Unknwon/com" | "github.com/Unknwon/com" | ||||
"github.com/sergi/go-diff/diffmatchpatch" | "github.com/sergi/go-diff/diffmatchpatch" | ||||
"golang.org/x/net/html/charset" | |||||
stdcharset "golang.org/x/net/html/charset" | |||||
"golang.org/x/text/transform" | "golang.org/x/text/transform" | ||||
) | ) | ||||
buf.WriteString("\n") | buf.WriteString("\n") | ||||
} | } | ||||
} | } | ||||
charsetLabel, err := base.DetectEncoding(buf.Bytes()) | |||||
charsetLabel, err := charset.DetectEncoding(buf.Bytes()) | |||||
if charsetLabel != "UTF-8" && err == nil { | if charsetLabel != "UTF-8" && err == nil { | ||||
encoding, _ := charset.Lookup(charsetLabel) | |||||
encoding, _ := stdcharset.Lookup(charsetLabel) | |||||
if encoding != nil { | if encoding != nil { | ||||
d := encoding.NewDecoder() | d := encoding.NewDecoder() | ||||
for _, sec := range f.Sections { | for _, sec := range f.Sections { |
"strings" | "strings" | ||||
"code.gitea.io/gitea/modules/base" | "code.gitea.io/gitea/modules/base" | ||||
"code.gitea.io/gitea/modules/charset" | |||||
"code.gitea.io/gitea/modules/git" | "code.gitea.io/gitea/modules/git" | ||||
"code.gitea.io/gitea/modules/indexer" | "code.gitea.io/gitea/modules/indexer" | ||||
"code.gitea.io/gitea/modules/log" | "code.gitea.io/gitea/modules/log" | ||||
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} else if !base.IsTextFile(fileContents) { | } else if !base.IsTextFile(fileContents) { | ||||
// FIXME: UTF-16 files will probably fail here | |||||
return nil | return nil | ||||
} | } | ||||
indexerUpdate := indexer.RepoIndexerUpdate{ | indexerUpdate := indexer.RepoIndexerUpdate{ | ||||
Op: indexer.RepoIndexerOpUpdate, | Op: indexer.RepoIndexerOpUpdate, | ||||
Data: &indexer.RepoIndexerData{ | Data: &indexer.RepoIndexerData{ | ||||
RepoID: repo.ID, | RepoID: repo.ID, | ||||
Content: string(fileContents), | |||||
Content: string(charset.ToUTF8DropErrors(fileContents)), | |||||
}, | }, | ||||
} | } | ||||
return indexerUpdate.AddToFlushingBatch(batch) | return indexerUpdate.AddToFlushingBatch(batch) |
package base | package base | ||||
import ( | import ( | ||||
"bytes" | |||||
"crypto/md5" | "crypto/md5" | ||||
"crypto/rand" | "crypto/rand" | ||||
"crypto/sha1" | "crypto/sha1" | ||||
"strings" | "strings" | ||||
"time" | "time" | ||||
"unicode" | "unicode" | ||||
"unicode/utf8" | |||||
"code.gitea.io/gitea/modules/git" | "code.gitea.io/gitea/modules/git" | ||||
"code.gitea.io/gitea/modules/log" | "code.gitea.io/gitea/modules/log" | ||||
"github.com/Unknwon/com" | "github.com/Unknwon/com" | ||||
"github.com/Unknwon/i18n" | "github.com/Unknwon/i18n" | ||||
"github.com/gogits/chardet" | |||||
) | ) | ||||
// UTF8BOM is the utf-8 byte-order marker | |||||
var UTF8BOM = []byte{'\xef', '\xbb', '\xbf'} | |||||
// EncodeMD5 encodes string to md5 hex value. | // EncodeMD5 encodes string to md5 hex value. | ||||
func EncodeMD5(str string) string { | func EncodeMD5(str string) string { | ||||
m := md5.New() | m := md5.New() | ||||
return TruncateString(sha1, 10) | return TruncateString(sha1, 10) | ||||
} | } | ||||
// DetectEncoding detect the encoding of content | |||||
func DetectEncoding(content []byte) (string, error) { | |||||
if utf8.Valid(content) { | |||||
log.Debug("Detected encoding: utf-8 (fast)") | |||||
return "UTF-8", nil | |||||
} | |||||
textDetector := chardet.NewTextDetector() | |||||
var detectContent []byte | |||||
if len(content) < 1024 { | |||||
// Check if original content is valid | |||||
if _, err := textDetector.DetectBest(content); err != nil { | |||||
return "", err | |||||
} | |||||
times := 1024 / len(content) | |||||
detectContent = make([]byte, 0, times*len(content)) | |||||
for i := 0; i < times; i++ { | |||||
detectContent = append(detectContent, content...) | |||||
} | |||||
} else { | |||||
detectContent = content | |||||
} | |||||
result, err := textDetector.DetectBest(detectContent) | |||||
if err != nil { | |||||
return "", err | |||||
} | |||||
if result.Charset != "UTF-8" && len(setting.Repository.AnsiCharset) > 0 { | |||||
log.Debug("Using default AnsiCharset: %s", setting.Repository.AnsiCharset) | |||||
return setting.Repository.AnsiCharset, err | |||||
} | |||||
log.Debug("Detected encoding: %s", result.Charset) | |||||
return result.Charset, err | |||||
} | |||||
// RemoveBOMIfPresent removes a UTF-8 BOM from a []byte | |||||
func RemoveBOMIfPresent(content []byte) []byte { | |||||
if len(content) > 2 && bytes.Equal(content[0:3], UTF8BOM) { | |||||
return content[3:] | |||||
} | |||||
return content | |||||
} | |||||
// BasicAuthDecode decode basic auth string | // BasicAuthDecode decode basic auth string | ||||
func BasicAuthDecode(encoded string) (string, string, error) { | func BasicAuthDecode(encoded string) (string, string, error) { | ||||
s, err := base64.StdEncoding.DecodeString(encoded) | s, err := base64.StdEncoding.DecodeString(encoded) |
assert.Equal(t, "veryverylo", ShortSha("veryverylong")) | assert.Equal(t, "veryverylo", ShortSha("veryverylong")) | ||||
} | } | ||||
func TestDetectEncoding(t *testing.T) { | |||||
testSuccess := func(b []byte, expected string) { | |||||
encoding, err := DetectEncoding(b) | |||||
assert.NoError(t, err) | |||||
assert.Equal(t, expected, encoding) | |||||
} | |||||
// utf-8 | |||||
b := []byte("just some ascii") | |||||
testSuccess(b, "UTF-8") | |||||
// utf-8-sig: "hey" (with BOM) | |||||
b = []byte{0xef, 0xbb, 0xbf, 0x68, 0x65, 0x79} | |||||
testSuccess(b, "UTF-8") | |||||
// utf-16: "hey<accented G>" | |||||
b = []byte{0xff, 0xfe, 0x68, 0x00, 0x65, 0x00, 0x79, 0x00, 0xf4, 0x01} | |||||
testSuccess(b, "UTF-16LE") | |||||
// iso-8859-1: d<accented e>cor<newline> | |||||
b = []byte{0x44, 0xe9, 0x63, 0x6f, 0x72, 0x0a} | |||||
encoding, err := DetectEncoding(b) | |||||
assert.NoError(t, err) | |||||
// due to a race condition in `chardet` library, it could either detect | |||||
// "ISO-8859-1" or "IS0-8859-2" here. Technically either is correct, so | |||||
// we accept either. | |||||
assert.Contains(t, encoding, "ISO-8859") | |||||
setting.Repository.AnsiCharset = "placeholder" | |||||
testSuccess(b, "placeholder") | |||||
// invalid bytes | |||||
b = []byte{0xfa} | |||||
_, err = DetectEncoding(b) | |||||
assert.Error(t, err) | |||||
} | |||||
func TestBasicAuthDecode(t *testing.T) { | func TestBasicAuthDecode(t *testing.T) { | ||||
_, _, err := BasicAuthDecode("?") | _, _, err := BasicAuthDecode("?") | ||||
assert.Equal(t, "illegal base64 data at input byte 0", err.Error()) | assert.Equal(t, "illegal base64 data at input byte 0", err.Error()) |
// Copyright 2014 The Gogs Authors. All rights reserved. | |||||
// Use of this source code is governed by a MIT-style | |||||
// license that can be found in the LICENSE file. | |||||
package charset | |||||
import ( | |||||
"bytes" | |||||
"fmt" | |||||
"unicode/utf8" | |||||
"code.gitea.io/gitea/modules/log" | |||||
"code.gitea.io/gitea/modules/setting" | |||||
"github.com/gogits/chardet" | |||||
"golang.org/x/net/html/charset" | |||||
"golang.org/x/text/transform" | |||||
) | |||||
// UTF8BOM is the utf-8 byte-order marker | |||||
var UTF8BOM = []byte{'\xef', '\xbb', '\xbf'} | |||||
// ToUTF8WithErr converts content to UTF8 encoding | |||||
func ToUTF8WithErr(content []byte) (string, error) { | |||||
charsetLabel, err := DetectEncoding(content) | |||||
if err != nil { | |||||
return "", err | |||||
} else if charsetLabel == "UTF-8" { | |||||
return string(RemoveBOMIfPresent(content)), nil | |||||
} | |||||
encoding, _ := charset.Lookup(charsetLabel) | |||||
if encoding == nil { | |||||
return string(content), fmt.Errorf("Unknown encoding: %s", charsetLabel) | |||||
} | |||||
// If there is an error, we concatenate the nicely decoded part and the | |||||
// original left over. This way we won't lose data. | |||||
result, n, err := transform.Bytes(encoding.NewDecoder(), content) | |||||
if err != nil { | |||||
result = append(result, content[n:]...) | |||||
} | |||||
result = RemoveBOMIfPresent(result) | |||||
return string(result), err | |||||
} | |||||
// ToUTF8WithFallback detects the encoding of content and coverts to UTF-8 if possible | |||||
func ToUTF8WithFallback(content []byte) []byte { | |||||
charsetLabel, err := DetectEncoding(content) | |||||
if err != nil || charsetLabel == "UTF-8" { | |||||
return RemoveBOMIfPresent(content) | |||||
} | |||||
encoding, _ := charset.Lookup(charsetLabel) | |||||
if encoding == nil { | |||||
return content | |||||
} | |||||
// If there is an error, we concatenate the nicely decoded part and the | |||||
// original left over. This way we won't lose data. | |||||
result, n, err := transform.Bytes(encoding.NewDecoder(), content) | |||||
if err != nil { | |||||
return append(result, content[n:]...) | |||||
} | |||||
return RemoveBOMIfPresent(result) | |||||
} | |||||
// ToUTF8 converts content to UTF8 encoding and ignore error | |||||
func ToUTF8(content string) string { | |||||
res, _ := ToUTF8WithErr([]byte(content)) | |||||
return res | |||||
} | |||||
// ToUTF8DropErrors makes sure the return string is valid utf-8; attempts conversion if possible.
// Unlike ToUTF8WithFallback, undecodable bytes are dropped (each replaced
// by a single space) rather than kept verbatim, so some data may be lost.
func ToUTF8DropErrors(content []byte) []byte {
	charsetLabel, err := DetectEncoding(content)
	if err != nil || charsetLabel == "UTF-8" {
		// Already UTF-8 (or undetectable): return as-is, minus any BOM.
		return RemoveBOMIfPresent(content)
	}
	encoding, _ := charset.Lookup(charsetLabel)
	if encoding == nil {
		// No decoder registered for the detected charset; return raw bytes.
		return content
	}

	// We ignore any non-decodable parts from the file.
	// Some parts might be lost
	var decoded []byte
	decoder := encoding.NewDecoder()
	idx := 0
	for {
		// n is the number of source bytes consumed before the error
		// (transform.Bytes semantics); on success the whole tail decoded.
		result, n, err := transform.Bytes(decoder, content[idx:])
		decoded = append(decoded, result...)
		if err == nil {
			break
		}
		// Substitute a space for the offending byte and resume decoding
		// just past it. idx always advances by at least 1, so the loop
		// terminates.
		decoded = append(decoded, ' ')
		idx = idx + n + 1
		if idx >= len(content) {
			break
		}
	}
	return RemoveBOMIfPresent(decoded)
}
// RemoveBOMIfPresent removes a UTF-8 BOM from a []byte | |||||
func RemoveBOMIfPresent(content []byte) []byte { | |||||
if len(content) > 2 && bytes.Equal(content[0:3], UTF8BOM) { | |||||
return content[3:] | |||||
} | |||||
return content | |||||
} | |||||
// DetectEncoding detect the encoding of content
func DetectEncoding(content []byte) (string, error) {
	// Fast path: valid UTF-8 (which includes pure ASCII and empty input)
	// needs no statistical detection.
	if utf8.Valid(content) {
		log.Debug("Detected encoding: utf-8 (fast)")
		return "UTF-8", nil
	}
	textDetector := chardet.NewTextDetector()
	var detectContent []byte
	if len(content) < 1024 {
		// Check if original content is valid
		if _, err := textDetector.DetectBest(content); err != nil {
			return "", err
		}
		// Repeat short inputs to give the statistical detector more data.
		// content is non-empty here (empty input is valid UTF-8 and
		// returned above), so the division is safe.
		times := 1024 / len(content)
		detectContent = make([]byte, 0, times*len(content))
		for i := 0; i < times; i++ {
			detectContent = append(detectContent, content...)
		}
	} else {
		detectContent = content
	}
	result, err := textDetector.DetectBest(detectContent)
	if err != nil {
		return "", err
	}
	// FIXME: to properly decouple this function the fallback ANSI charset should be passed as an argument
	if result.Charset != "UTF-8" && len(setting.Repository.AnsiCharset) > 0 {
		// Any non-UTF-8 detection result is overridden by the configured
		// fallback charset.
		log.Debug("Using default AnsiCharset: %s", setting.Repository.AnsiCharset)
		return setting.Repository.AnsiCharset, err
	}
	log.Debug("Detected encoding: %s", result.Charset)
	return result.Charset, err
}
// Copyright 2019 The Gitea Authors. All rights reserved. | |||||
// Use of this source code is governed by a MIT-style | |||||
// license that can be found in the LICENSE file. | |||||
package charset | |||||
import ( | |||||
"testing" | |||||
"code.gitea.io/gitea/modules/setting" | |||||
"github.com/stretchr/testify/assert" | |||||
) | |||||
func TestRemoveBOMIfPresent(t *testing.T) { | |||||
res := RemoveBOMIfPresent([]byte{0xc3, 0xa1, 0xc3, 0xa9, 0xc3, 0xad, 0xc3, 0xb3, 0xc3, 0xba}) | |||||
assert.Equal(t, []byte{0xc3, 0xa1, 0xc3, 0xa9, 0xc3, 0xad, 0xc3, 0xb3, 0xc3, 0xba}, res) | |||||
res = RemoveBOMIfPresent([]byte{0xef, 0xbb, 0xbf, 0xc3, 0xa1, 0xc3, 0xa9, 0xc3, 0xad, 0xc3, 0xb3, 0xc3, 0xba}) | |||||
assert.Equal(t, []byte{0xc3, 0xa1, 0xc3, 0xa9, 0xc3, 0xad, 0xc3, 0xb3, 0xc3, 0xba}, res) | |||||
} | |||||
func TestToUTF8WithErr(t *testing.T) {
	var res string
	var err error

	// Plain ASCII is valid UTF-8 and passes through unchanged.
	res, err = ToUTF8WithErr([]byte{0x41, 0x42, 0x43})
	assert.Equal(t, "ABC", res)
	assert.NoError(t, err)

	// Valid UTF-8 ("áéíóú") is returned as-is.
	res, err = ToUTF8WithErr([]byte{0xc3, 0xa1, 0xc3, 0xa9, 0xc3, 0xad, 0xc3, 0xb3, 0xc3, 0xba})
	assert.Equal(t, "áéíóú", res)
	assert.NoError(t, err)

	// Same content with a leading UTF-8 BOM: the BOM is stripped.
	res, err = ToUTF8WithErr([]byte{0xef, 0xbb, 0xbf, 0xc3, 0xa1, 0xc3, 0xa9, 0xc3, 0xad, 0xc3, 0xb3, 0xc3, 0xba})
	assert.Equal(t, "áéíóú", res)
	assert.NoError(t, err)

	// ISO-8859-1-style "Hola, así cómo ños" is detected and converted.
	// NOTE(review): this was previously annotated "This test FAILS", yet the
	// assertions below expect success — confirm whether that note was stale.
	res, err = ToUTF8WithErr([]byte{0x48, 0x6F, 0x6C, 0x61, 0x2C, 0x20, 0x61, 0x73, 0xED, 0x20, 0x63, 0xF3, 0x6D, 0x6F, 0x20, 0xF1, 0x6F, 0x73})
	assert.Equal(t, "Hola, así cómo ños", res)
	assert.NoError(t, err)

	res, err = ToUTF8WithErr([]byte{0x48, 0x6F, 0x6C, 0x61, 0x2C, 0x20, 0x61, 0x73, 0xED, 0x20, 0x63, 0xF3, 0x6D, 0x6F, 0x20, 0x07, 0xA4, 0x6F, 0x73})
	// Do not fail for differences in invalid cases, as the library might change the conversion criteria for those
	assert.Regexp(t, "^Hola, así cómo", res)
	assert.NoError(t, err)

	res, err = ToUTF8WithErr([]byte{0x48, 0x6F, 0x6C, 0x61, 0x2C, 0x20, 0x61, 0x73, 0xED, 0x20, 0x63, 0xF3, 0x6D, 0x6F, 0x20, 0x81, 0xA4, 0x6F, 0x73})
	// Do not fail for differences in invalid cases, as the library might change the conversion criteria for those
	assert.Regexp(t, "^Hola, así cómo", res)
	assert.NoError(t, err)

	// Japanese (Shift-JIS)
	res, err = ToUTF8WithErr([]byte{0x93, 0xFA, 0x91, 0xAE, 0x94, 0xE9, 0x82, 0xBC, 0x82, 0xB5, 0x82, 0xBF, 0x82, 0xE3, 0x81, 0x42})
	assert.Equal(t, "日属秘ぞしちゅ。", res)
	assert.NoError(t, err)

	// NUL bytes are valid UTF-8 and must be preserved verbatim.
	res, err = ToUTF8WithErr([]byte{0x00, 0x00, 0x00, 0x00})
	assert.Equal(t, "\x00\x00\x00\x00", res)
	assert.NoError(t, err)
}
func TestToUTF8WithFallback(t *testing.T) {
	// Plain ASCII passes through unchanged.
	res := ToUTF8WithFallback([]byte{0x41, 0x42, 0x43})
	assert.Equal(t, []byte("ABC"), res)

	// Valid UTF-8 ("áéíóú") is returned as-is.
	res = ToUTF8WithFallback([]byte{0xc3, 0xa1, 0xc3, 0xa9, 0xc3, 0xad, 0xc3, 0xb3, 0xc3, 0xba})
	assert.Equal(t, []byte("áéíóú"), res)

	// Same content with a leading UTF-8 BOM: the BOM is stripped.
	res = ToUTF8WithFallback([]byte{0xef, 0xbb, 0xbf, 0xc3, 0xa1, 0xc3, 0xa9, 0xc3, 0xad, 0xc3, 0xb3, 0xc3, 0xba})
	assert.Equal(t, []byte("áéíóú"), res)

	// ISO-8859-1-style input is detected and fully converted.
	res = ToUTF8WithFallback([]byte{0x48, 0x6F, 0x6C, 0x61, 0x2C, 0x20, 0x61, 0x73, 0xED, 0x20, 0x63, 0xF3, 0x6D, 0x6F, 0x20, 0xF1, 0x6F, 0x73})
	assert.Equal(t, []byte("Hola, así cómo ños"), res)

	// For inputs with undecodable tails only the stable prefix is checked.
	minmatch := []byte("Hola, así cómo ")

	res = ToUTF8WithFallback([]byte{0x48, 0x6F, 0x6C, 0x61, 0x2C, 0x20, 0x61, 0x73, 0xED, 0x20, 0x63, 0xF3, 0x6D, 0x6F, 0x20, 0x07, 0xA4, 0x6F, 0x73})
	// Do not fail for differences in invalid cases, as the library might change the conversion criteria for those
	assert.Equal(t, minmatch, res[0:len(minmatch)])

	res = ToUTF8WithFallback([]byte{0x48, 0x6F, 0x6C, 0x61, 0x2C, 0x20, 0x61, 0x73, 0xED, 0x20, 0x63, 0xF3, 0x6D, 0x6F, 0x20, 0x81, 0xA4, 0x6F, 0x73})
	// Do not fail for differences in invalid cases, as the library might change the conversion criteria for those
	assert.Equal(t, minmatch, res[0:len(minmatch)])

	// Japanese (Shift-JIS)
	res = ToUTF8WithFallback([]byte{0x93, 0xFA, 0x91, 0xAE, 0x94, 0xE9, 0x82, 0xBC, 0x82, 0xB5, 0x82, 0xBF, 0x82, 0xE3, 0x81, 0x42})
	assert.Equal(t, []byte("日属秘ぞしちゅ。"), res)

	// NUL bytes are valid UTF-8 and must be preserved verbatim.
	res = ToUTF8WithFallback([]byte{0x00, 0x00, 0x00, 0x00})
	assert.Equal(t, []byte{0x00, 0x00, 0x00, 0x00}, res)
}
func TestToUTF8(t *testing.T) {
	// Plain ASCII passes through unchanged.
	res := ToUTF8("ABC")
	assert.Equal(t, "ABC", res)

	// Valid UTF-8 is returned as-is.
	res = ToUTF8("áéíóú")
	assert.Equal(t, "áéíóú", res)

	// With utf-8 BOM
	res = ToUTF8("\ufeffáéíóú")
	assert.Equal(t, "áéíóú", res)

	res = ToUTF8("Hola, así cómo ños")
	assert.Equal(t, "Hola, así cómo ños", res)

	res = ToUTF8("Hola, así cómo \x07ños")
	// Do not fail for differences in invalid cases, as the library might change the conversion criteria for those
	assert.Regexp(t, "^Hola, así cómo", res)

	// This case is left disabled because it was observed to FAIL:
	// res = ToUTF8("Hola, así cómo \x81ños")
	// Do not fail for differences in invalid cases, as the library might change the conversion criteria for those
	// assert.Regexp(t, "^Hola, así cómo", res)

	// Japanese (Shift-JIS)
	res = ToUTF8("\x93\xFA\x91\xAE\x94\xE9\x82\xBC\x82\xB5\x82\xBF\x82\xE3\x81\x42")
	assert.Equal(t, "日属秘ぞしちゅ。", res)

	// NUL bytes are valid UTF-8 and must be preserved verbatim.
	res = ToUTF8("\x00\x00\x00\x00")
	assert.Equal(t, "\x00\x00\x00\x00", res)
}
func TestToUTF8DropErrors(t *testing.T) {
	// Plain ASCII passes through unchanged.
	res := ToUTF8DropErrors([]byte{0x41, 0x42, 0x43})
	assert.Equal(t, []byte("ABC"), res)

	// Valid UTF-8 ("áéíóú") is returned as-is.
	res = ToUTF8DropErrors([]byte{0xc3, 0xa1, 0xc3, 0xa9, 0xc3, 0xad, 0xc3, 0xb3, 0xc3, 0xba})
	assert.Equal(t, []byte("áéíóú"), res)

	// Same content with a leading UTF-8 BOM: the BOM is stripped.
	res = ToUTF8DropErrors([]byte{0xef, 0xbb, 0xbf, 0xc3, 0xa1, 0xc3, 0xa9, 0xc3, 0xad, 0xc3, 0xb3, 0xc3, 0xba})
	assert.Equal(t, []byte("áéíóú"), res)

	// ISO-8859-1-style input is detected and fully converted.
	res = ToUTF8DropErrors([]byte{0x48, 0x6F, 0x6C, 0x61, 0x2C, 0x20, 0x61, 0x73, 0xED, 0x20, 0x63, 0xF3, 0x6D, 0x6F, 0x20, 0xF1, 0x6F, 0x73})
	assert.Equal(t, []byte("Hola, así cómo ños"), res)

	// For inputs with undecodable tails only the stable prefix is checked.
	minmatch := []byte("Hola, así cómo ")

	res = ToUTF8DropErrors([]byte{0x48, 0x6F, 0x6C, 0x61, 0x2C, 0x20, 0x61, 0x73, 0xED, 0x20, 0x63, 0xF3, 0x6D, 0x6F, 0x20, 0x07, 0xA4, 0x6F, 0x73})
	// Do not fail for differences in invalid cases, as the library might change the conversion criteria for those
	assert.Equal(t, minmatch, res[0:len(minmatch)])

	res = ToUTF8DropErrors([]byte{0x48, 0x6F, 0x6C, 0x61, 0x2C, 0x20, 0x61, 0x73, 0xED, 0x20, 0x63, 0xF3, 0x6D, 0x6F, 0x20, 0x81, 0xA4, 0x6F, 0x73})
	// Do not fail for differences in invalid cases, as the library might change the conversion criteria for those
	assert.Equal(t, minmatch, res[0:len(minmatch)])

	// Japanese (Shift-JIS)
	res = ToUTF8DropErrors([]byte{0x93, 0xFA, 0x91, 0xAE, 0x94, 0xE9, 0x82, 0xBC, 0x82, 0xB5, 0x82, 0xBF, 0x82, 0xE3, 0x81, 0x42})
	assert.Equal(t, []byte("日属秘ぞしちゅ。"), res)

	// NUL bytes are valid UTF-8 and must be preserved verbatim.
	res = ToUTF8DropErrors([]byte{0x00, 0x00, 0x00, 0x00})
	assert.Equal(t, []byte{0x00, 0x00, 0x00, 0x00}, res)
}
func TestDetectEncoding(t *testing.T) { | |||||
testSuccess := func(b []byte, expected string) { | |||||
encoding, err := DetectEncoding(b) | |||||
assert.NoError(t, err) | |||||
assert.Equal(t, expected, encoding) | |||||
} | |||||
// utf-8 | |||||
b := []byte("just some ascii") | |||||
testSuccess(b, "UTF-8") | |||||
// utf-8-sig: "hey" (with BOM) | |||||
b = []byte{0xef, 0xbb, 0xbf, 0x68, 0x65, 0x79} | |||||
testSuccess(b, "UTF-8") | |||||
// utf-16: "hey<accented G>" | |||||
b = []byte{0xff, 0xfe, 0x68, 0x00, 0x65, 0x00, 0x79, 0x00, 0xf4, 0x01} | |||||
testSuccess(b, "UTF-16LE") | |||||
// iso-8859-1: d<accented e>cor<newline> | |||||
b = []byte{0x44, 0xe9, 0x63, 0x6f, 0x72, 0x0a} | |||||
encoding, err := DetectEncoding(b) | |||||
assert.NoError(t, err) | |||||
// due to a race condition in `chardet` library, it could either detect | |||||
// "ISO-8859-1" or "IS0-8859-2" here. Technically either is correct, so | |||||
// we accept either. | |||||
assert.Contains(t, encoding, "ISO-8859") | |||||
setting.Repository.AnsiCharset = "placeholder" | |||||
testSuccess(b, "placeholder") | |||||
// invalid bytes | |||||
b = []byte{0xfa} | |||||
_, err = DetectEncoding(b) | |||||
assert.Error(t, err) | |||||
} |
repoIndexerAnalyzer = "repoIndexerAnalyzer" | repoIndexerAnalyzer = "repoIndexerAnalyzer" | ||||
repoIndexerDocType = "repoIndexerDocType" | repoIndexerDocType = "repoIndexerDocType" | ||||
repoIndexerLatestVersion = 2 | |||||
repoIndexerLatestVersion = 3 | |||||
) | ) | ||||
// repoIndexer (thread-safe) index for repository contents | // repoIndexer (thread-safe) index for repository contents |
"path" | "path" | ||||
"strings" | "strings" | ||||
"golang.org/x/net/html/charset" | |||||
"golang.org/x/text/transform" | |||||
"code.gitea.io/gitea/models" | "code.gitea.io/gitea/models" | ||||
"code.gitea.io/gitea/modules/base" | |||||
"code.gitea.io/gitea/modules/cache" | "code.gitea.io/gitea/modules/cache" | ||||
"code.gitea.io/gitea/modules/charset" | |||||
"code.gitea.io/gitea/modules/git" | "code.gitea.io/gitea/modules/git" | ||||
"code.gitea.io/gitea/modules/lfs" | "code.gitea.io/gitea/modules/lfs" | ||||
"code.gitea.io/gitea/modules/log" | "code.gitea.io/gitea/modules/log" | ||||
"code.gitea.io/gitea/modules/setting" | "code.gitea.io/gitea/modules/setting" | ||||
"code.gitea.io/gitea/modules/structs" | "code.gitea.io/gitea/modules/structs" | ||||
stdcharset "golang.org/x/net/html/charset" | |||||
"golang.org/x/text/transform" | |||||
) | ) | ||||
// IdentityOptions for a person's identity like an author or committer | // IdentityOptions for a person's identity like an author or committer | ||||
} | } | ||||
encoding, err := base.DetectEncoding(buf) | |||||
encoding, err := charset.DetectEncoding(buf) | |||||
if err != nil { | if err != nil { | ||||
// just default to utf-8 and no bom | // just default to utf-8 and no bom | ||||
return "UTF-8", false | return "UTF-8", false | ||||
} | } | ||||
if encoding == "UTF-8" { | if encoding == "UTF-8" { | ||||
return encoding, bytes.Equal(buf[0:3], base.UTF8BOM) | |||||
return encoding, bytes.Equal(buf[0:3], charset.UTF8BOM) | |||||
} | } | ||||
charsetEncoding, _ := charset.Lookup(encoding) | |||||
charsetEncoding, _ := stdcharset.Lookup(encoding) | |||||
if charsetEncoding == nil { | if charsetEncoding == nil { | ||||
return "UTF-8", false | return "UTF-8", false | ||||
} | } | ||||
} | } | ||||
if n > 2 { | if n > 2 { | ||||
return encoding, bytes.Equal([]byte(result)[0:3], base.UTF8BOM) | |||||
return encoding, bytes.Equal([]byte(result)[0:3], charset.UTF8BOM) | |||||
} | } | ||||
return encoding, false | return encoding, false | ||||
content := opts.Content | content := opts.Content | ||||
if bom { | if bom { | ||||
content = string(base.UTF8BOM) + content | |||||
content = string(charset.UTF8BOM) + content | |||||
} | } | ||||
if encoding != "UTF-8" { | if encoding != "UTF-8" { | ||||
charsetEncoding, _ := charset.Lookup(encoding) | |||||
charsetEncoding, _ := stdcharset.Lookup(encoding) | |||||
if charsetEncoding != nil { | if charsetEncoding != nil { | ||||
result, _, err := transform.String(charsetEncoding.NewEncoder(), content) | result, _, err := transform.String(charsetEncoding.NewEncoder(), content) | ||||
if err != nil { | if err != nil { |
"code.gitea.io/gitea/modules/markup" | "code.gitea.io/gitea/modules/markup" | ||||
"code.gitea.io/gitea/modules/setting" | "code.gitea.io/gitea/modules/setting" | ||||
"golang.org/x/net/html/charset" | |||||
"golang.org/x/text/transform" | |||||
"gopkg.in/editorconfig/editorconfig-core-go.v1" | "gopkg.in/editorconfig/editorconfig-core-go.v1" | ||||
) | ) | ||||
return base.EncodeSha1(str) | return base.EncodeSha1(str) | ||||
} | } | ||||
// ToUTF8WithErr converts content to UTF8 encoding | |||||
func ToUTF8WithErr(content []byte) (string, error) { | |||||
charsetLabel, err := base.DetectEncoding(content) | |||||
if err != nil { | |||||
return "", err | |||||
} else if charsetLabel == "UTF-8" { | |||||
return string(base.RemoveBOMIfPresent(content)), nil | |||||
} | |||||
encoding, _ := charset.Lookup(charsetLabel) | |||||
if encoding == nil { | |||||
return string(content), fmt.Errorf("Unknown encoding: %s", charsetLabel) | |||||
} | |||||
// If there is an error, we concatenate the nicely decoded part and the | |||||
// original left over. This way we won't lose data. | |||||
result, n, err := transform.Bytes(encoding.NewDecoder(), content) | |||||
if err != nil { | |||||
result = append(result, content[n:]...) | |||||
} | |||||
result = base.RemoveBOMIfPresent(result) | |||||
return string(result), err | |||||
} | |||||
// ToUTF8WithFallback detects the encoding of content and coverts to UTF-8 if possible | |||||
func ToUTF8WithFallback(content []byte) []byte { | |||||
charsetLabel, err := base.DetectEncoding(content) | |||||
if err != nil || charsetLabel == "UTF-8" { | |||||
return base.RemoveBOMIfPresent(content) | |||||
} | |||||
encoding, _ := charset.Lookup(charsetLabel) | |||||
if encoding == nil { | |||||
return content | |||||
} | |||||
// If there is an error, we concatenate the nicely decoded part and the | |||||
// original left over. This way we won't lose data. | |||||
result, n, err := transform.Bytes(encoding.NewDecoder(), content) | |||||
if err != nil { | |||||
return append(result, content[n:]...) | |||||
} | |||||
return base.RemoveBOMIfPresent(result) | |||||
} | |||||
// ToUTF8 converts content to UTF8 encoding and ignore error | |||||
func ToUTF8(content string) string { | |||||
res, _ := ToUTF8WithErr([]byte(content)) | |||||
return res | |||||
} | |||||
// ReplaceLeft replaces all prefixes 'oldS' in 's' with 'newS'. | // ReplaceLeft replaces all prefixes 'oldS' in 's' with 'newS'. | ||||
func ReplaceLeft(s, oldS, newS string) string { | func ReplaceLeft(s, oldS, newS string) string { | ||||
oldLen, newLen, i, n := len(oldS), len(newS), 0, 0 | oldLen, newLen, i, n := len(oldS), len(newS), 0, 0 |
"code.gitea.io/gitea/models" | "code.gitea.io/gitea/models" | ||||
"code.gitea.io/gitea/modules/base" | "code.gitea.io/gitea/modules/base" | ||||
"code.gitea.io/gitea/modules/charset" | |||||
"code.gitea.io/gitea/modules/context" | "code.gitea.io/gitea/modules/context" | ||||
"code.gitea.io/gitea/modules/git" | "code.gitea.io/gitea/modules/git" | ||||
"code.gitea.io/gitea/modules/log" | "code.gitea.io/gitea/modules/log" | ||||
"code.gitea.io/gitea/modules/setting" | "code.gitea.io/gitea/modules/setting" | ||||
"code.gitea.io/gitea/modules/templates" | |||||
) | ) | ||||
const ( | const ( | ||||
note := &git.Note{} | note := &git.Note{} | ||||
err = git.GetNote(ctx.Repo.GitRepo, commitID, note) | err = git.GetNote(ctx.Repo.GitRepo, commitID, note) | ||||
if err == nil { | if err == nil { | ||||
ctx.Data["Note"] = string(templates.ToUTF8WithFallback(note.Message)) | |||||
ctx.Data["Note"] = string(charset.ToUTF8WithFallback(note.Message)) | |||||
ctx.Data["NoteCommit"] = note.Commit | ctx.Data["NoteCommit"] = note.Commit | ||||
ctx.Data["NoteAuthor"] = models.ValidateCommitWithEmail(note.Commit) | ctx.Data["NoteAuthor"] = models.ValidateCommitWithEmail(note.Commit) | ||||
} | } |
"code.gitea.io/gitea/models" | "code.gitea.io/gitea/models" | ||||
"code.gitea.io/gitea/modules/auth" | "code.gitea.io/gitea/modules/auth" | ||||
"code.gitea.io/gitea/modules/base" | "code.gitea.io/gitea/modules/base" | ||||
"code.gitea.io/gitea/modules/charset" | |||||
"code.gitea.io/gitea/modules/context" | "code.gitea.io/gitea/modules/context" | ||||
"code.gitea.io/gitea/modules/git" | "code.gitea.io/gitea/modules/git" | ||||
"code.gitea.io/gitea/modules/log" | "code.gitea.io/gitea/modules/log" | ||||
"code.gitea.io/gitea/modules/repofiles" | "code.gitea.io/gitea/modules/repofiles" | ||||
"code.gitea.io/gitea/modules/setting" | "code.gitea.io/gitea/modules/setting" | ||||
"code.gitea.io/gitea/modules/templates" | |||||
"code.gitea.io/gitea/modules/upload" | "code.gitea.io/gitea/modules/upload" | ||||
"code.gitea.io/gitea/modules/util" | "code.gitea.io/gitea/modules/util" | ||||
) | ) | ||||
d, _ := ioutil.ReadAll(dataRc) | d, _ := ioutil.ReadAll(dataRc) | ||||
buf = append(buf, d...) | buf = append(buf, d...) | ||||
if content, err := templates.ToUTF8WithErr(buf); err != nil { | |||||
if content, err := charset.ToUTF8WithErr(buf); err != nil { | |||||
log.Error("ToUTF8WithErr: %v", err) | log.Error("ToUTF8WithErr: %v", err) | ||||
ctx.Data["FileContent"] = string(buf) | ctx.Data["FileContent"] = string(buf) | ||||
} else { | } else { |
"code.gitea.io/gitea/models" | "code.gitea.io/gitea/models" | ||||
"code.gitea.io/gitea/modules/base" | "code.gitea.io/gitea/modules/base" | ||||
"code.gitea.io/gitea/modules/charset" | |||||
"code.gitea.io/gitea/modules/context" | "code.gitea.io/gitea/modules/context" | ||||
"code.gitea.io/gitea/modules/git" | "code.gitea.io/gitea/modules/git" | ||||
"code.gitea.io/gitea/modules/highlight" | "code.gitea.io/gitea/modules/highlight" | ||||
"code.gitea.io/gitea/modules/log" | "code.gitea.io/gitea/modules/log" | ||||
"code.gitea.io/gitea/modules/markup" | "code.gitea.io/gitea/modules/markup" | ||||
"code.gitea.io/gitea/modules/setting" | "code.gitea.io/gitea/modules/setting" | ||||
"code.gitea.io/gitea/modules/templates" | |||||
) | ) | ||||
const ( | const ( | ||||
ctx.Data["FileSize"] = fileSize | ctx.Data["FileSize"] = fileSize | ||||
} else { | } else { | ||||
d, _ := ioutil.ReadAll(dataRc) | d, _ := ioutil.ReadAll(dataRc) | ||||
buf = templates.ToUTF8WithFallback(append(buf, d...)) | |||||
buf = charset.ToUTF8WithFallback(append(buf, d...)) | |||||
if markup.Type(readmeFile.Name()) != "" { | if markup.Type(readmeFile.Name()) != "" { | ||||
ctx.Data["IsMarkup"] = true | ctx.Data["IsMarkup"] = true | ||||
} | } | ||||
d, _ := ioutil.ReadAll(dataRc) | d, _ := ioutil.ReadAll(dataRc) | ||||
buf = templates.ToUTF8WithFallback(append(buf, d...)) | |||||
buf = charset.ToUTF8WithFallback(append(buf, d...)) | |||||
readmeExist := markup.IsReadmeFile(blob.Name()) | readmeExist := markup.IsReadmeFile(blob.Name()) | ||||
ctx.Data["ReadmeExist"] = readmeExist | ctx.Data["ReadmeExist"] = readmeExist | ||||
} else { | } else { | ||||
// Building code view blocks with line number on server side. | // Building code view blocks with line number on server side. | ||||
var fileContent string | var fileContent string | ||||
if content, err := templates.ToUTF8WithErr(buf); err != nil { | |||||
if content, err := charset.ToUTF8WithErr(buf); err != nil { | |||||
log.Error("ToUTF8WithErr: %v", err) | log.Error("ToUTF8WithErr: %v", err) | ||||
fileContent = string(buf) | fileContent = string(buf) | ||||
} else { | } else { |