Added DiscHandler, TOC generator from file list, sorter, and muxed hasher

DataHoarder 2022-02-16 20:01:51 +01:00
parent 9011364103
commit 5e58b4dbfe
6 changed files with 583 additions and 13 deletions

3
go.mod
View file

@@ -3,7 +3,8 @@ module git.gammaspectra.live/S.O.N.G/METANOIA
go 1.18
require (
git.gammaspectra.live/S.O.N.G/Hibiki v0.0.0-20220214135918-99bafdde7a4a
facette.io/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
git.gammaspectra.live/S.O.N.G/Hibiki v0.0.0-20220216151616-63d8894466c0
git.gammaspectra.live/S.O.N.G/MakyuuIchaival v0.0.0-20220131114831-c08c7d9b4153
github.com/dgraph-io/badger/v3 v3.2103.2
github.com/dhowden/tag v0.0.0-20201120070457-d52dcb253c63

8
go.sum
View file

@@ -6,11 +6,11 @@ dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
facette.io/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:1pSweJFeR3Pqx7uoelppkzeegfUBXL6I2FFAbfXw570=
facette.io/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:npRYmtaITVom7rcSo+pRURltHSG2r4TQM1cdqJ2dUB0=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
git.gammaspectra.live/S.O.N.G/Hibiki v0.0.0-20220206160547-e7023361fa2e h1:n1Fw+AcqqYhEiHXKBO1hhsggz/ND9d/VYKGvPx9KsL0=
git.gammaspectra.live/S.O.N.G/Hibiki v0.0.0-20220206160547-e7023361fa2e/go.mod h1:/NY+4FrfPnEXNCmF16085cSGWZ89YS+Glpg4cTJhamg=
git.gammaspectra.live/S.O.N.G/Hibiki v0.0.0-20220214135918-99bafdde7a4a h1:3LnPQmaEHjMTotiJaeg0t7L0JA/BR+ENlr5gTgwDK6c=
git.gammaspectra.live/S.O.N.G/Hibiki v0.0.0-20220214135918-99bafdde7a4a/go.mod h1:/NY+4FrfPnEXNCmF16085cSGWZ89YS+Glpg4cTJhamg=
git.gammaspectra.live/S.O.N.G/Hibiki v0.0.0-20220216151616-63d8894466c0 h1:MdhCDoFatXYEyweos0PnvWaOOPhw0xs6Y448lqBRa5s=
git.gammaspectra.live/S.O.N.G/Hibiki v0.0.0-20220216151616-63d8894466c0/go.mod h1:/NY+4FrfPnEXNCmF16085cSGWZ89YS+Glpg4cTJhamg=
git.gammaspectra.live/S.O.N.G/MakyuuIchaival v0.0.0-20220131114831-c08c7d9b4153 h1:RMDA05IEOytScNSiE2ms98x/CVMHSlA+eVBC0VCq4po=
git.gammaspectra.live/S.O.N.G/MakyuuIchaival v0.0.0-20220131114831-c08c7d9b4153/go.mod h1:z6KcP5RPhMxDJaVU48sBhiYRCJ6ZJBbx1iIhkUrrhfY=
git.gammaspectra.live/S.O.N.G/go-pus v0.0.0-20220130003320-c9b07c6bec7a h1:LxrTp9gf4w5KnFHRPFLXYfoxC58GCSEmZrHI6Ogtrm0=

407
metadata/dischandler.go Normal file
View file

@@ -0,0 +1,407 @@
package metadata
import (
"encoding/binary"
"facette.io/natsort"
"fmt"
"git.gammaspectra.live/S.O.N.G/Hibiki/panako"
"git.gammaspectra.live/S.O.N.G/Hibiki/utilities/audio"
"git.gammaspectra.live/S.O.N.G/Hibiki/utilities/audio/format/flac"
"git.gammaspectra.live/S.O.N.G/Hibiki/utilities/audio/format/mp3"
"git.gammaspectra.live/S.O.N.G/Hibiki/utilities/audio/format/opus"
"git.gammaspectra.live/S.O.N.G/Hibiki/utilities/specializedstore"
"git.gammaspectra.live/S.O.N.G/METANOIA/utilities"
"github.com/dhowden/tag"
"golang.org/x/text/unicode/norm"
"io/ioutil"
"log"
"os"
"path"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
"unicode"
)
type fileEntryList []fileEntry
var flacFormat = flac.NewFormat()
var mp3Format = mp3.NewFormat()
var opusFormat = opus.NewFormat()
const separatorTrimSet = ",.-_()[]{}"
func isSeparator(b byte) bool {
if b == ' ' {
return true
}
for i := 0; i < len(separatorTrimSet); i++ {
if separatorTrimSet[i] == b {
return true
}
}
return false
}
type analyzeEntry struct {
fileEntry
fileHandle *os.File
audioMetadata struct {
sampleRate int
channels int
samples int
}
fileMetadata tag.Metadata
panakoFingerprints []*panako.Fingerprint
hasherCrc32 *Hasher
hasherCueToolsCrc32 *Hasher
hasherAccurateRipV1 *Hasher
hasherAccurateRipV2 *Hasher
}
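//analyze decodes every track in the list, fingerprints it with Panako, and fans each
//decoded stream out to per-track hashers (CRC32, CueTools CRC32, AccurateRip v1/v2)
//plus two disc-wide channels that are merged into the full-disc CRC32 and CTDB CRC32.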
func (l fileEntryList) analyze(directory string, panakoInstance *panako.Instance) (entries []*analyzeEntry, fullCRC32 uint32, fullCTDBCRC32 uint32) {
var waitGroups []*sync.WaitGroup
printStrategy := panakoInstance.GetStrategy(specializedstore.NewMemoryStore(), audio.RESAMPLER_QUALITY_LINEAR)
var joinedCTDBChannels []HasherChannel
var joinedChannels []HasherChannel
var preLastTotalSamplesWaitGroup sync.WaitGroup
preLastTotalSamples := uint32(0)
for trackIndex, e := range l {
f, err := os.Open(path.Join(directory, e.Name))
if err != nil {
//TODO
log.Print(err)
continue
}
var stream *audio.Stream
var analyzer HasherChannel
meta, err := tag.ReadFrom(f)
if err != nil {
log.Print(err)
err = nil
}
f.Seek(0, 0)
switch utilities.GetMimeTypeFromExtension(path.Ext(e.Name)) {
case "audio/flac":
stream, analyzer, err = flacFormat.OpenAnalyzer(f, panakoInstance.BlockSize)
case "audio/mpeg;codecs=mp3":
stream, err = mp3Format.Open(f, panakoInstance.BlockSize)
case "audio/ogg":
fallthrough
case "audio/opus":
stream, err = opusFormat.Open(f, panakoInstance.BlockSize)
}
if err != nil { //cannot decode
//TODO
log.Print(err)
f.Close()
continue
}
if stream == nil { //no known decoder
//TODO
log.Print(fmt.Errorf("no known decoder for %s", f.Name()))
f.Close()
continue
}
entry := &analyzeEntry{
fileEntry: e,
fileHandle: f,
fileMetadata: meta,
}
var panakoWaitGroup sync.WaitGroup
panakoWaitGroup.Add(1)
preLastTotalSamplesWaitGroup.Add(1)
go func(add bool) {
defer panakoWaitGroup.Done()
defer preLastTotalSamplesWaitGroup.Done()
entry.panakoFingerprints = printStrategy.StreamToFingerprints(stream)
entry.audioMetadata.sampleRate = int(stream.GetSampleRate())
entry.audioMetadata.channels = stream.GetChannels()
entry.audioMetadata.samples = stream.GetSamplesProcessed()
if add {
atomic.AddUint32(&preLastTotalSamples, uint32(entry.audioMetadata.samples/entry.audioMetadata.channels))
}
}(trackIndex < len(l)-1)
//TODO: handle extra appended/prepended silence
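//The edge tracks get dedicated hashers: on the first track CTDB skips the first 10 sectors
//and AccurateRip skips 5 sectors minus one sample (with the digest offset so positions stay absolute);
//on the last track the trailing 10 sectors (CTDB) and 5 sectors (AccurateRip) are dropped;
//middle tracks are hashed in full.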
if analyzer != nil {
if trackIndex == 0 {
channels := analyzer.Split(4)
joinedChannels = append(joinedChannels, channels[0])
ctChannels := channels[1].SkipStartSamples(Int16SamplesPerSector * 10).Split(2)
joinedCTDBChannels = append(joinedCTDBChannels, ctChannels[0])
entry.hasherCueToolsCrc32 = NewHasher(ctChannels[1], HashtypeCrc32)
arChannels := channels[2].SkipStartSamples(Int16SamplesPerSector*5 - 1).Split(2)
entry.hasherAccurateRipV1 = NewHasher(arChannels[0], HashtypeAccurateRipV1Start)
entry.hasherAccurateRipV2 = NewHasher(arChannels[1], HashtypeAccurateRipV2Start)
entry.hasherCrc32 = NewHasher(channels[3], HashtypeCrc32)
} else if trackIndex == len(l)-1 {
channels := analyzer.Split(4)
joinedChannels = append(joinedChannels, channels[0])
ctChannels := channels[1].SkipEndSamplesMultiple(&preLastTotalSamplesWaitGroup, &preLastTotalSamples, Int16SamplesPerSector*10).Split(2)
joinedCTDBChannels = append(joinedCTDBChannels, ctChannels[0])
entry.hasherCueToolsCrc32 = NewHasher(ctChannels[1], HashtypeCrc32)
arChannels := channels[2].SkipEndSamples(Int16SamplesPerSector * 5).Split(2)
entry.hasherAccurateRipV1 = NewHasher(arChannels[0], HashtypeAccurateRipV1)
entry.hasherAccurateRipV2 = NewHasher(arChannels[1], HashtypeAccurateRipV2)
entry.hasherCrc32 = NewHasher(channels[3], HashtypeCrc32)
} else {
channels := analyzer.Split(5)
joinedChannels = append(joinedChannels, channels[0])
joinedCTDBChannels = append(joinedCTDBChannels, channels[1])
entry.hasherCrc32 = NewHasher(channels[2], HashtypeCrc32)
entry.hasherAccurateRipV1 = NewHasher(channels[3], HashtypeAccurateRipV1)
entry.hasherAccurateRipV2 = NewHasher(channels[4], HashtypeAccurateRipV2)
}
waitGroups = append(waitGroups, entry.hasherCrc32.GetWaitGroup(), entry.hasherAccurateRipV1.GetWaitGroup(), entry.hasherAccurateRipV2.GetWaitGroup())
if entry.hasherCueToolsCrc32 != nil {
waitGroups = append(waitGroups, entry.hasherCueToolsCrc32.GetWaitGroup())
}
}
waitGroups = append(waitGroups, &panakoWaitGroup)
entries = append(entries, entry)
}
fullHasher := NewHasher(MergeHasherChannels(joinedChannels...), HashtypeCrc32)
fullCTDBHasher := NewHasher(MergeHasherChannels(joinedCTDBChannels...), HashtypeCrc32)
fullHasher.Wait()
fullCTDBHasher.Wait()
fullCRC32 = binary.BigEndian.Uint32(fullHasher.GetResult())
fullCTDBCRC32 = binary.BigEndian.Uint32(fullCTDBHasher.GetResult())
//Wait for all tasks
for _, wg := range waitGroups {
wg.Wait()
}
return
}
type fileEntry struct {
Name string
NormalizedSortName string
NormalizedName string
}
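//processAudioFiles NFC-normalizes each file name, maps full-width digits to ASCII,
//extracts the leading track number to build a sort key, and returns the entries
//sorted in natural order.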
func processAudioFiles(files []string) (result fileEntryList) {
result = make(fileEntryList, 0, len(files))
for _, f := range files {
normalized := norm.NFC.String(f)
normalized = strings.ReplaceAll(normalized, "０", "0")
normalized = strings.ReplaceAll(normalized, "１", "1")
normalized = strings.ReplaceAll(normalized, "２", "2")
normalized = strings.ReplaceAll(normalized, "３", "3")
normalized = strings.ReplaceAll(normalized, "４", "4")
normalized = strings.ReplaceAll(normalized, "５", "5")
normalized = strings.ReplaceAll(normalized, "６", "6")
normalized = strings.ReplaceAll(normalized, "７", "7")
normalized = strings.ReplaceAll(normalized, "８", "8")
normalized = strings.ReplaceAll(normalized, "９", "9")
ext := strings.LastIndex(normalized, ".")
for k := 0; k < ext; k++ {
index := strings.IndexFunc(normalized[k:], unicode.IsNumber)
if index == -1 {
//wtf, no numbers?
result = append(result, fileEntry{
Name: f,
NormalizedSortName: strings.TrimSpace(strings.TrimLeft(strings.TrimSpace(normalized[:ext]), separatorTrimSet)),
NormalizedName: strings.TrimSpace(strings.TrimLeft(strings.TrimSpace(normalized[:ext]), separatorTrimSet)),
})
break
}
index += k
if index == 0 || isSeparator(normalized[index-1]) { //If it's the start of the string or preceded by a separator
normalized = normalized[index:ext]
firstNotNumber := strings.IndexFunc(normalized, func(r rune) bool {
return !unicode.IsNumber(r)
})
r := fileEntry{
Name: f,
NormalizedSortName: strings.TrimSpace(strings.TrimLeft(strings.TrimSpace(normalized), separatorTrimSet)),
NormalizedName: strings.TrimSpace(strings.TrimLeft(strings.TrimSpace(normalized[firstNotNumber:]), separatorTrimSet)),
}
result = append(result, r)
break
}
k = index
}
}
//Sort files naturally
sort.SliceStable(result, func(i, j int) bool {
return natsort.Compare(result[i].NormalizedSortName, result[j].NormalizedSortName)
})
return
}
type DiscHandlerResult struct {
TOC TOC
CRC32 uint32
CueToolsCRC32 uint32
Directory string
Tracks []DiscHandlerTrack
}
type DiscHandlerTrack struct {
FileName string
TrackName string
SortName string
Fingerprints struct {
Panako []*panako.Fingerprint
CRC32 uint32
CueToolsCRC32 uint32
AccurateRipV1 uint32
AccurateRipV2 uint32
}
FileMetadata struct {
DiscNumber int
Album string
AlbumArtist string
Artist string
Composer string
Year int
TrackNumber int
Title string
EmbeddedPicture []byte
}
AudioMetadata struct {
SampleRate int
Channels int
NumberOfFullSamples int
Duration time.Duration
}
}
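//HandleDiscEntry scans a directory, classifies its files by MIME type, analyzes the audio
//tracks, and returns per-track fingerprints, tag metadata and a generated TOC together with
//the disc-wide checksums. It returns nil if the directory cannot be read or holds no audio files.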
func HandleDiscEntry(panakoInstance *panako.Instance, pathEntry string) *DiscHandlerResult {
log.Printf("Handling %q", pathEntry)
entries, err := ioutil.ReadDir(pathEntry)
if err != nil {
return nil
}
var audioFiles []string
var imageFiles []string
var metadataFiles []string
var folders []string
for _, entry := range entries {
if !entry.IsDir() {
ext := path.Ext(entry.Name())
mime := utilities.GetMimeTypeFromExtension(ext)
isAudio := mime[0:6] == "audio/"
isAudioMetadata := mime == "text/x-log" || mime == "text/x-accurip" || mime == "text/x-cue" || mime == "text/x-toc"
isImage := mime[0:6] == "image/"
if isAudio {
audioFiles = append(audioFiles, entry.Name())
} else if isImage {
imageFiles = append(imageFiles, entry.Name())
} else if isAudioMetadata {
metadataFiles = append(metadataFiles, entry.Name())
}
} else {
folders = append(folders, entry.Name())
}
}
if len(audioFiles) == 0 {
return nil
}
sortedAudioEntries := processAudioFiles(audioFiles)
disc := &DiscHandlerResult{
TOC: TOC{TocPregap},
}
result, fullCRC32, fullCTDBCRC32 := sortedAudioEntries.analyze(pathEntry, panakoInstance)
defer func() {
for _, entry := range result {
entry.fileHandle.Close()
}
}()
disc.CRC32 = fullCRC32
disc.CueToolsCRC32 = fullCTDBCRC32
for _, entry := range result {
track := DiscHandlerTrack{
FileName: entry.Name,
TrackName: entry.NormalizedName,
SortName: entry.NormalizedSortName,
}
track.AudioMetadata.SampleRate = entry.audioMetadata.sampleRate
track.AudioMetadata.Channels = entry.audioMetadata.channels
track.AudioMetadata.NumberOfFullSamples = entry.audioMetadata.samples / entry.audioMetadata.channels
track.AudioMetadata.Duration = time.Duration(float64(time.Second) * float64(track.AudioMetadata.NumberOfFullSamples) / float64(track.AudioMetadata.SampleRate))
track.Fingerprints.Panako = entry.panakoFingerprints
disc.TOC = append(disc.TOC, disc.TOC[len(disc.TOC)-1]+track.AudioMetadata.NumberOfFullSamples/Int16SamplesPerSector)
if entry.hasherCrc32 != nil {
track.Fingerprints.CRC32 = binary.BigEndian.Uint32(entry.hasherCrc32.GetResult())
track.Fingerprints.CueToolsCRC32 = track.Fingerprints.CRC32
}
if entry.hasherCueToolsCrc32 != nil {
track.Fingerprints.CueToolsCRC32 = binary.BigEndian.Uint32(entry.hasherCueToolsCrc32.GetResult())
}
if entry.hasherAccurateRipV1 != nil {
track.Fingerprints.AccurateRipV1 = binary.BigEndian.Uint32(entry.hasherAccurateRipV1.GetResult())
}
if entry.hasherAccurateRipV2 != nil {
track.Fingerprints.AccurateRipV2 = binary.BigEndian.Uint32(entry.hasherAccurateRipV2.GetResult())
}
track.FileMetadata.DiscNumber, _ = entry.fileMetadata.Disc()
track.FileMetadata.TrackNumber, _ = entry.fileMetadata.Track()
track.FileMetadata.Year = entry.fileMetadata.Year()
track.FileMetadata.AlbumArtist = entry.fileMetadata.AlbumArtist()
track.FileMetadata.Artist = entry.fileMetadata.Artist()
track.FileMetadata.Composer = entry.fileMetadata.Composer()
track.FileMetadata.Title = entry.fileMetadata.Title()
if entry.fileMetadata.Picture() != nil {
track.FileMetadata.EmbeddedPicture = entry.fileMetadata.Picture().Data
}
disc.Tracks = append(disc.Tracks, track)
}
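//Move the lead-out offset (the last TOC entry) to the front, ahead of the per-track start sectors.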
disc.TOC = append(TOC{disc.TOC[len(disc.TOC)-1]}, disc.TOC[0:len(disc.TOC)-1]...)
return disc
}
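For orientation, a minimal usage sketch of the new handler (not part of this commit): it assumes a configured *panako.Instance is already available, and only touches fields introduced above.

package example

import (
	"fmt"
	"time"

	"git.gammaspectra.live/S.O.N.G/Hibiki/panako"
	"git.gammaspectra.live/S.O.N.G/METANOIA/metadata"
)

//PrintDisc runs the disc handler on a directory and prints the resulting checksums.
func PrintDisc(instance *panako.Instance, dir string) {
	disc := metadata.HandleDiscEntry(instance, dir)
	if disc == nil {
		fmt.Printf("no usable audio files in %q\n", dir)
		return
	}
	fmt.Printf("disc CRC32 %08X, CTDB CRC32 %08X, %d tracks\n", disc.CRC32, disc.CueToolsCRC32, len(disc.Tracks))
	for i, t := range disc.Tracks {
		fmt.Printf("%02d. %s (%s) ARv2 %08X\n", i+1, t.TrackName, t.AudioMetadata.Duration.Round(time.Second), t.Fingerprints.AccurateRipV2)
	}
}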

View file

@@ -42,9 +42,9 @@ func (d *accurateRipDigestV1) Write(p []byte) (n int, err error) {
}
type accurateRipDigestV2 struct {
crc uint32
pos uint32
offset uint32
crc uint32
multiplier uint32
offset uint32
}
func NewAccurateRipV2(offset uint32) hash.Hash32 {
@@ -55,7 +55,7 @@ func (d *accurateRipDigestV2) Size() int { return 4 }
func (d *accurateRipDigestV2) BlockSize() int { return 1 }
func (d *accurateRipDigestV2) Reset() { d.crc = 0; d.pos = d.offset + 1 }
func (d *accurateRipDigestV2) Reset() { d.crc = 0; d.multiplier = d.offset + 1 }
func (d *accurateRipDigestV2) Sum32() uint32 { return d.crc }
@@ -69,13 +69,13 @@ func (d *accurateRipDigestV2) Write(p []byte) (n int, err error) {
words := unsafe.Slice((*uint32)(unsafe.Pointer(&p[0])), numWords)
for _, w := range words {
crcNew := uint64(w) * uint64(d.pos)
crcNew := uint64(w) * uint64(d.multiplier)
LO := crcNew & 0xFFFFFFFF
HI := crcNew / 0x100000000
//this can wrap
d.crc += uint32(HI)
d.crc += uint32(LO)
d.pos++
d.multiplier++
}
return len(p), nil
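For reference, a self-contained sketch of the word checksum that the renamed multiplier field implements; the helper name and sample data are illustrative, only the arithmetic mirrors the digest above.

package main

import "fmt"

//accurateRipV2 mirrors accurateRipDigestV2.Write: each 32-bit word of interleaved
//16-bit PCM is multiplied by a running position, and both halves of the 64-bit
//product are summed into the checksum (the additions are allowed to wrap).
func accurateRipV2(words []uint32, offset uint32) uint32 {
	var crc uint32
	multiplier := offset + 1 //Reset() sets multiplier = offset + 1
	for _, w := range words {
		product := uint64(w) * uint64(multiplier)
		crc += uint32(product >> 32) //HI
		crc += uint32(product)       //LO
		multiplier++
	}
	return crc
}

func main() {
	words := []uint32{0x00010002, 0x00030004} //two stereo samples, illustrative values
	fmt.Printf("%08X\n", accurateRipV2(words, 0))
}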

View file

@@ -7,6 +7,7 @@ import (
"hash"
"hash/crc32"
"sync"
"sync/atomic"
"time"
)
@@ -123,6 +124,58 @@ func (c HasherChannel) SkipEndSamples(samples int) (channel HasherChannel) {
return
}
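//SkipEndSamplesMultiple buffers the tail of the channel, waits on wg until offset holds the
//preceding tracks' sample count, then drops between samples and 2*samples trailing samples so
//that the total forwarded length (offset included) ends on a multiple of samples.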
func (c HasherChannel) SkipEndSamplesMultiple(wg *sync.WaitGroup, offset *uint32, samples int) (channel HasherChannel) {
channel = make(HasherChannel, chanBuf)
go func() {
defer close(channel)
var buffer []*format.AnalyzerPacket
bufferSamples := 0
maxSamples := samples * 2
samplesRead := 0
for packet := range c {
for len(buffer) > 0 && (bufferSamples-len(buffer[0].Samples)/buffer[0].Channels) > maxSamples {
channel <- buffer[0]
samplesRead += len(buffer[0].Samples) / buffer[0].Channels
bufferSamples -= len(buffer[0].Samples) / buffer[0].Channels
buffer = buffer[1:]
}
bufferSamples += len(packet.Samples) / packet.Channels
buffer = append(buffer, packet)
}
wg.Wait()
totalSampleOffset := samplesRead + int(atomic.LoadUint32(offset))
if len(buffer) > 0 {
p := &format.AnalyzerPacket{
Channels: buffer[0].Channels,
SampleRate: buffer[0].SampleRate,
BitDepth: buffer[0].BitDepth,
}
for _, packet := range buffer {
p.Samples = append(p.Samples, packet.Samples...)
}
nsamples := samples + (((len(p.Samples) / p.Channels) + totalSampleOffset) % samples)
if len(p.Samples)/p.Channels > nsamples {
endIndex := len(p.Samples) - nsamples*p.Channels
channel <- &format.AnalyzerPacket{
Samples: p.Samples[:endIndex],
Channels: p.Channels,
SampleRate: p.SampleRate,
BitDepth: p.BitDepth,
}
}
}
}()
return
}
func NewHasherAudioGap(samples, sampleRate, channels, bitDepth int) (channel HasherChannel) {
channel = make(HasherChannel, 1)
channel <- &format.AnalyzerPacket{
@@ -158,14 +211,16 @@ const (
HashtypeCrc32 = HashType(iota)
HashtypeSha256
HashtypeAccurateRipV1
HashtypeAccurateRipV1Start
HashtypeAccurateRipV2
HashtypeAccurateRipV2Start
)
type Hasher struct {
hash HashType
hasher hash.Hash
result []byte
channel chan *format.AnalyzerPacket
channel HasherChannel
wg sync.WaitGroup
samples int
duration float64
@@ -175,7 +230,7 @@ type Hasher struct {
buffer [][]int32
}
func NewHasher(channel chan *format.AnalyzerPacket, hashType HashType) (h *Hasher) {
func NewHasher(channel HasherChannel, hashType HashType) (h *Hasher) {
h = &Hasher{
hash: hashType,
channel: channel,
@@ -188,8 +243,12 @@ func NewHasher(channel chan *format.AnalyzerPacket, hashType HashType) (h *Hashe
h.hasher = sha256.New()
case HashtypeAccurateRipV1:
h.hasher = NewAccurateRipV1(0)
case HashtypeAccurateRipV1Start:
h.hasher = NewAccurateRipV1(Int16SamplesPerSector*5 - 1)
case HashtypeAccurateRipV2:
h.hasher = NewAccurateRipV2(0)
case HashtypeAccurateRipV2Start:
h.hasher = NewAccurateRipV2(Int16SamplesPerSector*5 - 1)
}
@@ -294,6 +353,10 @@ func (h *Hasher) GetDuration() time.Duration {
return time.Duration(float64(time.Second) * h.duration)
}
func (h *Hasher) GetWaitGroup() *sync.WaitGroup {
return &h.wg
}
func (h *Hasher) Wait() {
h.wg.Wait()
}
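A minimal sketch of driving the reworked Hasher directly (not from this commit): it assumes HasherChannel is a buffered channel of *format.AnalyzerPacket carrying interleaved int32 samples, that the format package lives at the path guessed below, and that GetResult is valid once Wait returns.

package main

import (
	"encoding/binary"
	"fmt"

	"git.gammaspectra.live/S.O.N.G/Hibiki/utilities/audio/format"
	"git.gammaspectra.live/S.O.N.G/METANOIA/metadata"
)

//crc32OfSamples feeds one packet of 16-bit stereo samples through a HasherChannel
//and reads back the big-endian CRC32, mirroring how dischandler.go consumes results.
func crc32OfSamples(samples []int32) uint32 {
	ch := make(metadata.HasherChannel, 1)
	h := metadata.NewHasher(ch, metadata.HashtypeCrc32)
	ch <- &format.AnalyzerPacket{Samples: samples, Channels: 2, SampleRate: 44100, BitDepth: 16}
	close(ch)
	h.Wait()
	return binary.BigEndian.Uint32(h.GetResult())
}

func main() {
	samples := make([]int32, 44100*2) //one second of silent stereo audio
	fmt.Printf("%08X\n", crc32OfSamples(samples))
}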

View file

@@ -1,3 +1,102 @@
package utilities
import "strings"
const Version = "1.0"
func GetMimeTypeFromExtension(ext string) string {
if len(ext) > 0 {
switch strings.ToLower(ext[1:]) {
//Audio types
case "flac":
return "audio/flac"
case "mp3":
return "audio/mpeg;codecs=mp3"
case "m4a":
return "audio/mp4"
case "mka":
return "audio/x-matroska"
case "ogg":
return "audio/ogg"
case "opus":
return "audio/opus"
case "tta":
return "audio/tta"
case "aac":
return "audio/aac"
case "alac":
return "audio/alac"
case "wav":
return "audio/wav"
case "ape":
return "audio/ape"
//Image types
case "png":
return "image/png"
case "jfif":
fallthrough
case "jpeg":
fallthrough
case "jpg":
return "image/jpeg"
case "gif":
return "image/gif"
case "svg":
return "image/svg+xml"
case "tiff":
fallthrough
case "tif":
return "image/tiff"
case "webp":
return "image/webp"
case "bmp":
return "image/bmp"
//Text types
case "txt":
return "text/plain"
case "log":
return "text/x-log"
case "accurip":
return "text/x-accurip"
case "cue":
return "text/x-cue"
case "toc":
return "text/x-toc"
//Text subtitles
case "lrc":
return "text/x-subtitle-lrc"
case "ssa":
return "text/x-subtitle-ssa"
case "ass":
return "text/x-subtitle-ass"
case "srt":
return "text/x-subtitle-subrip"
//Web types
case "js":
return "text/javascript"
case "wasm":
return "application/wasm"
case "html":
return "text/html"
case "css":
return "text/css"
case "ttf":
return "font/ttf"
case "otf":
return "font/otf"
case "woff":
return "font/woff"
case "woff2":
return "font/woff2"
}
}
return "application/octet-stream"
}
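A short usage sketch (not part of the commit) mirroring how HandleDiscEntry classifies directory entries, using strings.HasPrefix instead of slicing the returned string.

package main

import (
	"fmt"
	"path"
	"strings"

	"git.gammaspectra.live/S.O.N.G/METANOIA/utilities"
)

//classify buckets a file name by the MIME type derived from its extension.
func classify(name string) string {
	mime := utilities.GetMimeTypeFromExtension(path.Ext(name))
	switch {
	case strings.HasPrefix(mime, "audio/"):
		return "audio"
	case strings.HasPrefix(mime, "image/"):
		return "image"
	default:
		return "other"
	}
}

func main() {
	for _, name := range []string{"01 - Intro.flac", "cover.jpg", "rip.log"} {
		fmt.Println(name, "->", classify(name))
	}
}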