Sync with 1ee5b75627bbfb886b158cc668c96b74fddf8aa4

AnimeBytes 2023-06-09 03:35:19 +02:00
parent 519ba4669b
commit 1a96929be9
22 changed files with 217 additions and 206 deletions

View file

@ -32,6 +32,6 @@ linters:
issues:
exclude-rules:
- path: _test\.go
- path: util/unsafe_rand(_test)?\.go
linters:
- gosec

View file

@ -38,7 +38,7 @@ var (
)
func init() {
flag.BoolVar(&decode, "d", false, "Decodes data instead of encoding")
flag.BoolVar(&decode, "D", false, "Decodes data instead of encoding")
flag.BoolVar(&help, "h", false, "Prints this help message")
}

View file

@ -20,17 +20,22 @@ package main
import (
"flag"
"fmt"
"net"
"net/http"
_ "net/http/pprof" //nolint:gosec
"os"
"os/signal"
"runtime"
"runtime/pprof"
"syscall"
"chihaya/log"
"chihaya/server"
)
var profile, help bool
var (
pprof string
help bool
)
// Provided at compile-time
var (
@ -39,13 +44,13 @@ var (
)
func init() {
flag.BoolVar(&profile, "P", false, "Generate profiling data for pprof into chihaya.cpu")
flag.StringVar(&pprof, "P", "", "Starts special pprof debug server on specified addr")
flag.BoolVar(&help, "h", false, "Shows this help dialog")
}
func main() {
fmt.Printf("chihaya (kuroneko), ver=%s date=%s runtime=%s\n\n",
BuildVersion, BuildDate, runtime.Version())
fmt.Printf("chihaya (kuroneko), ver=%s date=%s runtime=%s, cpus=%d\n\n",
BuildVersion, BuildDate, runtime.Version(), runtime.GOMAXPROCS(0))
flag.Parse()
@ -56,17 +61,27 @@ func main() {
return
}
if profile {
log.Info.Printf("Running with profiling enabled, using %d CPUs", runtime.GOMAXPROCS(0))
if len(pprof) > 0 {
// Both are disabled by default; sample 1% of events
runtime.SetMutexProfileFraction(100)
runtime.SetBlockProfileRate(100)
f, err := os.Create("chihaya.cpu")
if err != nil {
log.Fatal.Fatalf("Failed to create profile file: %s\n", err)
} else {
if err = pprof.StartCPUProfile(f); err != nil {
log.Fatal.Fatalf("Can not start profiling session: %s\n", err)
go func() {
l, err := net.Listen("tcp", pprof)
if err != nil {
log.Error.Printf("Failed to start special pprof debug server: %v", err)
return
}
}
//nolint:gosec
s := &http.Server{
Handler: http.DefaultServeMux,
}
log.Warning.Printf("Started special pprof debug server on %s", l.Addr())
_ = s.Serve(l)
}()
}
go func() {
@ -74,15 +89,13 @@ func main() {
signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
<-c
if profile {
pprof.StopCPUProfile()
}
log.Info.Print("Caught interrupt, shutting down...")
server.Stop()
<-c
os.Exit(0)
}()
log.Info.Print("Starting main server loop...")
server.Start()
}
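(Not part of the diff: a minimal standalone sketch of the pattern the new -P flag uses. The blank import of net/http/pprof registers its handlers on http.DefaultServeMux, so serving that mux on a dedicated listener exposes the /debug/pprof/ endpoints; the address and logging below are placeholders, not the project's code.)

package main

import (
	"log"
	"net"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/* on http.DefaultServeMux
)

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:6060")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}

	s := &http.Server{Handler: http.DefaultServeMux}
	log.Printf("pprof debug server on %s", l.Addr())
	log.Fatal(s.Serve(l))
}

Profiles can then be pulled on demand from the running process with standard tooling, e.g. go tool pprof http://<addr>/debug/pprof/profile, instead of writing a one-shot chihaya.cpu file as before.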

View file

@ -19,24 +19,20 @@ package config
import (
"encoding/json"
"math/rand"
"os"
"path/filepath"
"reflect"
"testing"
"time"
"chihaya/util"
)
var configTest Map
func TestMain(m *testing.M) {
rand.Seed(time.Now().UnixNano())
tempPath, err := os.MkdirTemp(os.TempDir(), "chihaya_config-*")
if err != nil {
panic(err)
}
tempPath := filepath.Join(os.TempDir(), "chihaya_config-"+util.RandStringBytes(6))
if err := os.Mkdir(tempPath, 0755); err != nil {
if err := os.Chmod(tempPath, 0755); err != nil {
panic(err)
}

View file

@ -23,6 +23,7 @@ import (
"fmt"
"os"
"sync"
"sync/atomic"
"time"
"chihaya/collectors"
@ -60,10 +61,10 @@ type Database struct {
unPruneTorrentStmt *sql.Stmt
Users map[string]*cdb.User
HitAndRuns map[cdb.UserTorrentPair]struct{}
HitAndRuns atomic.Pointer[map[cdb.UserTorrentPair]struct{}]
Torrents map[string]*cdb.Torrent // SHA-1 hash (20 bytes)
TorrentGroupFreeleech map[cdb.TorrentGroup]*cdb.TorrentGroupFreeleech
Clients map[uint16]string
TorrentGroupFreeleech atomic.Pointer[map[cdb.TorrentGroup]*cdb.TorrentGroupFreeleech]
Clients atomic.Pointer[map[uint16]string]
mainConn *Connection // Used for reloading and misc queries
@ -101,12 +102,7 @@ func (db *Database) Init() {
db.ClientsSemaphore = util.NewSemaphore()
// Used for recording updates, so the max required size should be < 128 bytes. See record.go for details
maxBuffers := torrentFlushBufferSize +
userFlushBufferSize +
transferHistoryFlushBufferSize +
transferIpsFlushBufferSize +
snatchFlushBufferSize
db.bufferPool = util.NewBufferPool(maxBuffers, 128)
db.bufferPool = util.NewBufferPool(128)
var err error
@ -161,9 +157,13 @@ func (db *Database) Init() {
}
db.Users = make(map[string]*cdb.User)
db.HitAndRuns = make(map[cdb.UserTorrentPair]struct{})
db.Torrents = make(map[string]*cdb.Torrent)
db.Clients = make(map[uint16]string)
dbHitAndRuns := make(map[cdb.UserTorrentPair]struct{})
db.HitAndRuns.Store(&dbHitAndRuns)
dbClients := make(map[uint16]string)
db.Clients.Store(&dbClients)
db.deserialize()
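(Not part of the diff: a sketch of the copy-on-reload pattern the HitAndRuns, TorrentGroupFreeleech, and Clients fields now follow with atomic.Pointer. Readers Load() the current map and treat it as read-only; the reloader builds a fresh map and Store()s it in one atomic swap. The type and values below are simplified placeholders, not the Database struct itself.)

package main

import (
	"fmt"
	"sync/atomic"
)

// registry stands in for the Database struct; only the client list is shown.
type registry struct {
	clients atomic.Pointer[map[uint16]string]
}

// lookup is the reader side: Load() the current snapshot and never mutate it.
func (r *registry) lookup(id uint16) (string, bool) {
	c, ok := (*r.clients.Load())[id]
	return c, ok
}

// reload is the writer side: build a fresh map, then swap it in atomically.
func (r *registry) reload(fresh map[uint16]string) {
	r.clients.Store(&fresh)
}

func main() {
	r := &registry{}
	empty := map[uint16]string{}
	r.clients.Store(&empty) // Init() above seeds empty maps so Load() is never nil

	r.reload(map[uint16]string{1: "-TR2", 3: "-DE13"})
	fmt.Println(r.lookup(1)) // -TR2 true
}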

View file

@ -105,24 +105,27 @@ func TestLoadUsers(t *testing.T) {
func TestLoadHitAndRuns(t *testing.T) {
prepareTestDatabase()
db.HitAndRuns = make(map[cdb.UserTorrentPair]struct{})
dbHitAndRuns := make(map[cdb.UserTorrentPair]struct{})
db.HitAndRuns.Store(&dbHitAndRuns)
db.loadHitAndRuns()
dbHitAndRuns = *db.HitAndRuns.Load()
hnr := cdb.UserTorrentPair{
UserID: 2,
TorrentID: 2,
}
_, hnrExists := db.HitAndRuns[hnr]
_, hnrExists := dbHitAndRuns[hnr]
if len(db.HitAndRuns) != 1 {
if len(dbHitAndRuns) != 1 {
t.Fatal(fixtureFailure("Did not load all hit and runs as expected from fixture file",
1,
len(db.HitAndRuns)))
len(dbHitAndRuns)))
}
if !hnrExists {
t.Fatal(fixtureFailure("Did not load hit and run as expected from fixture file", db.HitAndRuns, hnr))
t.Fatal(fixtureFailure("Did not load hit and run as expected from fixture file", dbHitAndRuns, hnr))
}
}
@ -204,7 +207,8 @@ func TestLoadTorrents(t *testing.T) {
func TestLoadGroupsFreeleech(t *testing.T) {
prepareTestDatabase()
db.TorrentGroupFreeleech = make(map[cdb.TorrentGroup]*cdb.TorrentGroupFreeleech)
dbMap := make(map[cdb.TorrentGroup]*cdb.TorrentGroupFreeleech)
db.TorrentGroupFreeleech.Store(&dbMap)
torrentGroupFreeleech := map[cdb.TorrentGroup]*cdb.TorrentGroupFreeleech{
{
@ -219,52 +223,58 @@ func TestLoadGroupsFreeleech(t *testing.T) {
// Test with fresh data
db.loadGroupsFreeleech()
if len(db.TorrentGroupFreeleech) != len(torrentGroupFreeleech) {
dbMap = *db.TorrentGroupFreeleech.Load()
if len(dbMap) != len(torrentGroupFreeleech) {
t.Fatal(fixtureFailure("Did not load all torrent group freeleech data as expected from fixture file",
len(torrentGroupFreeleech),
len(db.TorrentGroupFreeleech)))
len(dbMap)))
}
for group, freeleech := range torrentGroupFreeleech {
if !reflect.DeepEqual(freeleech, db.TorrentGroupFreeleech[group]) {
if !reflect.DeepEqual(freeleech, dbMap[group]) {
t.Fatal(fixtureFailure(
fmt.Sprintf("Did not load torrent group freeleech data (%v) as expected from fixture file", group),
freeleech,
db.TorrentGroupFreeleech[group]))
dbMap[group]))
}
}
// Now test load on top of existing data
oldTorrentGroupFreeleech := db.TorrentGroupFreeleech
oldTorrentGroupFreeleech := *db.TorrentGroupFreeleech.Load()
db.loadGroupsFreeleech()
if !reflect.DeepEqual(oldTorrentGroupFreeleech, db.TorrentGroupFreeleech) {
dbMap = *db.TorrentGroupFreeleech.Load()
if !reflect.DeepEqual(oldTorrentGroupFreeleech, dbMap) {
t.Fatal(fixtureFailure(
"Did not reload torrent group freeleech data as expected from fixture file",
oldTorrentGroupFreeleech,
db.TorrentGroupFreeleech))
dbMap))
}
}
func TestLoadConfig(t *testing.T) {
prepareTestDatabase()
GlobalFreeleech = false
GlobalFreeleech.Store(false)
db.loadConfig()
if GlobalFreeleech {
if GlobalFreeleech.Load() {
t.Fatal(fixtureFailure("Did not load config as expected from fixture file",
false,
GlobalFreeleech))
true))
}
}
func TestLoadClients(t *testing.T) {
prepareTestDatabase()
db.Clients = make(map[uint16]string)
dbClients := make(map[uint16]string)
db.Clients.Store(&dbClients)
expectedClients := map[uint16]string{
1: "-TR2",
3: "-DE13",
@ -272,12 +282,14 @@ func TestLoadClients(t *testing.T) {
db.loadClients()
if len(db.Clients) != 2 {
t.Fatal(fixtureFailure("Did not load all clients as expected from fixture file", 2, db.Clients))
dbClients = *db.Clients.Load()
if len(dbClients) != 2 {
t.Fatal(fixtureFailure("Did not load all clients as expected from fixture file", 2, dbClients))
}
if !reflect.DeepEqual(expectedClients, db.Clients) {
t.Fatal(fixtureFailure("Did not load clients as expected from fixture file", expectedClients, db.Clients))
if !reflect.DeepEqual(expectedClients, dbClients) {
t.Fatal(fixtureFailure("Did not load clients as expected from fixture file", expectedClients, dbClients))
}
}

View file

@ -18,6 +18,7 @@
package database
import (
"sync/atomic"
"time"
"chihaya/collectors"
@ -30,7 +31,7 @@ import (
var (
reloadInterval int
// GlobalFreeleech indicates whether site is now in freeleech mode (takes precedence over torrent-specific multipliers)
GlobalFreeleech = false
GlobalFreeleech atomic.Bool
)
func init() {
@ -159,11 +160,11 @@ func (db *Database) loadHitAndRuns() {
newHnr[hnr] = struct{}{}
}
db.HitAndRuns = newHnr
db.HitAndRuns.Store(&newHnr)
elapsedTime := time.Since(start)
collectors.UpdateReloadTime("hit_and_runs", elapsedTime)
log.Info.Printf("Hit and run load complete (%d rows, %s)", len(db.HitAndRuns), elapsedTime.String())
log.Info.Printf("Hit and run load complete (%d rows, %s)", len(newHnr), elapsedTime.String())
}
func (db *Database) loadTorrents() {
@ -279,7 +280,7 @@ func (db *Database) loadGroupsFreeleech() {
}
}
db.TorrentGroupFreeleech = newTorrentGroupFreeleech
db.TorrentGroupFreeleech.Store(&newTorrentGroupFreeleech)
elapsedTime := time.Since(start)
collectors.UpdateReloadTime("groups_freeleech", elapsedTime)
@ -310,7 +311,7 @@ func (db *Database) loadConfig() {
log.WriteStack()
}
GlobalFreeleech = globalFreelech
GlobalFreeleech.Store(globalFreelech)
}
}
@ -350,9 +351,9 @@ func (db *Database) loadClients() {
newClients[id] = peerID
}
db.Clients = newClients
db.Clients.Store(&newClients)
elapsedTime := time.Since(start)
collectors.UpdateReloadTime("clients", elapsedTime)
log.Info.Printf("Client list load complete (%d rows, %s)", len(db.Clients), elapsedTime.String())
log.Info.Printf("Client list load complete (%d rows, %s)", len(newClients), elapsedTime.String())
}
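(Not part of the diff: the GlobalFreeleech flag follows the same idea with atomic.Bool, sketched below; the reload goroutine flips it while announce handlers read it without holding a lock.)

package main

import (
	"fmt"
	"sync/atomic"
)

var GlobalFreeleech atomic.Bool

func main() {
	GlobalFreeleech.Store(true) // reloader side (loadConfig)
	if GlobalFreeleech.Load() { // reader side (announce path)
		fmt.Println("freeleech enabled")
	}
}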

View file

@ -67,6 +67,7 @@ func TestSerializer(t *testing.T) {
Seeders: map[string]*cdb.Peer{"12-peer_is_twenty_chars": testPeer},
}
// Prepare empty map to populate with test data
db.Torrents = make(map[string]*cdb.Torrent)
db.Users = make(map[string]*cdb.User)
@ -79,13 +80,20 @@ func TestSerializer(t *testing.T) {
}
db.serialize()
// Reset map to fully test deserialization
db.Torrents = make(map[string]*cdb.Torrent)
db.Users = make(map[string]*cdb.User)
db.deserialize()
if !reflect.DeepEqual(db.Torrents, testTorrents) {
t.Fatalf("Torrents after serialization and deserialization do not match original torrents!")
t.Fatalf("Torrents (%v) after serialization and deserialization do not match original torrents (%v)!",
db.Torrents, testTorrents)
}
if !reflect.DeepEqual(db.Users, testUsers) {
t.Fatalf("Users after serialization and deserialization do not match original users!")
t.Fatalf("Users (%v) after serialization and deserialization do not match original users (%v)!",
db.Users, testUsers)
}
}

View file

@ -25,6 +25,7 @@ import (
var flags = log.Ldate | log.Ltime | log.LUTC | log.Lmsgprefix
//goland:noinspection GoUnusedGlobalVariable
var (
Verbose = log.New(os.Stdout, "[V] ", flags)
Info = log.New(os.Stdout, "[I] ", flags)

View file

@ -19,9 +19,7 @@ package record
import (
"bufio"
"math/rand"
"os"
"path/filepath"
"strconv"
"testing"
"time"
@ -40,11 +38,12 @@ type record struct {
}
func TestMain(m *testing.M) {
rand.Seed(time.Now().UnixNano())
tempPath, err := os.MkdirTemp(os.TempDir(), "chihaya_record-*")
if err != nil {
panic(err)
}
tempPath := filepath.Join(os.TempDir(), "chihaya_record-"+util.RandStringBytes(6))
if err := os.Mkdir(tempPath, 0755); err != nil {
if err := os.Chmod(tempPath, 0755); err != nil {
panic(err)
}
@ -68,14 +67,14 @@ func TestRecord(t *testing.T) {
for i := 0; i < 10; i++ {
tmp := record{
true,
uint16(rand.Uint32()),
rand.Uint32(),
rand.Uint32(),
rand.Uint64(),
rand.Uint64(),
rand.Uint64(),
int64(rand.Uint64()),
int64(rand.Uint64()),
uint16(util.UnsafeUint32()),
util.UnsafeUint32(),
util.UnsafeUint32(),
util.UnsafeUint64(),
util.UnsafeUint64(),
util.UnsafeUint64(),
int64(util.UnsafeUint64()),
int64(util.UnsafeUint64()),
"completed",
"127.0.0.1",
}

View file

@ -329,13 +329,13 @@ func announce(ctx context.Context, qs string, header http.Header, remoteAddr str
torrentGroupDownMultiplier := 1.0
torrentGroupUpMultiplier := 1.0
if torrentGroupFreeleech, exists := db.TorrentGroupFreeleech[torrent.Group]; exists {
if torrentGroupFreeleech, exists := (*db.TorrentGroupFreeleech.Load())[torrent.Group]; exists {
torrentGroupDownMultiplier = torrentGroupFreeleech.DownMultiplier
torrentGroupUpMultiplier = torrentGroupFreeleech.UpMultiplier
}
var deltaDownload int64
if !database.GlobalFreeleech {
if !database.GlobalFreeleech.Load() {
deltaDownload = int64(float64(rawDeltaDownload) * math.Abs(user.DownMultiplier) *
math.Abs(torrentGroupDownMultiplier) * math.Abs(torrent.DownMultiplier))
}
@ -430,7 +430,7 @@ func announce(ctx context.Context, qs string, header http.Header, remoteAddr str
/* We ask clients to announce each interval seconds. In order to spread the load on tracker,
we will vary the interval given to client by random number of seconds between 0 and value
specified in config */
announceDrift := util.Rand(0, maxAccounceDrift)
announceDrift := util.UnsafeRand(0, maxAccounceDrift)
response["interval"] = announceInterval + announceDrift
if numWant > 0 && active {
@ -516,12 +516,8 @@ func announce(ctx context.Context, qs string, header http.Header, remoteAddr str
}
}
bufdata, err := bencode.EncodeBytes(response)
if err != nil {
panic(err)
}
if _, err = buf.Write(bufdata); err != nil {
encoder := bencode.NewEncoder(buf)
if err = encoder.Encode(response); err != nil {
panic(err)
}
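(Not part of the diff: the EncodeBytes-to-NewEncoder switch here, and in the scrape and failure hunks below, streams bencoded output straight into the pooled buffer instead of allocating an intermediate slice. A sketch of the two shapes follows, using encoding/json as a stand-in because the bencode package's import path is not shown in this diff.)

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	response := map[string]any{"interval": 1800}
	buf := new(bytes.Buffer)

	// Old shape: marshal into an intermediate slice, then copy it into the buffer.
	data, err := json.Marshal(response)
	if err != nil {
		panic(err)
	}
	buf.Write(data)

	// New shape: stream straight into the buffer, skipping the extra allocation.
	buf.Reset()
	if err := json.NewEncoder(buf).Encode(response); err != nil {
		panic(err)
	}

	fmt.Println(buf.String())
}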

View file

@ -54,8 +54,8 @@ func metrics(ctx context.Context, auth string, db *database.Database, buf *bytes
collectors.UpdateUptime(time.Since(handler.startTime).Seconds())
collectors.UpdateUsers(len(db.Users))
collectors.UpdateTorrents(len(db.Torrents))
collectors.UpdateClients(len(db.Clients))
collectors.UpdateHitAndRuns(len(db.HitAndRuns))
collectors.UpdateClients(len(*db.Clients.Load()))
collectors.UpdateHitAndRuns(len(*db.HitAndRuns.Load()))
collectors.UpdatePeers(peers)
collectors.UpdateRequests(handler.requests.Load())
collectors.UpdateThroughput(handler.throughput)

View file

@ -19,12 +19,13 @@ package params
import (
"fmt"
"math/rand"
"net/url"
"os"
"reflect"
"strconv"
"testing"
"chihaya/util"
)
var infoHashes []string
@ -34,7 +35,7 @@ func TestMain(m *testing.M) {
for i := 0; i < 10; i++ {
token = make([]byte, 20)
rand.Read(token)
_, _ = util.UnsafeReadRand(token[:])
infoHashes = append(infoHashes, string(token))
}

View file

@ -82,12 +82,8 @@ func scrape(ctx context.Context, qs string, user *cdb.User, db *database.Databas
scrapeData["interval"] = scrapeInterval
scrapeData["min interval"] = scrapeInterval
bufdata, err := bencode.EncodeBytes(scrapeData)
if err != nil {
panic(err)
}
if _, err = buf.Write(bufdata); err != nil {
encoder := bencode.NewEncoder(buf)
if err = encoder.Encode(scrapeData); err != nil {
panic(err)
}

View file

@ -230,7 +230,7 @@ func Start() {
/* Initialize reusable buffer pool; this is faster than allocating new memory for every request.
If necessary, new memory will be allocated when pool is empty, however. */
bufferPool := util.NewBufferPool(500, 512)
bufferPool := util.NewBufferPool(512)
handler.bufferPool = bufferPool
addr, _ := config.Section("http").Get("addr", ":34000")
@ -318,7 +318,10 @@ func Start() {
}
func Stop() {
// Closing the listener stops accepting connections and causes Serve to return
_ = listener.Close()
if listener != nil {
// Closing the listener stops accepting connections and causes Serve to return
_ = listener.Close()
}
handler.terminate = true
}

View file

@ -34,14 +34,10 @@ func failure(err string, buf *bytes.Buffer, interval time.Duration) {
data["interval"] = interval / time.Second // Assuming in seconds
data["min interval"] = interval / time.Second // Assuming in seconds
encoded, errz := bencode.EncodeBytes(data)
if errz != nil {
panic(errz)
}
buf.Reset()
if _, errz = buf.Write(encoded); errz != nil {
encoder := bencode.NewEncoder(buf)
if errz := encoder.Encode(buf); errz != nil {
panic(errz)
}
}
@ -55,7 +51,7 @@ func clientApproved(peerID string, db *database.Database) (uint16, bool) {
matched bool
)
for id, clientID := range db.Clients {
for id, clientID := range *db.Clients.Load() {
widLen = len(clientID)
if widLen <= len(peerID) {
matched = true
@ -96,7 +92,7 @@ func hasHitAndRun(db *database.Database, userID, torrentID uint32) bool {
TorrentID: torrentID,
}
_, exists := db.HitAndRuns[hnr]
_, exists := (*db.HitAndRuns.Load())[hnr]
return exists
}

View file

@ -19,35 +19,30 @@ package util
import (
"bytes"
"sync"
)
type BufferPool struct {
pool chan *bytes.Buffer
bufSize int
pool sync.Pool
}
func NewBufferPool(size int, bufSize int) *BufferPool {
return &BufferPool{
make(chan *bytes.Buffer, size),
bufSize,
func NewBufferPool(bufSize int) *BufferPool {
p := &BufferPool{}
p.pool.New = func() any {
internalBuf := make([]byte, 0, bufSize)
return bytes.NewBuffer(internalBuf)
}
return p
}
func (pool *BufferPool) Take() (buf *bytes.Buffer) {
select {
case buf = <-pool.pool:
buf.Reset()
default:
internalBuf := make([]byte, 0, pool.bufSize)
buf = bytes.NewBuffer(internalBuf)
}
buf = pool.pool.Get().(*bytes.Buffer)
buf.Reset()
return
}
func (pool *BufferPool) Give(buf *bytes.Buffer) {
select {
case pool.pool <- buf:
default:
}
pool.pool.Put(buf)
}
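(Not part of the diff: a short usage sketch of the sync.Pool-backed BufferPool, assuming it is compiled inside the chihaya module; the capacity and payload are illustrative.)

package main

import (
	"fmt"

	"chihaya/util"
)

func main() {
	pool := util.NewBufferPool(512) // fresh buffers start with 512 bytes of capacity

	buf := pool.Take() // always returns a reset *bytes.Buffer
	buf.WriteString("d8:intervali1800ee")
	fmt.Println(buf.Len())

	pool.Give(buf) // return it so later Take() calls can reuse the allocation
}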

View file

@ -23,17 +23,17 @@ import (
)
func TestBufferPool(t *testing.T) {
bufferpool := NewBufferPool(1, 1)
bufferPool := NewBufferPool(64)
poolBuf := bufferpool.Take()
poolBuf := bufferPool.Take()
if !bytes.Equal(poolBuf.Bytes(), []byte("")) {
t.Fatalf("Buffer from empty bufferpool was allocated incorrectly.")
}
origBuf := bytes.NewBuffer([]byte("X"))
bufferpool.Give(origBuf)
bufferPool.Give(origBuf)
reusedBuf := bufferpool.Take()
reusedBuf := bufferPool.Take()
if !bytes.Equal(reusedBuf.Bytes(), []byte("")) {
t.Fatalf("Buffer from filled bufferpool was recycled incorrectly.")
}

util/unsafe_rand.go (new file, 32 lines)
View file

@ -0,0 +1,32 @@
package util
import (
unsafeRandom "math/rand"
"time"
)
var randomSource = unsafeRandom.New(unsafeRandom.NewSource(time.Now().Unix()))
func UnsafeInt() int {
return randomSource.Int()
}
func UnsafeIntn(n int) int {
return randomSource.Intn(n)
}
func UnsafeUint32() uint32 {
return randomSource.Uint32()
}
func UnsafeUint64() uint64 {
return randomSource.Uint64()
}
func UnsafeRand(min int, max int) int {
return randomSource.Intn(max-min+1) + min
}
func UnsafeReadRand(p []byte) (n int, err error) {
return randomSource.Read(p)
}
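(Not part of the diff: a short usage sketch of these math/rand-backed helpers, assuming it is compiled inside the chihaya module. They trade cryptographic quality for speed, which is why the .golangci.yml hunk above excludes them from gosec.)

package main

import (
	"fmt"

	"chihaya/util"
)

func main() {
	// Integer drawn uniformly from the inclusive range [0, 60], as the announce drift is.
	drift := util.UnsafeRand(0, 60)
	fmt.Println(drift)

	// Fill a 20-byte slice, e.g. a fake info_hash in the params tests.
	token := make([]byte, 20)
	_, _ = util.UnsafeReadRand(token)
	fmt.Printf("%x\n", token)
}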

util/unsafe_rand_test.go (new file, 28 lines)
View file

@ -0,0 +1,28 @@
package util
import (
"math/rand"
"testing"
)
func TestUnsafeRand(t *testing.T) {
for i := 0; i < 10; i++ {
min := rand.Intn(1000)
max := rand.Intn(1000) + min
randomInt := UnsafeRand(min, max)
if randomInt < min || randomInt > max {
t.Fatalf("Integer %d is outside specified range (%d - %d)", randomInt, min, max)
}
}
}
func TestUnsafeIntn(t *testing.T) {
for i := 1; i < 2000; i++ {
genInt := UnsafeIntn(i)
if genInt < 0 || genInt >= i {
t.Fatalf("Generated random integer (%d) does not fall in the range [0, %d)!", genInt, i)
}
}
}

View file

@ -17,13 +17,6 @@
package util
import (
"crypto/rand"
"encoding/binary"
)
const alphanumBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
func Min(a int, b int) int {
if a < b {
return a
@ -47,28 +40,3 @@ func Btoa(a bool) string {
return "0"
}
func Intn(n int) int {
b := make([]byte, 8)
if _, err := rand.Read(b); err != nil {
panic(err)
}
i := binary.BigEndian.Uint32(b)
return int(i) % n
}
func RandStringBytes(n int) string {
b := make([]byte, n)
for i := range b {
b[i] = alphanumBytes[Intn(len(alphanumBytes))]
}
return string(b)
}
func Rand(min int, max int) int {
return Intn(max-min+1) + min
}

View file

@ -18,14 +18,13 @@
package util
import (
"math/rand"
"testing"
)
func TestMin(t *testing.T) {
for i := 0; i < 100; i++ {
a := rand.Int()
b := rand.Int()
a := UnsafeInt()
b := UnsafeInt()
gotMin := Min(a, b)
var actualMin int
@ -43,8 +42,8 @@ func TestMin(t *testing.T) {
func TestMax(t *testing.T) {
for i := 0; i < 100; i++ {
a := rand.Int()
b := rand.Int()
a := UnsafeInt()
b := UnsafeInt()
gotMax := Max(a, b)
var actualMax int
@ -66,7 +65,7 @@ func TestBtoa(t *testing.T) {
var actualResult string
if rand.Intn(2) == 1 {
if UnsafeIntn(2) == 1 {
b = true
actualResult = "1"
} else {
@ -80,36 +79,3 @@ func TestBtoa(t *testing.T) {
}
}
}
func TestIntn(t *testing.T) {
for i := 1; i < 2000; i++ {
genInt := Intn(i)
if genInt < 0 || genInt >= i {
t.Fatalf("Generated random integer (%d) does not fall in the range [0, %d)!", genInt, i)
}
}
}
func TestRandStringBytes(t *testing.T) {
for i := 0; i < 10; i++ {
n := rand.Intn(100000)
randomString := RandStringBytes(n)
if len(randomString) != n {
t.Fatalf("String (length %d) not of required length (%d)!", len(randomString), n)
}
}
}
func TestRand(t *testing.T) {
for i := 0; i < 10; i++ {
min := rand.Intn(1000)
max := rand.Intn(1000) + min
randomInt := Rand(min, max)
if randomInt < min || randomInt > max {
t.Fatalf("Integer %d is outside specified range (%d - %d)", randomInt, min, max)
}
}
}