Sync with 41d9ef49b4c1ddcf227b5d240ecf6ec8d3bcc7c1

This commit is contained in:
AnimeBytes 2023-06-15 13:26:25 +02:00
parent 8d901abcb8
commit 046b30b7a9
5 changed files with 32 additions and 39 deletions

View file

@@ -32,6 +32,7 @@ type AdminCollector struct {
deadlockAbortedMetric *prometheus.Desc
erroredRequestsMetric *prometheus.Desc
timeoutRequestsMetric *prometheus.Desc
cancelRequestsMetric *prometheus.Desc
sqlErrorCountMetric *prometheus.Desc
serializationTimeSummary *prometheus.Histogram
@@ -81,6 +82,7 @@ var (
deadlockAborted = 0
erroredRequests = 0
timeoutRequests = 0
cancelRequests = 0
sqlErrorCount = 0
)
@@ -131,6 +133,8 @@ func NewAdminCollector() *AdminCollector {
"Number of failed requests", nil, nil),
timeoutRequestsMetric: prometheus.NewDesc("chihaya_requests_timeout",
"Number of requests for which context deadline was exceeded", nil, nil),
cancelRequestsMetric: prometheus.NewDesc("chihaya_requests_cancel",
"Number of requests for which context was prematurely cancelled", nil, nil),
sqlErrorCountMetric: prometheus.NewDesc("chihaya_sql_errors_count",
"Number of SQL errors", nil, nil),
@@ -152,6 +156,7 @@ func (collector *AdminCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- collector.deadlockTimeMetric
ch <- collector.erroredRequestsMetric
ch <- collector.timeoutRequestsMetric
ch <- collector.cancelRequestsMetric
ch <- collector.sqlErrorCountMetric
serializationTime.Describe(ch)
@@ -171,6 +176,7 @@ func (collector *AdminCollector) Collect(ch chan<- prometheus.Metric) {
ch <- prometheus.MustNewConstMetric(collector.deadlockTimeMetric, prometheus.CounterValue, deadlockTime.Seconds())
ch <- prometheus.MustNewConstMetric(collector.erroredRequestsMetric, prometheus.CounterValue, float64(erroredRequests))
ch <- prometheus.MustNewConstMetric(collector.timeoutRequestsMetric, prometheus.CounterValue, float64(timeoutRequests))
ch <- prometheus.MustNewConstMetric(collector.cancelRequestsMetric, prometheus.CounterValue, float64(cancelRequests))
ch <- prometheus.MustNewConstMetric(collector.sqlErrorCountMetric, prometheus.CounterValue, float64(sqlErrorCount))
serializationTime.Collect(ch)
@@ -204,6 +210,10 @@ func IncrementTimeoutRequests() {
timeoutRequests++
}
// IncrementCancelRequests increments the package-level cancelRequests
// counter, which is published as the chihaya_requests_cancel metric by
// AdminCollector.Collect.
//
// NOTE(review): like the sibling counters here, the increment is not
// atomic — presumably callers serialize access; verify before relying
// on it under concurrent announce handling.
func IncrementCancelRequests() {
cancelRequests++
}
// IncrementSQLErrorCount increments the package-level sqlErrorCount
// counter, which is published as the chihaya_sql_errors_count metric by
// AdminCollector.Collect.
//
// NOTE(review): the increment is a plain (non-atomic) read-modify-write,
// matching the other counters in this file — confirm callers do not race.
func IncrementSQLErrorCount() {
sqlErrorCount++
}

View file

@@ -1,5 +0,0 @@
- ID: 1
Time: 1592659152
- ID: 2
Time: 1609032772

View file

@@ -1,2 +0,0 @@
- ID: 1
Time: 1592659152

View file

@@ -1,21 +1,21 @@
create table approved_clients
(
id mediumint unsigned auto_increment primary key,
peer_id varchar(42) null,
archived tinyint(1) default 0 null
peer_id varchar(42) not null,
archived tinyint(1) default 0 not null
);
create table mod_core
(
mod_option varchar(121) not null primary key,
mod_setting int(12) default 0 not null
mod_setting int(12) not null
);
create table torrent_group_freeleech
(
ID int(10) auto_increment primary key,
GroupID int(10) default 0 not null,
Type enum ('anime', 'music') default 'anime' not null,
GroupID int(10) not null,
Type enum ('anime', 'music') not null,
DownMultiplier float default 1 not null,
UpMultiplier float default 1 not null,
constraint GroupID unique (GroupID, Type)
@@ -25,7 +25,7 @@ create table torrents
(
ID int(10) auto_increment primary key,
GroupID int(10) not null,
TorrentType enum ('anime', 'music') default 'anime' not null,
TorrentType enum ('anime', 'music') not null,
info_hash blob not null,
Leechers int(6) default 0 not null,
Seeders int(6) default 0 not null,
@@ -34,25 +34,13 @@ create table torrents
DownMultiplier float default 1 not null,
UpMultiplier float default 1 not null,
Status int default 0 not null,
constraint InfoHash unique (info_hash)
constraint InfoHash unique (info_hash (20))
);
create table torrents_group
(
ID int unsigned auto_increment primary key,
Time int(10) default 0 not null
) charset = utf8mb4;
create table torrents_group2
(
ID int unsigned auto_increment primary key,
Time int(10) default 0 not null
) charset = utf8mb4;
create table transfer_history
(
uid int default 0 not null,
fid int default 0 not null,
uid int not null,
fid int not null,
uploaded bigint default 0 not null,
downloaded bigint default 0 not null,
seeding tinyint default 0 not null,
@@ -64,7 +52,7 @@ create table transfer_history
starttime int default 0 not null,
last_announce int default 0 not null,
snatched int default 0 not null,
snatched_time int default 0 null,
snatched_time int default 0 not null,
primary key (uid, fid)
);
@@ -72,13 +60,13 @@ create table transfer_ips
(
last_announce int unsigned default 0 not null,
starttime int unsigned default 0 not null,
uid int unsigned default 0 not null,
fid int unsigned default 0 not null,
ip int unsigned default 0 not null,
client_id mediumint unsigned default 0 not null,
uid int unsigned not null,
fid int unsigned not null,
ip int unsigned not null,
client_id mediumint unsigned not null,
uploaded bigint unsigned default 0 not null,
downloaded bigint unsigned default 0 not null,
port smallint unsigned zerofill null,
port smallint unsigned zerofill default 0 not null,
primary key (uid, fid, ip, client_id)
);
@@ -89,8 +77,8 @@ create table users_main
Downloaded bigint unsigned default 0 not null,
Enabled enum ('0', '1', '2') default '0' not null,
torrent_pass char(32) not null,
rawup bigint unsigned not null,
rawdl bigint unsigned not null,
rawup bigint unsigned default 0 not null,
rawdl bigint unsigned default 0 not null,
DownMultiplier float default 1 not null,
UpMultiplier float default 1 not null,
DisableDownload tinyint(1) default 0 not null,

View file

@@ -159,17 +159,19 @@ func (handler *httpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
select {
case <-ctx.Done():
collectors.IncrementTimeoutRequests()
switch err := ctx.Err(); err {
case context.DeadlineExceeded:
collectors.IncrementTimeoutRequests()
failure("Request context deadline exceeded", buf, 5*time.Minute)
w.Header().Add("Content-Length", strconv.Itoa(buf.Len()))
w.Header().Add("Content-Type", "text/plain")
w.WriteHeader(http.StatusOK) // Required by torrent clients to interpret failure response
_, _ = w.Write(buf.Bytes())
default:
case context.Canceled:
collectors.IncrementCancelRequests()
w.WriteHeader(http.StatusRequestTimeout)
}
default: