Add TickLoop struct.

Add a number of new test cases and refactor others.
Replace NotifyOne() with ActivityMatches.Add().
Shorten arr_ to a_ in tmpl_list.go.
Reduce boilerplate.
Azareal 2021-05-02 18:47:19 +10:00
parent 78fbbcda21
commit fc5b29e648
8 changed files with 1045 additions and 623 deletions
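For context, the heart of the change is the new TickLoop type: fixed tickers paired with per-interval callbacks, drained by a single select loop. The sketch below is a self-contained reduction of that pattern (two intervals only, plain log.Print instead of the project's LogError), not code taken from the repository:

package main

import (
	"log"
	"time"
)

// TickLoop pairs fixed-interval tickers with pluggable callbacks,
// mirroring the shape of the new common.TickLoop, trimmed to two intervals.
type TickLoop struct {
	HalfSec *time.Ticker
	Sec     *time.Ticker

	HalfSecf func() error
	Secf     func() error
}

func NewTickLoop() *TickLoop {
	return &TickLoop{
		HalfSec: time.NewTicker(time.Second / 2),
		Sec:     time.NewTicker(time.Second),
	}
}

// Loop drains every ticker in one select and logs, rather than returns,
// callback errors so one bad tick doesn't kill the goroutine.
func (l *TickLoop) Loop() {
	r := func(e error) {
		if e != nil {
			log.Print(e)
		}
	}
	for {
		select {
		case <-l.HalfSec.C:
			r(l.HalfSecf())
		case <-l.Sec.C:
			r(l.Secf())
		}
	}
}

func main() {
	tl := NewTickLoop()
	tl.HalfSecf = func() error { return nil } // e.g. cheap health checks
	tl.Secf = func() error { log.Print("second tick"); return nil }
	go tl.Loop()
	time.Sleep(3 * time.Second)
}

The real type (common/tickloop.go below) adds fifteen-minute, hourly and daily tickers and leaves all of the callbacks to be wired up by the caller.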


@@ -34,7 +34,6 @@ type Alert struct {
 type AlertStmts struct {
 	notifyWatchers *sql.Stmt
-	notifyOne      *sql.Stmt
 	getWatchers    *sql.Stmt
 }
@@ -49,7 +48,6 @@ func init() {
 			qgen.DBInsert{"activity_stream_matches", "watcher,asid", ""},
 			qgen.DBJoin{"activity_stream", "activity_subscriptions", "activity_subscriptions.user, activity_stream.asid", "activity_subscriptions.targetType = activity_stream.elementType AND activity_subscriptions.targetID = activity_stream.elementID AND activity_subscriptions.user != activity_stream.actor", "asid=?", "", ""},
 		),
-		notifyOne:   acc.Insert("activity_stream_matches").Columns("watcher,asid").Fields("?,?").Prepare(),
 		getWatchers: acc.SimpleInnerJoin("activity_stream", "activity_subscriptions", "activity_subscriptions.user", "activity_subscriptions.targetType = activity_stream.elementType AND activity_subscriptions.targetID = activity_stream.elementID AND activity_subscriptions.user != activity_stream.actor", "asid=?", "", ""),
 	}
 	return acc.FirstError()
@@ -101,6 +99,7 @@ func BuildAlert(a Alert, user User /* The current user */) (out string, err erro
 	var url, area, phraseName string
 	own := false
+	// TODO: Avoid loading a bit of data twice
 	switch a.ElementType {
 	case "convo":
 		convo, err := Convos.Get(a.ElementID)
@@ -232,6 +231,7 @@ func BuildAlertSb(sb *strings.Builder, a *Alert, u *User /* The current user */)
 	var url, area string
 	own := false
+	// TODO: Avoid loading a bit of data twice
 	switch a.ElementType {
 	case "convo":
 		convo, err := Convos.Get(a.ElementID)
@@ -300,6 +300,7 @@ func BuildAlertSb(sb *strings.Builder, a *Alert, u *User /* The current user */)
 //const AlertsGrowHint3 = len(`{"msg":"._","sub":["",""],"path":"","img":"","id":}`) + 3 + 2 + 2 + 2 + 2 + 1
+// TODO: Create a notifier structure?
 func AddActivityAndNotifyAll(a Alert) error {
 	id, err := Activity.Add(a)
 	if err != nil {
@@ -308,13 +309,14 @@ func AddActivityAndNotifyAll(a Alert) error {
 	return NotifyWatchers(id)
 }
+// TODO: Create a notifier structure?
 func AddActivityAndNotifyTarget(a Alert) error {
 	id, err := Activity.Add(a)
 	if err != nil {
 		return err
 	}
-	err = NotifyOne(a.TargetUserID, id)
+	err = ActivityMatches.Add(a.TargetUserID, id)
 	if err != nil {
 		return err
 	}
@@ -330,11 +332,7 @@ func AddActivityAndNotifyTarget(a Alert) error {
 	return nil
 }
-func NotifyOne(watcher, asid int) error {
-	_, err := alertStmts.notifyOne.Exec(watcher, asid)
-	return err
-}
+// TODO: Create a notifier structure?
 func NotifyWatchers(asid int) error {
 	_, err := alertStmts.notifyWatchers.Exec(asid)
 	if err != nil {
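The NotifyOne prepared statement above is folded into the new ActivityMatches store. Its exact interface is not shown in this diff; the method set below is inferred from the call sites (Add in AddActivityAndNotifyTarget, CountAsid in the new asmMatches) and is only an assumption, with a toy in-memory stand-in for the SQL-backed implementation:

package main

import "fmt"

// Inferred from the call sites in this commit; the real
// common.ActivityStreamMatches store may differ (assumption).
type ActivityStreamMatches interface {
	Add(watcher, asid int) error // replaces the old NotifyOne(watcher, asid) statement
	CountAsid(asid int) int      // used by asmMatches() to prune orphaned activity
}

// memMatches is a toy in-memory stand-in for the database-backed store.
type memMatches struct {
	byAsid map[int][]int
}

func (m *memMatches) Add(watcher, asid int) error {
	m.byAsid[asid] = append(m.byAsid[asid], watcher)
	return nil
}

func (m *memMatches) CountAsid(asid int) int {
	return len(m.byAsid[asid])
}

func main() {
	var s ActivityStreamMatches = &memMatches{byAsid: map[int][]int{}}
	_ = s.Add(2, 101)             // watcher 2 now matches activity item 101
	fmt.Println(s.CountAsid(101)) // 1
}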


@@ -622,7 +622,7 @@ func compileJSTemplates(wg *sync.WaitGroup, c *tmpl.CTemplateSet, themeName stri
 }
 var poutlen = len("\n// nolint\nfunc init() {\n")
-var poutlooplen = len("__frags[0]=arr_0[:]\n")
+var poutlooplen = len("__frags[0]=a_0[:]\n")
 func getTemplateList(c *tmpl.CTemplateSet, wg *sync.WaitGroup, prefix string) string {
 	DebugLog("in getTemplateList")
@@ -666,16 +666,16 @@ func getTemplateList(c *tmpl.CTemplateSet, wg *sync.WaitGroup, prefix string) st
 			}
 		}
 		tmpStr := strconv.Itoa(tmpCount)
-		//"arr_" + tmpStr + ":=[...]byte{" + /*bits*/ bsb.String() + "}\n"
-		poutsb.WriteString("arr_")
+		//"a_" + tmpStr + ":=[...]byte{" + /*bits*/ bsb.String() + "}\n"
+		poutsb.WriteString("a_")
 		poutsb.WriteString(tmpStr)
 		poutsb.WriteString(":=[...]byte{")
 		poutsb.WriteString(bsb.String())
 		poutsb.WriteString("}\n")
-		//front + "=arr_" + tmpStr + "[:]\n"
+		//front + "=a_" + tmpStr + "[:]\n"
 		poutsb.WriteString(front)
-		poutsb.WriteString("=arr_")
+		poutsb.WriteString("=a_")
 		poutsb.WriteString(tmpStr)
 		poutsb.WriteString("[:]\n")
 		tmpCount++
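The rename only affects the generated template-list code. A tiny standalone program mimicking the WriteString sequence above shows what one emitted fragment looks like after the switch from arr_ to a_; the byte values are an arbitrary example, not output copied from the generator:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Mimics the poutsb.WriteString sequence above for a single fragment.
func main() {
	var poutsb strings.Builder
	tmpStr, front, bits := strconv.Itoa(0), "__frags[0]", "60, 104, 49, 62" // "<h1>"
	poutsb.WriteString("a_")
	poutsb.WriteString(tmpStr)
	poutsb.WriteString(":=[...]byte{")
	poutsb.WriteString(bits)
	poutsb.WriteString("}\n")
	poutsb.WriteString(front)
	poutsb.WriteString("=a_")
	poutsb.WriteString(tmpStr)
	poutsb.WriteString("[:]\n")
	// Prints:
	// a_0:=[...]byte{60, 104, 49, 62}
	// __frags[0]=a_0[:]
	fmt.Print(poutsb.String())
}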

common/tickloop.go (new file, 215 lines)

@@ -0,0 +1,215 @@
package common

import (
	"log"
	"strconv"
	"sync/atomic"
	"time"

	qgen "github.com/Azareal/Gosora/query_gen"
	"github.com/pkg/errors"
)

type TickLoop struct {
	HalfSec    *time.Ticker
	Sec        *time.Ticker
	FifteenMin *time.Ticker
	Hour       *time.Ticker
	Day        *time.Ticker

	HalfSecf    func() error
	Secf        func() error
	FifteenMinf func() error
	Hourf       func() error
	Dayf        func() error
}

func NewTickLoop() *TickLoop {
	return &TickLoop{
		// TODO: Write tests for these
		// Run this goroutine once every half second
		HalfSec:    time.NewTicker(time.Second / 2),
		Sec:        time.NewTicker(time.Second),
		FifteenMin: time.NewTicker(15 * time.Minute),
		Hour:       time.NewTicker(time.Hour),
		Day:        time.NewTicker(time.Hour * 24),
	}
}

func (l *TickLoop) Loop() {
	r := func(e error) {
		if e != nil {
			LogError(e)
		}
	}
	for {
		select {
		case <-l.HalfSec.C:
			r(l.HalfSecf())
		case <-l.Sec.C:
			r(l.Secf())
		case <-l.FifteenMin.C:
			r(l.FifteenMinf())
		case <-l.Hour.C:
			r(l.Hourf())
		// TODO: Handle the instance going down a lot better
		case <-l.Day.C:
			r(l.Dayf())
		}
	}
}

var ErrDBDown = errors.New("The database is down")

func StartTick() (abort bool) {
	db := qgen.Builder.GetConn()
	isDBDown := atomic.LoadInt32(&IsDBDown)
	if e := db.Ping(); e != nil {
		// TODO: There's a bit of a race here, but it doesn't matter if this error appears multiple times in the logs as it's capped at three times, we just want to cut it down 99% of the time
		if isDBDown == 0 {
			db.SetConnMaxLifetime(time.Second / 2) // Drop all the connections and start over
			LogWarning(e, ErrDBDown.Error())
		}
		atomic.StoreInt32(&IsDBDown, 1)
		return true
	}
	if isDBDown == 1 {
		log.Print("The database is back")
	}
	//db.SetConnMaxLifetime(time.Second * 60 * 5) // Make this infinite as the temporary lifetime change will purge the stale connections?
	db.SetConnMaxLifetime(-1)
	atomic.StoreInt32(&IsDBDown, 0)
	return false
}

// TODO: Move these into DailyTick() methods?
func asmMatches() error {
	// TODO: Find a more efficient way of doing this
	return qgen.NewAcc().Select("activity_stream").Cols("asid").EachInt(func(asid int) error {
		if ActivityMatches.CountAsid(asid) > 0 {
			return nil
		}
		return Activity.Delete(asid)
	})
}

// TODO: Name the tasks so we can figure out which one it was when something goes wrong? Or maybe toss it up WithStack down there?
func RunTasks(tasks []func() error) error {
	for _, task := range tasks {
		if e := task(); e != nil {
			return e
		}
	}
	return nil
}

/*func init() {
	DbInits.Add(func(acc *qgen.Accumulator) error {
		replyStmts = ReplyStmts{
			isLiked: acc.Select("likes").Columns("targetItem").Where("sentBy=? and targetItem=? and targetType='replies'").Prepare(),
		}
		return acc.FirstError()
	})
}*/

func StartupTasks() (e error) {
	r := func(ee error) {
		if e == nil {
			e = ee
		}
	}
	if Config.DisableRegLog {
		r(RegLogs.Purge())
	}
	if Config.DisableLoginLog {
		r(LoginLogs.Purge())
	}
	if Config.DisablePostIP {
		// TODO: Clear the caches?
		r(Topics.ClearIPs())
		r(Rstore.ClearIPs())
		r(Prstore.ClearIPs())
	}
	if Config.DisablePollIP {
		r(Polls.ClearIPs())
	}
	if Config.DisableLastIP {
		r(Users.ClearLastIPs())
	}
	return e
}

func Dailies() (e error) {
	if e = asmMatches(); e != nil {
		return e
	}
	newAcc := func() *qgen.Accumulator {
		return qgen.NewAcc()
	}
	exec := func(ac qgen.AccExec) {
		if e != nil {
			return
		}
		_, ee := ac.Exec()
		e = ee
	}
	r := func(ee error) {
		if e == nil {
			e = ee
		}
	}
	if Config.LogPruneCutoff > -1 {
		// TODO: Clear the caches?
		if !Config.DisableLoginLog {
			r(LoginLogs.DeleteOlderThanDays(Config.LogPruneCutoff))
		}
		if !Config.DisableRegLog {
			r(RegLogs.DeleteOlderThanDays(Config.LogPruneCutoff))
		}
	}
	if !Config.DisablePostIP && Config.PostIPCutoff > -1 {
		// TODO: Use unixtime to remove this MySQLesque logic?
		f := func(tbl string) {
			exec(newAcc().Update(tbl).Set("ip=''").DateOlderThan("createdAt", Config.PostIPCutoff, "day").Where("ip!=''"))
		}
		f("topics")
		f("replies")
		f("users_replies")
	}
	if !Config.DisablePollIP && Config.PollIPCutoff > -1 {
		// TODO: Use unixtime to remove this MySQLesque logic?
		exec(newAcc().Update("polls_votes").Set("ip=''").DateOlderThan("castAt", Config.PollIPCutoff, "day").Where("ip!=''"))
		// TODO: Find some way of purging the ip data in polls_votes without breaking any anti-cheat measures which might be running... maybe hash it instead?
	}
	// TODO: lastActiveAt isn't currently set, so we can't rely on this to purge last_ips of users who haven't been on in a while
	if !Config.DisableLastIP && Config.LastIPCutoff > 0 {
		//exec(newAcc().Update("users").Set("last_ip='0'").DateOlderThan("lastActiveAt",c.Config.PostIPCutoff,"day").Where("last_ip!='0'"))
		mon := time.Now().Month()
		exec(newAcc().Update("users").Set("last_ip=''").Where("last_ip!='' AND last_ip NOT LIKE '" + strconv.Itoa(int(mon)) + "-%'"))
	}
	if e != nil {
		return e
	}
	if e = RunTasks(ScheduledDayTasks); e != nil {
		return e
	}
	e = ForumActionStore.DailyTick()
	if e != nil {
		return e
	}
	{
		e := Meta.SetInt64("lastDaily", time.Now().Unix())
		if e != nil {
			return e
		}
	}
	return nil
}
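The commit message mentions new test cases; the corresponding test diff is suppressed further down. As a hedged illustration only, a test for the new RunTasks helper might look like this (the commit's actual tests may differ):

package common

import (
	"errors"
	"testing"
)

// RunTasks should stop at the first failing task and return its error.
func TestRunTasksStopsOnFirstError(t *testing.T) {
	ran := 0
	boom := errors.New("boom")
	tasks := []func() error{
		func() error { ran++; return nil },
		func() error { ran++; return boom },
		func() error { ran++; return nil }, // must not run
	}
	if e := RunTasks(tasks); e != boom {
		t.Fatalf("expected boom, got %v", e)
	}
	if ran != 2 {
		t.Fatalf("expected 2 tasks to run, got %d", ran)
	}
}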


@@ -66,8 +66,9 @@ type User struct {
 }
 type UserPrivacy struct {
 	ShowComments int // 0 = default, 1 = public, 2 = registered, 3 = friends, 4 = self, 5 = disabled / unused
 	AllowMessage int // 0 = default, 1 = registered, 2 = friends, 3 = mods, 4 = disabled / unused
+	NoPresence   bool // false = default, true = true
 }
 func (u *User) WebSockets() *WsJSONUser {
@@ -169,37 +170,39 @@ var userStmts UserStmts
 func init() {
 	DbInits.Add(func(acc *qgen.Accumulator) error {
-		u := "users"
-		w := "uid=?"
+		u, w := "users", "uid=?"
+		set := func(s string) *sql.Stmt {
+			return acc.Update(u).Set(s).Where(w).Prepare()
+		}
 		userStmts = UserStmts{
-			activate:    acc.SimpleUpdate(u, "active=1", w),
-			changeGroup: acc.SimpleUpdate(u, "group=?", w), // TODO: Implement user_count for users_groups here
+			activate:    set("active=1"),
+			changeGroup: set("group=?"), // TODO: Implement user_count for users_groups here
 			delete:      acc.Delete(u).Where(w).Prepare(),
-			setAvatar:   acc.Update(u).Set("avatar=?").Where(w).Prepare(),
-			setName:     acc.Update(u).Set("name=?").Where(w).Prepare(),
-			update:      acc.Update(u).Set("name=?,email=?,group=?").Where(w).Prepare(), // TODO: Implement user_count for users_groups on things which use this
+			setAvatar:   set("avatar=?"),
+			setName:     set("name=?"),
+			update:      set("name=?,email=?,group=?"), // TODO: Implement user_count for users_groups on things which use this
 			// Stat Statements
 			// TODO: Do +0 to avoid having as many statements?
-			incScore:         acc.Update(u).Set("score=score+?").Where(w).Prepare(),
-			incPosts:         acc.Update(u).Set("posts=posts+?").Where(w).Prepare(),
-			incBigposts:      acc.Update(u).Set("posts=posts+?,bigposts=bigposts+?").Where(w).Prepare(),
-			incMegaposts:     acc.Update(u).Set("posts=posts+?,bigposts=bigposts+?,megaposts=megaposts+?").Where(w).Prepare(),
-			incPostStats:     acc.Update(u).Set("posts=posts+?,score=score+?,level=?").Where(w).Prepare(),
-			incBigpostStats:  acc.Update(u).Set("posts=posts+?,bigposts=bigposts+?,score=score+?,level=?").Where(w).Prepare(),
-			incMegapostStats: acc.Update(u).Set("posts=posts+?,bigposts=bigposts+?,megaposts=megaposts+?,score=score+?,level=?").Where(w).Prepare(),
-			incTopics:        acc.SimpleUpdate(u, "topics=topics+?", w),
-			updateLevel:      acc.SimpleUpdate(u, "level=?", w),
-			resetStats:       acc.Update(u).Set("score=0,posts=0,bigposts=0,megaposts=0,topics=0,level=0").Where(w).Prepare(),
-			setStats:         acc.Update(u).Set("score=?,posts=?,bigposts=?,megaposts=?,topics=?,level=?").Where(w).Prepare(),
-			incLiked:         acc.Update(u).Set("liked=liked+?,lastLiked=UTC_TIMESTAMP()").Where(w).Prepare(),
-			decLiked:         acc.Update(u).Set("liked=liked-?").Where(w).Prepare(),
+			incScore:         set("score=score+?"),
+			incPosts:         set("posts=posts+?"),
+			incBigposts:      set("posts=posts+?,bigposts=bigposts+?"),
+			incMegaposts:     set("posts=posts+?,bigposts=bigposts+?,megaposts=megaposts+?"),
+			incPostStats:     set("posts=posts+?,score=score+?,level=?"),
+			incBigpostStats:  set("posts=posts+?,bigposts=bigposts+?,score=score+?,level=?"),
+			incMegapostStats: set("posts=posts+?,bigposts=bigposts+?,megaposts=megaposts+?,score=score+?,level=?"),
+			incTopics:        set("topics=topics+?"),
+			updateLevel:      set("level=?"),
+			resetStats:       set("score=0,posts=0,bigposts=0,megaposts=0,topics=0,level=0"),
+			setStats:         set("score=?,posts=?,bigposts=?,megaposts=?,topics=?,level=?"),
+			incLiked:         set("liked=liked+?,lastLiked=UTC_TIMESTAMP()"),
+			decLiked:         set("liked=liked-?"),
 			//recalcLastLiked: acc...
-			updateLastIP:  acc.SimpleUpdate(u, "last_ip=?", w),
-			updatePrivacy: acc.Update(u).Set("profile_comments=?,enable_embeds=?").Where(w).Prepare(),
-			setPassword:   acc.Update(u).Set("password=?,salt=?").Where(w).Prepare(),
+			updateLastIP:  set("last_ip=?"),
+			updatePrivacy: set("profile_comments=?,enable_embeds=?"),
+			setPassword:   set("password=?,salt=?"),
 			scheduleAvatarResize: acc.Insert("users_avatar_queue").Columns("uid").Fields("?").Prepare(),
@@ -269,34 +272,34 @@ func (u *User) ScheduleGroupUpdate(gid, issuedBy int, dur time.Duration) error {
 	}
 	revertAt := time.Now().Add(dur)
-	tx, err := qgen.Builder.Begin()
-	if err != nil {
-		return err
+	tx, e := qgen.Builder.Begin()
+	if e != nil {
+		return e
 	}
 	defer tx.Rollback()
-	err = u.deleteScheduleGroupTx(tx)
-	if err != nil {
-		return err
+	e = u.deleteScheduleGroupTx(tx)
+	if e != nil {
+		return e
 	}
-	createScheduleGroupTx, err := qgen.Builder.SimpleInsertTx(tx, "users_groups_scheduler", "uid,set_group,issued_by,issued_at,revert_at,temporary", "?,?,?,UTC_TIMESTAMP(),?,?")
-	if err != nil {
-		return err
+	createScheduleGroupTx, e := qgen.Builder.SimpleInsertTx(tx, "users_groups_scheduler", "uid,set_group,issued_by,issued_at,revert_at,temporary", "?,?,?,UTC_TIMESTAMP(),?,?")
+	if e != nil {
+		return e
 	}
-	_, err = createScheduleGroupTx.Exec(u.ID, gid, issuedBy, revertAt, temp)
-	if err != nil {
-		return err
+	_, e = createScheduleGroupTx.Exec(u.ID, gid, issuedBy, revertAt, temp)
+	if e != nil {
+		return e
 	}
-	err = u.setTempGroupTx(tx, gid)
-	if err != nil {
-		return err
+	e = u.setTempGroupTx(tx, gid)
+	if e != nil {
+		return e
 	}
-	err = tx.Commit()
+	e = tx.Commit()
 	u.CacheRemove()
-	return err
+	return e
 }
 func (u *User) RevertGroupUpdate() error {
@@ -338,11 +341,8 @@ func (u *User) Activate() (e error) {
 // TODO: Expose this to the admin?
 func (u *User) Delete() error {
 	_, e := userStmts.delete.Exec(u.ID)
-	if e != nil {
-		return e
-	}
 	u.CacheRemove()
-	return nil
+	return e
 }
 // TODO: dismiss-event
@@ -530,17 +530,17 @@ func (u *User) ChangeAvatar(avatar string) error {
 }
 // TODO: Abstract this with an interface so we can scale this with an actual dedicated queue in a real cluster
-func (u *User) ScheduleAvatarResize() (err error) {
-	_, err = userStmts.scheduleAvatarResize.Exec(u.ID)
-	if err != nil {
+func (u *User) ScheduleAvatarResize() (e error) {
+	_, e = userStmts.scheduleAvatarResize.Exec(u.ID)
+	if e != nil {
 		// TODO: Do a more generic check so that we're not as tied to MySQL
-		me, ok := err.(*mysql.MySQLError)
+		me, ok := e.(*mysql.MySQLError)
 		if !ok {
-			return err
+			return e
 		}
 		// If it's just telling us that the item already exists in the database, then we can ignore it, as it doesn't matter if it's this call or another which schedules the item in the queue
 		if me.Number != 1062 {
-			return err
+			return e
 		}
 	}
 	return nil
@@ -557,11 +557,11 @@ func (u *User) GetIP() string {
 // ! Only updates the database not the *User for safety reasons
 func (u *User) UpdateIP(ip string) error {
-	_, err := userStmts.updateLastIP.Exec(ip, u.ID)
+	_, e := userStmts.updateLastIP.Exec(ip, u.ID)
 	if uc := Users.GetCache(); uc != nil {
 		uc.Remove(u.ID)
 	}
-	return err
+	return e
 }
 //var ErrMalformedInteger = errors.New("malformed integer")
@@ -722,6 +722,28 @@ func (u *User) InitPerms() {
 	}
 }
+// TODO: Write unit tests for this
+func InitPerms2(group int, superAdmin bool, tempGroup int) (perms *Perms, admin, superMod, banned bool) {
+	if tempGroup != 0 {
+		group = tempGroup
+	}
+	g := Groups.DirtyGet(group)
+	if superAdmin {
+		perms = &AllPerms
+	} else {
+		perms = &g.Perms
+	}
+	admin = superAdmin || g.IsAdmin
+	superMod = admin || g.IsMod
+	banned = g.IsBanned
+	if banned && superMod {
+		banned = false
+	}
+	return perms, admin, superMod, banned
+}
 // TODO: Write tests
 // TODO: Implement and use this
 // TODO: Implement friends
@@ -781,7 +803,14 @@ func buildNoavatar(uid, width int) string {
 		l(10)
 	}
 	if !Config.DisableDefaultNoavatar && uid < 11 {
-		if width == 200 {
+		/*if uid < 6 {
+			if width == 200 {
+				return noavatarCache200Avif[uid]
+			} else if width == 48 {
+				return noavatarCache48Avif[uid]
+			}
+			return StaticFiles.Prefix + "n" + strconv.Itoa(uid) + "-" + strconv.Itoa(width) + ".avif?i=0"
+		} else */if width == 200 {
 			return noavatarCache200[uid]
 		} else if width == 48 {
 			return noavatarCache48[uid]
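The set() closure above is the "reduce boilerplate" part of the commit: every users statement is now declared by its SET clause alone. Below is a toy, database-free rendition of the same pattern, with a fake accumulator standing in for qgen.Accumulator (the SQL strings it produces are illustrative, not the project's real queries):

package main

import "fmt"

// acc records the SQL it would prepare, standing in for the real builder.
type acc struct{ prepared []string }

func (a *acc) update(table, set, where string) string {
	q := "UPDATE `" + table + "` SET " + set + " WHERE " + where
	a.prepared = append(a.prepared, q)
	return q
}

func main() {
	a := &acc{}
	u, w := "users", "uid=?"
	// The set() helper from the diff, minus the database: the table and
	// WHERE clause are fixed once, so each statement is just a SET clause.
	set := func(s string) string { return a.update(u, s, w) }

	stmts := map[string]string{
		"activate": set("active=1"),
		"incScore": set("score=score+?"),
		"setName":  set("name=?"),
	}
	for name, q := range stmts {
		fmt.Println(name+":", q)
	}
}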

main.go (20 changed lines)

@@ -278,6 +278,10 @@ func storeInit() (e error) {
 	if e != nil {
 		return ws(e)
 	}
+	c.ActivityMatches, e = c.NewDefaultActivityStreamMatches(acc)
+	if e != nil {
+		return ws(e)
+	}
 	// TODO: Let the admin choose other thumbnailers, maybe ones defined in plugins
 	c.Thumbnailer = c.NewCaireThumbnailer()
 	c.Recalc, e = c.NewDefaultRecalc(acc)
@@ -545,7 +549,9 @@ func main() {
 	// TODO: Could we expand this to attachments and other things too?
 	thumbChan := make(chan bool)
 	go c.ThumbTask(thumbChan)
-	go tickLoop(thumbChan)
+	if err = tickLoop(thumbChan); err != nil {
+		c.LogError(err)
+	}
 	// Resource Management Goroutine
 	go func() {
@@ -575,7 +581,9 @@ func main() {
 	}()
 	log.Print("Initialising the router")
-	router, err = NewGenRouter(http.FileServer(http.Dir("./uploads")))
+	router, err = NewGenRouter(&RouterConfig{
+		Uploads: http.FileServer(http.Dir("./uploads")),
+	})
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -589,7 +597,7 @@ func main() {
 	go func() {
 		sig := <-sigs
 		// TODO: Gracefully shutdown the HTTP server
-		runTasks(c.ShutdownTasks)
+		c.RunTasks(c.ShutdownTasks)
 		c.StoppedServer("Received a signal to shutdown: ", sig)
 	}()
@@ -597,9 +605,9 @@ func main() {
 	c.WsHub.Start()
 	if false {
-		f, err := os.Create(c.Config.LogDir + "cpu.prof")
-		if err != nil {
-			log.Fatal(err)
+		f, e := os.Create(c.Config.LogDir + "cpu.prof")
+		if e != nil {
+			log.Fatal(e)
 		}
 		pprof.StartCPUProfile(f)
 	}

File diff suppressed because it is too large.


@@ -123,7 +123,12 @@ func (r *GenRouter) DailyTick() error {
 	return rotateLog(r.reqLog2, "reqs-")
 }
-func NewGenRouter(uploads http.Handler) (*GenRouter, error) {
+type RouterConfig struct {
+	Uploads     http.Handler
+	DisableTick bool
+}
+
+func NewGenRouter(cfg *RouterConfig) (*GenRouter, error) {
 	stimestr := strconv.FormatInt(c.StartTime.Unix(), 10)
 	createLog := func(name, stimestr string) (*RouterLog, error) {
 		f, err := os.OpenFile(c.Config.LogDir+name+"-"+stimestr+".log", os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0755)
@@ -154,17 +159,21 @@ func NewGenRouter(uploads http.Handler) (*GenRouter, error) {
 	}
 	reqMiscLog := log.New(f3, "", log.LstdFlags)
-	return &GenRouter{
+	ro := &GenRouter{
 		UploadHandler: func(w http.ResponseWriter, r *http.Request) {
 			writ := NewWriterIntercept(w)
-			http.StripPrefix("/uploads/", uploads).ServeHTTP(writ, r)
+			http.StripPrefix("/uploads/", cfg.Uploads).ServeHTTP(writ, r)
 		},
 		extraRoutes: make(map[string]func(http.ResponseWriter, *http.Request, *c.User) c.RouteError),
 		reqLogger:   reqMiscLog,
 		reqLog2:     reqLog,
 		suspLog:     suspReqLog,
-	}, nil
+	}
+	if !cfg.DisableTick {
+		c.AddScheduledDayTask(ro.DailyTick)
+	}
+	return ro, nil
 }
 func (r *GenRouter) handleError(err c.RouteError, w http.ResponseWriter, req *http.Request, u *c.User) {
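A hypothetical caller-side view of the new RouterConfig, written as a test that opts out of the DailyTick registration. This is not from the commit, and it assumes the usual config and log-directory setup has already run, since NewGenRouter opens log files:

package main

import (
	"net/http"
	"testing"
)

// Hypothetical test showing the new RouterConfig wiring; the commit's
// real router tests (in the suppressed diff) may differ.
func TestNewGenRouterWithoutDailyTick(t *testing.T) {
	r, e := NewGenRouter(&RouterConfig{
		Uploads:     http.FileServer(http.Dir("./uploads")),
		DisableTick: true, // skip c.AddScheduledDayTask(r.DailyTick)
	})
	if e != nil {
		t.Fatal(e)
	}
	_ = r
}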


@@ -4,266 +4,138 @@ import (
 	"database/sql"
 	"log"
 	"strconv"
-	"sync/atomic"
 	"time"
 	c "github.com/Azareal/Gosora/common"
-	qgen "github.com/Azareal/Gosora/query_gen"
 	"github.com/pkg/errors"
 )
-// TODO: Name the tasks so we can figure out which one it was when something goes wrong? Or maybe toss it up WithStack down there?
-func runTasks(tasks []func() error) {
-	for _, task := range tasks {
-		if e := task(); e != nil {
-			c.LogError(e)
-		}
-	}
-}
-func startTick() (abort bool) {
-	isDBDown := atomic.LoadInt32(&c.IsDBDown)
-	if err := db.Ping(); err != nil {
-		// TODO: There's a bit of a race here, but it doesn't matter if this error appears multiple times in the logs as it's capped at three times, we just want to cut it down 99% of the time
-		if isDBDown == 0 {
-			db.SetConnMaxLifetime(time.Second) // Drop all the connections and start over
-			c.LogWarning(err)
-			c.LogWarning(errors.New("The database is down"))
-		}
-		atomic.StoreInt32(&c.IsDBDown, 1)
-		return true
-	}
-	if isDBDown == 1 {
-		log.Print("The database is back")
-	}
-	//db.SetConnMaxLifetime(time.Second * 60 * 5) // Make this infinite as the temporary lifetime change will purge the stale connections?
-	db.SetConnMaxLifetime(-1)
-	atomic.StoreInt32(&c.IsDBDown, 0)
-	return false
-}
-func runHook(name string) {
+var TickLoop *c.TickLoop
+func runHook(name string) error {
 	if e := c.RunTaskHook(name); e != nil {
-		c.LogError(e, "Failed at task '"+name+"'")
+		return errors.Wrap(e, "Failed at task '"+name+"'")
 	}
+	return nil
 }
-func tickLoop(thumbChan chan bool) {
-	lastDailyStr, err := c.Meta.Get("lastDaily")
+func deferredDailies() error {
+	lastDailyStr, e := c.Meta.Get("lastDaily")
 	// TODO: Report this error back correctly...
-	if err != nil && err != sql.ErrNoRows {
-		c.LogError(err)
+	if e != nil && e != sql.ErrNoRows {
+		return e
 	}
 	lastDaily, _ := strconv.ParseInt(lastDailyStr, 10, 64)
 	low := time.Now().Unix() - (60 * 60 * 24)
 	if lastDaily < low {
-		dailies()
-	}
+		if e := c.Dailies(); e != nil {
+			return e
+		}
+	}
+	return nil
+}
-	// TODO: Write tests for these
-	// Run this goroutine once every half second
-	halfSecondTicker := time.NewTicker(time.Second / 2)
-	secondTicker := time.NewTicker(time.Second)
-	fifteenMinuteTicker := time.NewTicker(15 * time.Minute)
-	hourTicker := time.NewTicker(time.Hour)
-	dailyTicker := time.NewTicker(time.Hour * 24)
-	tick := func(name string, tasks []func() error) bool {
-		if startTick() {
-			return true
-		}
-		runHook("before_" + name + "_tick")
-		runTasks(tasks)
-		runHook("after_" + name + "_tick")
-		return false
-	}
-	for {
-		select {
-		case <-halfSecondTicker.C:
-			if tick("half_second", c.ScheduledHalfSecondTasks) {
-				continue
-			}
-		case <-secondTicker.C:
-			if startTick() {
-				continue
-			}
-			runHook("before_second_tick")
-			go func() { thumbChan <- true }()
-			runTasks(c.ScheduledSecondTasks)
-			// TODO: Stop hard-coding this
-			if err := c.HandleExpiredScheduledGroups(); err != nil {
-				c.LogError(err)
-			}
-			// TODO: Handle delayed moderation tasks
-			// Sync with the database, if there are any changes
-			if err = c.HandleServerSync(); err != nil {
-				c.LogError(err)
-			}
-			// TODO: Manage the TopicStore, UserStore, and ForumStore
-			// TODO: Alert the admin, if CPU usage, RAM usage, or the number of posts in the past second are too high
-			// TODO: Clean-up alerts with no unread matches which are over two weeks old. Move this to a 24 hour task?
-			// TODO: Rescan the static files for changes
-			runHook("after_second_tick")
-		case <-fifteenMinuteTicker.C:
-			if startTick() {
-				continue
-			}
-			runHook("before_fifteen_minute_tick")
-			runTasks(c.ScheduledFifteenMinuteTasks)
-			// TODO: Automatically lock topics, if they're really old, and the associated setting is enabled.
-			// TODO: Publish scheduled posts.
-			runHook("after_fifteen_minute_tick")
-		case <-hourTicker.C:
-			if startTick() {
-				continue
-			}
-			runHook("before_hour_tick")
-			jsToken, err := c.GenerateSafeString(80)
-			if err != nil {
-				c.LogError(err)
-			}
-			c.JSTokenBox.Store(jsToken)
-			c.OldSessionSigningKeyBox.Store(c.SessionSigningKeyBox.Load().(string)) // TODO: We probably don't need this type conversion
-			sessionSigningKey, err := c.GenerateSafeString(80)
-			if err != nil {
-				c.LogError(err)
-			}
-			c.SessionSigningKeyBox.Store(sessionSigningKey)
-			runTasks(c.ScheduledHourTasks)
-			runHook("after_hour_tick")
-		// TODO: Handle the instance going down a lot better
-		case <-dailyTicker.C:
-			dailies()
-		}
-		// TODO: Handle the daily clean-up.
-	}
-}
-func asmMatches() {
-	// TODO: Find a more efficient way of doing this
-	acc := qgen.NewAcc()
-	countStmt := acc.Count("activity_stream_matches").Where("asid=?").Prepare()
-	if err := acc.FirstError(); err != nil {
-		c.LogError(err)
-		return
-	}
-	err := acc.Select("activity_stream").Cols("asid").EachInt(func(asid int) error {
-		var count int
-		err := countStmt.QueryRow(asid).Scan(&count)
-		if err != sql.ErrNoRows {
-			return err
-		}
-		if count > 0 {
-			return nil
-		}
-		_, err = qgen.NewAcc().Delete("activity_stream").Where("asid=?").Run(asid)
-		return err
-	})
-	if err != nil && err != sql.ErrNoRows {
-		c.LogError(err)
-	}
-}
-func dailies() {
-	asmMatches()
-	if c.Config.DisableRegLog {
-		_, err := qgen.NewAcc().Purge("registration_logs").Exec()
-		if err != nil {
-			c.LogError(err)
-		}
-	}
-	if c.Config.LogPruneCutoff > -1 {
-		f := func(tbl string) {
-			_, err := qgen.NewAcc().Delete(tbl).DateOlderThan("doneAt", c.Config.LogPruneCutoff, "day").Run()
-			if err != nil {
-				c.LogError(err)
-			}
-		}
-		f("login_logs")
-		f("registration_logs")
-	}
-	if c.Config.DisablePostIP {
-		f := func(tbl string) {
-			_, err := qgen.NewAcc().Update(tbl).Set("ip=''").Where("ip!=''").Exec()
-			if err != nil {
-				c.LogError(err)
-			}
-		}
-		f("topics")
-		f("replies")
-		f("users_replies")
-	} else if c.Config.PostIPCutoff > -1 {
-		// TODO: Use unixtime to remove this MySQLesque logic?
-		f := func(tbl string) {
-			_, err := qgen.NewAcc().Update(tbl).Set("ip=''").DateOlderThan("createdAt", c.Config.PostIPCutoff, "day").Where("ip!=''").Exec()
-			if err != nil {
-				c.LogError(err)
-			}
-		}
-		f("topics")
-		f("replies")
-		f("users_replies")
-	}
-	if c.Config.DisablePollIP {
-		_, err := qgen.NewAcc().Update("polls_votes").Set("ip=''").Where("ip!=''").Exec()
-		if err != nil {
-			c.LogError(err)
-		}
-	} else if c.Config.PollIPCutoff > -1 {
-		// TODO: Use unixtime to remove this MySQLesque logic?
-		_, err := qgen.NewAcc().Update("polls_votes").Set("ip=''").DateOlderThan("castAt", c.Config.PollIPCutoff, "day").Where("ip!=''").Exec()
-		if err != nil {
-			c.LogError(err)
-		}
-		// TODO: Find some way of purging the ip data in polls_votes without breaking any anti-cheat measures which might be running... maybe hash it instead?
-	}
-	// TODO: lastActiveAt isn't currently set, so we can't rely on this to purge last_ips of users who haven't been on in a while
-	if c.Config.DisableLastIP {
-		_, err := qgen.NewAcc().Update("users").Set("last_ip=''").Where("last_ip!=''").Exec()
-		if err != nil {
-			c.LogError(err)
-		}
-	} else if c.Config.LastIPCutoff > 0 {
-		/*_, err = qgen.NewAcc().Update("users").Set("last_ip='0'").DateOlderThan("lastActiveAt",c.Config.PostIPCutoff,"day").Where("last_ip!='0'").Exec()
-		if err != nil {
-			c.LogError(err)
-		}*/
-		mon := time.Now().Month()
-		_, err := qgen.NewAcc().Update("users").Set("last_ip=''").Where("last_ip!='' AND last_ip NOT LIKE '" + strconv.Itoa(int(mon)) + "-%'").Exec()
-		if err != nil {
-			c.LogError(err)
-		}
-	}
-	e := router.DailyTick()
-	if e != nil {
-		c.LogError(e)
-	}
-	e = c.ForumActionStore.DailyTick()
-	if e != nil {
-		c.LogError(e)
-	}
-	{
-		e := c.Meta.Set("lastDaily", strconv.FormatInt(time.Now().Unix(), 10))
-		if e != nil {
-			c.LogError(e)
-		}
-	}
-}
+func tickLoop(thumbChan chan bool) error {
+	tl := c.NewTickLoop()
+	TickLoop = tl
+	if e := deferredDailies(); e != nil {
+		return e
+	}
+	if e := c.StartupTasks(); e != nil {
+		return e
+	}
+	tick := func(name string, tasks []func() error) error {
+		if c.StartTick() {
+			return nil
+		}
+		if e := runHook("before_" + name + "_tick"); e != nil {
+			return e
+		}
+		if e := c.RunTasks(tasks); e != nil {
+			return e
+		}
+		return runHook("after_" + name + "_tick")
+	}
+	tl.HalfSecf = func() error {
+		return tick("half_second", c.ScheduledHalfSecondTasks)
+	}
+	// TODO: Automatically lock topics, if they're really old, and the associated setting is enabled.
+	// TODO: Publish scheduled posts.
+	tl.FifteenMinf = func() error {
+		return tick("fifteen_minute", c.ScheduledFifteenMinuteTasks)
+	}
+	// TODO: Handle the instance going down a lot better
+	// TODO: Handle the daily clean-up.
+	tl.Dayf = func() error {
+		if c.StartTick() {
+			return nil
+		}
+		return c.Dailies()
+	}
+	tl.Secf = func() (e error) {
+		if c.StartTick() {
+			return nil
+		}
+		if e = runHook("before_second_tick"); e != nil {
+			return e
+		}
+		go func() { thumbChan <- true }()
+		if e = c.RunTasks(c.ScheduledSecondTasks); e != nil {
+			return e
+		}
+		// TODO: Stop hard-coding this
+		if e = c.HandleExpiredScheduledGroups(); e != nil {
+			return e
+		}
+		// TODO: Handle delayed moderation tasks
+		// Sync with the database, if there are any changes
+		if e = c.HandleServerSync(); e != nil {
+			return e
+		}
+		// TODO: Manage the TopicStore, UserStore, and ForumStore
+		// TODO: Alert the admin, if CPU usage, RAM usage, or the number of posts in the past second are too high
+		// TODO: Clean-up alerts with no unread matches which are over two weeks old. Move this to a 24 hour task?
+		// TODO: Rescan the static files for changes
+		return runHook("after_second_tick")
+	}
+	tl.Hourf = func() error {
+		if c.StartTick() {
+			return nil
+		}
+		if e := runHook("before_hour_tick"); e != nil {
+			return e
+		}
+		jsToken, e := c.GenerateSafeString(80)
+		if e != nil {
+			return e
+		}
+		c.JSTokenBox.Store(jsToken)
+		c.OldSessionSigningKeyBox.Store(c.SessionSigningKeyBox.Load().(string)) // TODO: We probably don't need this type conversion
+		sessionSigningKey, e := c.GenerateSafeString(80)
+		if e != nil {
+			return e
+		}
+		c.SessionSigningKeyBox.Store(sessionSigningKey)
+		if e = c.RunTasks(c.ScheduledHourTasks); e != nil {
+			return e
+		}
+		return runHook("after_hour_tick")
+	}
+	go tl.Loop()
+	return nil
+}
 func sched() error {
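Finally, a standalone sketch of the tick() wrapper introduced above: skip the interval while the database is down, otherwise run the before hook, the scheduled tasks, then the after hook, returning the first error to the TickLoop (which logs it). startTick, runHook and the task list here are local stand-ins for c.StartTick, RunTaskHook and the scheduled task slices:

package main

import (
	"errors"
	"fmt"
)

func main() {
	dbDown := false
	startTick := func() (abort bool) { return dbDown }
	runHook := func(name string) error { fmt.Println("hook:", name); return nil }
	runTasks := func(tasks []func() error) error {
		for _, t := range tasks {
			if e := t(); e != nil {
				return e
			}
		}
		return nil
	}

	// Same shape as the tick() closure in the diff above.
	tick := func(name string, tasks []func() error) error {
		if startTick() {
			return nil // database is down, skip this interval
		}
		if e := runHook("before_" + name + "_tick"); e != nil {
			return e
		}
		if e := runTasks(tasks); e != nil {
			return e
		}
		return runHook("after_" + name + "_tick")
	}

	tasks := []func() error{func() error { return errors.New("task failed") }}
	fmt.Println(tick("half_second", tasks)) // prints the before hook, then "task failed"
}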