Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions api/server.go
Original file line number Diff line number Diff line change
Expand Up @@ -449,6 +449,7 @@ func NewApiServer(config config.Config) *ApiServer {
g.Get("/metrics/aggregates/routes/trailing/:time_range", app.v1MetricsRoutesTrailing)

// Notifications
g.Get("/notifications/:userId", app.requireUserIdMiddleware, app.v1Notifications)
g.Get("/notifications/:userId/playlist_updates", app.requireUserIdMiddleware, app.v1NotificationsPlaylistUpdates)

}
Expand Down
2 changes: 2 additions & 0 deletions api/server_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,8 @@ func Test200UnAuthed(t *testing.T) {
"/v1/full/playlists?id=7eP5n",
"/v1/full/playlists/7eP5n/reposts",
"/v1/full/playlists/7eP5n/favorites",

"/v1/full/notifications/7eP5n?limit=50",
"/v1/full/playlists/trending",
// unclaimed ids
"/v1/users/unclaimed_id",
Expand Down
4 changes: 1 addition & 3 deletions api/swagger/swagger-v1-full.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9085,9 +9085,7 @@ components:
is_album:
type: boolean
playlist_id:
type: array
items:
type: string
type: string
create_track_notification_action_data:
required:
- track_id
Expand Down
173 changes: 173 additions & 0 deletions api/v1_notifications.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,173 @@
package api

import (
"encoding/json"
"slices"

"bridgerton.audius.co/trashid"
"github.com/gofiber/fiber/v2"
"github.com/jackc/pgx/v5"
)

func (app *ApiServer) v1Notifications(c *fiber.Ctx) error {

sql := `
WITH user_seen as (
SELECT
LAG(seen_at, 1, now()::timestamp) OVER ( ORDER BY seen_at desc ) AS seen_at,
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I didn't know about LAG... that's super cool, though isn't this backwards? shouldn't the lag be prev_seen_at?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah this does look backwards... it was mostly taken verbatim from get_notifications.py

I think it would be more correct to just flip the names tho... I'll try that out.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ok I get it now... the query is ordered by seen_at desc...
so with that ordering, the row before the current row has a greater timestamp.

          seen_at           |    prev_seen_at     
----------------------------+---------------------
 2025-07-17 14:27:25.609154 | 2025-07-17 06:54:29
 2025-07-17 06:54:29        | 2025-07-17 02:52:34
 2025-07-17 02:52:34        | 2025-07-17 01:23:07
 2025-07-17 01:23:07        | 2025-07-17 01:22:56
 2025-07-17 01:22:56        | 2025-07-16 22:55:05
 2025-07-16 22:55:05        | 2025-07-16 20:05:05

So I think the logic and naming is OK.

seen_at as prev_seen_at
FROM
notification_seen
WHERE
user_id = @user_id
ORDER BY
seen_at desc
),
user_created_at as (
SELECT
created_at
FROM
users
WHERE
user_id = @user_id
AND is_current
)
SELECT
n.type,
n.group_id as group_id,
json_agg(
json_build_object(
'type', type,
'specifier', specifier,
'timestamp', EXTRACT(EPOCH FROM timestamp),
'data', data
)
ORDER BY timestamp DESC
)::jsonb as actions,
CASE
WHEN user_seen.seen_at is not NULL THEN now()::timestamp != user_seen.seen_at
ELSE EXISTS(SELECT 1 from notification_seen ns where ns.user_id = @user_id)
END::boolean as is_seen,

CASE
WHEN user_seen.seen_at != now()::timestamp THEN EXTRACT(EPOCH FROM user_seen.seen_at)
ELSE null
END AS seen_at

FROM
notification n
LEFT JOIN user_seen on
user_seen.seen_at >= n.timestamp and user_seen.prev_seen_at < n.timestamp
WHERE
((ARRAY[@user_id] && n.user_ids) OR (n.type = 'announcement' AND n.timestamp > (SELECT created_at FROM user_created_at)))
AND n.type = ANY(@valid_types)
GROUP BY
n.type, n.group_id, user_seen.seen_at, user_seen.prev_seen_at
ORDER BY
user_seen.seen_at desc NULLS LAST,
max(n.timestamp) desc,
n.group_id desc
limit @limit::int
;
`

// default types are always enabled
validTypes := []string{
Comment thread
stereosteve marked this conversation as resolved.
"follow",
"repost",
"save",
"tip_send",
"tip_receive",
"track_added_to_purchased_album",
"track_added_to_playlist",
"tastemaker",
"supporter_rank_up",
"supporting_rank_up",
"supporter_dethroned",
"challenge_reward",
"claimable_reward",
"tier_change",
"create",
"remix",
"cosign",
"trending_playlist",
"trending",
"trending_underground",
"milestone",
"announcement",
"reaction",
"repost_of_repost",
"save_of_repost",
"usdc_purchase_seller",
"usdc_purchase_buyer",
"request_manager",
"approve_manager_request",
"comment",
"comment_thread",
"comment_mention",
"comment_reaction",
"listen_streak_reminder",
"fan_remix_contest_ended",
"artist_remix_contest_ended",
"artist_remix_contest_ending_soon",
"fan_remix_contest_ending_soon",
"fan_remix_contest_winners_selected",
"fan_remix_contest_started",
"artist_remix_contest_submissions",
}

// add optional valid_types
for _, t := range queryMulti(c, "valid_types") {
if !slices.Contains(validTypes, t) {
validTypes = append(validTypes, t)
}
}

userId := app.getUserId(c)
limit := c.QueryInt("limit", 20)

// python returns 20 items when limit=0
// and client relies on this for showing unread count
if limit == 0 {
limit = 20
}

type GetNotifsRow struct {
Type string `json:"type"`
GroupID string `json:"group_id"`
Actions json.RawMessage `json:"actions"`
IsSeen bool `json:"is_seen"`
SeenAt interface{} `json:"seen_at"`
}

rows, err := app.pool.Query(c.Context(), sql, pgx.NamedArgs{
"user_id": userId,
"limit": limit,
"valid_types": validTypes,
})
if err != nil {
return err
}

notifs, err := pgx.CollectRows(rows, pgx.RowToStructByNameLax[GetNotifsRow])
if err != nil {
return err
}

unreadCount := 0
for idx, notif := range notifs {
notif.Actions = trashid.HashifyJson(notif.Actions)
notifs[idx] = notif
if !notif.IsSeen {
unreadCount++
}
}

return c.JSON(fiber.Map{
"data": fiber.Map{
"notifications": notifs,
"unread_count": unreadCount,
},
})

}
43 changes: 43 additions & 0 deletions trashid/hashify_json.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
package trashid

import (
"bytes"
"fmt"
"regexp"
"strconv"
)

var (
	// re matches JSON members whose key looks id-like ("*_id", "id",
	// or "specifier") and whose value is a bare integer.
	re = regexp.MustCompile(`"(?P<key>\w+_id|id|specifier)"\s*:\s*(?P<val>\d+)`)

	// skipKeys lists id-like keys whose numeric values must be left as-is.
	skipKeys = [][]byte{
		[]byte(`special_id`),
	}
)

// HashifyJson rewrites integer id values in raw JSON into their encoded
// hash-id string form (e.g. "user_id": 2 becomes "user_id": "ML51L").
// Members with keys in skipKeys, non-numeric values, or values that fail
// to encode are left untouched.
func HashifyJson(jsonBytes []byte) []byte {
	return re.ReplaceAllFunc(jsonBytes, func(match []byte) []byte {
		idx := re.FindSubmatchIndex(match)
		if len(idx) < 6 {
			return match
		}

		// Pull the key and value out of the named capture groups.
		key := match[idx[2]:idx[3]]
		val := match[idx[4]:idx[5]]

		for _, skip := range skipKeys {
			if bytes.Equal(key, skip) {
				return match
			}
		}

		num, err := strconv.Atoi(string(val))
		if err != nil {
			return match
		}

		// Swap the integer for its hash-id string representation.
		encoded, err := EncodeHashId(num)
		if err != nil {
			return match
		}
		return fmt.Appendf(nil, `"%s": "%s"`, key, encoded)
	})
}
63 changes: 63 additions & 0 deletions trashid/hashify_json_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
package trashid

import (
"encoding/json"
"testing"

"github.com/stretchr/testify/assert"
"github.com/tidwall/gjson"
)

// TestHashifyJson verifies that id-like numeric members are rewritten to
// hash-id strings, that skip-listed and non-matching keys are untouched,
// and that the output remains valid JSON.
func TestHashifyJson(t *testing.T) {
	input := []byte(`
	{
		"data": {
			"id": 1,
			"user_id": 2,
			"special_id": 999,
			"tracks": [
				{
					"id": 3,
					"title": "fun",
					"value": "id",
					"other": "user_id",
					"good_idea": 333,
					"ida": 111
				},
				{
					"id": 4,
					"title": "fun",
					"value": "id",
					"other": "user_id",
					"good_idea": 333,
					"ida": 111
				}
			]
		}
	}
	`)

	// Fixture must be valid JSON before hashing...
	var decoded map[string]any
	assert.NoError(t, json.Unmarshal(input, &decoded))

	hashed := HashifyJson(input)

	// ...and must still be valid JSON afterwards.
	assert.NoError(t, json.Unmarshal(hashed, &decoded))

	cases := []struct {
		path string
		want string
	}{
		{"data.id", "7eP5n"},
		{"data.user_id", "ML51L"},
		{"data.special_id", "999"},
		{"data.tracks.0.id", "lebQD"},
		{"data.tracks.0.value", "id"},
		{"data.tracks.0.other", "user_id"},
		{"data.tracks.0.good_idea", "333"},
		{"data.tracks.1.id", "ELKzn"},
	}
	for _, tc := range cases {
		assert.Equal(t, tc.want, gjson.GetBytes(hashed, tc.path).String(), tc.path)
	}
}
Loading