package main

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"os/signal"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/go-redis/redis/v8"
	"github.com/gorilla/mux"
)
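
// ItemChannelBuffer is the capacity of each project's in-memory item queue;
// ItemWrapSize is the maximum number of items flushed to Redis in one batch.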
const (
	ItemChannelBuffer = 100000
	ItemWrapSize      = 100000
)
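
// ProjectRedisConfig describes an optional per-project Redis instance, as
// carried in the "redis" field of a project's tracker configuration entry.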
type ProjectRedisConfig struct {
	Host string `json:"host"`
	Pass string `json:"pass"`
	Port int    `json:"port"`
}

type ProjectConfig struct {
	RedisConfig *ProjectRedisConfig `json:"redis,omitempty"`
}
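
// BackfeedItem is a single queued item together with the shard information
// used to build its bloom-filter key.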
type BackfeedItem struct {
	PrimaryShard   byte
	SecondaryShard string
	Item           []byte
}
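
// ProjectBackfeedManager owns the item queue and Redis connections for a
// single project and deduplicates and forwards that project's items.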
type ProjectBackfeedManager struct {
	Context       context.Context
	Cancel        context.CancelFunc
	Done          chan bool
	C             chan *BackfeedItem
	Name          string
	BackfeedRedis *redis.ClusterClient
	ProjectRedis  *redis.Client
	LegacyRedis   *redis.Client
	Lock          sync.RWMutex
	ProjectConfig ProjectConfig
}
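
// RedisConfigDiffers reports whether the given Redis config differs from the
// one this manager was started with.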
func (that *ProjectBackfeedManager) RedisConfigDiffers(new *ProjectRedisConfig) bool {
	if that.ProjectConfig.RedisConfig == nil && new == nil {
		return false
	}
	if that.ProjectConfig.RedisConfig == nil || new == nil ||
		that.ProjectConfig.RedisConfig.Host != new.Host ||
		that.ProjectConfig.RedisConfig.Port != new.Port ||
		that.ProjectConfig.RedisConfig.Pass != new.Pass {
		return true
	}
	return false
}
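
// PushItem queues an item for the project, returning false if the channel has
// been closed or either context has been canceled.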
func (that *ProjectBackfeedManager) PushItem(ctx context.Context, item *BackfeedItem) bool {
	that.Lock.RLock()
	defer that.Lock.RUnlock()
	if that.C == nil {
		return false
	}
	select {
	case <-ctx.Done():
		return false
	case <-that.Context.Done():
		return false
	case that.C <- item:
		return true
	}
}
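
// PopItem dequeues one item, optionally blocking until an item arrives or the
// manager's context is canceled.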
func (that *ProjectBackfeedManager) PopItem(blocking bool) (*BackfeedItem, bool) {
	if blocking {
		select {
		case <-that.Context.Done():
			return nil, false
		case item := <-that.C:
			return item, true
		}
	} else {
		select {
		case <-that.Context.Done():
			return nil, false
		case item := <-that.C:
			return item, true
		default:
			return nil, false
		}
	}
}
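
// CloseItemChannel closes the item queue exactly once so that later PushItem
// calls fail cleanly instead of panicking on a closed channel.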
func (that *ProjectBackfeedManager) CloseItemChannel() {
	that.Lock.Lock()
	defer that.Lock.Unlock()
	if that.C == nil {
		return
	}
	close(that.C)
	that.C = nil
}
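
// Do is the per-project worker loop: it drains the item queue in batches,
// adds each batch to the backfeed bloom filters, additionally checks the
// legacy backfeed filter, and pushes anything still unseen onto the
// project's todo:backfeed set.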
func (that *ProjectBackfeedManager) Do() {
	defer close(that.Done)
	defer that.CloseItemChannel()
	defer that.Cancel()

	for {
		// Return immediately if the manager has already been canceled.
		select {
		case <-that.Context.Done():
			return
		case <-that.Done:
			return
		default:
		}
		// Block for one item, then opportunistically batch up to ItemWrapSize
		// items in total, grouped by their bloom-filter key.
		item, ok := that.PopItem(true)
		if !ok {
			break
		}
		keyMap := map[string][][]byte{}
		key := fmt.Sprintf("%s:%02x:%s", that.Name, item.PrimaryShard, item.SecondaryShard)
		keyMap[key] = append(keyMap[key], item.Item)
		wrapped := 1
		for wrapped < ItemWrapSize {
			item, ok := that.PopItem(false)
			if !ok {
				break
			}
			key := fmt.Sprintf("%s:%02x:%s", that.Name, item.PrimaryShard, item.SecondaryShard)
			keyMap[key] = append(keyMap[key], item.Item)
			wrapped++
		}
		// Check for cancellation again before the Redis round trips.
		select {
		case <-that.Context.Done():
			return
		case <-that.Done:
			return
		default:
		}
		// Add every batched item to its project's bloom filter in one pipelined
		// round trip; BF.MADD returns 1 for each item that was not seen before.
		resultMap := map[string]*redis.Cmd{}
		pipe := that.BackfeedRedis.Pipeline()
		for key, items := range keyMap {
			args := []interface{}{
				"bf.madd",
				key,
			}
			for _, item := range items {
				args = append(args, item)
			}
			resultMap[key] = pipe.Do(context.Background(), args...)
		}
		_, err := pipe.Exec(context.Background())
		if err != nil {
			log.Printf("%s", err)
			continue
		}
		// Keep only the items the bloom filters had not seen before.
		var sAddItems []interface{}
		for key, items := range keyMap {
			rawRes, err := resultMap[key].Result()
			if err != nil {
				log.Printf("%s", err)
				continue
			}
			rawResArray, ok := rawRes.([]interface{})
			if !ok || len(keyMap[key]) != len(rawResArray) {
				continue
			}
			for i, vi := range rawResArray {
				v, ok := vi.(int64)
				if !ok || v != 1 {
					continue
				}
				sAddItems = append(sAddItems, items[i])
			}
		}
		dupes := wrapped - len(sAddItems)
		if len(sAddItems) != 0 {
			// Also drop anything the legacy backfeed bloom filter already
			// contains; BF.MEXISTS returns 0 for items not in the filter.
			args := []interface{}{
				"bf.mexists",
				that.Name,
			}
			args = append(args, sAddItems...)
			rawRes, err := that.LegacyRedis.Do(context.Background(), args...).Result()
			if err != nil {
				log.Printf("unable to dedupe against %s legacy backfeed: %s", that.Name, err)
			} else {
				rawResArray, ok := rawRes.([]interface{})
				if ok && len(sAddItems) == len(rawResArray) {
					var filteredSAddItems []interface{}
					for i, vi := range rawResArray {
						v, ok := vi.(int64)
						if !ok || v != 0 {
							continue
						}
						filteredSAddItems = append(filteredSAddItems, sAddItems[i])
					}
					sAddItems = filteredSAddItems
				}
			}
		}
		if len(sAddItems) != 0 {
			err := that.ProjectRedis.SAdd(context.Background(), fmt.Sprintf("%s:todo:backfeed", that.Name), sAddItems...).Err()
			if err != nil {
				log.Printf("failed to sadd items for %s: %s", that.Name, err)
			}
		}
		if dupes > 0 {
			that.BackfeedRedis.HIncrBy(context.Background(), ":", that.Name, int64(dupes))
		}
	}
}
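
// GlobalBackfeedManager tracks the set of active per-project backfeed
// managers and maps incoming slugs to their projects.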
type GlobalBackfeedManager struct {
	Context       context.Context
	Cancel        context.CancelFunc
	ActiveFeeds   map[string]*ProjectBackfeedManager
	ActiveSlugs   map[string]string
	TrackerRedis  *redis.Client
	BackfeedRedis *redis.ClusterClient
	LegacyRedis   *redis.Client
	Lock          sync.RWMutex
}
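
// RefreshFeeds reconciles the running project managers with the tracker's
// "backfeed" slug map and per-project "trackers" configuration: it starts
// managers for new projects, restarts managers whose Redis config changed,
// and shuts down managers for projects that no longer exist.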
func (that *GlobalBackfeedManager) RefreshFeeds() error {
	slugProjectMap, err := that.TrackerRedis.HGetAll(that.Context, "backfeed").Result()
	if err != nil {
		return err
	}
	var projects []string
	projectSlugMap := map[string][]string{}
	for slug, project := range slugProjectMap {
		projectSlugMap[project] = append(projectSlugMap[project], slug)
	}
	for project := range projectSlugMap {
		projects = append(projects, project)
	}
	projectConfigs := map[string]ProjectConfig{}
	if len(projects) != 0 {
		cfgi, err := that.TrackerRedis.HMGet(that.Context, "trackers", projects...).Result()
		if err != nil {
			return err
		}
		if len(projects) != len(cfgi) {
			return fmt.Errorf("hmget result had unexpected length")
		}
		for i, project := range projects {
			configString, ok := cfgi[i].(string)
			if !ok {
				continue
			}
			config := ProjectConfig{}
			err := json.Unmarshal([]byte(configString), &config)
			if err != nil {
				continue
			}
			projectConfigs[project] = config
		}
	}
	// drop projects (and their slugs) that have no parseable tracker config
	projects = nil
	for project := range projectSlugMap {
		if _, has := projectConfigs[project]; !has {
			delete(projectSlugMap, project)
			continue
		}
		projects = append(projects, project)
	}
	for slug, project := range slugProjectMap {
		if _, has := projectConfigs[project]; !has {
			delete(slugProjectMap, slug)
		}
	}
	// add feeds for new projects
	for _, project := range projects {
		projectConfig := projectConfigs[project]
		var outdatedProjectBackfeedManager *ProjectBackfeedManager
		if projectBackfeedManager, has := that.ActiveFeeds[project]; has {
			if that.ActiveFeeds[project].RedisConfigDiffers(projectConfig.RedisConfig) {
				outdatedProjectBackfeedManager = projectBackfeedManager
			} else {
				continue
			}
		}
		ctx, cancel := context.WithCancel(that.Context)
		projectBackfeedManager := &ProjectBackfeedManager{
			Context:       ctx,
			Cancel:        cancel,
			Done:          make(chan bool),
			C:             make(chan *BackfeedItem, ItemChannelBuffer),
			BackfeedRedis: that.BackfeedRedis,
			Name:          project,
			ProjectConfig: projectConfig,
			LegacyRedis:   that.LegacyRedis,
		}
		if projectConfig.RedisConfig != nil {
			projectBackfeedManager.ProjectRedis = redis.NewClient(&redis.Options{
				Addr:        fmt.Sprintf("%s:%d", projectConfig.RedisConfig.Host, projectConfig.RedisConfig.Port),
				Username:    "default",
				Password:    projectConfig.RedisConfig.Pass,
				ReadTimeout: 15 * time.Minute,
			})
		} else {
			projectBackfeedManager.ProjectRedis = that.TrackerRedis
		}
		go projectBackfeedManager.Do()
		that.Lock.Lock()
		that.ActiveFeeds[project] = projectBackfeedManager
		that.Lock.Unlock()
		if outdatedProjectBackfeedManager != nil {
			outdatedProjectBackfeedManager.Cancel()
			<-outdatedProjectBackfeedManager.Done
			log.Printf("updated project: %s", project)
		} else {
			log.Printf("added project: %s", project)
		}
	}
	that.Lock.Lock()
	that.ActiveSlugs = slugProjectMap
	that.Lock.Unlock()
	// remove feeds for old projects
	for project, projectBackfeedManager := range that.ActiveFeeds {
		if _, has := projectSlugMap[project]; has {
			continue
		}
		log.Printf("removing project: %s", project)
		that.Lock.Lock()
		delete(that.ActiveFeeds, project)
		that.Lock.Unlock()
		projectBackfeedManager.Cancel()
		<-projectBackfeedManager.Done
		log.Printf("removed project: %s", project)
	}
	return nil
}
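
// Splitter splits a byte stream on Delimiter; if IgnoreEOF is set, a final
// unterminated chunk at EOF is emitted instead of reported as an error.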
type Splitter struct {
	Delimiter []byte
	IgnoreEOF bool
}
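
// Split is a bufio.SplitFunc that yields delimiter-terminated tokens.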
func (that *Splitter) Split(data []byte, atEOF bool) (int, []byte, error) {
	if i := bytes.Index(data, that.Delimiter); i >= 0 {
		return i + len(that.Delimiter), data[:i], nil
	}
	if len(data) == 0 || !atEOF {
		// no complete token yet; ask the scanner for more data
		return 0, nil, nil
	}
	if atEOF && that.IgnoreEOF {
		return len(data), data, nil
	}
	return 0, data, io.ErrUnexpectedEOF
}
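
// GenShardHash derives a one-byte primary shard value from an item's bytes.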
func GenShardHash(b []byte) (final byte) {
	for i, b := range b {
		final = (b ^ final ^ byte(i)) + final + byte(i) + final*byte(i)
	}
	return final
}
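
// WriteResponse writes a JSON envelope containing either an error message or
// the given payload together with the HTTP status code.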
func WriteResponse(res http.ResponseWriter, statusCode int, v interface{}) {
	res.Header().Set("Content-Type", "application/json")
	res.WriteHeader(statusCode)
	if statusCode == http.StatusNoContent {
		return
	}
	if err, isError := v.(error); isError {
		v = map[string]interface{}{
			"error":       fmt.Sprintf("%v", err),
			"status_code": statusCode,
		}
	} else {
		log.Printf("%#v", v)
		v = map[string]interface{}{
			"data":        v,
			"status_code": statusCode,
		}
	}
	json.NewEncoder(res).Encode(v)
}
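
// GetFeed resolves a slug to its running project backfeed manager, if any.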
func (that *GlobalBackfeedManager) GetFeed(slug string) *ProjectBackfeedManager {
	that.Lock.RLock()
	defer that.Lock.RUnlock()
	project, has := that.ActiveSlugs[slug]
	if !has {
		return nil
	}
	projectBackfeedManager, has := that.ActiveFeeds[project]
	if !has {
		return nil
	}
	return projectBackfeedManager
}
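
// Handle accepts POSTed items for a slug, splits the request body on the
// "delimiter" query parameter (NUL by default), and queues each non-empty
// item onto the matching project's backfeed channel.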
func (that *GlobalBackfeedManager) Handle(res http.ResponseWriter, req *http.Request) {
	defer req.Body.Close()
	vars := mux.Vars(req)
	slug := vars["slug"]
	secondaryShard := req.URL.Query().Get("shard")
	projectBackfeedManager := that.GetFeed(slug)
	if projectBackfeedManager == nil {
		WriteResponse(res, http.StatusNotFound, fmt.Errorf("%s", "no such backfeed channel"))
		return
	}
	splitter := &Splitter{
		Delimiter: []byte(req.URL.Query().Get("delimiter")),
		IgnoreEOF: req.URL.Query().Get("ignoreeof") != "",
	}
	if len(splitter.Delimiter) == 0 {
		splitter.Delimiter = []byte{0x00}
	}
	scanner := bufio.NewScanner(req.Body)
	scanner.Split(splitter.Split)
	var err error
	statusCode := http.StatusNoContent
	for scanner.Scan() {
		b := scanner.Bytes()
		if len(b) == 0 {
			continue
		}
		// Copy the token: the scanner reuses its buffer on later Scan calls,
		// while the queued item is consumed asynchronously.
		item := &BackfeedItem{
			PrimaryShard:   GenShardHash(b),
			SecondaryShard: secondaryShard,
			Item:           append([]byte(nil), b...),
		}
		ok := projectBackfeedManager.PushItem(req.Context(), item)
		if !ok {
			err = fmt.Errorf("channel closed")
			statusCode = http.StatusServiceUnavailable
			break
		}
	}
	if err == nil {
		err = scanner.Err()
		if err != nil {
			statusCode = http.StatusBadRequest
		}
	}
	WriteResponse(res, statusCode, err)
}
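
// CancelAllFeeds cancels every project manager and waits for each worker to
// drain and exit.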
func (that *GlobalBackfeedManager) CancelAllFeeds() {
	that.Cancel()
	for project, projectBackfeedManager := range that.ActiveFeeds {
		log.Printf("waiting for %s channel to shut down...", project)
		<-projectBackfeedManager.Done
		delete(that.ActiveFeeds, project)
	}
}
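
// main connects to the tracker, legacy, and backfeed Redis instances named by
// the REDIS_TRACKER, REDIS_LEGACY, and REDIS_BACKFEED_* environment variables,
// serves POST /legacy/{slug} on :21581, and refreshes the project feeds every
// second until it receives a termination signal.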
func main() {
	log.SetFlags(log.Flags() | log.Lshortfile)
	trackerRedisOptions, err := redis.ParseURL(os.Getenv("REDIS_TRACKER"))
	if err != nil {
		log.Panicf("invalid REDIS_TRACKER url: %s", err)
	}
	trackerRedisOptions.ReadTimeout = 15 * time.Minute
	trackerRedisClient := redis.NewClient(trackerRedisOptions)
	legacyRedisOptions, err := redis.ParseURL(os.Getenv("REDIS_LEGACY"))
	if err != nil {
		log.Panicf("invalid REDIS_LEGACY url: %s", err)
	}
	legacyRedisOptions.ReadTimeout = 15 * time.Minute
	legacyRedisClient := redis.NewClient(legacyRedisOptions)
	backfeedRedisClient := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs:       strings.Split(os.Getenv("REDIS_BACKFEED_ADDRS"), ","),
		Username:    os.Getenv("REDIS_BACKFEED_USERNAME"),
		Password:    os.Getenv("REDIS_BACKFEED_PASSWORD"),
		ReadTimeout: 15 * time.Minute,
	})
	if err := trackerRedisClient.Ping(context.Background()).Err(); err != nil {
		log.Panicf("unable to ping tracker redis: %s", err)
	}
	if err := backfeedRedisClient.Ping(context.Background()).Err(); err != nil {
		log.Panicf("unable to ping backfeed redis: %s", err)
	}
	if err := legacyRedisClient.Ping(context.Background()).Err(); err != nil {
		log.Panicf("unable to ping legacy redis: %s", err)
	}
	globalBackfeedManager := &GlobalBackfeedManager{
		ActiveFeeds:   map[string]*ProjectBackfeedManager{},
		ActiveSlugs:   map[string]string{},
		TrackerRedis:  trackerRedisClient,
		BackfeedRedis: backfeedRedisClient,
		LegacyRedis:   legacyRedisClient,
	}
	globalBackfeedManager.Context, globalBackfeedManager.Cancel = context.WithCancel(context.Background())
	defer globalBackfeedManager.CancelAllFeeds()
	err = globalBackfeedManager.RefreshFeeds()
	if err != nil {
		log.Panicf("unable to set up backfeed projects: %s", err)
	}
	r := mux.NewRouter()
	r.Methods(http.MethodPost).Path("/legacy/{slug}").HandlerFunc(globalBackfeedManager.Handle)
	serveErrChan := make(chan error)
	go func() {
		s := &http.Server{
			Addr:           ":21581",
			IdleTimeout:    1 * time.Hour,
			MaxHeaderBytes: 1 * 1024 * 1024,
			Handler:        r,
		}
		serveErrChan <- s.ListenAndServe()
	}()
	sc := make(chan os.Signal, 1)
	signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)
	ticker := time.NewTicker(1 * time.Second)
	for {
		select {
		case <-sc:
			return
		case <-ticker.C:
		}
		err = globalBackfeedManager.RefreshFeeds()
		if err != nil {
			log.Printf("unable to refresh backfeed projects: %s", err)
		}
	}
}