Add self-hosted deployment configuration
- Add backend entry point (cmd/server/main.go) - Add prompt=select_account to Google OAuth flow - Add combined init.sql for self-hosted PostgreSQL - Update docker-compose to include backend service with memory limits Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
281
backend/cmd/server/main.go
Normal file
281
backend/cmd/server/main.go
Normal file
@@ -0,0 +1,281 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/http/pprof"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"runtime"
|
||||
|
||||
"github.com/M1ngdaXie/realtime-collab/internal/auth"
|
||||
"github.com/M1ngdaXie/realtime-collab/internal/config"
|
||||
"github.com/M1ngdaXie/realtime-collab/internal/handlers"
|
||||
"github.com/M1ngdaXie/realtime-collab/internal/hub"
|
||||
"github.com/M1ngdaXie/realtime-collab/internal/logger"
|
||||
"github.com/M1ngdaXie/realtime-collab/internal/messagebus"
|
||||
"github.com/M1ngdaXie/realtime-collab/internal/store"
|
||||
"github.com/M1ngdaXie/realtime-collab/internal/workers"
|
||||
"github.com/gin-contrib/cors"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/google/uuid"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// main wires together the whole backend: configuration, structured logging,
// the message bus (Redis with local fallback), PostgreSQL storage, the
// WebSocket hub, background workers, and the Gin HTTP router. Startup is
// strictly ordered: config -> logger -> bus -> db -> hub -> workers -> routes.
func main() {
	// CLI flags - override env vars
	portFlag := flag.String("port", "", "Server port (overrides PORT env var)")
	flag.Parse()

	// Load configuration (flag value wins over the PORT env var inside Load)
	cfg, err := config.Load(*portFlag)
	if err != nil {
		log.Fatalf("Configuration error: %v", err)
	}
	log.Printf("Configuration loaded (environment: %s, port: %s)", cfg.Environment, cfg.Port)

	// Initialize structured logger
	zapLogger, err := logger.NewLoggerFromEnv()
	if err != nil {
		log.Fatalf("Failed to initialize logger: %v", err)
	}
	// Flush buffered log entries on exit; Sync's error is not checked.
	defer zapLogger.Sync()

	// Generate unique server ID for this instance.
	// Hostname error is ignored: the uuid suffix keeps the ID unique anyway.
	hostname, _ := os.Hostname()
	serverID := fmt.Sprintf("%s-%s", hostname, uuid.New().String()[:8])
	zapLogger.Info("Server identity", zap.String("server_id", serverID))

	// Initialize MessageBus (Redis or Local fallback).
	// A Redis connection failure degrades to single-node local mode rather
	// than aborting startup.
	var msgBus messagebus.MessageBus
	if cfg.RedisURL != "" {
		redisBus, err := messagebus.NewRedisMessageBus(cfg.RedisURL, serverID, zapLogger)
		if err != nil {
			zapLogger.Warn("Redis unavailable, falling back to local mode", zap.Error(err))
			msgBus = messagebus.NewLocalMessageBus()
		} else {
			msgBus = redisBus
		}
	} else {
		zapLogger.Info("No REDIS_URL configured, using local mode")
		msgBus = messagebus.NewLocalMessageBus()
	}
	defer msgBus.Close()

	// Initialize database
	dbStore, err := store.NewPostgresStore(cfg.DatabaseURL)
	if err != nil {
		log.Fatalf("Failed to initialize database: %v", err)
	}
	defer dbStore.Close()
	log.Println("Database connection established")

	// Initialize WebSocket hub (runs its own event loop in a goroutine)
	wsHub := hub.NewHub(msgBus, serverID, zapLogger)
	go wsHub.Run()
	zapLogger.Info("WebSocket hub started")

	// Start Redis health monitoring (only when the bus really is Redis —
	// the type assertion is false for the local fallback bus).
	if redisBus, ok := msgBus.(*messagebus.RedisMessageBus); ok {
		go redisBus.StartHealthMonitoring(context.Background(), 30*time.Second, func(healthy bool) {
			wsHub.SetFallbackMode(!healthy)
		})
		zapLogger.Info("Redis health monitoring started")
	}

	// Start update persist worker (stream WAL persistence)
	workerCtx, workerCancel := context.WithCancel(context.Background())
	defer workerCancel()
	go workers.StartUpdatePersistWorker(workerCtx, msgBus, dbStore, zapLogger, serverID)
	zapLogger.Info("Update persist worker started")

	// Start periodic session cleanup (every hour).
	// NOTE(review): this goroutine has no shutdown signal; it lives for the
	// full process lifetime, which is acceptable since it dies with main.
	go func() {
		ticker := time.NewTicker(1 * time.Hour)
		defer ticker.Stop()
		for range ticker.C {
			if err := dbStore.CleanupExpiredSessions(context.Background()); err != nil {
				log.Printf("Error cleaning up expired sessions: %v", err)
			} else {
				log.Println("Cleaned up expired sessions")
			}
		}
	}()
	log.Println("Session cleanup task started")

	// Initialize handlers
	docHandler := handlers.NewDocumentHandler(dbStore, msgBus, serverID, zapLogger)
	wsHandler := handlers.NewWebSocketHandler(wsHub, dbStore, cfg, msgBus)
	authHandler := handlers.NewAuthHandler(dbStore, cfg)
	authMiddleware := auth.NewAuthMiddleware(dbStore, cfg.JWTSecret, zapLogger)
	shareHandler := handlers.NewShareHandler(dbStore, cfg)
	versionHandler := handlers.NewVersionHandler(dbStore)

	// Setup Gin router
	router := gin.Default()

	// Optional pprof endpoints for profiling under load (guarded by env).
	// Enable with: ENABLE_PPROF=1 (never honored in production — see shouldEnablePprof).
	// Optional: PPROF_BLOCK_RATE=1 PPROF_MUTEX_FRACTION=1 (adds overhead; use for short profiling windows).
	if shouldEnablePprof(cfg) {
		blockRate := getEnvInt("PPROF_BLOCK_RATE", 0)
		mutexFraction := getEnvInt("PPROF_MUTEX_FRACTION", 0)
		localOnly := getEnvBool("PPROF_LOCAL_ONLY", true)

		if blockRate > 0 {
			runtime.SetBlockProfileRate(blockRate)
		}
		if mutexFraction > 0 {
			runtime.SetMutexProfileFraction(mutexFraction)
		}

		pprofGroup := router.Group("/debug/pprof")
		if localOnly {
			// Reject any client whose IP is not a loopback address.
			pprofGroup.Use(func(c *gin.Context) {
				ip := net.ParseIP(c.ClientIP())
				if ip == nil || !ip.IsLoopback() {
					c.AbortWithStatus(403) // Forbidden for non-loopback clients
					return
				}
				c.Next()
			})
		}

		// Optional basic auth: both PPROF_USER and PPROF_PASS must be set.
		user, pass := os.Getenv("PPROF_USER"), os.Getenv("PPROF_PASS")
		if user != "" || pass != "" {
			if user == "" || pass == "" {
				zapLogger.Warn("PPROF_USER/PPROF_PASS must both be set; skipping basic auth")
			} else {
				pprofGroup.Use(gin.BasicAuth(gin.Accounts{user: pass}))
			}
		}

		// Handlers that are plain http.HandlerFunc values.
		pprofGroup.GET("/", gin.WrapF(pprof.Index))
		pprofGroup.GET("/cmdline", gin.WrapF(pprof.Cmdline))
		pprofGroup.GET("/profile", gin.WrapF(pprof.Profile))
		pprofGroup.GET("/symbol", gin.WrapF(pprof.Symbol))
		pprofGroup.GET("/trace", gin.WrapF(pprof.Trace))

		// Named runtime profiles exposed via pprof.Handler.
		pprofGroup.GET("/allocs", gin.WrapH(pprof.Handler("allocs")))
		pprofGroup.GET("/block", gin.WrapH(pprof.Handler("block")))
		pprofGroup.GET("/goroutine", gin.WrapH(pprof.Handler("goroutine")))
		pprofGroup.GET("/heap", gin.WrapH(pprof.Handler("heap")))
		pprofGroup.GET("/mutex", gin.WrapH(pprof.Handler("mutex")))
		pprofGroup.GET("/threadcreate", gin.WrapH(pprof.Handler("threadcreate")))

		zapLogger.Info("pprof enabled",
			zap.Bool("local_only", localOnly),
			zap.Int("block_rate", blockRate),
			zap.Int("mutex_fraction", mutexFraction),
		)
	}

	// CORS configuration (credentials allowed, so origins must be explicit)
	corsConfig := cors.DefaultConfig()
	corsConfig.AllowOrigins = cfg.AllowedOrigins
	corsConfig.AllowMethods = []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"}
	corsConfig.AllowHeaders = []string{"Origin", "Content-Type", "Accept", "Authorization"}
	corsConfig.AllowCredentials = true
	router.Use(cors.New(corsConfig))

	// Health check
	router.GET("/health", func(c *gin.Context) {
		c.JSON(200, gin.H{"status": "ok"})
	})

	// WebSocket endpoint (no auth required, validated in handler)
	router.GET("/ws/:roomId", wsHandler.HandleWebSocket)

	// Load test endpoint - NO AUTH (only for local testing!)
	router.GET("/ws/loadtest/:roomId", wsHandler.HandleWebSocketLoadTest)

	// REST API
	api := router.Group("/api")

	// OAuth login/callback pairs plus session introspection and logout.
	authGroup := api.Group("/auth")
	{
		authGroup.GET("/google", authHandler.GoogleLogin)
		authGroup.GET("/google/callback", authHandler.GoogleCallback)
		authGroup.GET("/github", authHandler.GithubLogin)
		authGroup.GET("/github/callback", authHandler.GithubCallback)
		authGroup.GET("/me", authMiddleware.RequireAuth(), authHandler.Me)
		authGroup.POST("/logout", authMiddleware.RequireAuth(), authHandler.Logout)
	}

	// Document routes with optional auth
	docs := api.Group("/documents")

	{
		docs.GET("", authMiddleware.RequireAuth(), docHandler.ListDocuments)
		docs.GET("/:id", authMiddleware.RequireAuth(), docHandler.GetDocument)
		// OptionalAuth: state is also reachable via public share links.
		docs.GET("/:id/state", authMiddleware.OptionalAuth(), docHandler.GetDocumentState)

		// Permission route (supports both auth and share token)
		docs.GET("/:id/permission", authMiddleware.OptionalAuth(), docHandler.GetDocumentPermission)

		docs.POST("", authMiddleware.RequireAuth(), docHandler.CreateDocument)
		docs.PUT("/:id/state", authMiddleware.RequireAuth(), docHandler.UpdateDocumentState)
		docs.DELETE("/:id", authMiddleware.RequireAuth(), docHandler.DeleteDocument)

		// Share routes
		docs.POST("/:id/shares", authMiddleware.RequireAuth(), shareHandler.CreateShare)
		docs.GET("/:id/shares", authMiddleware.RequireAuth(), shareHandler.ListShares)
		docs.DELETE("/:id/shares/:userId", authMiddleware.RequireAuth(), shareHandler.DeleteShare)
		docs.POST("/:id/share-link", authMiddleware.RequireAuth(), shareHandler.CreateShareLink)
		docs.GET("/:id/share-link", authMiddleware.RequireAuth(), shareHandler.GetShareLink)
		docs.DELETE("/:id/share-link", authMiddleware.RequireAuth(), shareHandler.RevokeShareLink)

		// Version history routes
		docs.POST("/:id/versions", authMiddleware.RequireAuth(), versionHandler.CreateVersion)
		docs.GET("/:id/versions", authMiddleware.RequireAuth(), versionHandler.ListVersions)
		docs.GET("/:id/versions/:versionId/snapshot", authMiddleware.RequireAuth(), versionHandler.GetVersionSnapshot)
		docs.POST("/:id/restore", authMiddleware.RequireAuth(), versionHandler.RestoreVersion)
	}

	// Start server (blocks until the listener fails or the process exits)
	log.Printf("Server starting on port %s", cfg.Port)
	if err := router.Run(":" + cfg.Port); err != nil {
		log.Fatalf("Failed to start server: %v", err)
	}
}
|
||||
|
||||
func shouldEnablePprof(cfg *config.Config) bool {
|
||||
if cfg == nil || cfg.IsProduction() {
|
||||
return false
|
||||
}
|
||||
return getEnvBool("ENABLE_PPROF", false)
|
||||
}
|
||||
|
||||
// getEnvBool reads the environment variable key and interprets it as a
// boolean flag. The value is trimmed and lower-cased before matching.
// An unset variable, or a value matching neither the truthy nor the falsy
// vocabulary, yields defaultValue.
func getEnvBool(key string, defaultValue bool) bool {
	raw, found := os.LookupEnv(key)
	if !found {
		return defaultValue
	}

	normalized := strings.ToLower(strings.TrimSpace(raw))
	for _, truthy := range []string{"1", "true", "t", "yes", "y", "on"} {
		if normalized == truthy {
			return true
		}
	}
	for _, falsy := range []string{"0", "false", "f", "no", "n", "off"} {
		if normalized == falsy {
			return false
		}
	}
	// Unrecognized value: fall back rather than guessing.
	return defaultValue
}
|
||||
|
||||
// getEnvInt reads the environment variable key as a base-10 integer,
// ignoring surrounding whitespace. An unset variable or an unparseable
// value yields defaultValue.
func getEnvInt(key string, defaultValue int) int {
	raw, found := os.LookupEnv(key)
	if !found {
		return defaultValue
	}
	if n, err := strconv.Atoi(strings.TrimSpace(raw)); err == nil {
		return n
	}
	return defaultValue
}
|
||||
@@ -57,7 +57,7 @@ func NewAuthHandler(store store.Store, cfg *config.Config) *AuthHandler {
|
||||
func (h *AuthHandler) GoogleLogin(c *gin.Context) {
|
||||
// Generate random state and set cookie
|
||||
oauthState := h.generateStateOauthCookie(c.Writer)
|
||||
url := h.googleConfig.AuthCodeURL(oauthState, oauth2.AccessTypeOffline)
|
||||
url := h.googleConfig.AuthCodeURL(oauthState, oauth2.AccessTypeOffline, oauth2.SetAuthURLParam("prompt", "select_account"))
|
||||
c.Redirect(http.StatusTemporaryRedirect, url)
|
||||
}
|
||||
|
||||
|
||||
267
backend/scripts/init.sql
Normal file
267
backend/scripts/init.sql
Normal file
@@ -0,0 +1,267 @@
|
||||
-- Migration: Create required PostgreSQL extensions
-- Extensions must be created before other migrations can use them

-- uuid-ossp: Provides functions for generating UUIDs (uuid_generate_v4())
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";

-- pgcrypto: Provides cryptographic functions (used for token hashing)
CREATE EXTENSION IF NOT EXISTS "pgcrypto";

-- Initialize database schema for realtime collaboration
-- This is the base schema that creates core tables for documents and updates

CREATE TABLE IF NOT EXISTS documents (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
    name VARCHAR(255) NOT NULL,
    type VARCHAR(50) NOT NULL CHECK (type IN ('editor', 'kanban')),
    yjs_state BYTEA,
    created_at TIMESTAMPTZ DEFAULT NOW(),
    updated_at TIMESTAMPTZ DEFAULT NOW()
);

-- IF NOT EXISTS keeps this combined script idempotent, matching the guarded
-- CREATE TABLE statements above (a bare CREATE INDEX aborts a re-run).
CREATE INDEX IF NOT EXISTS idx_documents_type ON documents(type);
CREATE INDEX IF NOT EXISTS idx_documents_created_at ON documents(created_at DESC);

-- Table for storing incremental updates (for history tracking)
-- NOTE(review): "update" is non-reserved in PostgreSQL so the bare column
-- name works, but it is a keyword in other dialects; quoting would be safer.
CREATE TABLE IF NOT EXISTS document_updates (
    id SERIAL PRIMARY KEY,
    document_id UUID NOT NULL REFERENCES documents(id) ON DELETE CASCADE,
    update BYTEA NOT NULL,
    created_at TIMESTAMPTZ DEFAULT NOW()
);

CREATE INDEX IF NOT EXISTS idx_updates_document_id ON document_updates(document_id);
CREATE INDEX IF NOT EXISTS idx_updates_created_at ON document_updates(created_at DESC);
|
||||
-- Migration: Add users and sessions tables for authentication
-- Run this before 002_add_document_shares.sql

-- Enable UUID extension (no-op if already created by the base schema)
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";

-- Users table: one row per OAuth identity (provider + provider_user_id)
CREATE TABLE IF NOT EXISTS users (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
    email VARCHAR(255) NOT NULL,
    name VARCHAR(255) NOT NULL,
    avatar_url TEXT,
    provider VARCHAR(50) NOT NULL CHECK (provider IN ('google', 'github')),
    provider_user_id VARCHAR(255) NOT NULL,
    created_at TIMESTAMPTZ DEFAULT NOW(),
    updated_at TIMESTAMPTZ DEFAULT NOW(),
    last_login_at TIMESTAMPTZ,
    UNIQUE(provider, provider_user_id)
);

-- IF NOT EXISTS keeps re-runs idempotent, consistent with CREATE TABLE above.
CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
CREATE INDEX IF NOT EXISTS idx_users_provider ON users(provider, provider_user_id);

COMMENT ON TABLE users IS 'Stores user accounts from OAuth providers';
COMMENT ON COLUMN users.provider IS 'OAuth provider: google or github';
COMMENT ON COLUMN users.provider_user_id IS 'User ID from OAuth provider';

-- Sessions table
CREATE TABLE IF NOT EXISTS sessions (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
    user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    token_hash VARCHAR(64) NOT NULL,
    expires_at TIMESTAMPTZ NOT NULL,
    created_at TIMESTAMPTZ DEFAULT NOW(),
    user_agent TEXT,
    ip_address VARCHAR(45), -- 45 chars fits an IPv4-mapped IPv6 address
    UNIQUE(token_hash)
);

CREATE INDEX IF NOT EXISTS idx_sessions_user_id ON sessions(user_id);
-- NOTE(review): UNIQUE(token_hash) above already creates a usable index;
-- this explicit one is kept for compatibility with existing deployments.
CREATE INDEX IF NOT EXISTS idx_sessions_token_hash ON sessions(token_hash);
CREATE INDEX IF NOT EXISTS idx_sessions_expires_at ON sessions(expires_at);

COMMENT ON TABLE sessions IS 'Stores active JWT sessions for revocation support';
COMMENT ON COLUMN sessions.token_hash IS 'SHA-256 hash of JWT token';
COMMENT ON COLUMN sessions.user_agent IS 'User agent string for device tracking';

-- Add owner_id to documents table if it doesn't exist
ALTER TABLE documents ADD COLUMN IF NOT EXISTS owner_id UUID REFERENCES users(id) ON DELETE SET NULL;
CREATE INDEX IF NOT EXISTS idx_documents_owner_id ON documents(owner_id);

COMMENT ON COLUMN documents.owner_id IS 'User who created the document';
|
||||
-- Migration: Add document sharing with permissions
-- Run against existing database

CREATE TABLE IF NOT EXISTS document_shares (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
    document_id UUID NOT NULL REFERENCES documents(id) ON DELETE CASCADE,
    user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    permission VARCHAR(20) NOT NULL CHECK (permission IN ('view', 'edit')),
    created_at TIMESTAMPTZ DEFAULT NOW(),
    created_by UUID REFERENCES users(id) ON DELETE SET NULL,
    UNIQUE(document_id, user_id)
);

-- IF NOT EXISTS keeps re-runs idempotent, consistent with CREATE TABLE above.
CREATE INDEX IF NOT EXISTS idx_shares_document_id ON document_shares(document_id);
CREATE INDEX IF NOT EXISTS idx_shares_user_id ON document_shares(user_id);
CREATE INDEX IF NOT EXISTS idx_shares_permission ON document_shares(document_id, permission);

COMMENT ON TABLE document_shares IS 'Stores per-user document access permissions';
COMMENT ON COLUMN document_shares.permission IS 'Access level: view (read-only) or edit (read-write)';
|
||||
-- Migration: Add public sharing support via share tokens
-- Dependencies: Run after 002_add_document_shares.sql
-- Purpose: Add share_token and is_public columns used by share link feature

-- Add columns for public sharing
ALTER TABLE documents ADD COLUMN IF NOT EXISTS share_token VARCHAR(255);
ALTER TABLE documents ADD COLUMN IF NOT EXISTS is_public BOOLEAN DEFAULT false NOT NULL;

-- Create indexes for performance (partial: index only rows that can match)
CREATE INDEX IF NOT EXISTS idx_documents_share_token ON documents(share_token) WHERE share_token IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_documents_is_public ON documents(is_public) WHERE is_public = true;

-- Constraint: public documents must have a token
-- This ensures data integrity - a document can't be public without a share token.
-- ADD CONSTRAINT has no IF NOT EXISTS form, so the statement is wrapped in a
-- DO block that swallows duplicate_object, keeping this combined script
-- idempotent like its other guarded statements.
DO $$
BEGIN
    ALTER TABLE documents ADD CONSTRAINT check_public_has_token
        CHECK (is_public = false OR (is_public = true AND share_token IS NOT NULL));
EXCEPTION
    WHEN duplicate_object THEN NULL; -- constraint already exists
END $$;

-- Documentation
COMMENT ON COLUMN documents.share_token IS 'Public share token for link-based access (base64-encoded random string, 32 bytes)';
COMMENT ON COLUMN documents.is_public IS 'Whether document is publicly accessible via share link';
|
||||
-- Migration: Add permission column for public share links
-- Dependencies: Run after 003_add_public_sharing.sql
-- Purpose: Store permission level (view/edit) for public share links

-- Add permission column to documents table
-- (defaults to 'edit' so links created before this migration keep working)
ALTER TABLE documents ADD COLUMN IF NOT EXISTS share_permission VARCHAR(20) DEFAULT 'edit' CHECK (share_permission IN ('view', 'edit'));

-- Create index for performance
-- (partial: only public documents are ever looked up via share links)
CREATE INDEX IF NOT EXISTS idx_documents_share_permission ON documents(share_permission) WHERE is_public = true;

-- Documentation
COMMENT ON COLUMN documents.share_permission IS 'Permission level for public share link: view (read-only) or edit (read-write). Defaults to edit for backward compatibility.';
|
||||
-- Migration: Add OAuth token storage
-- This table stores OAuth2 access tokens and refresh tokens from external providers
-- Used for refreshing user sessions without re-authentication
-- NOTE(review): tokens are stored unencrypted; consider encrypting at rest
-- (pgcrypto is already available) if the database is shared.

CREATE TABLE IF NOT EXISTS oauth_tokens (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
    user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    provider VARCHAR(50) NOT NULL,
    access_token TEXT NOT NULL,
    refresh_token TEXT,
    token_type VARCHAR(50) DEFAULT 'Bearer',
    expires_at TIMESTAMPTZ NOT NULL,
    scope TEXT,
    created_at TIMESTAMPTZ DEFAULT NOW(),
    updated_at TIMESTAMPTZ DEFAULT NOW(),

    -- One token row per (user, provider) pair; upserts hit this constraint.
    CONSTRAINT oauth_tokens_user_id_provider_key UNIQUE (user_id, provider)
);

-- IF NOT EXISTS keeps re-runs idempotent, consistent with CREATE TABLE above.
CREATE INDEX IF NOT EXISTS idx_oauth_tokens_user_id ON oauth_tokens(user_id);
|
||||
-- Migration: Add document version history support
-- This migration creates the version history table, adds tracking columns,
-- and provides a helper function for version numbering

-- Create document versions table for storing version snapshots
CREATE TABLE IF NOT EXISTS document_versions (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
    document_id UUID NOT NULL REFERENCES documents(id) ON DELETE CASCADE,
    yjs_snapshot BYTEA NOT NULL,
    text_preview TEXT,
    version_number INTEGER NOT NULL,
    created_by UUID REFERENCES users(id) ON DELETE SET NULL,
    version_label TEXT,
    is_auto_generated BOOLEAN DEFAULT true,
    created_at TIMESTAMPTZ DEFAULT NOW(),

    CONSTRAINT unique_document_version UNIQUE(document_id, version_number)
);

-- IF NOT EXISTS keeps re-runs idempotent, consistent with CREATE TABLE above.
CREATE INDEX IF NOT EXISTS idx_document_versions_document_id ON document_versions(document_id, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_document_versions_created_by ON document_versions(created_by);

-- Add version tracking columns to documents table
ALTER TABLE documents ADD COLUMN IF NOT EXISTS version_count INTEGER DEFAULT 0;
ALTER TABLE documents ADD COLUMN IF NOT EXISTS last_snapshot_at TIMESTAMPTZ;

-- Function to get the next version number for a document
-- This ensures version numbers are sequential and unique per document.
-- NOTE(review): MAX+1 is not race-free under concurrent snapshots of the
-- same document; the unique_document_version constraint rejects the loser,
-- so callers should be prepared to retry on a unique violation.
CREATE OR REPLACE FUNCTION get_next_version_number(p_document_id UUID)
RETURNS INTEGER AS $$
DECLARE
    next_version INTEGER;
BEGIN
    SELECT COALESCE(MAX(version_number), 0) + 1
    INTO next_version
    FROM document_versions
    WHERE document_id = p_document_id;

    RETURN next_version;
END;
$$ LANGUAGE plpgsql;
|
||||
-- Migration: Enable Row Level Security (RLS) on all tables
-- This enables RLS but uses permissive policies to allow all operations
-- Authorization is still handled by the Go backend middleware

-- Enable RLS on all tables (idempotent: enabling twice is a no-op)
ALTER TABLE users ENABLE ROW LEVEL SECURITY;
ALTER TABLE sessions ENABLE ROW LEVEL SECURITY;
ALTER TABLE oauth_tokens ENABLE ROW LEVEL SECURITY;
ALTER TABLE documents ENABLE ROW LEVEL SECURITY;
ALTER TABLE document_updates ENABLE ROW LEVEL SECURITY;
ALTER TABLE document_shares ENABLE ROW LEVEL SECURITY;
ALTER TABLE document_versions ENABLE ROW LEVEL SECURITY;

-- Create permissive policies that allow all operations.
-- This maintains current behavior where backend handles authorization.
-- CREATE POLICY has no IF NOT EXISTS form, so each policy is dropped first
-- to keep this combined script idempotent like its other guarded statements.

-- Users table
DROP POLICY IF EXISTS "Allow all operations on users" ON users;
CREATE POLICY "Allow all operations on users" ON users FOR ALL USING (true);

-- Sessions table
DROP POLICY IF EXISTS "Allow all operations on sessions" ON sessions;
CREATE POLICY "Allow all operations on sessions" ON sessions FOR ALL USING (true);

-- OAuth tokens table
DROP POLICY IF EXISTS "Allow all operations on oauth_tokens" ON oauth_tokens;
CREATE POLICY "Allow all operations on oauth_tokens" ON oauth_tokens FOR ALL USING (true);

-- Documents table
DROP POLICY IF EXISTS "Allow all operations on documents" ON documents;
CREATE POLICY "Allow all operations on documents" ON documents FOR ALL USING (true);

-- Document updates table
DROP POLICY IF EXISTS "Allow all operations on document_updates" ON document_updates;
CREATE POLICY "Allow all operations on document_updates" ON document_updates FOR ALL USING (true);

-- Document shares table
DROP POLICY IF EXISTS "Allow all operations on document_shares" ON document_shares;
CREATE POLICY "Allow all operations on document_shares" ON document_shares FOR ALL USING (true);

-- Document versions table
DROP POLICY IF EXISTS "Allow all operations on document_versions" ON document_versions;
CREATE POLICY "Allow all operations on document_versions" ON document_versions FOR ALL USING (true);
|
||||
-- Migration: Add stream checkpoints table for Redis Streams durability
-- This table tracks last processed stream position per document, so the
-- persist worker can resume from where it left off after a restart.

CREATE TABLE IF NOT EXISTS stream_checkpoints (
    -- One checkpoint row per document; rows vanish with their document.
    document_id UUID PRIMARY KEY REFERENCES documents(id) ON DELETE CASCADE,
    last_stream_id TEXT NOT NULL,        -- last processed stream entry ID
    last_seq BIGINT NOT NULL DEFAULT 0,  -- last processed sequence number
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Supports finding recently (or stale) checkpointed documents.
CREATE INDEX IF NOT EXISTS idx_stream_checkpoints_updated_at
    ON stream_checkpoints(updated_at DESC);
|
||||
-- Migration: Add update history table for Redis Stream WAL
-- This table stores per-update payloads for recovery and replay

CREATE TABLE IF NOT EXISTS document_update_history (
    id BIGSERIAL PRIMARY KEY,
    document_id UUID NOT NULL REFERENCES documents(id) ON DELETE CASCADE,
    stream_id TEXT NOT NULL,  -- stream entry ID this payload came from
    seq BIGINT NOT NULL,      -- per-document sequence number
    payload BYTEA NOT NULL,
    msg_type TEXT,
    server_id TEXT,           -- instance that persisted this entry
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Uniqueness per (document, stream entry) and per (document, seq) means a
-- redelivered update cannot be persisted twice.
CREATE UNIQUE INDEX IF NOT EXISTS uniq_update_history_document_stream_id
    ON document_update_history(document_id, stream_id);

CREATE UNIQUE INDEX IF NOT EXISTS uniq_update_history_document_seq
    ON document_update_history(document_id, seq);

-- NOTE(review): the original script also created a plain index
-- idx_update_history_document_seq on (document_id, seq). It is removed here:
-- uniq_update_history_document_seq already serves all lookups on those
-- columns, so the duplicate only added write overhead.
|
||||
Reference in New Issue
Block a user