import {
  S3Client,
  PutObjectCommand,
  DeleteObjectCommand,
} from "@aws-sdk/client-s3"
import fs from "fs"
import path from "path"
import { Readable } from "stream"

// Sanitize an object key so it is safe to use with S3/R2.
// Each "/"-separated segment is cleaned independently so the path
// structure of the key is preserved.
const sanitizeS3Key = (key: string) => {
  const cleanSegment = (segment: string): string => {
    const hyphenated = segment.replace(/\s+/g, "-") // Replace spaces with hyphens
    return hyphenated.replace(/[^a-zA-Z0-9-_.]/g, "_") // Replace other special characters with underscores
  }

  const segments = key.split("/")
  return segments.map(cleanSegment).join("/")
}

// Shared S3-compatible client pointed at Cloudflare R2.
// NOTE(review): assumes CLOUDFLARE_R2_ACCOUNT_ID / ACCESS_KEY_ID /
// SECRET_ACCESS_KEY are set at process start — the non-null assertions do
// not validate anything at runtime, and a missing account id would bake the
// string "undefined" into the endpoint URL. Confirm env validation happens
// at startup elsewhere.
const s3Client = new S3Client({
  region: "auto",
  endpoint: `https://${process.env.CLOUDFLARE_R2_ACCOUNT_ID}.r2.cloudflarestorage.com`,
  credentials: {
    accessKeyId: process.env.CLOUDFLARE_R2_ACCESS_KEY_ID!,
    secretAccessKey: process.env.CLOUDFLARE_R2_SECRET_ACCESS_KEY!,
  },
  forcePathStyle: true, // Required for R2
})

// Map a file's extension (case-insensitive) to its MIME type.
// Unknown extensions fall back to the generic binary type.
const getMimeType = (filePath: string) => {
  const mimeByExtension: Record<string, string> = {
    ".mp4": "video/mp4",
    ".webm": "video/webm",
    ".ogg": "video/ogg",
    ".jpg": "image/jpeg",
    ".jpeg": "image/jpeg",
    ".png": "image/png",
    ".gif": "image/gif",
    ".webp": "image/webp",
  }

  const extension = path.extname(filePath).toLowerCase()
  return mimeByExtension[extension] ?? "application/octet-stream"
}

/**
 * Wait for a file to be stable (completely written to disk).
 *
 * Polls the file size until it is unchanged for 3 consecutive checks,
 * guarding against uploading a file that is still being written. Longer
 * delays help on servers (e.g. NFS / slow disk) where file visibility can
 * lag behind the writer.
 *
 * @param filePath       Path to poll (absolute or cwd-relative).
 * @param maxRetries     Maximum number of polling iterations.
 * @param delayMs        Delay between consecutive size checks.
 * @param initialDelayMs One-time delay before polling starts (was hard-coded
 *                       at 400 ms; now tunable, default unchanged).
 * @param settleDelayMs  Extra safety buffer after stability is confirmed
 *                       (was hard-coded at 500 ms; now tunable).
 * @returns true once the size was stable, false if the file never
 *          stabilized within maxRetries iterations.
 */
const waitForFileStable = async (
  filePath: string,
  maxRetries = 50,
  delayMs = 250,
  initialDelayMs = 400,
  settleDelayMs = 500,
): Promise<boolean> => {
  const sleep = (ms: number) =>
    new Promise((resolve) => setTimeout(resolve, ms))

  // Initial delay so disk/NFS can finish writing (helps on server where local works)
  await sleep(initialDelayMs)

  const requiredStableChecks = 3 // File size must be unchanged this many times in a row
  let previousSize = -1
  let stableCount = 0

  for (let i = 0; i < maxRetries; i++) {
    try {
      // statSync throws when the file does not exist yet; treat that the
      // same as "not ready" and keep polling (handled by the catch below).
      const { size } = fs.statSync(filePath)

      if (size > 0) {
        if (size === previousSize) {
          stableCount++
          if (stableCount >= requiredStableChecks) {
            // Extra safety buffer after stability confirmed
            await sleep(settleDelayMs)
            return true
          }
        } else {
          // Size changed — the writer is still active; reset the counter.
          stableCount = 0
        }
        previousSize = size
      }
    } catch {
      // Transient stat failure (file not visible yet, NFS hiccup) — retry.
    }
    await sleep(delayMs)
  }

  console.error(`❌ File never became stable after ${maxRetries} retries`)
  return false
}

/**
 * Verify file integrity by reading it completely.
 * Confirms the file is non-empty and fully readable from disk so that a
 * corrupted or partially written file is never reported as valid.
 */
const verifyFileIntegrity = (filePath: string): boolean => {
  try {
    const expectedBytes = fs.statSync(filePath).size

    if (expectedBytes === 0) {
      console.error("❌ File is empty")
      return false
    }

    // A full read proves readability and lets us compare actual vs expected length.
    const contents = fs.readFileSync(filePath)
    if (contents.length === expectedBytes) {
      return true
    }

    console.error(
      `❌ File size mismatch: stats=${expectedBytes}, buffer=${contents.length}`,
    )
    return false
  } catch (err) {
    console.error("❌ File integrity check failed:", err)
    return false
  }
}

/**
 * Upload a file (path on disk or a readable stream) to Cloudflare R2.
 *
 * @param file Path of a local file, or a Readable stream.
 * @param key  Object key; sanitized before use (spaces → hyphens, other
 *             special characters → underscores).
 * @returns The constructed public URL string for the uploaded object.
 * @throws If the path does not exist, the file is empty, or the PUT fails
 *         (rethrown with a "Failed to upload file to R2" prefix).
 */
export const uploadToR2 = async (file: string | Readable, key: string) => {
  try {
    const sanitizedKey = sanitizeS3Key(key)

    let body: Readable | Buffer
    let contentType: string
    let contentLength: number | undefined // only known for the file-path case

    if (typeof file === "string") {
      if (!fs.existsSync(file)) {
        throw new Error("File not found: " + file)
      }

      const stats = fs.statSync(file)
      contentLength = stats.size

      if (contentLength === 0) {
        throw new Error("File is empty: " + file)
      }

      // ALWAYS use buffer for maximum reliability (especially for mobile uploads)
      // This ensures the entire file is read into memory before uploading
      body = fs.readFileSync(file)
      contentType = getMimeType(file)
    } else {
      // Stream input: length unknown up front; content type defaults to binary.
      body = file
      contentType = "application/octet-stream"
    }

    const uploadParams = {
      Bucket: process.env.CLOUDFLARE_R2_BUCKET_NAME,
      Key: sanitizedKey,
      Body: body,
      ContentType: contentType,
      ContentLength: contentLength,
      CacheControl: "public, max-age=31536000", // Cache for 1 year
    }

    await s3Client.send(new PutObjectCommand(uploadParams))

    // Return the public URL
    // NOTE(review): this builds a virtual-hosted-style URL on
    // r2.cloudflarestorage.com, while the client above uses an
    // account-scoped endpoint with forcePathStyle. Confirm this URL is
    // actually publicly reachable — R2 public access normally goes through
    // an r2.dev subdomain or a custom domain, not this host.
    const publicUrl = `https://${process.env.CLOUDFLARE_R2_BUCKET_NAME}.r2.cloudflarestorage.com/${sanitizedKey}`

    return publicUrl
  } catch (error) {
    // Log every piece of SDK diagnostic detail available before rethrowing.
    console.error("❌ Error uploading file to R2:", error)

    if (error instanceof Error) {
      console.error("Error name:", error.name)
      console.error("Error message:", error.message)
      console.error("Error stack:", error.stack)
      if ("$metadata" in error) {
        console.error(
          "Error metadata:",
          JSON.stringify((error as any).$metadata, null, 2),
        )
      }
      if ("$response" in error) {
        console.error(
          "Error response:",
          JSON.stringify((error as any).$response, null, 2),
        )
      }
    }

    // Rethrow with a stable message prefix that callers can match on.
    throw new Error(
      `Failed to upload file to R2: ${error instanceof Error ? error.message : "Unknown error"}`,
    )
  }
}

/**
 * Dispatch an upload to the configured backend.
 * With FILE_UPLOAD=R2 the file is sent to Cloudflare R2; otherwise the file
 * is assumed to already exist on local disk and its public path is returned.
 * Streams can only be uploaded to R2.
 */
export const uploadFile = async (file: string | Readable, key: string) => {
  const useR2 = process.env.FILE_UPLOAD === "R2"

  if (useR2) {
    return uploadToR2(file, key)
  }

  if (typeof file !== "string") {
    throw new Error("Cannot save stream locally")
  }

  return `/public/${key}`
}

/**
 * Upload file to R2 with retry logic.
 * Waits for the file to be stable on disk, verifies it can be read back
 * completely, uploads it, then schedules deletion of the local copy.
 * Called automatically after multer finishes writing the file.
 *
 * @param file Multer-style file info; needs localPath (or path) plus
 *             relativePath (or filename). Extra properties are ignored.
 * @returns The URL returned by uploadFile.
 * @throws If paths are missing, the file never stabilizes, the integrity
 *         check fails, or the upload itself fails.
 */
export const uploadFileWithRetry = async (file: {
  localPath?: string
  path?: string
  relativePath?: string
  filename?: string
  // index signature keeps full multer file objects assignable
  [extra: string]: unknown
}) => {
  let localPath = file.localPath || file.path
  const relativePath = file.relativePath || file.filename

  if (!localPath || !relativePath) {
    console.error("❌ Missing file paths for R2 upload")
    throw new Error("Missing file paths for R2 upload")
  }

  // Resolve to absolute path so we always check the same path Multer wrote to (server cwd can differ)
  if (!path.isAbsolute(localPath)) {
    localPath = path.resolve(process.cwd(), localPath)
  }

  try {
    // Step 1: Wait for file to be completely written and stable
    const isStable = await waitForFileStable(localPath)

    if (!isStable) {
      throw new Error("File did not become stable within timeout period")
    }

    // Step 2: Verify file integrity (full read-back)
    const isValid = verifyFileIntegrity(localPath)

    if (!isValid) {
      throw new Error("File integrity check failed")
    }

    // Step 3: Upload to R2 (or resolve the local public path)
    const r2Url = await uploadFile(localPath, relativePath)

    // Step 4: Delete local file after successful upload.
    // Fire-and-forget with a 1 s grace period; deletion failures are only
    // warned about, never propagated to the caller.
    setTimeout(() => {
      if (fs.existsSync(localPath)) {
        fs.unlink(localPath, (err) => {
          if (err) {
            console.warn("⚠️ Failed to delete local file:", err.message)
          } else {
            console.log("🗑️ Local file deleted:", localPath)
          }
        })
      }
    }, 1000)

    return r2Url
  } catch (error) {
    console.error("❌ R2 upload with retry failed:", error)
    throw error
  }
}

/**
 * Deletes a file from both R2 and local storage (safe operation).
 * Accepts either a bare key ("images/a.png") or a full R2 URL; failures on
 * either backend are logged and swallowed so this function never throws.
 */
export const deleteFile = async (filePath: string) => {
  if (!filePath) return

  // Extract just the key part if filePath is a full R2 URL
  let key = filePath

  if (filePath.startsWith("http")) {
    try {
      const url = new URL(filePath)
      key = url.pathname.replace(/^\/+/, "")
    } catch {
      console.warn("⚠️ Invalid URL format:", filePath)
    }
  } else {
    key = filePath.replace(/^\/+/, "")
  }

  // Try delete from R2
  try {
    await s3Client.send(
      new DeleteObjectCommand({
        Bucket: process.env.CLOUDFLARE_R2_BUCKET_NAME!,
        Key: key,
      }),
    )

    console.log("✅ Deleted from R2:", key)
  } catch (err) {
    // catch variables are `unknown` under strict mode — narrow before
    // reading .message (was `err.message`, a compile error / runtime hazard)
    console.warn(
      "⚠️ R2 delete skipped or failed:",
      err instanceof Error ? err.message : err,
    )
  }

  // Try delete from local
  try {
    const fullPath = path.join(process.cwd(), "public", key)
    if (fs.existsSync(fullPath)) {
      await fs.promises.unlink(fullPath)
      console.log("✅ Deleted from local:", fullPath)
    } else {
      console.warn("⚠️ Local file not found:", fullPath)
    }
  } catch (err) {
    console.warn(
      "⚠️ Local delete failed:",
      err instanceof Error ? err.message : err,
    )
  }
}
