Hated version 3 and decided to reinstall 2.6 on Mac.
But then realized all my agent/chat history is GONE. What a disaster.
How to retrieve everything back?
Hated version 3 and decided to reinstall 2.6 on Mac.
But then realized all my agent/chat history is GONE. What a disaster.
How to retrieve everything back?
When you downgrade to version 2.6 it won’t have the chat history, but if you open version 3 again you should still have it there.
I have the same thing, just trying to figure out how to downgrade to version 2.6 and preserve the chat history. Version 3 is unusable
yep - it worked. thank you! phew.
from their support bot:
Chat history in Cursor is stored locally on your device for privacy. Unfortunately, when you downgraded from version 3 to 2.6, the chat history was likely lost during the reinstallation process.
We’re not able to restore previous conversations since they’re stored locally and not on our servers. The downgrade may have overwritten or made the version 3 chat data incompatible with version 2.6.
For future reference, you can export important chats by clicking the “…” button on any conversation before making major changes.
Let me know if you need anything else!
I had Cursor write a migration script that converts the SQLite data from 3.0 back to the 2.6 format so the chat history reappears.
The script was generated by reverse-engineering Cursor itself, so it may contain inaccuracies. Please use it with caution.
Tested on macOS 26.2, Cursor 3.0.4 → 2.6.2
python3 migrate_cursor_30_to_26.py
#!/usr/bin/env python3
"""
Cursor 3.0 → 2.6 data-format migration script.

Fixes chat history being invisible after downgrading from Cursor 3.0 to 2.6.

Core transformations:
1. Backfill the top-level fields Cursor 2.6 requires in the global
   composerHeaders, and re-index composerData entries missing from the index.
2. Write messages stored under bubbleId:<composerId>:<bubbleId> back into
   composerData.conversationMap.
3. Sync the global conversation index into each workspace-level
   composer.composerData (which is where 2.6 reads it from).

Quit Cursor completely before running this script!
"""
import json
import os
import shutil
import sqlite3
import subprocess
import sys
import time
from pathlib import Path

# Global Cursor state database (macOS path).
GLOBAL_DB_PATH = os.path.expanduser(
    "~/Library/Application Support/Cursor/User/globalStorage/state.vscdb"
)
# Per-workspace state databases live in subdirectories of this folder.
WORKSPACE_STORAGE_DIR = os.path.expanduser(
    "~/Library/Application Support/Cursor/User/workspaceStorage"
)
# Timestamped suffix so repeated runs never clobber an earlier backup.
BACKUP_SUFFIX = f".backup-pre-migration-{int(time.time())}"
def backup_db(db_path: str) -> str:
    """Copy a SQLite database (plus any -shm/-wal sidecar files) to a
    timestamped backup location and return the main backup path."""
    dest = db_path + BACKUP_SUFFIX
    # SQLite in WAL mode keeps state in sidecar files too; back them up
    # alongside the main file so the backup is consistent.
    for suffix in ("", "-shm", "-wal"):
        source = db_path + suffix
        if os.path.exists(source):
            shutil.copy2(source, dest + suffix)
    return dest
def fix_composer_headers(conn: sqlite3.Connection) -> list[dict]:
    """Backfill fields Cursor 2.6 needs in the global 'composer.composerHeaders'
    entry that 3.0 omits, and re-add conversations missing from the index.

    Returns the (possibly extended) allComposers list so the caller can sync
    it to workspace-level storage; empty list if no headers entry exists.
    """
    row = conn.execute(
        "SELECT value FROM ItemTable WHERE key = 'composer.composerHeaders'"
    ).fetchone()
    if not row:
        print("[跳过] composerHeaders 不存在")
        return []
    headers = json.loads(row[0])
    all_composers = headers.get("allComposers", [])
    indexed_ids = {c["composerId"] for c in all_composers}
    # Find composerData rows in cursorDiskKV that are absent from the index
    # and append them back into allComposers.
    kv_rows = conn.execute(
        "SELECT key, value FROM cursorDiskKV WHERE key LIKE 'composerData:%'"
    ).fetchall()
    added_to_index = 0
    for key, value in kv_rows:
        composer_id = key[len("composerData:"):]
        if composer_id in indexed_ids:
            continue
        if not value:
            continue
        try:
            data = json.loads(value)
        except (json.JSONDecodeError, TypeError):
            continue
        name = data.get("name", "")
        if not name:
            # Unnamed entries look like empty drafts; do not re-index them.
            continue
        # Minimal header entry in the 2.6 shape. Counters and flags default
        # to "empty" values since the 3.0 data does not carry them here.
        header_entry = {
            "type": "head",
            "composerId": composer_id,
            "name": name,
            "createdAt": data.get("createdAt", 0),
            "lastUpdatedAt": data.get("lastUpdatedAt", data.get("createdAt", 0)),
            # NOTE(review): in 3.0 data unifiedMode sometimes lives under
            # richText; fall back to the top-level field, then to "agent".
            "unifiedMode": data.get("richText", {}).get("unifiedMode", "agent") if isinstance(data.get("richText"), dict) else data.get("unifiedMode", "agent"),
            "forceMode": "edit",
            "hasUnreadMessages": False,
            "totalLinesAdded": 0,
            "totalLinesRemoved": 0,
            "isArchived": False,
            "isDraft": False,
            "isWorktree": False,
            "worktreeStartedReadOnly": False,
            "isSpec": False,
            "isProject": False,
            "isBestOfNSubcomposer": False,
            "numSubComposers": 0,
            "referencedPlans": [],
            "branches": [],
            "hasBlockingPendingActions": False,
            "filesChangedCount": 0,
        }
        ws_id = data.get("workspaceIdentifier")
        if ws_id:
            header_entry["workspaceIdentifier"] = ws_id
        all_composers.append(header_entry)
        indexed_ids.add(composer_id)
        added_to_index += 1
    if added_to_index > 0:
        print(f"[修复] 从 composerData 中找回 {added_to_index} 个缺失的对话加入索引")
    first_id = all_composers[0]["composerId"] if all_composers else None
    changed = added_to_index > 0
    # Top-level fields 2.6 expects but 3.0 may have dropped; only fill in
    # the ones that are missing so existing selections are preserved.
    for field, default in [
        ("selectedComposerIds", [first_id] if first_id else []),
        ("lastFocusedComposerIds", [first_id] if first_id else []),
        ("hasMigratedComposerData", True),
        ("hasMigratedMultipleComposers", True),
        ("selectedComposerHandles", {}),
    ]:
        if field not in headers:
            headers[field] = default
            changed = True
    headers["allComposers"] = all_composers
    if changed:
        conn.execute(
            "UPDATE ItemTable SET value = ? WHERE key = 'composer.composerHeaders'",
            (json.dumps(headers, ensure_ascii=False),),
        )
        print(f"[修复] composerHeaders 已更新,共 {len(all_composers)} 个对话")
    else:
        print(f"[跳过] composerHeaders 无需修改 ({len(all_composers)} 个对话)")
    return all_composers
def populate_conversation_maps(conn: sqlite3.Connection) -> int:
    """Write bubble messages, stored as separate cursorDiskKV rows in 3.0,
    back into each composerData's conversationMap.

    Returns the number of composerData rows that were updated.
    """
    rows = conn.execute(
        "SELECT key, value FROM cursorDiskKV WHERE key LIKE 'composerData:%'"
    ).fetchall()
    if not rows:
        print("[跳过] 没有 composerData 条目")
        return 0
    updated = 0
    skipped = 0
    errors = 0
    for key, value in rows:
        composer_id = key[len("composerData:"):]
        if value is None:
            skipped += 1
            continue
        try:
            data = json.loads(value)
        except (json.JSONDecodeError, TypeError):
            print(f" [错误] {composer_id}: JSON 解析失败,跳过")
            errors += 1
            continue
        conversation_map = data.get("conversationMap", {})
        conv_headers = data.get("fullConversationHeadersOnly", [])
        if not conv_headers:
            # No message headers at all — nothing to restore.
            skipped += 1
            continue
        # Bubble IDs whose message payload is already present (and non-empty)
        # in conversationMap.
        existing_ids = {
            h.get("bubbleId") for h in conv_headers
            if h.get("bubbleId") in conversation_map and conversation_map[h["bubbleId"]]
        }
        if len(existing_ids) == len(conv_headers) and len(conv_headers) > 0:
            # Every header already has its message; skip this conversation.
            skipped += 1
            continue
        bubble_rows = conn.execute(
            "SELECT key, value FROM cursorDiskKV WHERE key LIKE ?",
            (f"bubbleId:{composer_id}:%",),
        ).fetchall()
        if not bubble_rows:
            skipped += 1
            continue
        newly_added = 0
        already_existed = 0
        for bkey, bvalue in bubble_rows:
            # Key format: bubbleId:<composerId>:<bubbleId>
            bubble_id = bkey.split(":")[-1]
            if bubble_id in conversation_map and conversation_map[bubble_id]:
                already_existed += 1
                continue
            try:
                bubble_data = json.loads(bvalue)
                conversation_map[bubble_id] = bubble_data
                newly_added += 1
            except (json.JSONDecodeError, TypeError):
                # Unparseable bubble payload — best effort, move on.
                continue
        if newly_added == 0:
            skipped += 1
            continue
        data["conversationMap"] = conversation_map
        conn.execute(
            "UPDATE cursorDiskKV SET value = ? WHERE key = ?",
            (json.dumps(data, ensure_ascii=False), key),
        )
        name = data.get("name", composer_id[:8])
        if already_existed > 0:
            print(f" [填充] {name}: 新增 {newly_added} 条, 跳过已存在 {already_existed} 条 (共 {len(conv_headers)} 个 header)")
        else:
            print(f" [填充] {name}: {newly_added}/{len(conv_headers)} 条消息写入 conversationMap")
        updated += 1
    print(f"[结果] 更新 {updated} 个对话, 跳过 {skipped} 个, 错误 {errors} 个")
    return updated
def sync_workspace_composer_data(global_conn: sqlite3.Connection, all_composers: list[dict]) -> int:
    """Sync the global conversation index into each workspace-level
    'composer.composerData' entry (Cursor 2.6 reads the list from there).

    Returns the total number of conversations newly added across workspaces.
    """
    if not os.path.isdir(WORKSPACE_STORAGE_DIR):
        print("[跳过] workspaceStorage 目录不存在")
        return 0
    # Group composers by workspace ID.
    ws_groups: dict[str, list[dict]] = {}
    no_ws_composers: list[dict] = []
    for composer in all_composers:
        ws_info = composer.get("workspaceIdentifier", {})
        ws_id = ws_info.get("id", "")
        if ws_id:
            ws_groups.setdefault(ws_id, []).append(composer)
        else:
            no_ws_composers.append(composer)
    # Try to infer the workspace from composerData's workspaceDirectory field
    # (some conversations have no workspaceIdentifier in allComposers but do
    # carry one inside composerData).
    for composer in no_ws_composers:
        cid = composer["composerId"]
        row = global_conn.execute(
            "SELECT value FROM cursorDiskKV WHERE key = ?",
            (f"composerData:{cid}",),
        ).fetchone()
        if not row or not row[0]:
            continue
        try:
            data = json.loads(row[0])
            ws_dir = data.get("workspaceDirectory", "")
            if ws_dir:
                for ws_id_candidate in os.listdir(WORKSPACE_STORAGE_DIR):
                    ws_json_path = os.path.join(WORKSPACE_STORAGE_DIR, ws_id_candidate, "workspace.json")
                    if os.path.exists(ws_json_path):
                        with open(ws_json_path) as f:
                            ws_folder = json.load(f).get("folder", "")
                        # Substring match: workspace.json's folder is a URI,
                        # while workspaceDirectory is a plain path.
                        if ws_folder and ws_dir in ws_folder:
                            ws_groups.setdefault(ws_id_candidate, []).append(composer)
                            break
        except (json.JSONDecodeError, TypeError, OSError):
            continue
    total_synced = 0
    for ws_dir_name in os.listdir(WORKSPACE_STORAGE_DIR):
        ws_db_path = os.path.join(WORKSPACE_STORAGE_DIR, ws_dir_name, "state.vscdb")
        ws_json_path = os.path.join(WORKSPACE_STORAGE_DIR, ws_dir_name, "workspace.json")
        if not os.path.exists(ws_db_path):
            continue
        # Read the workspace folder path (used only for display).
        ws_folder = ""
        if os.path.exists(ws_json_path):
            try:
                with open(ws_json_path) as f:
                    ws_folder = json.load(f).get("folder", "").replace("file://", "")
            except (json.JSONDecodeError, OSError):
                pass
        ws_label = ws_folder.split("/")[-1] if ws_folder else ws_dir_name
        # Conversations that belong to this workspace per the global index.
        target_composers = ws_groups.get(ws_dir_name, [])
        # Also match by fsPath (workspaceIdentifier.uri.fsPath corresponds to
        # workspace.json's folder).
        if ws_folder:
            ws_folder_decoded = ws_folder.replace("file://", "")
            for composer in all_composers:
                ws_info = composer.get("workspaceIdentifier", {})
                uri = ws_info.get("uri", {})
                fs_path = uri.get("fsPath", "") if isinstance(uri, dict) else ""
                if fs_path and ws_folder_decoded and fs_path == ws_folder_decoded:
                    if not any(c["composerId"] == composer["composerId"] for c in target_composers):
                        target_composers.append(composer)
        if not target_composers:
            continue
        # Back up the workspace DB before modifying it.
        backup_db(ws_db_path)
        ws_conn = sqlite3.connect(ws_db_path)
        ws_conn.execute("PRAGMA journal_mode=WAL")
        try:
            row = ws_conn.execute(
                "SELECT value FROM ItemTable WHERE key = 'composer.composerData'"
            ).fetchone()
            if row:
                ws_data = json.loads(row[0])
            else:
                # No workspace-level entry yet — start from an empty 2.6 shape.
                ws_data = {
                    "allComposers": [],
                    "selectedComposerIds": [],
                    "lastFocusedComposerIds": [],
                    "hasMigratedComposerData": True,
                    "hasMigratedMultipleComposers": True,
                }
            existing_ids = {c["composerId"] for c in ws_data.get("allComposers", [])}
            newly_added = 0
            for composer in target_composers:
                if composer["composerId"] not in existing_ids:
                    ws_data.setdefault("allComposers", []).append(composer)
                    existing_ids.add(composer["composerId"])
                    newly_added += 1
            if newly_added > 0:
                # Sort by lastUpdatedAt, newest first.
                ws_data["allComposers"].sort(
                    key=lambda c: c.get("lastUpdatedAt", c.get("createdAt", 0)),
                    reverse=True,
                )
                first_id = ws_data["allComposers"][0]["composerId"]
                # setdefault: only fill these in when absent, preserving any
                # existing selection state.
                ws_data.setdefault("selectedComposerIds", [first_id])
                ws_data.setdefault("lastFocusedComposerIds", [first_id])
                ws_data.setdefault("hasMigratedComposerData", True)
                ws_data.setdefault("hasMigratedMultipleComposers", True)
                if row:
                    ws_conn.execute(
                        "UPDATE ItemTable SET value = ? WHERE key = 'composer.composerData'",
                        (json.dumps(ws_data, ensure_ascii=False),),
                    )
                else:
                    ws_conn.execute(
                        "INSERT INTO ItemTable (key, value) VALUES (?, ?)",
                        ("composer.composerData", json.dumps(ws_data, ensure_ascii=False)),
                    )
                ws_conn.commit()
                total_all = len(ws_data["allComposers"])
                print(f" [同步] {ws_label}: 新增 {newly_added} 个对话 (总计 {total_all} 个)")
                total_synced += newly_added
            else:
                print(f" [跳过] {ws_label}: 已包含所有 {len(existing_ids)} 个对话")
        except Exception as e:
            # Keep each workspace DB transactional: roll back on any failure
            # and continue with the next workspace.
            ws_conn.rollback()
            print(f" [错误] {ws_label}: {e}")
        finally:
            ws_conn.close()
    return total_synced
def main():
    """Run the 3.0 → 2.6 migration end to end: refuse to run while Cursor is
    open, back up the global DB, fix the global index, populate
    conversationMap entries, and sync the index to workspace-level storage."""
    if not os.path.exists(GLOBAL_DB_PATH):
        print(f"[错误] 数据库文件不存在: {GLOBAL_DB_PATH}")
        sys.exit(1)
    print("=" * 60)
    print("Cursor 3.0 → 2.6 聊天记录迁移工具")
    print("=" * 60)
    print()
    # Detect a running Cursor via its helper processes: if Cursor is still
    # open it would flush its in-memory state over the migrated data on exit.
    result = subprocess.run(["pgrep", "-f", "Cursor Helper"], capture_output=True)
    if result.returncode == 0:
        print("[错误] 检测到 Cursor 仍在运行!")
        print("[错误] 必须先完全退出 Cursor 再执行迁移,否则 Cursor 会用内存中的旧数据覆盖迁移结果。")
        print("[提示] macOS: Cmd+Q 退出 Cursor,或运行 'killall Cursor'")
        answer = input("是否强制继续?(y/N): ").strip().lower()
        if answer != "y":
            print("已取消。请退出 Cursor 后重试。")
            sys.exit(0)
    print()
    backup_path = backup_db(GLOBAL_DB_PATH)
    print(f"[备份] 全局 DB 已备份到 {backup_path}")
    print()
    conn = sqlite3.connect(GLOBAL_DB_PATH)
    conn.execute("PRAGMA journal_mode=WAL")
    try:
        print("[步骤 1/3] 修复全局 composerHeaders...")
        all_composers = fix_composer_headers(conn)
        print()
        print("[步骤 2/3] 填充 conversationMap...")
        populate_conversation_maps(conn)
        print()
        # Commit global-DB changes before touching workspace DBs.
        conn.commit()
        print("[步骤 3/3] 同步到 workspace 级别的 composer.composerData...")
        if all_composers:
            synced = sync_workspace_composer_data(conn, all_composers)
            print(f"[结果] 共同步 {synced} 个对话到 workspace 级别")
        else:
            print("[跳过] 全局 composerHeaders 中没有对话")
        print()
        print("[完成] 迁移成功!请启动 Cursor 2.6 查看历史聊天记录。")
        print(f"[备份] 如需回滚全局 DB,将 {backup_path} 复制回原路径即可。")
        print(f"[备份] workspace 级别的备份在各 workspaceStorage/<id>/ 目录下。")
    except Exception as e:
        # Roll back any uncommitted global-DB work and point at the backup.
        conn.rollback()
        print(f"\n[错误] 迁移失败: {e}")
        print(f"[回滚] 全局数据库未修改。备份位于: {backup_path}")
        import traceback
        traceback.print_exc()
    finally:
        conn.close()
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
Did this script work for you? It's not working for me.
How do you reinstall the previous version?
Thank you for your feedback. I’ve hidden a post that linked to an external download source, as our guidelines require that only official sources be shared here. You can download version 2.6 directly from https://cursor.com/downloads.
As a general reminder, please exercise caution when running any scripts that modify your local database, and always create a backup beforehand.
I’d be curious to hear more about what specific issues you encountered with version 3. It’s worth noting that the classic Editor Window remains available in version 3 if that was a concern!
Thanks - somehow i managed to get back the classic editor window.
My main concern was that I had everything neatly arranged on one screen in v2.6 — editor, AI/chat panel, terminal, and explorer. After installing v3, some of these disappeared, which freaked me out. I rely on all of them all the time, so having to click around to access them in v3 wasn’t ideal (maybe I’m mistaken).