Fast apply ignores comment removals

My diff in the chat removes all the comments from the code, but when I apply it to the file, the comment removals are ignored.

Code snippet from the chat:

import google.generativeai as genai
from google import genai as genai2
import os
from modules.tools import gemini_tools_list
from modules.data_types import (
    PromptResponse,
    SimpleToolCall,
    ToolsAndPrompts,
    ThoughtResponse,
    ToolCallResponse,
    BenchPromptResponse,
)
from utils import (
    parse_markdown_backticks,
    timeit,
    MAP_MODEL_ALIAS_TO_COST_PER_MILLION_TOKENS,
)
from dotenv import load_dotenv

load_dotenv()
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))

def get_gemini_cost(model: str, input_tokens: int, output_tokens: int) -> float:
    cost_map = MAP_MODEL_ALIAS_TO_COST_PER_MILLION_TOKENS.get(model)
    if not cost_map:
        return 0.0

    input_cost = (input_tokens / 1_000_000) * cost_map["input"]
    output_cost = (output_tokens / 1_000_000) * cost_map["output"]
    return round(input_cost + output_cost, 6)

def thought_prompt(prompt: str, model: str) -> ThoughtResponse:
    try:
        if model != "gemini-2.0-flash-thinking-exp-01-21":
            raise ValueError(f"Invalid model for thought prompts: {model}")

        config = {"thinking_config": {"include_thoughts": True}}
        client = genai2.Client(
            api_key=os.getenv("GEMINI_API_KEY"), 
            http_options={"api_version": "v1alpha"}
        )

        with timeit() as t:
            response = client.models.generate_content(
                model=model, contents=prompt, config=config
            )
            elapsed_ms = t()

            thoughts = []
            response_content = []
            for part in response.candidates[0].content.parts:
                if hasattr(part, "thought") and part.thought:
                    thoughts.append(part.text)
                else:
                    response_content.append(part.text)

        return ThoughtResponse(
            thoughts="\n".join(thoughts),
            response="\n".join(response_content),
            error=None,
        )

    except Exception as e:
        print(f"Gemini thought error: {str(e)}")
        return ThoughtResponse(thoughts=f"Error: {str(e)}", response="", error=str(e))

def text_prompt(prompt: str, model: str) -> PromptResponse:
    try:
        with timeit() as t:
            gemini_model = genai.GenerativeModel(model_name=model)
            response = gemini_model.generate_content(prompt)
            elapsed_ms = t()

            metadata = response._result.usage_metadata
            cost = get_gemini_cost(
                model, 
                metadata.prompt_token_count, 
                metadata.candidates_token_count
            )

        return PromptResponse(
            response=response.text,
            runTimeMs=elapsed_ms,
            inputAndOutputCost=cost,
        )
    except Exception as e:
        print(f"Gemini error: {str(e)}")
        return PromptResponse(response=f"Error: {str(e)}", runTimeMs=0.0, inputAndOutputCost=0.0)

def bench_prompt(prompt: str, model: str) -> BenchPromptResponse:
    try:
        with timeit() as t:
            gemini_model = genai.GenerativeModel(model_name=model)
            response = gemini_model.generate_content(prompt)
            elapsed_ms = t()

            metadata = response._result.usage_metadata
            cost = get_gemini_cost(
                model,
                metadata.prompt_token_count,
                metadata.candidates_token_count
            )

        return BenchPromptResponse(
            response=response.text,
            tokens_per_second=0.0,
            provider="gemini",
            total_duration_ms=elapsed_ms,
            load_duration_ms=0.0,
            inputAndOutputCost=cost,
        )
    except Exception as e:
        print(f"Gemini error: {str(e)}")
        return BenchPromptResponse(
            response=f"Error: {str(e)}",
            tokens_per_second=0.0,
            provider="gemini",
            total_duration_ms=0.0,
            load_duration_ms=0.0,
            inputAndOutputCost=0.0,
            errored=True,
        )

def tool_prompt(prompt: str, model: str, force_tools: list[str]) -> ToolCallResponse:
    with timeit() as t:
        if "-json" in model:
            base_model = "gemini-exp-1114" if model == "gemini-exp-1114-json" else model.replace("-json", "")
            gemini_model = genai.GenerativeModel(model_name=base_model)
            chat = gemini_model.start_chat()
            response = chat.send_message(prompt)

            try:
                parsed_response = ToolsAndPrompts.model_validate_json(parse_markdown_backticks(response.text))
                tool_calls = [
                    SimpleToolCall(tool_name=tap.tool_name, params={"prompt": tap.prompt})
                    for tap in parsed_response.tools_and_prompts
                ]
            except Exception as e:
                print(f"Failed to parse JSON response: {e}")
                tool_calls = []
        else:
            gemini_model = genai.GenerativeModel(model_name=model, tools=gemini_tools_list)
            chat = gemini_model.start_chat(enable_automatic_function_calling=True)
            response = chat.send_message(prompt)

            tool_calls = [
                SimpleToolCall(tool_name=part.function_call.name, params=part.function_call.args)
                for part in response.parts
                if hasattr(part, "function_call")
            ]

        metadata = response._result.usage_metadata
        cost = get_gemini_cost(model, metadata.prompt_token_count, metadata.candidates_token_count)

    return ToolCallResponse(tool_calls=tool_calls, runTimeMs=t(), inputAndOutputCost=cost)

How it looks after apply:

Hey, try adding to your Rules for AI section that it should never delete your comments, and see how far that gets you!
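
For what it's worth, that kind of rule would look something like this under Settings → Rules for AI (the exact wording below is just a sketch, and it may not change how the apply model behaves):

Never delete or rewrite my code comments unless I explicitly ask you to.
When a diff I approve removes or changes comments, apply those changes exactly as written.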