File size: 11,198 Bytes
3b78b4e
ce204af
ff3c585
db54eb2
ce204af
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
317bb3f
 
 
 
 
 
ce204af
 
 
 
e7abdcc
 
ce204af
 
 
 
 
 
 
 
 
 
85684e9
ce204af
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e7abdcc
 
 
 
 
 
 
ce204af
 
 
 
 
 
e7abdcc
 
ce204af
2513120
ce204af
 
c4acae9
 
 
ce204af
c4acae9
 
ce204af
c4acae9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2ae12ef
ce204af
 
 
 
 
d866884
6776e04
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ce204af
6776e04
 
 
 
 
 
b03e81c
6776e04
 
ff3c585
6776e04
 
 
 
 
 
ff3c585
6776e04
 
 
 
 
 
 
 
 
 
 
 
 
 
2ae12ef
6776e04
 
 
 
 
4244096
6776e04
 
 
 
 
 
4244096
6776e04
 
 
 
 
4244096
6776e04
 
 
 
 
 
4244096
6776e04
317bb3f
6776e04
 
 
 
 
 
 
 
 
d866884
6776e04
 
db54eb2
 
 
 
 
 
 
 
 
 
 
e7abdcc
db54eb2
 
 
 
 
 
 
 
 
 
 
6776e04
 
 
 
 
 
 
 
 
 
 
 
 
 
ce204af
 
 
ff3c585
 
 
ce204af
 
 
5a2f1c7
 
 
 
ce204af
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
import uuid
import os
import gradio as gr
from loguru import logger
from langchain_core.messages import HumanMessage, AIMessage

# Assuming mcpc_graph.py and its setup_graph function are in the same directory.
from mcpc_graph import setup_graph


async def chat_logic(
    message,
    history,
    session_state,
    github_repo,
    github_token,
    trello_api,
    trello_token,
    hf_token=None,
):
    """
    Handles the main chat logic, including environment setup and streaming responses.

    Args:
        message (str): The user's input message.
        history (list): The chat history managed by Gradio.
        session_state (dict): A dictionary to maintain state across calls for a session.
        github_repo (str): The GitHub repository (username/repo).
        github_token (str): The GitHub personal access token.
        trello_api (str): The Trello API key.
        trello_token (str): The Trello API token.
        hf_token (str | None): Unused placeholder. The ChatInterface below binds no
            component to this parameter, so it defaults to None; the effective token
            is always read from the NEBIUS_API_KEY environment variable.

    Yields:
        str: The bot's streaming response or an interruption message.
    """

    # The parameter value (if any) is deliberately ignored: the token must come
    # from the environment (Gradio secret or local env var).
    hf_token = os.getenv("NEBIUS_API_KEY")
    if not hf_token:
        yield "Error: LLM token not found. Please set the token as environment variable or configure it as a Gradio secret."
        return

    # Retrieve the initialized graph and interrupt handler from the session state.
    app = session_state.get("app")
    human_resume_node = session_state.get("human_resume_node")

    # Whether the repository still needs to be injected into the prompt
    # (done exactly once per session, on the first turn).
    first_turn = session_state.get("first_turn_done") is None

    # If the graph is not initialized, this is the first message of the session.
    # We configure the environment and set up the graph.
    if app is None:
        # Check if all required fields have been filled out.
        if not all([github_repo, github_token, trello_api, trello_token, hf_token]):
            yield "Error: Please provide all API keys and the GitHub repository in the 'API Configuration' section before starting the chat."
            return

        # Set environment variables for the current process.
        os.environ["GITHUB_REPO"] = github_repo
        os.environ["NEBIUS_API_KEY"] = hf_token

        # Asynchronously initialize the graph and store it in the session state
        # to reuse it for subsequent messages in the same session.
        app, human_resume_node = await setup_graph(
            github_token=github_token, trello_api=trello_api, trello_token=trello_token
        )
        session_state["app"] = app
        session_state["human_resume_node"] = human_resume_node

    # Ensure a unique thread_id for the conversation.
    thread_id = session_state.get("thread_id")
    if not thread_id:
        thread_id = str(uuid.uuid4())
        session_state["thread_id"] = thread_id

    # Check if the current message is a response to a human interruption.
    is_message_command = session_state.get("is_message_command", False)

    config = {
        "configurable": {"thread_id": thread_id},
        "recursion_limit": 100,
    }

    if first_turn:
        # Append the repository so the agent knows its target; only on turn one.
        prompt_for_agent = f"{message}\n\nGITHUB REPOSITORY: {github_repo}"
        session_state["first_turn_done"] = True  # mark as used
    else:
        prompt_for_agent = message

    if is_message_command:
        # The user is providing feedback to an interruption.
        app_input = human_resume_node.call_human_interrupt_agent(message)
        session_state["is_message_command"] = False
    else:
        # A standard user message.
        logger.debug(f"Prompt for agent: '{prompt_for_agent}'")
        app_input = {"messages": [HumanMessage(content=prompt_for_agent)]}

    # Stream the graph's response. Intermediate AIMessages are buffered so the
    # user sees exactly one final assistant chunk (prevents duplication).
    final_reply = None  # buffer for the last AIMessage we see

    async for res in app.astream(app_input, config=config, stream_mode="values"):
        # ── 1) Handle human-interrupts immediately ─────────────────────────
        if "__interrupt__" in res:
            session_state["is_message_command"] = True
            # yield the interrupt text straight away
            yield res["__interrupt__"][0].value
            return  # stop processing until user replies

        # ── 2) Remember the latest AIMessage we’ve seen ────────────────────
        if "messages" in res:
            last = res["messages"][-1]
            if isinstance(last, AIMessage):
                final_reply = last.content

    # ── 3) After the graph stops, emit the buffered final answer ───────────
    if final_reply is not None:
        yield final_reply  # exactly one assistant chunk
    else:
        # fail-safe: graph produced no AIMessage
        yield "✅ Done"


def create_gradio_app():
    """Creates and launches the Gradio web application.

    Builds the themed Blocks layout (API configuration column + chatbot column),
    wires the clear-chat button to a full session reset, and launches the app
    with queuing enabled.
    """
    print("Launching Gradio app...")

    # AVATAR_BOT = "pmcp/assets/pmcp_bot.jpeg"

    theme = gr.themes.Soft(
        primary_hue="green",
        secondary_hue="teal",
        neutral_hue="slate",
        font=["Arial", "sans-serif"]
        ).set(
            body_background_fill="linear-gradient(135deg,#e8f5e9 0%,#f4fcf4 100%)",
            block_background_fill="white",
            block_border_width="1px",
            block_shadow="*shadow_drop_lg",
            button_primary_background_fill="#02B900",
            button_primary_text_color="white",
            button_secondary_background_fill="#35C733",
            button_secondary_text_color="white",
        )

    # Extra CSS (font, bubble colors, subtle animations)
    custom_css = """
    body { font-family: 'Inter', sans-serif; }
    #header { text-align:center; margin-bottom: 1.25rem; }
    #header h1 { font-size:2.25rem; font-weight:700; background:linear-gradient(90deg,#02B900 0%,#35C733 100%); -webkit-background-clip:text; color:transparent; }
    #chatbot .message.user   { background:#4F814E; }
    #chatbot .message.assistant { background:#F9FDF9; }
    """

    with gr.Blocks( theme=theme, 
                    title="LangGraph Multi-Agent Chat",
                    css=custom_css,
                    fill_height=True,) as demo:

        # Per-browser-session dict shared with chat_logic (holds the compiled
        # graph, thread id, interrupt flags, ...).
        session_state = gr.State({})

        gr.HTML(
                        """
            <div id='header'>
            <h1>PMCP — Agentic Project Management</h1>
            <p class='tagline'>Manage your projects with PMCP, a multi-agent system capable to interact with Trello and GitHub.</p>
            </div>
            """
                    )

        with gr.Row():
            with gr.Column(scale=1):
                with gr.Accordion("🔑 API Configuration", open=True):
                    gr.Markdown(
                        "We set up a [Trello public board](https://trello.com/b/Z2MAnn7H/pmcp-agent-ai) and a [Github repository](https://github.com/PMCPAgentAI/brainrot_image_generation) so you can experiment this agent. If you want to try with your account, you can edit this configuration with your API keys."
                    )
                    github_repo = gr.Textbox(
                        label="📁 GitHub Repo",
                        placeholder="e.g., username/repository",
                        info="The target repository for GitHub operations.",
                        value=os.getenv("GITHUB_REPO_NAME")
                    )
                    github_token = gr.Textbox(
                        label="🔐 GitHub Token",
                        placeholder="ghp_xxxxxxxxxxxx",
                        type="password",
                        info="A fine-grained personal access token.",
                        value=os.getenv("GITHUB_API_KEY")
                    )
                    trello_api = gr.Textbox(
                        label="🗂️ Trello API Key",
                        placeholder="Your Trello API key",
                        info="Your API key from trello.com/power-ups/admin.",
                        value=os.getenv("TRELLO_API_KEY")
                    )
                    trello_token = gr.Textbox(
                        label="🔐 Trello Token",
                        placeholder="Your Trello token",
                        type="password",
                        info="A token generated from your Trello account.",
                        value=os.getenv("TRELLO_TOKEN")
                    )

            with gr.Column(scale=2):
                chatbot = gr.Chatbot(
                    [],
                    elem_id="chatbot",
                    bubble_full_width=False,
                    height=600,
                    label="Multi-Agent Chat",
                    show_label=False,
                    avatar_images=(None, None)
                    )

                def _reset_agent(state: dict):
                    """
                    Runs when the user clicks the 🗑 button.

                    Keeps the API credentials that live in the Textboxes and the env-vars,
                    but forgets everything that makes the current LangGraph session unique,
                    so the next user message starts from the root node again.
                    """
                    logger.info("Resetting the agent")
                    # Clear ALL session-unique keys. Leaving thread_id (or the
                    # stale interrupt handler) behind would make the "new" chat
                    # silently resume the old LangGraph thread.
                    state["app"] = None
                    state["human_resume_node"] = None
                    state["first_turn_done"] = None
                    state["thread_id"] = None
                    state["is_message_command"] = False
                    return state

                # Bind the reset to the chatbot's built-in clear event.
                chatbot.clear(
                    _reset_agent,                   # fn
                    inputs=[session_state],         # what the fn receives
                    outputs=[session_state],        # what the fn updates
                )

                gr.ChatInterface(
                    fn=chat_logic,
                    chatbot=chatbot,
                    additional_inputs=[
                        session_state,
                        github_repo,
                        github_token,
                        trello_api,
                        trello_token,
                    ],
                    title=None,
                    description="Ask **PMCP** to create tickets, open PRs, or coordinate tasks across your boards and repositories.",
                )

    demo.queue()
    demo.launch(debug=True)


if __name__ == "__main__":
    try:
        # The main function to create the app is now synchronous.
        # Gradio handles the async calls within the chat logic.
        import subprocess
        import sys

        # Install the project in editable mode via the *current* interpreter
        # (`python -m pip`), so the package lands in the same environment the
        # app runs in — a bare "pip" on PATH may belong to a different Python.
        subprocess.run([sys.executable, "-m", "pip", "install", "-e", "."], check=False)

        create_gradio_app()
    except KeyboardInterrupt:
        print("\nShutting down Gradio app.")
    except Exception as e:
        # Top-level boundary: report and exit cleanly instead of a traceback.
        print(f"An error occurred: {e}")