From 3590c43793a7036cef23baf33472c93f80508e9a Mon Sep 17 00:00:00 2001
From: openhands
Date: Sat, 18 Oct 2025 15:19:57 +0200
Subject: [PATCH 01/37] docs: LLM profiles design + example profile

---
 docs/llm_profiles.md               | 42 ++++++++++++++++++++++++++++++
 examples/llm-profiles/example.json | 11 ++++++++
 2 files changed, 53 insertions(+)
 create mode 100644 docs/llm_profiles.md
 create mode 100644 examples/llm-profiles/example.json

diff --git a/docs/llm_profiles.md b/docs/llm_profiles.md
new file mode 100644
index 0000000000..e87b2ab6d2
--- /dev/null
+++ b/docs/llm_profiles.md
@@ -0,0 +1,42 @@
+LLM Profiles (design)
+
+Overview
+
+This document records the design decision for "LLM profiles" (named LLM configuration files) and how they map to the existing LLM model and persistence in the SDK.
+
+Key decisions
+
+- Reuse the existing LLM Pydantic model schema. A profile file is simply the JSON dump of an LLM instance (the same shape produced by LLM.model_dump(exclude_none=True) and consumed by LLM.load_from_json).
+- Storage location: ~/.openhands/llm-profiles/<profile_name>.json. The profile name is the filename (without extension) used to refer to the profile.
+- Do not change ConversationState or Agent serialization format for now. Profiles are a convenience for creating LLM instances and registering them in the runtime LLMRegistry.
+- Secrets: do NOT store plaintext API keys in profile files by default. Prefer referencing the env var (via LLM.load_from_env) or keep the API key in the runtime SecretsManager. The ProfileManager.save_profile API will expose an include_secrets flag; default False.
+- LLM.service_id semantics: keep current behavior (a small set of runtime "usage" identifiers such as 'agent', 'condenser', 'title-gen', etc.). Do not use service_id as the profile name. We will evaluate a rename (service_id -> usage_id) in a separate task (see agent-sdk-23).
+
+ProfileManager API (summary)
+
+- list_profiles() -> list[str]
+- load_profile(name: str) -> LLM
+- save_profile(name: str, llm: LLM, include_secrets: bool = False) -> str (path)
+- register_all(registry: LLMRegistry) -> None
+
+Implementation notes
+
+- Use LLM.load_from_json(path) for loading and llm.model_dump(exclude_none=True) for saving.
+- Default directory: os.path.expanduser('~/.openhands/llm-profiles/')
+- When loading, do not inject secrets. The runtime should reconcile secrets via ConversationState/Agent resolve_diff_from_deserialized or via SecretsManager.
+- When saving, respect the include_secrets flag; if False, ensure secret fields (api_key, aws_* keys) are omitted or masked.
+
+CLI
+
+- Use a single flag: --llm to select a profile for the agent LLM.
+- Also support an environment fallback: OPENHANDS_LLM_PROFILE.
+- Provide commands: `openhands llm list`, `openhands llm show <profile_name>` (redacts secrets).
+
+Migration
+
+- Migration from inline configs to profiles: provide a migration helper script to extract inline LLMs from ~/.openhands/agent_settings.json and conversation base_state.json into ~/.openhands/llm-profiles/<profile_name>.json and update references (manual opt-in by user).
+
+Notes on service_id rename
+
+- There is an ongoing discussion about renaming `LLM.service_id` to a clearer name (e.g., `usage_id` or `token_tracking_id`) because `service_id` is overloaded. We will not rename immediately; agent-sdk-23 will investigate the migration and impact.
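+
+Sketch (illustrative only)
+
+A minimal sketch of the ProfileManager surface summarized above, assuming the import paths and a registry `add` method; the JSON handling and secret-field names follow the notes in this document:
+
+```python
+import json
+import os
+
+from openhands.sdk.llm import LLM, LLMRegistry  # import paths assumed
+
+PROFILE_DIR = os.path.expanduser("~/.openhands/llm-profiles/")
+
+
+class ProfileManager:
+    def list_profiles(self) -> list[str]:
+        # A profile name is the filename without its .json extension.
+        return sorted(
+            f[: -len(".json")] for f in os.listdir(PROFILE_DIR) if f.endswith(".json")
+        )
+
+    def load_profile(self, name: str) -> LLM:
+        # Profiles reuse the LLM schema directly; secrets are not injected here.
+        return LLM.load_from_json(os.path.join(PROFILE_DIR, f"{name}.json"))
+
+    def save_profile(self, name: str, llm: LLM, include_secrets: bool = False) -> str:
+        data = llm.model_dump(exclude_none=True)
+        if not include_secrets:
+            # Omit secret fields rather than writing them to disk.
+            for key in ("api_key", "aws_access_key_id", "aws_secret_access_key"):
+                data.pop(key, None)
+        path = os.path.join(PROFILE_DIR, f"{name}.json")
+        with open(path, "w") as f:
+            json.dump(data, f, indent=2, default=str)
+        return path
+
+    def register_all(self, registry: LLMRegistry) -> None:
+        # Eagerly make every profile available to the runtime registry.
+        for name in self.list_profiles():
+            registry.add(self.load_profile(name))  # registry method name assumed
+```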
+

diff --git a/examples/llm-profiles/example.json b/examples/llm-profiles/example.json
new file mode 100644
index 0000000000..c1ff2e0537
--- /dev/null
+++ b/examples/llm-profiles/example.json
@@ -0,0 +1,11 @@
+{
+  "model": "gpt-4o-mini",
+  "base_url": "https://api.openai.com/v1",
+  "api_key": null,
+  "temperature": 0.0,
+  "max_output_tokens": 1024,
+  "service_id": "agent",
+  "metadata": {
+    "profile_description": "Example profile for local testing (no api_key stored)."
+  }
+}

From 9b1e3dbd369f6e08ede66e6a526ad55faab0e1a1 Mon Sep 17 00:00:00 2001
From: openhands
Date: Sat, 18 Oct 2025 15:38:52 +0200
Subject: [PATCH 02/37] llm: add profile_id field to LLM (profile filename
 identifier)

Co-authored-by: openhands
---
 openhands/sdk/llm/llm.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/openhands/sdk/llm/llm.py b/openhands/sdk/llm/llm.py
index b4a4753bc8..170cfba595 100644
--- a/openhands/sdk/llm/llm.py
+++ b/openhands/sdk/llm/llm.py
@@ -208,6 +208,10 @@ class LLM(BaseModel, RetryMixin, NonNativeToolCallingMixin):
             "Safety settings for models that support them (like Mistral AI and Gemini)"
         ),
     )
+    profile_id: str | None = Field(
+        default=None,
+        description="Optional profile id (filename under ~/.openhands/llm-profiles).",
+    )
     service_id: str = Field(
         default="default",
         description="Unique identifier for LLM. Typically used by LLM registry.",
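A minimal sketch of the intended round-trip for this field, assuming the profile loader from patch 01 populates `profile_id` from the filename (that wiring is not shown in these patches):

```python
import os

from openhands.sdk.llm import LLM

path = os.path.expanduser("~/.openhands/llm-profiles/example.json")
llm = LLM.load_from_json(path)
# If the loader sets it, profile_id mirrors the filename without extension;
# otherwise it stays None and the LLM behaves exactly as before.
print(llm.profile_id)
```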
From 21efefef58f16ce8a37923db8790c78f1524133a Mon Sep 17 00:00:00 2001
From: openhands
Date: Sat, 18 Oct 2025 15:43:43 +0200
Subject: [PATCH 03/37] feat(llm): add ProfileManager and eagerly register
 profiles at conversation startup

- ProfileManager manages ~/.openhands/llm-profiles/*.json (load/save/list/register)
- LocalConversation now calls ProfileManager.register_all to eagerly populate LLMRegistry

Co-authored-by: openhands
---
 .beads/agent-sdk.db                         | Bin 0 -> 180224 bytes
 .worktrees/message-typed                    |   1 +
 .worktrees/native-responses                 |   1 +
 .worktrees/refactor/llm-tool-api-from-main  |   1 +
 .worktrees/responses                        |   1 +
 .worktrees/sonnet-thinking                  |   1 +
 AGENTS.md                                   |   1 +
 CLAUDE.md                                   | 180 ++++
 agent-sdk.workspace.code-workspace          |  14 +
 docs/agent-sdk.workspace.code-workspace     |  14 +
 docs/llm-model-info-and-caps.md             |  56 ++
 docs/llm-refactor.md                        | 110 +++
 log.txt                                     | 825 ++++++++++++++++++
 .../conversation/impl/local_conversation.py |  14 +
 openhands/sdk/llm/profile_manager.py        |  79 ++
 previous.md                                 |  68 ++
 16 files changed, 1366 insertions(+)
 create mode 100644 .beads/agent-sdk.db
 create mode 160000 .worktrees/message-typed
 create mode 160000 .worktrees/native-responses
 create mode 160000 .worktrees/refactor/llm-tool-api-from-main
 create mode 160000 .worktrees/responses
 create mode 160000 .worktrees/sonnet-thinking
 create mode 100644 AGENTS.md
 create mode 100644 CLAUDE.md
 create mode 100644 agent-sdk.workspace.code-workspace
 create mode 100644 docs/agent-sdk.workspace.code-workspace
 create mode 100644 docs/llm-model-info-and-caps.md
 create mode 100644 docs/llm-refactor.md
 create mode 100644 log.txt
 create mode 100644 openhands/sdk/llm/profile_manager.py
 create mode 100644 previous.md

diff --git a/.beads/agent-sdk.db b/.beads/agent-sdk.db
new file mode 100644
index 0000000000000000000000000000000000000000..832265a4a5990e7cb7cdac9479fb82677ca38eb9
GIT binary patch
literal 180224
[binary data omitted]

diff --git a/.worktrees/message-typed b/.worktrees/message-typed
new file mode 160000
index 0000000000..30d52f94b3
--- /dev/null
+++ b/.worktrees/message-typed
@@ -0,0 +1 @@
+Subproject commit 30d52f94b38a2156583ef2cc0698e42335876d1e

diff --git a/.worktrees/native-responses b/.worktrees/native-responses
new file mode 160000
index 0000000000..f867039782
--- /dev/null
+++ b/.worktrees/native-responses
@@ -0,0 +1 @@
+Subproject commit f867039782c66764fff65d8d90286d9ef672091d

diff --git a/.worktrees/refactor/llm-tool-api-from-main b/.worktrees/refactor/llm-tool-api-from-main
new file mode 160000
index 0000000000..99898e73ec
--- /dev/null
+++ b/.worktrees/refactor/llm-tool-api-from-main
@@ -0,0 +1 @@
+Subproject commit 99898e73ec3119a7e988eb586f7d8f97565c9af5

diff --git a/.worktrees/responses b/.worktrees/responses
new file mode 160000
index 0000000000..72a68a1b49
--- /dev/null
+++ b/.worktrees/responses
@@ -0,0 +1 @@
+Subproject commit 72a68a1b49f3cf5a80494092059056e8d34062ee

diff --git a/.worktrees/sonnet-thinking b/.worktrees/sonnet-thinking
new file mode 160000
index 0000000000..e6e480db63
--- /dev/null
+++ b/.worktrees/sonnet-thinking
@@ -0,0 +1 @@
+Subproject commit e6e480db634ba0f9b98818986afa6826f3f66220

diff --git a/AGENTS.md b/AGENTS.md
new file mode 100644
index 0000000000..22644d6fc4
--- /dev/null
+++ b/AGENTS.md
@@ -0,0 +1 @@
+We track work in Beads. Run `bd quickstart` to see how.

diff --git a/CLAUDE.md b/CLAUDE.md
new file mode 100644
index 0000000000..db09863b2f
--- /dev/null
+++ b/CLAUDE.md
@@ -0,0 +1,180 @@
+# CLAUDE.md
+
+This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
+
+## Project Overview
+
+OpenHands Agent SDK enables building software with AI agents. This SDK powers the OpenHands project and allows developers to create custom agents that write code, debug issues, automate tasks, and interact with various tools.
+
+The repository is structured as a **UV workspace** with four main packages:
+- `openhands/sdk`: Core agent functionality, LLM integration, conversation management
+- `openhands/tools`: Built-in tools (bash, file editing, task tracking, browser automation)
+- `openhands/workspace`: Workspace management (local and remote execution environments)
+- `openhands/agent_server`: FastAPI-based REST/WebSocket server for remote agent interactions
+
+## Development Commands
+
+### Environment Setup
+```bash
+# Initial setup (install dependencies + pre-commit hooks)
+make build
+
+# Add new dependencies
+uv add package-name        # Runtime dependency
+uv add --dev package-name  # Development dependency
+```
+
+### Code Quality
+```bash
+# Format code
+make format  # or: uv run ruff format
+
+# Lint and auto-fix
+make lint  # or: uv run ruff check --fix
+
+# Type checking
+uv run pyright  # Runs on pre-commit
+
+# Run all pre-commit hooks
+uv run pre-commit run --all-files
+uv run pre-commit run --files path/to/file.py
+```
+
+### Testing
+```bash
+# Run all tests
+uv run pytest
+
+# Run specific test suites
+uv run pytest tests/sdk/
+uv run pytest tests/tools/
+uv run pytest tests/agent_server/
+uv run pytest tests/cross/
+uv run pytest tests/integration/
+
+# Run with coverage
+uv run pytest --cov=openhands --cov-report=html
+
+# Run specific test file or function
+uv run pytest tests/sdk/test_conversation.py
+uv run pytest tests/sdk/test_conversation.py::test_function_name
+```
+
+### Agent Server
+```bash
+# Build server executable
+make build-server

+# Validate OpenAPI schema
+make test-server-schema
+```
+
+### Running Examples
+```bash
+# Set API key first
+export LLM_API_KEY=your_key_here
+
+# Run examples (standalone SDK usage)
+uv run python examples/01_standalone_sdk/*.py
+
+# Examples requiring agent server
+cd examples/02_remote_agent_server
+# Follow README in that directory
+```
+
+## Architecture
+
+### Core SDK Architecture
+
+**Agent (`openhands/sdk/agent/`)**: The central orchestrator that coordinates LLMs, tools, and conversation state. Agents can be created via presets (`get_default_agent()` or `get_planning_agent()`) or manually configured with specific tools.
+
+**Conversation (`openhands/sdk/conversation/`)**: Manages interaction flow between users and agents. Key components:
+- `Conversation`: Main class for SDK usage
+- `LocalConversation`: Runs agent locally in same process
+- `RemoteConversation`: Connects to remote agent via WebSocket
+- `EventStore`: Persists conversation history
+- `StuckDetector`: Detects and handles infinite loops
+
+**LLM Integration (`openhands/sdk/llm/`)**: Unified interface for multiple LLM providers via LiteLLM. Supports function calling, multimodal inputs, and custom routing strategies. `LLMRegistry` manages shared LLM configurations.
+
+**Context Management (`openhands/sdk/context/`)**: Controls agent behavior and memory:
+- `AgentContext`: System/user message customization
+- `Microagents`: Inject context based on triggers (repo-wide or keyword-based)
+- `Condenser`: Manages conversation history truncation (e.g., `LLMSummarizingCondenser` replaces old events with summaries)
+
+**Tools (`openhands/sdk/tool/` and `openhands/tools/`)**: Tools are registered via `register_tool()` and instantiated with `Tool()` specs:
+- `BashTool`: Execute bash commands in persistent shell
+- `FileEditorTool`: Create/edit files with advanced editing capabilities
+- `TaskTrackerTool`: Organize and track development tasks
+- `BrowserToolSet`: Web automation (disabled in CLI mode)
+- Built-in tools: `ThinkTool` (reasoning) and `FinishTool` (task completion)
+
+**MCP Integration (`openhands/sdk/mcp/`)**: Model Context Protocol support for external tool providers. Default preset includes `mcp-server-fetch` (web fetching) and `repomix` (codebase packing).
+
+**Security (`openhands/sdk/security/`)**: `LLMSecurityAnalyzer` analyzes tool calls for potential risks and can prompt for user confirmation on risky actions.
+
+**Events (`openhands/sdk/event/`)**: All actions and observations are represented as events. `LLMConvertibleEvent` types can be serialized to/from LLM messages.
+
+### Agent Server Architecture
+
+**API Layer (`openhands/agent_server/api.py`)**: FastAPI application with REST endpoints and WebSocket support. Routes are organized by domain:
+- `conversation_router`: Create/manage conversations
+- `event_router`: Query conversation events
+- `bash_router`, `file_router`, `tool_router`: Direct tool access
+- `vscode_router`, `desktop_router`: IDE/desktop integration
+- `sockets_router`: WebSocket connections for real-time updates
+
+**Services**:
+- `conversation_service`: Manages conversation lifecycle
+- `vscode_service`, `desktop_service`: Optional IDE/desktop environment management
+
+**Pub/Sub (`pub_sub.py`)**: In-memory event bus for broadcasting conversation updates to WebSocket clients.
+
+**Docker Support**: Dockerfiles in `openhands/agent_server/docker/` for containerized deployment.
+
+### Workspace Management
+
+**Workspace (`openhands/workspace/`)**: Abstracts execution environments. `LocalWorkspace` runs on host, `RemoteWorkspace` connects to remote environments via API.
+
+## Key Patterns and Conventions
+
+### Tool Development
+Tools must inherit from `ToolBase` and implement `get_schema()` and `execute()`. Register tools before agent creation:
+```python
+from openhands.sdk.tool import register_tool
+register_tool("MyTool", MyToolClass)
+```
+
+### Conversation Flow
+1. Create agent with LLM and tools
+2. Create conversation with agent
+3. Send messages via `conversation.send_message()`
+4. Run conversation with `conversation.run()` (blocks until agent awaits user input)
+5. Access events via `conversation.events`
+
+### Event-Driven Design
+All interactions are events. Tools produce `Action` events (what agent wants to do) and `Observation` events (results). The conversation loop processes events until agent enters "await user input" state.
+
+### UV Workspace Structure
+This is a monorepo with inter-package dependencies managed by UV workspace.
+When modifying dependencies:
+- Add to the appropriate package's `pyproject.toml`
+- Run `uv sync` to update the lockfile
+- Workspace sources are defined in root `pyproject.toml` `[tool.uv.sources]`
+
+### Testing Structure
+- `tests/sdk/`: Core SDK functionality tests
+- `tests/tools/`: Individual tool tests
+- `tests/agent_server/`: Server API tests
+- `tests/cross/`: Cross-package integration tests
+- `tests/integration/`: Full end-to-end tests
+- Use `pytest-asyncio` for async tests (asyncio_mode = "auto" in pyproject.toml)
+
+## Important Notes
+
+- Python 3.12+ required
+- UV 0.8.13+ required for workspace support
+- Pre-commit hooks enforce ruff formatting, linting, pycodestyle, and pyright type checking
+- All LLM interactions go through LiteLLM for provider abstraction
+- Default preset includes MCP servers: `mcp-server-fetch` and `repomix`
+- Browser tools are automatically disabled when `cli_mode=True`
+- Security analyzer is enabled by default in the default preset

diff --git a/agent-sdk.workspace.code-workspace b/agent-sdk.workspace.code-workspace
new file mode 100644
index 0000000000..ef4df70c86
--- /dev/null
+++ b/agent-sdk.workspace.code-workspace
@@ -0,0 +1,14 @@
+{
+  "folders": [
+    {
+      "path": "."
+    },
+    {
+      "path": "../odie-cli"
+    },
+    {
+      "path": "../../.openhands"
+    }
+  ],
+  "settings": {}
+}
\ No newline at end of file

diff --git a/docs/agent-sdk.workspace.code-workspace b/docs/agent-sdk.workspace.code-workspace
new file mode 100644
index 0000000000..ef4df70c86
--- /dev/null
+++ b/docs/agent-sdk.workspace.code-workspace
@@ -0,0 +1,14 @@
+{
+  "folders": [
+    {
+      "path": "."
+    },
+    {
+      "path": "../odie-cli"
+    },
+    {
+      "path": "../../.openhands"
+    }
+  ],
+  "settings": {}
+}
\ No newline at end of file

diff --git a/docs/llm-model-info-and-caps.md b/docs/llm-model-info-and-caps.md
new file mode 100644
index 0000000000..ecd93bce8d
--- /dev/null
+++ b/docs/llm-model-info-and-caps.md
@@ -0,0 +1,56 @@
+# Model Info and Capabilities Initialization
+
+Problem
+- `_init_model_info_and_caps()` mixes network I/O, name fallback heuristics, capability derivation, and policy (e.g., the Claude 64k override). This reduces readability, slows object construction, and complicates testing.
+
+Goals
+- Keep initialization fast and predictable.
+- Isolate provider-specific probing and capability derivation.
+- Make Anthropic-specific rules easy to find and change.
+- Avoid repeated network calls for the same model/base_url.
+
+Proposed Structure
+1) Resolver with cache
+- `resolve_model_info(model: str, base_url: str | None, api_key: SecretStr | None) -> dict | None`
+- Tries in order:
+  1. If model.startswith("openrouter"): litellm.get_model_info(model)
+  2. If model.startswith("litellm_proxy/"): fetch from `{base_url}/v1/model/info`, find the matching `model_name`, return `model_info`
+  3. Fallback: litellm.get_model_info(model.split(":")[0])
+  4. Fallback: litellm.get_model_info(model.split("/")[-1])
+- Wrap in an LRU cache keyed by `(provider_tag, normalized_model, base_url)`.
+- Apply a short timeout on httpx.get and handle errors gracefully.
+
+2) Pure derivations
+- `derive_token_limits(model: str, model_info: dict | None, existing_max_in: int | None, existing_max_out: int | None) -> tuple[int | None, int | None]`
+  - Respect existing values when already provided by the user.
+  - If Anthropic family and no explicit max_output_tokens, apply a practical cap (e.g., 64k) via a shared Anthropic helper.
+  - Use model_info["max_input_tokens"] / ["max_output_tokens"] / ["max_tokens"] as fallbacks.
+- `compute_function_calling_active(native_override: bool | None, features) -> bool`
+  - If the user sets `native_tool_calling`, use it; otherwise use features.supports_function_calling.
+
+3) Anthropic helpers (co-located)
+- `anthropic/cache.py` -> apply_prompt_caching(messages)
+- `anthropic/tokens.py` -> claude_practical_max_output(model) -> int | None
+- `anthropic/reasoning.py` -> headers and interleaved-thinking beta logic
+
+4) Initialization flow inside LLM
+- During validation: set telemetry/metrics/tokenizer.
+- Call `self._initialize_model_profile()` (small):
+  - `self._model_info = resolve_model_info(self.model, self.base_url, self.api_key)`
+  - `(self.max_input_tokens, self.max_output_tokens) = derive_token_limits(...)`
+  - `self._function_calling_active = compute_function_calling_active(self.native_tool_calling, get_features(self.model))`
+- Optionally lazy: if we defer the resolver to first use, ensure `clone()` carries the resolved profile forward to avoid surprises.
+
+Base URL Scheme for Local/Proxy
+- If `base_url` lacks a scheme, default to `http://` for localhost/intranet friendliness, with a clear debug log: "No scheme in base_url, defaulting to http://".
+- Optionally add a `force_https: bool = False` flag to override the behavior when desired.
+
+Why This Works
+- Readability: every function does one thing; the big method is gone.
+- Testability: the resolver can be mocked; the derivations are pure and easy to unit test.
+- Performance: model info is cached across instances; no repeated network calls.
+- Extensibility: Anthropic rules live together; adding providers won't bloat LLM.
+
+Open Questions
+- Should we always default to `http://` when there is no scheme, or default to `https://` and special-case `localhost`/`127.0.0.1`? Defaulting to `http://` is convenient for local dev; we can add a security note in docs.
+- How large should the resolver LRU cache be? Likely tiny (e.g., 64 entries), since models are a short list.

diff --git a/docs/llm-refactor.md b/docs/llm-refactor.md
new file mode 100644
index 0000000000..9fcd7143b9
--- /dev/null
+++ b/docs/llm-refactor.md
@@ -0,0 +1,110 @@
+# LLM Refactor Plan: Simplicity, Streaming/Async, Stateful Responses
+
+Context
+- The current LLM class (openhands/sdk/llm/llm.py) has grown large and mixes several concerns: config, feature detection, message formatting, tool strategy (native vs mock), provider option selection, transport calls, retry+telemetry, and post-processing.
+- Today: sync-only, non-streaming for both Chat Completions and the OpenAI Responses API. No stateful Responses API.
+- Goals: improve readability, keep the public API stable, and create clear extension points for the stateful Responses API, streaming, async, and on-the-fly LLM switching.
+
+Design Principles
+- Thin Facade: Keep LLM as a small, readable entry point that delegates.
+- Small Modules, One Responsibility: favor 50-150 LOC modules that do one thing well.
+- Composition over Inheritance: avoid complex adapter hierarchies; use simple functions/classes.
+- Backward Compatible: keep LLM.completion and LLM.responses behavior intact.
+
+Proposed Architecture
+1) Formatters (pure):
+   - formatters/chat.py
+     - prepare_chat_messages(llm, messages) -> list[dict]
+     - Applies Anthropic cache markers only when relevant.
+     - Applies vision/function-calling flags.
+     - Uses Message.to_llm_dict().
+   - formatters/responses.py
+     - prepare_responses_input(llm, messages) -> (instructions: str | None, input_items: list[dict])
+     - Vision only; no cache flags.
+     - Uses Message.to_responses_value().
+
+2) Tools:
+   - tools/prepare.py
+     - build_chat_tools(tools) -> list[ChatCompletionToolParam]
+     - build_responses_tools(tools) -> list[Responses ToolParam]
+   - tools/strategy.py
+     - choose_tool_strategy(llm, chat_tools) -> strategy
+     - NativeToolStrategy: send tools natively (when supported)
+     - MockToolStrategy: pre/post transforms for prompt-mocked tool calls
+
+3) Options (rename normalize_* -> select_options_*):
+   - options/chat_options.py
+     - select_chat_options(llm, user_kwargs, has_tools: bool) -> dict
+   - options/responses_options.py
+     - select_responses_options(llm, user_kwargs, include, store) -> dict
+
+4) Transport (litellm boundary):
+   - transport/chat.py
+     - transport_chat_sync(model, messages, options) -> ModelResponse
+     - (future) transport_chat_stream/async
+   - transport/responses.py
+     - transport_responses_sync(model, instructions, input_items, tools, options) -> ResponsesAPIResponse
+     - (future) transport_responses_stream/async
+   - Keep the litellm.modify_params guard centralized here.
+
+5) Invocation (retry + telemetry):
+   - invocation/chat_invoker.py
+     - call_sync(ctx) -> LLMResponse
+     - (future) call_stream/call_async/call_async_stream
+   - invocation/responses_invoker.py
+     - call_sync(ctx) -> LLMResponse
+
+6) Caching (Anthropic-only flags today):
+   - caching/anthropic_cache.py
+     - apply_prompt_caching(messages) -> None
+
+7) Streaming/Async (future):
+   - streaming/events.py: {TextDelta, ToolCallDelta, ReasoningDelta, UsageDelta, Error, End}
+   - streaming/aggregator.py: fold deltas into a final Message/Usage
+
+Public LLM Surface (unchanged now, future-ready)
+- completion(...)
+- responses(...)
+- (future) completion_stream(...), responses_stream(...)
+- (future) acompletion(...), aresponses(...), acompletion_stream(...), aresponses_stream(...)
+
+On-the-fly LLM switching
+- Prefer clone-and-swap: LLM.clone(**overrides) returns a new configured instance; the Agent swaps it atomically.
+- Optionally use a lightweight LLMHandle wrapper that the Agent holds; handle.set(new_llm) hot-swaps internally.
+
+Stateful Responses API (future)
+- responses_invoker + responses transport accept store=True and session/thread identifiers from select_responses_options.
+- No changes are required in the LLM facade beyond plumbing.
+
+Refactor of _init_model_info_and_caps()
+Current behavior (mixed concerns): performs provider-specific model_info fetches (including network I/O), sets token limits and function-calling capability. This couples init-time side effects, network I/O, and policy.
+
+We will:
+- Extract a resolver: resolve_model_info(model, base_url, api_key) with an LRU cache. Supports openrouter, litellm_proxy, and basename fallbacks.
+- Extract pure derivations:
+  - derive_token_limits(model, model_info)
+  - compute_function_calling_active(native_override, features)
+- Consider lazy loading guarded by ensure_model_info_loaded(), but be mindful of clone(): clone should carry over the resolved model profile so we avoid late surprises.
+(A sketch of the resolver and derivations follows the Anthropic section below.)
+
+Anthropic-specific logic
+- Group Anthropic-specific concerns behind a small module anthropic/:
+  - anthropic/cache.py: apply_prompt_caching(...)
+  - anthropic/tokens.py: optional token/output cap overrides (e.g., Claude's practical 64k)
+  - anthropic/reasoning.py: extended thinking headers, interleaved-thinking beta, etc.
+- Chat and Responses option selectors call into these helpers only when get_features(model).is_anthropic is true. This keeps the Anthropic "stuff" co-located and easy to find without over-engineering adapters.
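+
+A minimal sketch of the resolver and derivations (names from this plan; the cache size, timeout, api_key plumbing, and the proxy response shape are assumptions based on the description above):
+
+```python
+from functools import lru_cache
+
+import httpx
+import litellm
+
+
+@lru_cache(maxsize=64)
+def resolve_model_info(model: str, base_url: str | None = None) -> dict | None:
+    # litellm_proxy models: ask the proxy and match on model_name.
+    if model.startswith("litellm_proxy/") and base_url:
+        try:
+            resp = httpx.get(f"{base_url}/v1/model/info", timeout=5.0)
+            for entry in resp.json().get("data", []):
+                if entry.get("model_name") == model.split("/", 1)[1]:
+                    return entry.get("model_info")
+        except Exception:
+            pass  # network or parse failure; fall through to litellm lookups
+    # Everything else (incl. openrouter): litellm lookup, then basename fallbacks.
+    for name in (model, model.split(":")[0], model.split("/")[-1]):
+        try:
+            return litellm.get_model_info(name)
+        except Exception:
+            continue
+    return None
+
+
+def derive_token_limits(
+    model: str,
+    model_info: dict | None,
+    existing_max_in: int | None,
+    existing_max_out: int | None,
+) -> tuple[int | None, int | None]:
+    # Pure derivation: user-provided limits win; model_info fills the gaps.
+    info = model_info or {}
+    max_in = existing_max_in or info.get("max_input_tokens") or info.get("max_tokens")
+    max_out = existing_max_out or info.get("max_output_tokens") or info.get("max_tokens")
+    return max_in, max_out
+```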
+
+Base URL scheme for local/proxy
+- If base_url has no scheme, default to http:// to support localhost/intranet usage, but log a concise debug message. If security is a concern in some environments, allow an LLM flag to force https.
+
+Migration Plan (incremental)
+1) Extract prepare_* and select_options_* helpers (rename from normalize_*). No behavior change.
+2) Extract chat/responses transport and centralize the litellm.modify_params guard.
+3) Introduce ToolStrategy (native/mock) using the existing mixin logic.
+4) Add Chat/Responses invokers (retry + telemetry) and delegate from LLM.
+5) Introduce the model_info resolver + derivations; replace _init_model_info_and_caps with a small initializer that calls the resolver and derivations.
+6) Add streaming/async in the invokers.
+
+Readability Wins
+- Each module is short, with purpose-revealing names.
+- LLM methods read as: prepare -> select options -> transport -> postprocess -> wrap.
+- Provider quirks (Anthropic) are grouped and opt-in by features.

diff --git a/log.txt b/log.txt
new file mode 100644
index 0000000000..000890ce36
--- /dev/null
+++ b/log.txt
@@ -0,0 +1,825 @@
+#15 FROM ghcr.io/astral-sh/uv:latest@sha256:6dbd7c42a9088083fa79e41431a579196a189bcee3ae68ba904ac2bf77765867
+#15 DONE 0.0s
+
+#16 importing cache manifest from ghcr.io/all-hands-ai/agent-server:buildcache-source-nikolaik_s_python-nodejs_tag_python3.12-nodejs22-main
+#16 ...
+
+#17 importing cache manifest from ghcr.io/all-hands-ai/agent-server:buildcache-source-nikolaik_s_python-nodejs_tag_python3.12-nodejs22-vscode-settings-extension
+#17 ERROR: failed to configure registry cache importer: ghcr.io/all-hands-ai/agent-server:buildcache-source-nikolaik_s_python-nodejs_tag_python3.12-nodejs22-vscode-settings-extension: not found
+
+#16 importing cache manifest from ghcr.io/all-hands-ai/agent-server:buildcache-source-nikolaik_s_python-nodejs_tag_python3.12-nodejs22-main
+#16 ERROR: failed to configure registry cache importer: ghcr.io/all-hands-ai/agent-server:buildcache-source-nikolaik_s_python-nodejs_tag_python3.12-nodejs22-main: not found
+
+#13 [internal] load build context
+#13 transferring context: 40.71kB 0.0s done
+#13 DONE 0.0s
+
+#11 [internal] settings cache mount permissions
+#11 CACHED
+
+#18 [builder 2/7] COPY --from=ghcr.io/astral-sh/uv /uv /uvx /bin/
+#18 CACHED
+
+#19 [builder 3/7] RUN groupadd -g 10001 openhands && useradd -m -u 10001 -g 10001 -s /usr/sbin/nologin openhands
+#19 CACHED
+
+#20 [builder 4/7] WORKDIR /agent-server
+#20 CACHED
+
+#21 [builder 5/7] COPY --chown=openhands:openhands pyproject.toml uv.lock README.md LICENSE ./
+#21 CACHED
+
+#22 [builder 6/7] COPY --chown=openhands:openhands openhands ./openhands
+#22 DONE 0.2s
+
+#23 [builder 7/7] RUN --mount=type=cache,target=/home/openhands/.cache,uid=10001,gid=10001 uv sync --frozen --no-editable
+#23 0.103 Using CPython 3.12.12 interpreter at: /usr/local/bin/python
+#23 0.103 Creating virtual environment at: .venv
+#23 0.127 Building openhands-agent-server @ file:///agent-server/openhands/agent_server
+#23 0.128 Building openhands-sdk @ file:///agent-server/openhands/sdk
+#23 0.128 Building openhands-tools @ file:///agent-server/openhands/tools
+#23 0.129 Building openhands-workspace @ file:///agent-server/openhands/workspace
+#23 1.155 Built openhands-workspace @ file:///agent-server/openhands/workspace
+#23 1.155 Built openhands-tools @ file:///agent-server/openhands/tools
+#23 1.162 Built openhands-agent-server @ file:///agent-server/openhands/agent_server
+#23 1.163 Built openhands-sdk @ file:///agent-server/openhands/sdk
+#23 1.188 Prepared 4 packages in 1.06s
+#23 1.189 warning: Failed to hardlink files; falling back to full copy. This may lead to degraded performance.
+#23 1.189 If the cache and target directories are on different filesystems, hardlinking may not be supported.
+#23 1.189 If this is intentional, set `export UV_LINK_MODE=copy` or use `--link-mode=copy` to suppress this warning.
+#23 1.783 Installed 183 packages in 594ms
+#23 1.783 + aiofiles==24.1.0
+#23 1.783 + aiohappyeyeballs==2.6.1
+#23 1.783 + aiohttp==3.12.15
+#23 1.783 + aiosignal==1.4.0
+#23 1.783 + aiosqlite==0.21.0
+#23 1.783 + alembic==1.16.5
+#23 1.783 + altair==5.5.0
+#23 1.783 + altgraph==0.17.4
+#23 1.783 + annotated-types==0.7.0
+#23 1.783 + anthropic==0.68.0
+#23 1.783 + anyio==4.10.0
+#23 1.783 + attrs==25.3.0
+#23 1.783 + authlib==1.6.4
+#23 1.783 + backoff==2.2.1
+#23 1.783 + bashlex==0.18
+#23 1.783 + binaryornot==0.4.4
+#23 1.783 + blinker==1.9.0
+#23 1.783 + browser-use==0.7.9
+#23 1.783 + bubus==1.5.6
+#23 1.783 + cachetools==5.5.2
+#23 1.783 + cdp-use==1.4.1
+#23 1.783 + certifi==2025.8.3
+#23 1.783 + cffi==2.0.0
+#23 1.783 + cfgv==3.4.0
+#23 1.783 + chardet==5.2.0
+#23 1.783 + charset-normalizer==3.4.3
+#23 1.783 + click==8.2.1
+#23 1.783 + coverage==7.10.6
+#23 1.783 + cryptography==46.0.1
+#23 1.783 + cyclopts==3.24.0
+#23 1.783 + distlib==0.4.0
+#23 1.783 + distro==1.9.0
+#23 1.783 + dnspython==2.8.0
+#23 1.783 + docker==7.1.0
+#23 1.783 + docstring-parser==0.17.0
+#23 1.783 + docutils==0.22.1
+#23 1.783 + email-validator==2.3.0
+#23 1.783 + exceptiongroup==1.3.0
+#23 1.783 + fastapi==0.116.2
+#23 1.783 + fastmcp==2.12.3
+#23 1.783 + fastuuid==0.13.5
+#23 1.783 + filelock==3.19.1
+#23 1.783 + frozenlist==1.7.0
+#23 1.783 + fsspec==2025.9.0
+#23 1.783 + func-timeout==4.3.5
+#23 1.783 + gitdb==4.0.12
+#23 1.783 + gitpython==3.1.45
+#23 1.783 + google-api-core==2.25.1
+#23 1.784 + google-api-python-client==2.182.0
+#23 1.784 + google-auth==2.40.3
+#23 1.784 + google-auth-httplib2==0.2.0
+#23 1.784 + google-auth-oauthlib==1.2.2
+#23 1.784 + google-genai==1.38.0
+#23 1.784 + googleapis-common-protos==1.70.0
+#23 1.784 + greenlet==3.2.4
+#23 1.784 + groq==0.31.1
+#23 1.784 + h11==0.16.0
+#23 1.784 + hf-xet==1.1.10
+#23 1.784 + html2text==2025.4.15
+#23 1.784 + httpcore==1.0.9
+#23 1.784 + httplib2==0.31.0
+#23 1.784 + httpx==0.28.1
+#23 1.784 + httpx-sse==0.4.1
+#23 1.784 + huggingface-hub==0.35.0
+#23 1.784 + identify==2.6.14
+#23 1.784 + idna==3.10
+#23 1.784 + importlib-metadata==8.7.0
+#23 1.784 + iniconfig==2.1.0
+#23 1.784 + isodate==0.7.2
+#23 1.784 + jinja2==3.1.6
+#23 1.784 + jiter==0.11.0
+#23 1.784 + jsonschema==4.25.1
+#23 1.784 + jsonschema-path==0.3.4
+#23 1.784 + jsonschema-specifications==2025.9.1
+#23 1.784 + lazy-object-proxy==1.12.0
+#23 1.784 + libtmux==0.46.2
+#23 1.784 + litellm==1.77.7 (from git+https://github.com/BerriAI/litellm.git@763d2f8ccdd8412dbe6d4ac0e136d9ac34dcd4c0)
+#23 1.784 + mako==1.3.10
+#23 1.784 + markdown-it-py==4.0.0
+#23 1.784 + markupsafe==3.0.2
+#23 1.784 + mcp==1.14.1
+#23 1.784 + mdurl==0.1.2
+#23 1.784 + more-itertools==10.8.0
+#23 1.784 + multidict==6.6.4
+#23 1.784 + narwhals==2.5.0
+#23 1.784 + nodeenv==1.9.1
+#23 1.784 + nodejs-wheel-binaries==22.19.0
+#23 1.784 + numpy==2.3.3
+#23 1.784 + oauthlib==3.3.1
+#23 1.784 + ollama==0.5.4
+#23 1.784 + openai==1.108.1
+#23 1.784 + openapi-core==0.19.5
+#23 1.784 + openapi-pydantic==0.5.1
+#23 1.784 + openapi-schema-validator==0.6.3
+#23 1.784 + openapi-spec-validator==0.7.2
+#23 1.784 + openhands-agent-server==1.0.0 (from file:///agent-server/openhands/agent_server)
+#23 1.784 + openhands-sdk==1.0.0 (from file:///agent-server/openhands/sdk)
+#23 1.784 + openhands-tools==1.0.0 (from file:///agent-server/openhands/tools)
+#23 1.784 + openhands-workspace==1.0.0 (from file:///agent-server/openhands/workspace)
+#23 1.784 + packaging==25.0
+#23 1.784 + pandas==2.3.2
+#23 1.784 + parse==1.20.2
+#23 1.784 + pathable==0.4.4
+#23 1.784 + pillow==11.3.0
+#23 1.784 + platformdirs==4.4.0
+#23 1.784 + pluggy==1.6.0
+#23 1.784 + portalocker==2.10.1
+#23 1.784 + posthog==6.7.5
+#23 1.784 + pre-commit==4.3.0
+#23 1.784 + propcache==0.3.2
+#23 1.784 + proto-plus==1.26.1
+#23 1.784 + protobuf==6.32.1
+#23 1.784 + psutil==7.1.0
+#23 1.784 + py==1.11.0
+#23 1.785 + pyarrow==21.0.0
+#23 1.785 + pyasn1==0.6.1
+#23 1.785 + pyasn1-modules==0.4.2
+#23 1.785 + pycodestyle==2.14.0
+#23 1.785 + pycparser==2.23
+#23 1.785 + pydantic==2.11.9
+#23 1.785 + pydantic-core==2.33.2
+#23 1.785 + pydantic-settings==2.10.1
+#23 1.785 + pydeck==0.9.1
+#23 1.785 + pygments==2.19.2
+#23 1.785 + pyinstaller==6.16.0
+#23 1.785 + pyinstaller-hooks-contrib==2025.8
+#23 1.785 + pyotp==2.9.0
+#23 1.785 + pyparsing==3.2.4
+#23 1.785 + pypdf==6.0.0
+#23 1.785 + pyperclip==1.10.0
+#23 1.785 + pyright==1.1.405
+#23 1.785 + pytest==8.4.2
+#23 1.785 + pytest-asyncio==1.2.0
+#23 1.785 + pytest-cov==7.0.0
+#23 1.785 + pytest-forked==1.6.0
+#23 1.785 + pytest-timeout==2.4.0
+#23 1.785 + python-dateutil==2.9.0.post0
+#23 1.785 + python-dotenv==1.1.1
+#23 1.785 + python-frontmatter==1.1.0
+#23 1.785 + python-json-logger==3.3.0
+#23 1.785 + python-multipart==0.0.20
+#23 1.785 + pytz==2025.2
+#23 1.785 + pyyaml==6.0.2
+#23 1.785 + referencing==0.36.2
+#23 1.785 + regex==2025.9.18
+#23 1.785 + reportlab==4.4.4
+#23 1.785 + requests==2.32.5
+#23 1.785 + requests-oauthlib==2.0.0
+#23 1.785 + rfc3339-validator==0.1.4
+#23 1.785 + rich==14.1.0
+#23 1.785 + rich-rst==1.3.1
+#23 1.785 + rpds-py==0.27.1
+#23 1.785 + rsa==4.9.1
+#23 1.785 + ruff==0.13.1
+#23 1.785 + screeninfo==0.8.1
+#23 1.785 + setuptools==80.9.0
+#23 1.785 + six==1.17.0
+#23 1.785 + smmap==5.0.2
+#23 1.785 + sniffio==1.3.1
+#23 1.785 + sqlalchemy==2.0.43
+#23 1.785 + sse-starlette==3.0.2
+#23 1.785 + starlette==0.48.0
+#23 1.785 + streamlit==1.49.1
+#23 1.785 + tabulate==0.9.0
+#23 1.785 + tenacity==9.1.2
+#23 1.785 + tiktoken==0.11.0
+#23 1.785 + tokenizers==0.22.1
+#23 1.785 + toml==0.10.2
+#23 1.785 + tornado==6.5.2
+#23 1.785 + tqdm==4.67.1
+#23 1.785 + typing-extensions==4.15.0
+#23 1.785 + typing-inspection==0.4.1
+#23 1.785 + tzdata==2025.2
+#23 1.785 + uritemplate==4.2.0
+#23 1.785 + urllib3==2.5.0
+#23 1.785 + uuid7==0.1.0
+#23 1.785 + uvicorn==0.35.0
+#23 1.785 + virtualenv==20.34.0
+#23 1.785 + watchdog==6.0.0
+#23 1.785 + websockets==15.0.1
+#23 1.785 + werkzeug==3.1.1
+#23 1.785 + yarl==1.20.1
+#23 1.785 + zipp==3.23.0
+#23 DONE 2.4s
+
+#24 [base-image-minimal 3/4] COPY --from=ghcr.io/astral-sh/uv /uv /uvx /bin/
+#24 CACHED
+
+#25 [base-image 1/7] RUN set -eux; mkdir -p $(dirname /openhands/.openvscode-server); arch=$(uname -m); if [ "${arch}" = "x86_64" ]; then arch="x64"; elif [ "${arch}" = "aarch64" ]; then arch="arm64"; elif [ "${arch}" = "armv7l" ]; then arch="armhf"; fi; wget https://github.com/gitpod-io/openvscode-server/releases/download/openvscode-server-v1.98.2/openvscode-server-v1.98.2-linux-${arch}.tar.gz; tar -xzf openvscode-server-v1.98.2-linux-${arch}.tar.gz; if [ -d "/openhands/.openvscode-server" ]; then rm -rf "/openhands/.openvscode-server"; fi; mv openvscode-server-v1.98.2-linux-${arch} /openhands/.openvscode-server; cp /openhands/.openvscode-server/bin/remote-cli/openvscode-server /openhands/.openvscode-server/bin/remote-cli/code; rm -f openvscode-server-v1.98.2-linux-${arch}.tar.gz; chown -R openhands:openhands /openhands/.openvscode-server
+#25 CACHED
+
+#26 [base-image 2/7] RUN set -eux; if grep -q "ubuntu" /etc/os-release; then install -m 0755 -d /etc/apt/keyrings; curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc; chmod a+r /etc/apt/keyrings/docker.asc; echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null; else install -m 0755 -d /etc/apt/keyrings; curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc; chmod a+r /etc/apt/keyrings/docker.asc; echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian bookworm stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null; fi; apt-get update; apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin; apt-get clean; rm -rf /var/lib/apt/lists/*
+#26 CACHED
+
+#27 [base-image 3/7] RUN mkdir -p /etc/docker && echo '{"mtu": 1450}' > /etc/docker/daemon.json
+#27 CACHED
+
+#28 [base-image 4/7] RUN set -eux; apt-get update; apt-get install -y --no-install-recommends tigervnc-standalone-server xfce4 dbus-x11 novnc websockify $(if grep -q "ubuntu" /etc/os-release; then echo "chromium-browser"; else echo "chromium"; fi); apt-get clean; rm -rf /var/lib/apt/lists/*
+#28 CACHED
+
+#29 [base-image 5/7] RUN chown -R openhands:openhands /usr/share/novnc
+#29 CACHED
+
+#30 [base-image-minimal 2/4] RUN set -eux; apt-get update; apt-get install -y --no-install-recommends ca-certificates curl wget sudo apt-utils git jq tmux build-essential coreutils util-linux procps findutils grep sed apt-transport-https gnupg lsb-release; (getent group 10001 || groupadd -g 10001 openhands); (id -u openhands >/dev/null 2>&1 || useradd -m -u 10001 -g 10001 -s /bin/bash openhands); usermod -aG sudo openhands; echo "openhands ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers; mkdir -p /workspace/project; chown -R openhands:openhands /workspace; rm -rf /var/lib/apt/lists/*
+#30 CACHED
+
+#31 [base-image 6/7] COPY --chown=openhands:openhands openhands/agent_server/docker/wallpaper.svg /usr/share/backgrounds/xfce/xfce-shapes.svg
+#31 CACHED
+
+#32 [source 1/2] COPY --chown=openhands:openhands --from=builder /agent-server /agent-server
+#32 DONE 2.0s
+
+#33 [source 2/2] COPY --chown=openhands:openhands --from=builder /agent-server/openhands/agent_server/vscode_extensions /openhands/.openvscode-server/extensions
+#33 DONE 0.1s
+
+#34 exporting to image
+#34 exporting layers
+#34 exporting layers 1.7s done
+#34 writing image sha256:97a17ba72cf8e9a01177eaa430636ec6de8c86bd13da14ced864b4569a064d12
+#34 writing image sha256:97a17ba72cf8e9a01177eaa430636ec6de8c86bd13da14ced864b4569a064d12 done
+#34 naming to ghcr.io/all-hands-ai/agent-server:71d1a9f-custom-dev done
+#34 naming to ghcr.io/all-hands-ai/agent-server:v1.0.0_nikolaik_s_python-nodejs_tag_python3.12-nodejs22-dev done
+#34 DONE 1.7s
+------
+ > importing cache manifest from ghcr.io/all-hands-ai/agent-server:buildcache-source-nikolaik_s_python-nodejs_tag_python3.12-nodejs22-vscode-settings-extension:
+------
+------
+ > importing cache manifest from ghcr.io/all-hands-ai/agent-server:buildcache-source-nikolaik_s_python-nodejs_tag_python3.12-nodejs22-main:
+------
+
+View build details: docker-desktop://dashboard/build/desktop-linux/desktop-linux/c52mydc2har9e73bfl9d6ipr1
+[build] Done. Tags:
+ - ghcr.io/all-hands-ai/agent-server:71d1a9f-custom-dev
+ - ghcr.io/all-hands-ai/agent-server:v1.0.0_nikolaik_s_python-nodejs_tag_python3.12-nodejs22-dev
+BUILD DONE.
+[10/12/25 23:34:14] INFO     Using image: ghcr.io/all-hands-ai/agent-server:71d1a9f-custom-dev    workspace.py:179
+[10/12/25 23:34:14] INFO     $ docker run -d --platform linux/arm64 --rm --name                     command.py:27
+                             agent-server-9727b297-a0b7-4606-acc0-72d2df11fe35 -e
+                             LLM_API_KEY=sk-7QlqTKmhtDfJfwQjwZZqPQ -p 8010:8000 -p 8011:8001 -p 8012:8002
+                             ghcr.io/all-hands-ai/agent-server:71d1a9f-custom-dev --host 0.0.0.0 --port 8000
+a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea
+[10/12/25 23:34:14] INFO     Started container:                                                   workspace.py:326
+                             a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea
+[10/12/25 23:34:14] INFO     $ docker inspect -f '{{.State.Running}}'                               command.py:27
+                             a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea
+true
+[10/12/25 23:34:15] INFO     $ docker inspect -f '{{.State.Running}}'                               command.py:27
+                             a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea
+true
+[10/12/25 23:34:16] INFO     $ docker inspect -f '{{.State.Running}}'                               command.py:27
+                             a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea
+true
+[DOCKER] {"asctime": "2025-10-12 21:34:16,780", "levelname": "WARNING", "name": "uvicorn.error", "filename": "config.py", "lineno": 283, "message": "Current configuration will not reload as not all conditions are met, please refer to documentation."}
+[DOCKER] /agent-server/.venv/lib/python3.12/site-packages/websockets/legacy/__init__.py:6: DeprecationWarning: websockets.legacy is deprecated; see https://websockets.readthedocs.io/en/stable/howto/upgrade.html for upgrade instructions
+[DOCKER] warnings.warn( # deprecated in 14.0 - 2024-11-09
+[DOCKER] /agent-server/.venv/lib/python3.12/site-packages/uvicorn/protocols/websockets/websockets_impl.py:17: DeprecationWarning: websockets.server.WebSocketServerProtocol is deprecated
+[DOCKER] from websockets.server import WebSocketServerProtocol
+[10/12/25 23:34:17] INFO     $ docker inspect -f '{{.State.Running}}'                               command.py:27
+                             a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea
+true
+[DOCKER] {"asctime": "2025-10-12 21:34:17,932", "levelname": "INFO", "name": "uvicorn.error", "filename": "server.py", "lineno": 84, "message": "Started server process [1]", "color_message": "Started server process [\u001b[36m%d\u001b[0m]"}
+[DOCKER] {"asctime": "2025-10-12 21:34:17,932", "levelname": "INFO", "name": "uvicorn.error", "filename": "on.py", "lineno": 48, "message": "Waiting for application startup."}
+[DOCKER] {"asctime": "2025-10-12 21:34:18,177", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension sql build failed"}
+[DOCKER] {"asctime": "2025-10-12 21:34:18,354", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension json build failed"}
+[10/12/25 23:34:18] INFO     $ docker inspect -f '{{.State.Running}}'                               command.py:27
+                             a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea
+true
+[DOCKER] {"asctime": "2025-10-12 21:34:18,540", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension objective-c build failed"}
+[DOCKER] {"asctime": "2025-10-12 21:34:18,720", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension julia build failed"}
+[DOCKER] {"asctime": "2025-10-12 21:34:18,897", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension handlebars build failed"}
+[DOCKER] {"asctime": "2025-10-12 21:34:19,074", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension docker build failed"}
+[DOCKER] {"asctime": "2025-10-12 21:34:19,241", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension go build failed"}
+[DOCKER] {"asctime": "2025-10-12 21:34:19,409", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension search-result build failed"}
+[10/12/25 23:34:19] INFO     $ docker inspect -f '{{.State.Running}}'                               command.py:27
+                             a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea
+true
+[DOCKER] {"asctime": "2025-10-12 21:34:19,587", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension powershell build failed"}
+[DOCKER] {"asctime": "2025-10-12 21:34:19,753", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension shellscript build failed"}
+[DOCKER] {"asctime": "2025-10-12 21:34:19,918", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension gulp build failed"}
+[DOCKER] {"asctime": "2025-10-12 21:34:20,104", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension groovy build failed"}
+[DOCKER] {"asctime": "2025-10-12 21:34:20,319", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension csharp build failed"}
+[10/12/25 23:34:20] INFO     $ docker inspect -f '{{.State.Running}}'                               command.py:27
+                             a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea
+[DOCKER] {"asctime": "2025-10-12 21:34:20,492", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension terminal-suggest build failed"}
+true
+[DOCKER] {"asctime": "2025-10-12 21:34:20,660", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension typescript-basics build failed"}
+[DOCKER] {"asctime": "2025-10-12 21:34:20,826", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension php build failed"}
+[DOCKER] {"asctime": "2025-10-12 21:34:20,992", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension github-authentication build failed"}
+[DOCKER] {"asctime": "2025-10-12 21:34:21,161",
"levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-monokai-dimmed build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:21,330", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension tunnel-forwarding build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:21,498", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-solarized-dark build failed"} +[10/12/25 23:34:21] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[DOCKER] {"asctime": "2025-10-12 21:34:21,665", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension ipynb build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:21,830", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension less build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:21,999", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension swift build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:22,165", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension perl build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:22,337", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension ruby build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:22,509", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension debug-auto-launch build failed"} +[10/12/25 23:34:22] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[DOCKER] {"asctime": "2025-10-12 21:34:22,684", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension log build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:22,866", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension python build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:23,042", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension jake build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:23,212", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension media-preview build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:23,380", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension shaderlab build failed"} +[10/12/25 23:34:23] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +[DOCKER] {"asctime": "2025-10-12 21:34:23,547", "levelname": "WARNING", "name": 
"openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension rust build failed"} +true +[DOCKER] {"asctime": "2025-10-12 21:34:23,724", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension debug-server-ready build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:23,894", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension git build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:24,062", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension markdown-math build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:24,237", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension html-language-features build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:24,412", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension configuration-editing build failed"} +[10/12/25 23:34:24] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +[DOCKER] {"asctime": "2025-10-12 21:34:24,584", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension scss build failed"} +true +[DOCKER] {"asctime": "2025-10-12 21:34:24,755", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension ms-vscode.js-debug-companion build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:24,921", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension json-language-features build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:25,093", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension r build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:25,260", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension emmet build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:25,430", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-solarized-light build failed"} +[10/12/25 23:34:25] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +[DOCKER] {"asctime": "2025-10-12 21:34:25,610", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension ms-vscode.js-debug build failed"} +true +[DOCKER] {"asctime": "2025-10-12 21:34:25,790", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension npm build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:25,959", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension make build failed"} +[DOCKER] 
{"asctime": "2025-10-12 21:34:26,129", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension css-language-features build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:26,297", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension hlsl build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:26,464", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension simple-browser build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:26,634", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension latex build failed"} +[10/12/25 23:34:26] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[DOCKER] {"asctime": "2025-10-12 21:34:26,807", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension git-base build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:26,977", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension javascript build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:27,143", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension yaml build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:27,309", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension markdown-basics build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:27,476", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension bat build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:27,646", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-kimbie-dark build failed"} +[10/12/25 23:34:27] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[DOCKER] {"asctime": "2025-10-12 21:34:27,816", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension ini build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:27,984", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension typescript-language-features build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:28,148", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-quietlight build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:28,313", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-monokai build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:28,480", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": 
"Extension theme-seti build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:28,650", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-tomorrow-night-blue build failed"} +[10/12/25 23:34:28] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[DOCKER] {"asctime": "2025-10-12 21:34:28,819", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension vb build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:28,988", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension grunt build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:29,156", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension coffeescript build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:29,325", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension dart build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:29,495", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension github build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:29,666", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension cpp build failed"} +[10/12/25 23:34:29] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[DOCKER] {"asctime": "2025-10-12 21:34:29,858", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-defaults build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:30,029", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension clojure build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:30,200", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension fsharp build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:30,370", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-abyss build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:30,541", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension xml build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:30,713", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension pug build failed"} +[10/12/25 23:34:30] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[DOCKER] {"asctime": "2025-10-12 21:34:30,885", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension lua build failed"} +[DOCKER] {"asctime": "2025-10-12 
21:34:31,055", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension razor build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:31,224", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension extension-editing build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:31,392", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension diff build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:31,563", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension microsoft-authentication build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:31,732", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension java build failed"} +[10/12/25 23:34:31] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[DOCKER] {"asctime": "2025-10-12 21:34:31,907", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension css build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:32,077", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension markdown-language-features build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:32,247", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension html build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:32,419", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-red build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:32,593", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension references-view build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:32,761", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension php-language-features build failed"} +[10/12/25 23:34:32] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[DOCKER] {"asctime": "2025-10-12 21:34:32,929", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension restructuredtext build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:33,103", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension merge-conflict build failed"} +[DOCKER] {"asctime": "2025-10-12 21:34:33,273", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension ms-vscode.vscode-js-profile-table build failed"} +[10/12/25 23:34:33] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[10/12/25 
23:34:34] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[10/12/25 23:34:35] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[10/12/25 23:34:36] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[10/12/25 23:34:38] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[10/12/25 23:34:39] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[10/12/25 23:34:40] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[10/12/25 23:34:41] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[DOCKER] {"asctime": "2025-10-12 21:34:42,024", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension notebook-renderers build failed"} +[10/12/25 23:34:42] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[10/12/25 23:34:43] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[DOCKER] {"asctime": "2025-10-12 21:34:43,775", "levelname": "INFO", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 218, "message": "Extension openhands-settings built successfully"} +[DOCKER] {"asctime": "2025-10-12 21:34:43,886", "levelname": "INFO", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 191, "message": "VSCode server startup detected"} +[DOCKER] {"asctime": "2025-10-12 21:34:43,886", "levelname": "INFO", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 64, "message": "VSCode server started successfully on port 8001"} +[DOCKER] {"asctime": "2025-10-12 21:34:43,886", "levelname": "INFO", "name": "openhands.agent_server.api", "filename": "api.py", "lineno": 49, "message": "VSCode service started successfully"} +[DOCKER] {"asctime": "2025-10-12 21:34:43,893", "levelname": "INFO", "name": "openhands.agent_server.desktop_service", "filename": "desktop_service.py", "lineno": 78, "message": "Starting TigerVNC on :1 (1280x800)..."} +[DOCKER] Please be aware that you are exposing your VNC server to all users on the +[DOCKER] local machine. These users can access your server without authentication! +[DOCKER] /usr/bin/xauth: file /home/openhands/.Xauthority does not exist +[DOCKER] +[DOCKER] New Xtigervnc server 'a22128f07980:1 (openhands)' on port 5901 for display :1. +[DOCKER] Use xtigervncviewer -SecurityTypes None :1 to connect to the VNC server. 
+[DOCKER] +[10/12/25 23:34:44] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[10/12/25 23:34:45] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[10/12/25 23:34:46] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[DOCKER] {"asctime": "2025-10-12 21:34:47,043", "levelname": "INFO", "name": "openhands.agent_server.desktop_service", "filename": "desktop_service.py", "lineno": 121, "message": "Starting noVNC proxy on 0.0.0.0:8002 -> 127.0.0.1:5901 ..."} +[DOCKER] {"asctime": "2025-10-12 21:34:47,045", "levelname": "INFO", "name": "openhands.agent_server.desktop_service", "filename": "desktop_service.py", "lineno": 143, "message": "noVNC URL: http://localhost:8002/vnc.html?autoconnect=1&resize=remote"} +[10/12/25 23:34:47] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[10/12/25 23:34:48] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 + a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea +true +[DOCKER] {"asctime": "2025-10-12 21:34:49,048", "levelname": "INFO", "name": "openhands.agent_server.desktop_service", "filename": "desktop_service.py", "lineno": 153, "message": "Desktop started successfully"} +[DOCKER] {"asctime": "2025-10-12 21:34:49,048", "levelname": "INFO", "name": "openhands.agent_server.api", "filename": "api.py", "lineno": 59, "message": "Desktop service started successfully"} +[DOCKER] {"asctime": "2025-10-12 21:34:49,245", "levelname": "INFO", "name": "uvicorn.error", "filename": "on.py", "lineno": 62, "message": "Application startup complete."} +[DOCKER] {"asctime": "2025-10-12 21:34:49,245", "levelname": "INFO", "name": "uvicorn.error", "filename": "server.py", "lineno": 216, "message": "Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)", "color_message": "Uvicorn running on \u001b[1m%s://%s:%d\u001b[0m (Press CTRL+C to quit)"} +[DOCKER] 🙌 Starting OpenHands Agent Server on 0.0.0.0:8000 +[10/12/25 23:34:49] INFO Docker workspace is ready at http://localhost:8010 workspace.py:343 +[DOCKER] 📖 API docs will be available at http://0.0.0.0:8000/docs +[DOCKER] 🔄 Auto-reload: disabled +[DOCKER] 🔒 DEBUG mode: DISABLED +[DOCKER] +[DOCKER] {"asctime": "2025-10-12 21:34:49,439", "levelname": "INFO", "name": "uvicorn.access", "client_addr": null, "request_line": null, "status_code": null} +[DOCKER] {"asctime": "2025-10-12 21:35:02,330", "levelname": "INFO", "name": "openhands.sdk.conversation.state", "filename": "state.py", "lineno": 213, "message": "Created new conversation 98aa5ad0-f12d-44fa-94d8-b1152df5a9a1\nState: {'id': UUID('98aa5ad0-f12d-44fa-94d8-b1152df5a9a1'), 'workspace': {'kind': 'LocalWorkspace', 'working_dir': '/workspace'}, 'persistence_dir': 'workspace/conversations/event_service/98aa5ad0-f12d-44fa-94d8-b1152df5a9a1', 'max_iterations': 500, 'stuck_detection': True, 'agent_status': , 'confirmation_policy': {'kind': 'NeverConfirm'}, 'activated_knowledge_microagents': [], 'stats': {'service_to_metrics': {}}}\nAgent: {'kind': 'Agent', 'llm': {'model': 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', 'api_key': SecretStr('**********'), 'base_url': 'https://llm-proxy.eval.all-hands.dev', 'openrouter_site_url': 'https://docs.all-hands.dev/', 'openrouter_app_name': 'OpenHands', 'num_retries': 5, 
'retry_multiplier': 8.0, 'retry_min_wait': 8, 'retry_max_wait': 64, 'max_message_chars': 30000, 'temperature': 0.0, 'top_p': 1.0, 'max_input_tokens': 200000, 'max_output_tokens': 64000, 'drop_params': True, 'modify_params': True, 'disable_stop_word': False, 'caching_prompt': True, 'log_completions': False, 'log_completions_folder': 'logs/completions', 'enable_encrypted_reasoning': False, 'extended_thinking_budget': 200000, 'service_id': 'agent', 'metadata': {}, 'OVERRIDE_ON_SERIALIZE': ('api_key', 'aws_access_key_id', 'aws_secret_access_key')}, 'tools': [{'name': 'BashTool', 'params': {}}, {'name': 'FileEditorTool', 'params': {}}, {'name': 'TaskTrackerTool', 'params': {}}], 'mcp_config': {'mcpServers': {'fetch': {'command': 'uvx', 'args': ['mcp-server-fetch']}, 'repomix': {'command': 'npx', 'args': ['-y', 'repomix@1.4.2', '--mcp']}}}, 'filter_tools_regex': '^(?!repomix)(.*)|^repomix.*pack_codebase.*$', 'system_prompt_filename': 'system_prompt.j2', 'system_prompt_kwargs': {'cli_mode': True}, 'security_analyzer': {'kind': 'LLMSecurityAnalyzer'}, 'condenser': {'kind': 'LLMSummarizingCondenser', 'llm': {'model': 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', 'api_key': SecretStr('**********'), 'base_url': 'https://llm-proxy.eval.all-hands.dev', 'openrouter_site_url': 'https://docs.all-hands.dev/', 'openrouter_app_name': 'OpenHands', 'num_retries': 5, 'retry_multiplier': 8.0, 'retry_min_wait': 8, 'retry_max_wait': 64, 'max_message_chars': 30000, 'temperature': 0.0, 'top_p': 1.0, 'max_input_tokens': 200000, 'max_output_tokens': 64000, 'drop_params': True, 'modify_params': True, 'disable_stop_word': False, 'caching_prompt': True, 'log_completions': False, 'log_completions_folder': 'logs/completions', 'enable_encrypted_reasoning': False, 'extended_thinking_budget': 200000, 'service_id': 'condenser', 'metadata': {}, 'OVERRIDE_ON_SERIALIZE': ('api_key', 'aws_access_key_id', 'aws_secret_access_key')}, 'max_size': 80, 'keep_first': 4}}"} +[DOCKER] {"asctime": "2025-10-12 21:35:02,337", "levelname": "INFO", "name": "openhands.tools.execute_bash.terminal.factory", "filename": "factory.py", "lineno": 103, "message": "Auto-detected: Using TmuxTerminal (tmux available)"} +[DOCKER] {"asctime": "2025-10-12 21:35:02,596", "levelname": "INFO", "name": "openhands.tools.execute_bash.impl", "filename": "impl.py", "lineno": 51, "message": "BashExecutor initialized with working_dir: /workspace, username: None, terminal_type: TerminalSession"} +[DOCKER] {"asctime": "2025-10-12 21:35:02,598", "levelname": "INFO", "name": "openhands.tools.file_editor.editor", "filename": "editor.py", "lineno": 85, "message": "FileEditor initialized with cwd: /workspace"} +[DOCKER] {"asctime": "2025-10-12 21:35:02,598", "levelname": "INFO", "name": "openhands.tools.task_tracker.definition", "filename": "definition.py", "lineno": 155, "message": "TaskTrackerExecutor initialized with save_dir: workspace/conversations/event_service/98aa5ad0-f12d-44fa-94d8-b1152df5a9a1"} +[DOCKER] {"asctime": "2025-10-12 21:35:02,643", "levelname": "INFO", "name": "mcp.server.lowlevel.server", "filename": "server.py", "lineno": 623, "message": "Processing request of type ListToolsRequest"} +[DOCKER] Downloading lxml (4.8MiB) +[DOCKER] Downloading pydantic-core (1.9MiB) +[DOCKER] Downloading pydantic-core +[DOCKER] Downloading lxml +[DOCKER] Installed 40 packages in 10ms +[DOCKER] {"asctime": "2025-10-12 21:35:09,628", "levelname": "INFO", "name": "openhands.sdk.mcp.utils", "filename": "utils.py", "lineno": 62, "message": "Created 8 MCP tools: 
['fetch_fetch', 'repomix_pack_codebase', 'repomix_pack_remote_repository', 'repomix_attach_packed_output', 'repomix_read_repomix_output', 'repomix_grep_repomix_output', 'repomix_file_system_read_file', 'repomix_file_system_read_directory']"} +[DOCKER] {"asctime": "2025-10-12 21:35:09,629", "levelname": "INFO", "name": "openhands.sdk.agent.base", "filename": "base.py", "lineno": 203, "message": "Loaded 11 tools from spec: ['execute_bash', 'str_replace_editor', 'task_tracker', 'fetch_fetch', 'repomix_pack_codebase', 'repomix_pack_remote_repository', 'repomix_attach_packed_output', 'repomix_read_repomix_output', 'repomix_grep_repomix_output', 'repomix_file_system_read_file', 'repomix_file_system_read_directory']"} +[DOCKER] {"asctime": "2025-10-12 21:35:09,629", "levelname": "INFO", "name": "openhands.sdk.agent.base", "filename": "base.py", "lineno": 209, "message": "Filtered to 5 tools after applying regex filter: ['execute_bash', 'str_replace_editor', 'task_tracker', 'fetch_fetch', 'repomix_pack_codebase']"} +[DOCKER] {"asctime": "2025-10-12 21:35:09,629", "levelname": "WARNING", "name": "openhands.sdk.agent.agent", "filename": "agent.py", "lineno": 98, "message": "LLM security analyzer is enabled but confirmation policy is set to NeverConfirm"} +[DOCKER] {"asctime": "2025-10-12 21:35:11,716", "levelname": "INFO", "name": "openhands.sdk.llm.llm_registry", "filename": "llm_registry.py", "lineno": 81, "message": "[LLM registry 52817eb9-ce49-4a76-b3f1-f0fab7b9b69c]: Added LLM for service agent"} +[DOCKER] {"asctime": "2025-10-12 21:35:13,319", "levelname": "INFO", "name": "openhands.sdk.llm.llm_registry", "filename": "llm_registry.py", "lineno": 81, "message": "[LLM registry 52817eb9-ce49-4a76-b3f1-f0fab7b9b69c]: Added LLM for service condenser"} +[DOCKER] {"asctime": "2025-10-12 21:35:13,319", "levelname": "INFO", "name": "openhands.sdk.conversation.impl.local_conversation", "filename": "local_conversation.py", "lineno": 267, "message": "Confirmation policy set to: kind='NeverConfirm'"} +[DOCKER] {"asctime": "2025-10-12 21:35:16,213", "levelname": "INFO", "name": "uvicorn.access", "client_addr": null, "request_line": null, "status_code": null} +[DOCKER] {"asctime": "2025-10-12 21:35:16,219", "levelname": "INFO", "name": "uvicorn.access", "client_addr": null, "request_line": null, "status_code": null} +[10/12/25 23:35:16] INFO 04_vscode_with_docker_sandboxed_server.py:62 + 📋 Conversation ID: + 98aa5ad0-f12d-44fa-94d8-b1152df5a9a1 +[10/12/25 23:35:16] INFO 📝 Sending first message... 
04_vscode_with_docker_sandboxed_server.py:63 +[DOCKER] {"asctime": "2025-10-12 21:35:16,235", "levelname": "INFO", "name": "uvicorn.access", "client_addr": null, "request_line": null, "status_code": null} +[DOCKER] /agent-server/.venv/lib/python3.12/site-packages/websockets/legacy/server.py:1178: DeprecationWarning: remove second argument of ws_handler +[DOCKER] warnings.warn("remove second argument of ws_handler", DeprecationWarning) +[DOCKER] {"asctime": "2025-10-12 21:35:16,279", "levelname": "INFO", "name": "uvicorn.error", "filename": "websockets_impl.py", "lineno": 273, "message": "192.168.65.1:33098 - \"WebSocket /sockets/events/98aa5ad0-f12d-44fa-94d8-b1152df5a9a1\" [accepted]", "websocket": ""} +[DOCKER] {"asctime": "2025-10-12 21:35:22,243", "levelname": "INFO", "name": "uvicorn.error", "filename": "server.py", "lineno": 643, "message": "connection open", "websocket": ""} +[10/12/25 23:35:22] INFO 🔔 Callback received event: 04_vscode_with_docker_sandboxed_server.py:49 + ConversationStateUpdateEvent + ConversationStateUpdate(key=full_state, value={'id': + '98aa5ad0-f12d-44fa-94d8-b1152df5a9a1', 'agent': + {'kind': 'Agent', 'llm': {'model': + 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', + 'api_key': '**********', 'base_url': + 'https://llm-proxy.eval.all-hands.dev', + 'openrouter_site_url': 'https://docs.all-hands.dev/', + 'openrouter_app_name': 'OpenHands', 'num_retries': 5, + 'retry_multiplier': 8.0, 'retry_min_wait': 8, + 'retry_max_wait': 64, 'max_message_chars': 30000, + 'temperature': 0.0, 'top_p': 1.0, 'max_input_tokens': + 200000, 'max_output_tokens': 64000, 'drop_params': + True, 'modify_params': True, 'disable_stop_word': + False, 'caching_prompt': True, 'log_completions': + False, 'log_completions_folder': 'logs/completions', + 'enable_encrypted_reasoning': False, + 'extended_thinking_budget': 200000, 'service_id': + 'agent', 'metadata': {}, 'OVERRIDE_ON_SERIALIZE': + ['api_key', 'aws_access_key_id', + 'aws_secret_access_key']}, 'tools': [{'name': + 'BashTool', 'params': {}}, {'name': 'FileEditorTool', + 'params': {}}, {'name': 'TaskTrackerTool', 'params': + {}}], 'mcp_config': {'mcpServers': {'fetch': + {'command': 'uvx', 'args': ['mcp-server-fetch']}, + 'repomix': {'command': 'npx', 'args': ['-y', + 'repomix@1.4.2', '--mcp']}}}, 'filter_tools_regex': + '^(?!repomix)(.*)|^repomix.*pack_codebase.*$', + 'system_prompt_filename': 'system_prompt.j2', + 'system_prompt_kwargs': {'cli_mode': True}, + 'security_analyzer': {'kind': 'LLMSecurityAnalyzer'}, + 'condenser': {'kind': 'LLMSummarizingCondenser', + 'llm': {'model': + 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', + 'api_key': '**********', 'base_url': + 'https://llm-proxy.eval.all-hands.dev', + 'openrouter_site_url': 'https://docs.all-hands.dev/', + 'openrouter_app_name': 'OpenHands', 'num_retries': 5, + 'retry_multiplier': 8.0, 'retry_min_wait': 8, + 'retry_max_wait': 64, 'max_message_chars': 30000, + 'temperature': 0.0, 'top_p': 1.0, 'max_input_tokens': + 200000, 'max_output_tokens': 64000, 'drop_params': + True, 'modify_params': True, 'disable_stop_word': + False, 'caching_prompt': True, 'log_completions': + False, 'log_completions_folder': 'logs/completions', + 'enable_encrypted_reasoning': False, + 'extended_thinking_budget': 200000, 'service_id': + 'condenser', 'metadata': {}, 'OVERRIDE_ON_SERIALIZE': + ['api_key', 'aws_access_key_id', + 'aws_secret_access_key']}, 'max_size': 80, + 'keep_first': 4}}, 'workspace': {'kind': + 'LocalWorkspace', 'working_dir': '/workspace'}, + 'persistence_dir': + 
'workspace/conversations/event_service/98aa5ad0-f12d- + 44fa-94d8-b1152df5a9a1', 'max_iterations': 500, + 'stuck_detection': True, 'agent_status': 'idle', + 'confirmation_policy': {'kind': 'NeverConfirm'}, + 'activated_knowledge_microagents': [], 'stats': + {'service_to_metrics': {'agent': {'model_name': + 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', + 'accumulated_cost': 0.03233625, + 'accumulated_token_usage': {'model': + 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', + 'prompt_tokens': 10, 'completion_tokens': 229, + 'cache_read_tokens': 0, 'cache_write_tokens': 7699, + 'reasoning_tokens': 75, 'context_window': 0, + 'per_turn_token': 239, 'response_id': ''}, 'costs': + [{'model': + 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', + 'cost': 0.03233625, 'timestamp': + 1760304922.2394147}], 'response_latencies': + [{'model': + 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', + 'latency': 5.992668390274048, 'response_id': + 'chatcmpl-66f905d5-adbd-4400-8af4-0f3212ca882f'}], + 'token_usages': [{'model': + 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', + 'prompt_tokens': 10, 'completion_tokens': 229, + 'cache_read_tokens': 0, 'cache_write_tokens': 7699, + 'reasoning_tokens': 75, 'context_window': 0, + 'per_turn_token': 239, 'response_id': + 'chatcmpl-66f905d5-adbd-4400-8af4-0f3212ca882f'}]}, + 'condenser': {'model_name': + 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', + 'accumulated_cost': 0.0, 'accumulated_token_usage': + {'model': + 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', + 'prompt_tokens': 0, 'completion_tokens': 0, + 'cache_read_tokens': 0, 'cache_write_tokens': 0, + 'reasoning_tokens': 0, 'context_window': 0, + 'per_turn_token': 0, 'response_id': ''}, 'costs': [], + 'response_latencies': [], 'token_usages': []}}}}) +╭──────────────────────────────────────── UNKNOWN Event: ConversationStateUpdateEvent ────────────────────────────────────────╮ +│ │ +│ Unknown event type: ConversationStateUpdateEvent │ +│ {'kind': 'ConversationStateUpdateEvent', 'id': '6641bfa8-2978-47e7-8b84-738a85f806a9', 'timestamp': │ +│ '2025-10-12T21:35:22.242639', 'source': 'environment', 'key': 'full_state', 'value': {'id': │ +│ '98aa5ad0-f12d-44fa-94d8-b1152df5a9a1', 'agent': {'kind': 'Agent', 'llm': {'model': │ +│ 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', 'api_key': '**********', 'base_url': │ +│ 'https://llm-proxy.eval.all-hands.dev', 'openrouter_site_url': 'https://docs.all-hands.dev/', 'openrouter_app_name': │ +│ 'OpenHands', 'num_retries': 5, 'retry_multiplier': 8.0, 'retry_min_wait': 8, 'retry_max_wait': 64, 'max_message_chars': │ +│ 30000, 'temperature': 0.0, 'top_p': 1.0, 'max_input_tokens': 200000, 'max_output_tokens': 64000, 'drop_params': True, │ +│ 'modify_params': True, 'disable_stop_word': False, 'caching_prompt': True, 'log_completions': False, │ +│ 'log_completions_folder': 'logs/completions', 'enable_encrypted_reasoning': False, 'extended_thinking_budget': 200000, │ +│ 'service_id': 'agent', 'metadata': {}, 'OVERRIDE_ON_SERIALIZE': ['api_key', 'aws_access_key_id', 'aws_secret_access_key']}, │ +│ 'tools': [{'name': 'BashTool', 'params': {}}, {'name': 'FileEditorTool', 'params': {}}, {'name': 'TaskTrackerTool', │ +│ 'params': {}}], 'mcp_config': {'mcpServers': {'fetch': {'command': 'uvx', 'args': ['mcp-server-fetch']}, 'repomix': │ +│ {'command': 'npx', 'args': ['-y', 'repomix@1.4.2', '--mcp']}}}, 'filter_tools_regex': │ +│ '^(?!repomix)(.*)|^repomix.*pack_codebase.*$', 'system_prompt_filename': 'system_prompt.j2', 
'system_prompt_kwargs': │ +│ {'cli_mode': True}, 'security_analyzer': {'kind': 'LLMSecurityAnalyzer'}, 'condenser': {'kind': 'LLMSummarizingCondenser', │ +│ 'llm': {'model': 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', 'api_key': '**********', 'base_url': │ +│ 'https://llm-proxy.eval.all-hands.dev', 'openrouter_site_url': 'https://docs.all-hands.dev/', 'openrouter_app_name': │ +│ 'OpenHands', 'num_retries': 5, 'retry_multiplier': 8.0, 'retry_min_wait': 8, 'retry_max_wait': 64, 'max_message_chars': │ +│ 30000, 'temperature': 0.0, 'top_p': 1.0, 'max_input_tokens': 200000, 'max_output_tokens': 64000, 'drop_params': True, │ +│ 'modify_params': True, 'disable_stop_word': False, 'caching_prompt': True, 'log_completions': False, │ +│ 'log_completions_folder': 'logs/completions', 'enable_encrypted_reasoning': False, 'extended_thinking_budget': 200000, │ +│ 'service_id': 'condenser', 'metadata': {}, 'OVERRIDE_ON_SERIALIZE': ['api_key', 'aws_access_key_id', │ +│ 'aws_secret_access_key']}, 'max_size': 80, 'keep_first': 4}}, 'workspace': {'kind': 'LocalWorkspace', 'working_dir': │ +│ '/workspace'}, 'persistence_dir': 'workspace/conversations/event_service/98aa5ad0-f12d-44fa-94d8-b1152df5a9a1', │ +│ 'max_iterations': 500, 'stuck_detection': True, 'agent_status': 'idle', 'confirmation_policy': {'kind': 'NeverConfirm'}, │ +│ 'activated_knowledge_microagents': [], 'stats': {'service_to_metrics': {'agent': {'model_name': │ +│ 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', 'accumulated_cost': 0.03233625, 'accumulated_token_usage': {'model': │ +│ 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', 'prompt_tokens': 10, 'completion_tokens': 229, 'cache_read_tokens': │ +│ 0, 'cache_write_tokens': 7699, 'reasoning_tokens': 75, 'context_window': 0, 'per_turn_token': 239, 'response_id': ''}, │ +│ 'costs': [{'model': 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', 'cost': 0.03233625, 'timestamp': │ +│ 1760304922.2394147}], 'response_latencies': [{'model': 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', 'latency': │ +│ 5.992668390274048, 'response_id': 'chatcmpl-66f905d5-adbd-4400-8af4-0f3212ca882f'}], 'token_usages': [{'model': │ +│ 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', 'prompt_tokens': 10, 'completion_tokens': 229, 'cache_read_tokens': │ +│ 0, 'cache_write_tokens': 7699, 'reasoning_tokens': 75, 'context_window': 0, 'per_turn_token': 239, 'response_id': │ +│ 'chatcmpl-66f905d5-adbd-4400-8af4-0f3212ca882f'}]}, 'condenser': {'model_name': │ +│ 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', 'accumulated_cost': 0.0, 'accumulated_token_usage': {'model': │ +│ 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', 'prompt_tokens': 0, 'completion_tokens': 0, 'cache_read_tokens': 0, │ +│ 'cache_write_tokens': 0, 'reasoning_tokens': 0, 'context_window': 0, 'per_turn_token': 0, 'response_id': ''}, 'costs': [], │ +│ 'response_latencies': [], 'token_usages': []}}}}} │ +│ │ +╰─────────────────────────────────────────────────────── (environment) ───────────────────────────────────────────────────────╯ + +[10/12/25 23:35:22] INFO 🔔 Callback received event: ActionEvent 04_vscode_with_docker_sandboxed_server.py:49 + ActionEvent (agent) + Thought: I'll create a simple Python script that + prints "Hello World" for you. 
+ Action: FileEditorAction +╭─────────────────────────────────────────────────────── Agent Action ────────────────────────────────────────────────────────╮ +│ │ +│ Predicted Security Risk: LOW │ +│ │ +│ Reasoning: │ +│ The user wants me to create a simple Python script that prints "Hello World". This is a straightforward task. I should: │ +│ │ +│ 1. Create a Python file (commonly named something like `hello.py` or `hello_world.py`) │ +│ 2. Add code to print "Hello World" │ +│ │ +│ Let me first check the current working directory to understand the context, then create the file. │ +│ │ +│ Thought: │ +│ I'll create a simple Python script that prints "Hello World" for you. │ +│ │ +│ Action: FileEditorAction │ +│ │ +│ Arguments: │ +│ kind: "FileEditorAction" │ +│ command: "create" │ +│ path: "/workspace/hello_world.py" │ +│ file_text: │ +│ print("Hello World") │ +│ │ +│ │ +│ │ +╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ + +[10/12/25 23:35:22] INFO 🔔 Callback received event: ObservationEvent 04_vscode_with_docker_sandboxed_server.py:49 + ObservationEvent (environment) + Tool: str_replace_editor + Result: File created successfully at: + /workspace/hello_world.py +╭──────────────────────────────────────────────────────── Observation ────────────────────────────────────────────────────────╮ +│ │ +│ Tool: str_replace_editor │ +│ Result: │ +│ [File /workspace/hello_world.py edited with 1 changes.] │ +│ [begin of edit 1 / 1] │ +│ (content before edit) │ +│ 1| │ +│ (content after edit) │ +│ +1|print("Hello World") │ +│ 2| │ +│ [end of edit 1 / 1] │ +│ │ +╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ + +[10/12/25 23:35:25] INFO 🔔 Callback received event: ActionEvent 04_vscode_with_docker_sandboxed_server.py:49 + ActionEvent (agent) + Thought: Now let me run the script to verify it + works: + Action: ExecuteBashAction +╭─────────────────────────────────────────────────────── Agent Action ────────────────────────────────────────────────────────╮ +│ │ +│ Predicted Security Risk: LOW │ +│ │ +│ Reasoning: │ +│ Good! I've created the file. Now let me run it to verify it works correctly. │ +│ │ +│ Thought: │ +│ Now let me run the script to verify it works: │ +│ │ +│ $ python /workspace/hello_world.py │ +│ │ +╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ +[DOCKER] /agent-server/.venv/lib/python3.12/site-packages/pydantic/main.py:463: UserWarning: Pydantic serializer warnings: + +[DOCKER] PydanticSerializationUnexpectedValue(Expected 10 fields but got 7: Expected `Message` - serialized value may not be as expected [input_value=Message(content='Now let ...y it works correctly."}), input_type=Message]) +[DOCKER] PydanticSerializationUnexpectedValue(Expected `StreamingChoices` - serialized value may not be as expected [input_value=Choices(finish_reason='to...ider_specific_fields={}), input_type=Choices]) +[DOCKER] return self.__pydantic_serializer__.to_python( +[10/12/25 23:35:26] INFO 🔔 Callback received event: ObservationEvent 04_vscode_with_docker_sandboxed_server.py:49 + ObservationEvent (environment) + Tool: execute_bash + Result: Hello World + [The command completed with exit code 0.] 
+ [Current working directory: /workspace] + [Python interpreter: /usr/local/bin/python] + [Command finished with exit code 0] +╭──────────────────────────────────────────────────────── Observation ────────────────────────────────────────────────────────╮ +│ │ +│ Tool: execute_bash │ +│ Result: │ +│ Hello World │ +│ │ +│ 📁 Working directory: /workspace │ +│ 🐍 Python interpreter: /usr/local/bin/python │ +│ ✅ Exit code: 0 │ +│ │ +╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ + +[DOCKER] {"asctime": "2025-10-12 21:35:30,260", "levelname": "INFO", "name": "openhands.sdk.agent.agent", "filename": "agent.py", "lineno": 253, "message": "LLM produced a message response - awaits user input"} +[DOCKER] /agent-server/.venv/lib/python3.12/site-packages/pydantic/main.py:463: UserWarning: Pydantic serializer warnings: +[DOCKER] PydanticSerializationUnexpectedValue(Expected 10 fields but got 7: Expected `Message` - serialized value may not be as expected [input_value=Message(content='Perfect!...The task is complete.'}), input_type=Message]) +[DOCKER] PydanticSerializationUnexpectedValue(Expected `StreamingChoices` - serialized value may not be as expected [input_value=Choices(finish_reason='st...ider_specific_fields={}), input_type=Choices]) +[DOCKER] return self.__pydantic_serializer__.to_python( +[10/12/25 23:35:30] INFO 🔔 Callback received event: 04_vscode_with_docker_sandboxed_server.py:49 + ConversationStateUpdateEvent + ConversationStateUpdate(key=agent_status, + value=finished) +╭──────────────────────────────────────── UNKNOWN Event: ConversationStateUpdateEvent ────────────────────────────────────────╮ +│ │ +│ Unknown event type: ConversationStateUpdateEvent │ +│ {'kind': 'ConversationStateUpdateEvent', 'id': '80c2cd61-ea42-4c24-9fd2-f09b90431d55', 'timestamp': │ +│ '2025-10-12T21:35:30.266871', 'source': 'environment', 'key': 'agent_status', 'value': 'finished'} │ +│ │ +╰─────────────────────────────────────────────────────── (environment) ───────────────────────────────────────────────────────╯ +[DOCKER] {"asctime": "2025-10-12 21:35:30,271", "levelname": "INFO", "name": "uvicorn.access", "client_addr": null, "request_line": null, "status_code": null} +[10/12/25 23:35:30] INFO run() triggered successfully: remote_conversation.py:489 + + +Because you've enabled extra_ports=True in DockerWorkspace, you can open VSCode Web to see the workspace. + +VSCode Link: http://localhost:8011 +(Check the agent server logs for the full URL with auth token) + +The VSCode should have the OpenHands settings extension installed: + - Dark theme enabled + - Auto-save enabled + - Telemetry disabled + - Auto-updates disabled + +Press 'y' and Enter to exit and terminate the workspace. +>> [10/12/25 23:35:30] INFO 🔔 Callback received event: MessageEvent 04_vscode_with_docker_sandboxed_server.py:49 + MessageEvent (agent) + assistant: Perfect! I've created a simple Python + script called `hello_world.py` that prints "Hello + World". The script has been successfully tested and + works as expected. [Thinking blocks: 1] +╭──────────────────────────────────────────────────── Message from Agent ─────────────────────────────────────────────────────╮ +│ │ +│ Perfect! I've created a simple Python script called `hello_world.py` that prints "Hello World". The script has been │ +│ successfully tested and works as expected. 
│ +│ │ +╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ + diff --git a/openhands/sdk/conversation/impl/local_conversation.py b/openhands/sdk/conversation/impl/local_conversation.py index 2ea670f5cc..045eb89528 100644 --- a/openhands/sdk/conversation/impl/local_conversation.py +++ b/openhands/sdk/conversation/impl/local_conversation.py @@ -110,6 +110,20 @@ def _default_callback(e): for llm in list(self.agent.get_all_llms()): self.llm_registry.add(llm) + # Eagerly discover and register LLM profiles from disk so they are + # available through the registry (profiles are stored under + # ~/.openhands/llm-profiles/*.json). This keeps behavior backward + # compatible while making named profiles discoverable to the runtime. + try: + from openhands.sdk.llm.profile_manager import ProfileManager + + ProfileManager().register_all(self.llm_registry) + except Exception: + # Do not fail conversation initialization if profile loading has problems + logger.debug( + "No LLM profiles registered or failed to load profiles", exc_info=True + ) + # Initialize secrets if provided if secrets: # Convert dict[str, str] to dict[str, SecretValue] diff --git a/openhands/sdk/llm/profile_manager.py b/openhands/sdk/llm/profile_manager.py new file mode 100644 index 0000000000..7fadc9700c --- /dev/null +++ b/openhands/sdk/llm/profile_manager.py @@ -0,0 +1,79 @@ +from __future__ import annotations + +import json +import logging +from pathlib import Path + +from openhands.sdk.llm.llm import LLM +from openhands.sdk.llm.llm_registry import LLMRegistry + + +logger = logging.getLogger(__name__) + + +class ProfileManager: + """Manage LLM profile files on disk. + + Profiles are stored as JSON files using the existing LLM schema, typically + at ~/.openhands/llm-profiles/<name>.json. 
+ """ + + def __init__(self, base_dir: str | Path | None = None): + if base_dir is None: + self.base_dir = Path.home() / ".openhands" / "llm-profiles" + else: + self.base_dir = Path(base_dir).expanduser() + self.base_dir.mkdir(parents=True, exist_ok=True) + + def list_profiles(self) -> list[str]: + return sorted([p.stem for p in self.base_dir.glob("*.json")]) + + def get_profile_path(self, name: str) -> Path: + return self.base_dir / f"{name}.json" + + def load_profile(self, name: str) -> LLM: + p = self.get_profile_path(name) + if not p.exists(): + raise FileNotFoundError(f"Profile not found: {name} -> {p}") + # Use LLM.load_from_json to leverage pydantic validation + llm = LLM.load_from_json(str(p)) + # Ensure profile_id is present on loaded LLM + if getattr(llm, "profile_id", None) is None: + try: + llm = llm.model_copy(update={"profile_id": name}) + except Exception: + # Old pydantic versions might not have model_copy; fallback + llm.profile_id = name # type: ignore[attr-defined] + return llm + + def save_profile(self, name: str, llm: LLM, include_secrets: bool = False) -> Path: + p = self.get_profile_path(name) + # Dump model to dict and ensure profile_id is set + data = llm.model_dump(exclude_none=True) + data["profile_id"] = name + # Remove secret fields unless explicitly requested + if not include_secrets: + for secret_field in ( + "api_key", + "aws_access_key_id", + "aws_secret_access_key", + ): + if secret_field in data: + data.pop(secret_field, None) + # Write to file + with open(p, "w", encoding="utf-8") as f: + json.dump(data, f, indent=2, ensure_ascii=False) + logger.info(f"Saved profile {name} -> {p}") + return p + + def register_all(self, registry: LLMRegistry) -> None: + # Load and attempt to register all profiles. Skip duplicates. + for name in self.list_profiles(): + try: + llm = self.load_profile(name) + try: + registry.add(llm) + except Exception as e: + logger.info(f"Skipping profile {name}: registry.add failed: {e}") + except Exception as e: + logger.warning(f"Failed to load profile {name}: {e}") diff --git a/previous.md b/previous.md new file mode 100644 index 0000000000..d3f3efe469 --- /dev/null +++ b/previous.md @@ -0,0 +1,68 @@ +# VSCode Settings Extension Work + +## Branch +`vscode-settings-extension` + +## What We've Done + +### 1. Created VSCode Extension +- **Location**: `openhands/agent_server/vscode_extensions/openhands-settings/` +- **Structure**: + - `src/extension.ts` - Main extension code that configures settings + - `package.json` - Extension metadata (activates on startup with `"*"`) + - `tsconfig.json` - TypeScript configuration + +### 2. Extension Settings Applied +The extension automatically configures: +- `workbench.colorTheme`: "Default Dark+" +- `editor.fontSize`: 14 +- `editor.tabSize`: 4 +- `files.autoSave`: "afterDelay" +- `files.autoSaveDelay`: 1000 +- `update.mode`: "none" +- `telemetry.telemetryLevel`: "off" +- `extensions.autoCheckUpdates`: false +- `extensions.autoUpdate`: false + +### 3. Updated `vscode_service.py` +- **Path**: `openhands/agent_server/vscode_service.py` +- Extensions directory: `self.extensions_dir = self.openvscode_server_root / "extensions"` + - Points to `/openhands/.openvscode-server/extensions` +- Added `_build_extensions()` method: + - Iterates through all extensions in the directory + - Runs `npm install && npm run compile` for each +- Modified `_start_vscode_process()`: + - Conditionally passes `--extensions-dir` flag if directory exists + +### 4. 
Updated Dockerfile
+- **Path**: `openhands/agent_server/docker/Dockerfile`
+- Added COPY commands in both `source` and `binary` targets:
+  ```dockerfile
+  COPY --chown=${USERNAME}:${USERNAME} --from=builder /agent-server/openhands/agent_server/vscode_extensions /openhands/.openvscode-server/extensions
+  ```
+- Extensions are copied from source code into VSCode server's extensions directory
+
+### 5. Created Test Example
+- **Path**: `examples/02_remote_agent_server/04_vscode_with_docker_sandboxed_server.py`
+- Uses `DockerWorkspace` with `extra_ports=True`
+- Exposes VSCode on port 8011
+- Instructions for checking the extension settings
+
+## Architecture Pattern
+Following V0 approach:
+- Extensions live in VSCode server's own directory: `/openhands/.openvscode-server/extensions/`
+- Extensions are built at runtime when service starts
+- No `.vsix` packaging needed - direct source copy
+
+## Next Steps
+1. Test the extension by running the example script
+2. Verify settings are applied in VSCode Web
+3. Check extension build logs in agent server output
+
+## Testing Command
+```bash
+export LLM_API_KEY=your_key_here
+uv run python examples/02_remote_agent_server/04_vscode_with_docker_sandboxed_server.py
+```
+
+Then access VSCode at: http://localhost:8011

From 46ca1b7452c255543d21b06c041f4a4a6b0ea6ef Mon Sep 17 00:00:00 2001
From: openhands
Date: Sat, 18 Oct 2025 15:54:45 +0200
Subject: [PATCH 04/37] chore: stop tracking local runtime and worktree files; add to .gitignore

---
 .beads/agent-sdk.db                        | Bin 180224 -> 0 bytes
 .gitignore                                 |  12 +
 .worktrees/message-typed                   |   1 -
 .worktrees/native-responses                |   1 -
 .worktrees/refactor/llm-tool-api-from-main |   1 -
 .worktrees/responses                       |   1 -
 .worktrees/sonnet-thinking                 |   1 -
 AGENTS.md                                  |   1 -
 CLAUDE.md                                  | 180 -----
 agent-sdk.workspace.code-workspace         |  14 -
 docs/agent-sdk.workspace.code-workspace    |  14 -
 docs/llm-model-info-and-caps.md            |  56 --
 docs/llm-refactor.md                       | 110 ---
 log.txt                                    | 825 ---------------------
 previous.md                                |  68 --
 15 files changed, 12 insertions(+), 1273 deletions(-)
 delete mode 100644 .beads/agent-sdk.db
 delete mode 160000 .worktrees/message-typed
 delete mode 160000 .worktrees/native-responses
 delete mode 160000 .worktrees/refactor/llm-tool-api-from-main
 delete mode 160000 .worktrees/responses
 delete mode 160000 .worktrees/sonnet-thinking
 delete mode 100644 AGENTS.md
 delete mode 100644 CLAUDE.md
 delete mode 100644 agent-sdk.workspace.code-workspace
 delete mode 100644 docs/agent-sdk.workspace.code-workspace
 delete mode 100644 docs/llm-model-info-and-caps.md
 delete mode 100644 docs/llm-refactor.md
 delete mode 100644 log.txt
 delete mode 100644 previous.md

diff --git a/.beads/agent-sdk.db b/.beads/agent-sdk.db
deleted file mode 100644
index 832265a4a5990e7cb7cdac9479fb82677ca38eb9..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 180224
[~180 KB of base85-encoded binary patch data omitted]

diff --git a/.gitignore b/.gitignore
index 66a4bae455..3299c4f6b0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -203,3 +203,15 @@ cache
 /workspace/
 openapi.json
 .client/
+# Ignore local runtime and developer files
+.beads/
+*.db
+.worktrees/
+*.code-workspace
+log.txt
+previous.md
+AGENTS.md
+CLAUDE.md
+docs/agent-sdk.workspace.code-workspace
+docs/llm-model-info-and-caps.md
+docs/llm-refactor.md
diff --git a/.worktrees/message-typed b/.worktrees/message-typed
deleted file mode 160000
index 30d52f94b3..0000000000
--- a/.worktrees/message-typed
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 30d52f94b38a2156583ef2cc0698e42335876d1e
diff --git a/.worktrees/native-responses b/.worktrees/native-responses
deleted file mode 160000
index f867039782..0000000000
--- a/.worktrees/native-responses
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit f867039782c66764fff65d8d90286d9ef672091d
diff --git a/.worktrees/refactor/llm-tool-api-from-main b/.worktrees/refactor/llm-tool-api-from-main
deleted file mode 160000
index 99898e73ec..0000000000
--- a/.worktrees/refactor/llm-tool-api-from-main
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 99898e73ec3119a7e988eb586f7d8f97565c9af5
diff --git a/.worktrees/responses b/.worktrees/responses
deleted file mode 160000
index 72a68a1b49..0000000000
--- a/.worktrees/responses
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 72a68a1b49f3cf5a80494092059056e8d34062ee
diff --git a/.worktrees/sonnet-thinking b/.worktrees/sonnet-thinking
deleted file mode 160000
index e6e480db63..0000000000
--- a/.worktrees/sonnet-thinking
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit e6e480db634ba0f9b98818986afa6826f3f66220
diff --git a/AGENTS.md b/AGENTS.md
deleted file mode 100644
index 22644d6fc4..0000000000
--- a/AGENTS.md
+++ /dev/null
@@ -1 +0,0 @@
-We track work in Beads. Run `bd quickstart` to see how.
diff --git a/CLAUDE.md b/CLAUDE.md
deleted file mode 100644
index db09863b2f..0000000000
--- a/CLAUDE.md
+++ /dev/null
@@ -1,180 +0,0 @@
-# CLAUDE.md
-
-This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
-
-## Project Overview
-
-OpenHands Agent SDK enables building software with AI agents. This SDK powers the OpenHands project and allows developers to create custom agents that write code, debug issues, automate tasks, and interact with various tools.
- -The repository is structured as a **UV workspace** with four main packages: -- `openhands/sdk`: Core agent functionality, LLM integration, conversation management -- `openhands/tools`: Built-in tools (bash, file editing, task tracking, browser automation) -- `openhands/workspace`: Workspace management (local and remote execution environments) -- `openhands/agent_server`: FastAPI-based REST/WebSocket server for remote agent interactions - -## Development Commands - -### Environment Setup -```bash -# Initial setup (install dependencies + pre-commit hooks) -make build - -# Add new dependencies -uv add package-name # Runtime dependency -uv add --dev package-name # Development dependency -``` - -### Code Quality -```bash -# Format code -make format # or: uv run ruff format - -# Lint and auto-fix -make lint # or: uv run ruff check --fix - -# Type checking -uv run pyright # Runs on pre-commit - -# Run all pre-commit hooks -uv run pre-commit run --all-files -uv run pre-commit run --files path/to/file.py -``` - -### Testing -```bash -# Run all tests -uv run pytest - -# Run specific test suites -uv run pytest tests/sdk/ -uv run pytest tests/tools/ -uv run pytest tests/agent_server/ -uv run pytest tests/cross/ -uv run pytest tests/integration/ - -# Run with coverage -uv run pytest --cov=openhands --cov-report=html - -# Run specific test file or function -uv run pytest tests/sdk/test_conversation.py -uv run pytest tests/sdk/test_conversation.py::test_function_name -``` - -### Agent Server -```bash -# Build server executable -make build-server - -# Validate OpenAPI schema -make test-server-schema -``` - -### Running Examples -```bash -# Set API key first -export LLM_API_KEY=your_key_here - -# Run examples (standalone SDK usage) -uv run python examples/01_standalone_sdk/*.py - -# Examples requiring agent server -cd examples/02_remote_agent_server -# Follow README in that directory -``` - -## Architecture - -### Core SDK Architecture - -**Agent (`openhands/sdk/agent/`)**: The central orchestrator that coordinates LLMs, tools, and conversation state. Agents can be created via presets (`get_default_agent()` or `get_planning_agent()`) or manually configured with specific tools. - -**Conversation (`openhands/sdk/conversation/`)**: Manages interaction flow between users and agents. Key components: -- `Conversation`: Main class for SDK usage -- `LocalConversation`: Runs agent locally in same process -- `RemoteConversation`: Connects to remote agent via WebSocket -- `EventStore`: Persists conversation history -- `StuckDetector`: Detects and handles infinite loops - -**LLM Integration (`openhands/sdk/llm/`)**: Unified interface for multiple LLM providers via LiteLLM. Supports function calling, multimodal inputs, and custom routing strategies. `LLMRegistry` manages shared LLM configurations. 
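A minimal sketch of this wiring (hedged: the `LLM` fields and the `registry.add(...)` / `ProfileManager.register_all(...)` calls are the ones shown elsewhere in this patch series; the no-argument `LLMRegistry()` construction is an assumption):

```python
from openhands.sdk.llm.llm import LLM
from openhands.sdk.llm.llm_registry import LLMRegistry
from openhands.sdk.llm.profile_manager import ProfileManager

# Shared registry of LLM configurations (assumed default construction).
registry = LLMRegistry()

# A directly configured LLM; service_id identifies its runtime usage
# ('agent', 'condenser', 'title-gen', ...).
registry.add(LLM(model="gpt-4o-mini", service_id="agent"))

# Named profiles stored under ~/.openhands/llm-profiles/<name>.json are
# registered the same way LocalConversation does at startup.
ProfileManager().register_all(registry)
```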
- -**Context Management (`openhands/sdk/context/`)**: Controls agent behavior and memory: -- `AgentContext`: System/user message customization -- `Microagents`: Inject context based on triggers (repo-wide or keyword-based) -- `Condenser`: Manages conversation history truncation (e.g., `LLMSummarizingCondenser` replaces old events with summaries) - -**Tools (`openhands/sdk/tool/` and `openhands/tools/`)**: Tools are registered via `register_tool()` and instantiated with `Tool()` specs: -- `BashTool`: Execute bash commands in persistent shell -- `FileEditorTool`: Create/edit files with advanced editing capabilities -- `TaskTrackerTool`: Organize and track development tasks -- `BrowserToolSet`: Web automation (disabled in CLI mode) -- Built-in tools: `ThinkTool` (reasoning) and `FinishTool` (task completion) - -**MCP Integration (`openhands/sdk/mcp/`)**: Model Context Protocol support for external tool providers. Default preset includes `mcp-server-fetch` (web fetching) and `repomix` (codebase packing). - -**Security (`openhands/sdk/security/`)**: `LLMSecurityAnalyzer` analyzes tool calls for potential risks and can prompt for user confirmation on risky actions. - -**Events (`openhands/sdk/event/`)**: All actions and observations are represented as events. `LLMConvertibleEvent` types can be serialized to/from LLM messages. - -### Agent Server Architecture - -**API Layer (`openhands/agent_server/api.py`)**: FastAPI application with REST endpoints and WebSocket support. Routes are organized by domain: -- `conversation_router`: Create/manage conversations -- `event_router`: Query conversation events -- `bash_router`, `file_router`, `tool_router`: Direct tool access -- `vscode_router`, `desktop_router`: IDE/desktop integration -- `sockets_router`: WebSocket connections for real-time updates - -**Services**: -- `conversation_service`: Manages conversation lifecycle -- `vscode_service`, `desktop_service`: Optional IDE/desktop environment management - -**Pub/Sub (`pub_sub.py`)**: In-memory event bus for broadcasting conversation updates to WebSocket clients. - -**Docker Support**: Dockerfiles in `openhands/agent_server/docker/` for containerized deployment. - -### Workspace Management - -**Workspace (`openhands/workspace/`)**: Abstracts execution environments. `LocalWorkspace` runs on host, `RemoteWorkspace` connects to remote environments via API. - -## Key Patterns and Conventions - -### Tool Development -Tools must inherit from `ToolBase` and implement `get_schema()` and `execute()`. Register tools before agent creation: -```python -from openhands.sdk.tool import register_tool -register_tool("MyTool", MyToolClass) -``` - -### Conversation Flow -1. Create agent with LLM and tools -2. Create conversation with agent -3. Send messages via `conversation.send_message()` -4. Run conversation with `conversation.run()` (blocks until agent awaits user input) -5. Access events via `conversation.events` - -### Event-Driven Design -All interactions are events. Tools produce `Action` events (what agent wants to do) and `Observation` events (results). The conversation loop processes events until agent enters "await user input" state. - -### UV Workspace Structure -This is a monorepo with inter-package dependencies managed by UV workspace. 
When modifying dependencies: -- Add to the appropriate package's `pyproject.toml` -- Run `uv sync` to update lockfile -- Workspace sources are defined in root `pyproject.toml` `[tool.uv.sources]` - -### Testing Structure -- `tests/sdk/`: Core SDK functionality tests -- `tests/tools/`: Individual tool tests -- `tests/agent_server/`: Server API tests -- `tests/cross/`: Cross-package integration tests -- `tests/integration/`: Full end-to-end tests -- Use `pytest-asyncio` for async tests (asyncio_mode = "auto" in pyproject.toml) - -## Important Notes - -- Python 3.12+ required -- UV 0.8.13+ required for workspace support -- Pre-commit hooks enforce ruff formatting, linting, pycodestyle, and pyright type checking -- All LLM interactions go through LiteLLM for provider abstraction -- Default preset includes MCP servers: `mcp-server-fetch` and `repomix` -- Browser tools are automatically disabled when `cli_mode=True` -- Security analyzer is enabled by default in the default preset diff --git a/agent-sdk.workspace.code-workspace b/agent-sdk.workspace.code-workspace deleted file mode 100644 index ef4df70c86..0000000000 --- a/agent-sdk.workspace.code-workspace +++ /dev/null @@ -1,14 +0,0 @@ -{ - "folders": [ - { - "path": "." - }, - { - "path": "../odie-cli" - }, - { - "path": "../../.openhands" - } - ], - "settings": {} -} \ No newline at end of file diff --git a/docs/agent-sdk.workspace.code-workspace b/docs/agent-sdk.workspace.code-workspace deleted file mode 100644 index ef4df70c86..0000000000 --- a/docs/agent-sdk.workspace.code-workspace +++ /dev/null @@ -1,14 +0,0 @@ -{ - "folders": [ - { - "path": "." - }, - { - "path": "../odie-cli" - }, - { - "path": "../../.openhands" - } - ], - "settings": {} -} \ No newline at end of file diff --git a/docs/llm-model-info-and-caps.md b/docs/llm-model-info-and-caps.md deleted file mode 100644 index ecd93bce8d..0000000000 --- a/docs/llm-model-info-and-caps.md +++ /dev/null @@ -1,56 +0,0 @@ -# Model Info and Capabilities Initialization - -Problem -- `_init_model_info_and_caps()` mixes network I/O, name fallback heuristics, capability derivation, and policy (e.g., Claude 64k override). This reduces readability, slows object construction, and complicates testing. - -Goals -- Keep initialization fast and predictable. -- Isolate provider-specific probing and capability derivation. -- Make Anthropic-specific rules easy to find and change. -- Avoid repeated network calls for the same model/base_url. - -Proposed Structure -1) Resolver with cache -- `resolve_model_info(model: str, base_url: str | None, api_key: SecretStr | None) -> dict | None` -- Tries in order: - 1. If model.startswith("openrouter"): litellm.get_model_info(model) - 2. If model.startswith("litellm_proxy/"): fetch from `{base_url}/v1/model/info`, find matching `model_name`, return `model_info` - 3. Fallback: litellm.get_model_info(model.split(":")[0]) - 4. Fallback: litellm.get_model_info(model.split("/")[-1]) -- Wrap in an LRU cache keyed by `(provider_tag, normalized_model, base_url)`. -- Apply a short timeout on httpx.get and handle errors gracefully. - -2) Pure derivations -- `derive_token_limits(model: str, model_info: dict | None, existing_max_in: int | None, existing_max_out: int | None) -> tuple[int | None, int | None]` - - Respect existing values when already provided by the user. - - If Anthropic family and no explicit max_output_tokens, apply a practical cap (e.g., 64k) via a shared Anthropic helper. 
- - Use model_info["max_input_tokens"] / ["max_output_tokens"] / ["max_tokens"] as fallbacks. -- `compute_function_calling_active(native_override: bool | None, features) -> bool` - - If user sets `native_tool_calling` use it; otherwise features.supports_function_calling. - -3) Anthropic helpers (co-located) -- `anthropic/cache.py` → apply_prompt_caching(messages) -- `anthropic/tokens.py` → claude_practical_max_output(model) -> int | None -- `anthropic/reasoning.py` → headers and interleaved-thinking beta logic - -4) Initialization flow inside LLM -- During validation: set telemetry/metrics/tokenizer. -- Call `self._initialize_model_profile()` (small): - - `self._model_info = resolve_model_info(self.model, self.base_url, self.api_key)` - - `(self.max_input_tokens, self.max_output_tokens) = derive_token_limits(...)` - - `self._function_calling_active = compute_function_calling_active(self.native_tool_calling, get_features(self.model))` -- Optionally lazy: if we defer resolver to first use, ensure `clone()` carries resolved profile forward to avoid surprises. - -Base URL Scheme for Local/Proxy -- If `base_url` lacks a scheme, default to `http://` for localhost/intranet friendliness, with a clear debug log: "No scheme in base_url, defaulting to http://". -- Optionally add `force_https: bool = False` flag to override behavior when desired. - -Why This Works -- Readability: every function does one thing; the big method is gone. -- Testability: resolver can be mocked, derivations are pure and easy to unit test. -- Performance: model info cached across instances; no repeated network calls. -- Extensibility: Anthropic rules live together; adding providers won’t bloat LLM. - -Open Questions -- Should we always default to `http://` when no scheme, or default to `https://` and special-case `localhost`/`127.0.0.1`? Defaulting to `http://` is convenient for local dev; we can add a security note in docs. -- How large should the resolver LRU cache be? Likely tiny (e.g., 64 entries) since models are a short list. diff --git a/docs/llm-refactor.md b/docs/llm-refactor.md deleted file mode 100644 index 9fcd7143b9..0000000000 --- a/docs/llm-refactor.md +++ /dev/null @@ -1,110 +0,0 @@ -# LLM Refactor Plan: Simplicity, Streaming/Async, Stateful Responses - -Context -- The current LLM class (openhands/sdk/llm/llm.py) has grown large and mixes several concerns: config, feature detection, message formatting, tool strategy (native vs mock), provider option selection, transport calls, retry+telemetry, and post-processing. -- Today: sync-only, non-streaming for both Chat Completions and OpenAI Responses API. No stateful Responses API. -- Goals: improve readability, keep public API stable, and create clear extension points for stateful Responses API, streaming, async, and on‑the‑fly LLM switching. - -Design Principles -- Thin Facade: Keep LLM as a small, readable entry point that delegates. -- Small Modules, One Responsibility: favor 50–150 LOC modules that do one thing well. -- Composition over Inheritance: avoid complex adapter hierarchies; use simple functions/classes. -- Backward Compatible: keep LLM.completion and LLM.responses behavior intact. - -Proposed Architecture -1) Formatters (pure): - - formatters/chat.py - - prepare_chat_messages(llm, messages) -> list[dict] - - Applies Anthropic cache markers only when relevant. - - Applies vision/function-calling flags. - - Uses Message.to_llm_dict(). 
- - formatters/responses.py - - prepare_responses_input(llm, messages) -> (instructions: str | None, input_items: list[dict]) - - Vision only; no cache flags. - - Uses Message.to_responses_value(). - -2) Tools: - - tools/prepare.py - - build_chat_tools(tools) -> list[ChatCompletionToolParam] - - build_responses_tools(tools) -> list[Responses ToolParam] - - tools/strategy.py - - choose_tool_strategy(llm, chat_tools) -> strategy - - NativeToolStrategy: send tools natively (when supported) - - MockToolStrategy: pre/post transforms for prompt-mocked tool calls - -3) Options (rename normalize_* → select_options_*): - - options/chat_options.py - - select_chat_options(llm, user_kwargs, has_tools: bool) -> dict - - options/responses_options.py - - select_responses_options(llm, user_kwargs, include, store) -> dict - -4) Transport (litellm boundary): - - transport/chat.py - - transport_chat_sync(model, messages, options) -> ModelResponse - - (future) transport_chat_stream/async - - transport/responses.py - - transport_responses_sync(model, instructions, input_items, tools, options) -> ResponsesAPIResponse - - (future) transport_responses_stream/async - - Keep litellm.modify_params guard centralized here. - -5) Invocation (retry + telemetry): - - invocation/chat_invoker.py - - call_sync(ctx) -> LLMResponse - - (future) call_stream/call_async/call_async_stream - - invocation/responses_invoker.py - - call_sync(ctx) -> LLMResponse - -6) Caching (Anthropic-only flags today): - - caching/anthropic_cache.py - - apply_prompt_caching(messages) -> None - -7) Streaming/Async (future): - - streaming/events.py: {TextDelta, ToolCallDelta, ReasoningDelta, UsageDelta, Error, End} - - streaming/aggregator.py: fold deltas into final Message/Usage - -Public LLM Surface (unchanged now, future-ready) -- completion(...) -- responses(...) -- (future) completion_stream(...), responses_stream(...) -- (future) acompletion(...), aresponses(...), acompletion_stream(...), aresponses_stream(...) - -On‑the‑fly LLM switching -- Prefer clone-and-swap: LLM.clone(**overrides) returns a new configured instance; Agent swaps atomically. -- Optionally use a lightweight LLMHandle wrapper that the Agent holds; handle.set(new_llm) hot-swaps internally. - -Stateful Responses API (future) -- responses_invoker + responses transport accept store=True and session/thread identifiers from select_responses_options. -- No changes required in LLM facade beyond plumbing. - -Refactor of _init_model_info_and_caps() -Current behavior (mixed concerns): performs provider-specific model_info fetches (including network), sets token limits and function-calling capability. This couples init-time side effects, network I/O, and policy. - -We will: -- Extract a resolver: resolve_model_info(model, base_url, api_key) with an LRU cache. Supports openrouter, litellm_proxy, and basename fallbacks. -- Extract pure derivations: - - derive_token_limits(model, model_info) - - compute_function_calling_active(native_override, features) -- Consider lazy loading guarded by ensure_model_info_loaded(), but be mindful of clone(): clone should carry over resolved model profile so we avoid late surprises. - -Anthropic-specific logic -- Group Anthropic-specific concerns behind a small module anthropic/: - - anthropic/cache.py: apply_prompt_caching(...) - - anthropic/tokens.py: optional token/output caps overrides (e.g., Claude practical 64k) - - anthropic/reasoning.py: extended thinking headers, interleaved-thinking beta, etc. 
-- Chat and Responses option selectors call into these helpers only when get_features(model).is_anthropic is true. This keeps Anthropic “stuff” co-located and easy to find without over-engineering adapters. - -Base URL scheme for local/proxy -- If base_url has no scheme, default to http:// to support localhost/intranet usage, but log a concise debug message. If security is a concern in some environments, allow an LLM flag to force https. - -Migration Plan (incremental) -1) Extract prepare_* and select_options_* helpers (rename from normalize_*). No behavior change. -2) Extract chat/responses transport and centralize litellm.modify_params guard. -3) Introduce ToolStrategy (native/mock) using existing mixin logic. -4) Add Chat/Responses invokers (retry + telemetry) and delegate from LLM. -5) Introduce model_info resolver + derivations; replace _init_model_info_and_caps with a small initializer that calls the resolver and derivations. -6) Add streaming/async in invokers. - -Readability Wins -- Each module is short, with purpose-revealing names. -- LLM methods read as: prepare → select options → transport → postprocess → wrap. -- Provider quirks (Anthropic) are grouped and opt-in by features. diff --git a/log.txt b/log.txt deleted file mode 100644 index 000890ce36..0000000000 --- a/log.txt +++ /dev/null @@ -1,825 +0,0 @@ -#15 FROM ghcr.io/astral-sh/uv:latest@sha256:6dbd7c42a9088083fa79e41431a579196a189bcee3ae68ba904ac2bf77765867 -#15 DONE 0.0s - -#16 importing cache manifest from ghcr.io/all-hands-ai/agent-server:buildcache-source-nikolaik_s_python-nodejs_tag_python3.12-nodejs22-main -#16 ... - -#17 importing cache manifest from ghcr.io/all-hands-ai/agent-server:buildcache-source-nikolaik_s_python-nodejs_tag_python3.12-nodejs22-vscode-settings-extension -#17 ERROR: failed to configure registry cache importer: ghcr.io/all-hands-ai/agent-server:buildcache-source-nikolaik_s_python-nodejs_tag_python3.12-nodejs22-vscode-settings-extension: not found - -#16 importing cache manifest from ghcr.io/all-hands-ai/agent-server:buildcache-source-nikolaik_s_python-nodejs_tag_python3.12-nodejs22-main -#16 ERROR: failed to configure registry cache importer: ghcr.io/all-hands-ai/agent-server:buildcache-source-nikolaik_s_python-nodejs_tag_python3.12-nodejs22-main: not found - -#13 [internal] load build context -#13 transferring context: 40.71kB 0.0s done -#13 DONE 0.0s - -#11 [internal] settings cache mount permissions -#11 CACHED - -#18 [builder 2/7] COPY --from=ghcr.io/astral-sh/uv /uv /uvx /bin/ -#18 CACHED - -#19 [builder 3/7] RUN groupadd -g 10001 openhands && useradd -m -u 10001 -g 10001 -s /usr/sbin/nologin openhands -#19 CACHED - -#20 [builder 4/7] WORKDIR /agent-server -#20 CACHED - -#21 [builder 5/7] COPY --chown=openhands:openhands pyproject.toml uv.lock README.md LICENSE ./ -#21 CACHED - -#22 [builder 6/7] COPY --chown=openhands:openhands openhands ./openhands -#22 DONE 0.2s - -#23 [builder 7/7] RUN --mount=type=cache,target=/home/openhands/.cache,uid=10001,gid=10001 uv sync --frozen --no-editable -#23 0.103 Using CPython 3.12.12 interpreter at: /usr/local/bin/python -#23 0.103 Creating virtual environment at: .venv -#23 0.127 Building openhands-agent-server @ file:///agent-server/openhands/agent_server -#23 0.128 Building openhands-sdk @ file:///agent-server/openhands/sdk -#23 0.128 Building openhands-tools @ file:///agent-server/openhands/tools -#23 0.129 Building openhands-workspace @ file:///agent-server/openhands/workspace -#23 1.155 Built openhands-workspace @ 
file:///agent-server/openhands/workspace -#23 1.155 Built openhands-tools @ file:///agent-server/openhands/tools -#23 1.162 Built openhands-agent-server @ file:///agent-server/openhands/agent_server -#23 1.163 Built openhands-sdk @ file:///agent-server/openhands/sdk -#23 1.188 Prepared 4 packages in 1.06s -#23 1.189 warning: Failed to hardlink files; falling back to full copy. This may lead to degraded performance. -#23 1.189 If the cache and target directories are on different filesystems, hardlinking may not be supported. -#23 1.189 If this is intentional, set `export UV_LINK_MODE=copy` or use `--link-mode=copy` to suppress this warning. -#23 1.783 Installed 183 packages in 594ms -#23 1.783 + aiofiles==24.1.0 -#23 1.783 + aiohappyeyeballs==2.6.1 -#23 1.783 + aiohttp==3.12.15 -#23 1.783 + aiosignal==1.4.0 -#23 1.783 + aiosqlite==0.21.0 -#23 1.783 + alembic==1.16.5 -#23 1.783 + altair==5.5.0 -#23 1.783 + altgraph==0.17.4 -#23 1.783 + annotated-types==0.7.0 -#23 1.783 + anthropic==0.68.0 -#23 1.783 + anyio==4.10.0 -#23 1.783 + attrs==25.3.0 -#23 1.783 + authlib==1.6.4 -#23 1.783 + backoff==2.2.1 -#23 1.783 + bashlex==0.18 -#23 1.783 + binaryornot==0.4.4 -#23 1.783 + blinker==1.9.0 -#23 1.783 + browser-use==0.7.9 -#23 1.783 + bubus==1.5.6 -#23 1.783 + cachetools==5.5.2 -#23 1.783 + cdp-use==1.4.1 -#23 1.783 + certifi==2025.8.3 -#23 1.783 + cffi==2.0.0 -#23 1.783 + cfgv==3.4.0 -#23 1.783 + chardet==5.2.0 -#23 1.783 + charset-normalizer==3.4.3 -#23 1.783 + click==8.2.1 -#23 1.783 + coverage==7.10.6 -#23 1.783 + cryptography==46.0.1 -#23 1.783 + cyclopts==3.24.0 -#23 1.783 + distlib==0.4.0 -#23 1.783 + distro==1.9.0 -#23 1.783 + dnspython==2.8.0 -#23 1.783 + docker==7.1.0 -#23 1.783 + docstring-parser==0.17.0 -#23 1.783 + docutils==0.22.1 -#23 1.783 + email-validator==2.3.0 -#23 1.783 + exceptiongroup==1.3.0 -#23 1.783 + fastapi==0.116.2 -#23 1.783 + fastmcp==2.12.3 -#23 1.783 + fastuuid==0.13.5 -#23 1.783 + filelock==3.19.1 -#23 1.783 + frozenlist==1.7.0 -#23 1.783 + fsspec==2025.9.0 -#23 1.783 + func-timeout==4.3.5 -#23 1.783 + gitdb==4.0.12 -#23 1.783 + gitpython==3.1.45 -#23 1.783 + google-api-core==2.25.1 -#23 1.784 + google-api-python-client==2.182.0 -#23 1.784 + google-auth==2.40.3 -#23 1.784 + google-auth-httplib2==0.2.0 -#23 1.784 + google-auth-oauthlib==1.2.2 -#23 1.784 + google-genai==1.38.0 -#23 1.784 + googleapis-common-protos==1.70.0 -#23 1.784 + greenlet==3.2.4 -#23 1.784 + groq==0.31.1 -#23 1.784 + h11==0.16.0 -#23 1.784 + hf-xet==1.1.10 -#23 1.784 + html2text==2025.4.15 -#23 1.784 + httpcore==1.0.9 -#23 1.784 + httplib2==0.31.0 -#23 1.784 + httpx==0.28.1 -#23 1.784 + httpx-sse==0.4.1 -#23 1.784 + huggingface-hub==0.35.0 -#23 1.784 + identify==2.6.14 -#23 1.784 + idna==3.10 -#23 1.784 + importlib-metadata==8.7.0 -#23 1.784 + iniconfig==2.1.0 -#23 1.784 + isodate==0.7.2 -#23 1.784 + jinja2==3.1.6 -#23 1.784 + jiter==0.11.0 -#23 1.784 + jsonschema==4.25.1 -#23 1.784 + jsonschema-path==0.3.4 -#23 1.784 + jsonschema-specifications==2025.9.1 -#23 1.784 + lazy-object-proxy==1.12.0 -#23 1.784 + libtmux==0.46.2 -#23 1.784 + litellm==1.77.7 (from git+https://github.com/BerriAI/litellm.git@763d2f8ccdd8412dbe6d4ac0e136d9ac34dcd4c0) -#23 1.784 + mako==1.3.10 -#23 1.784 + markdown-it-py==4.0.0 -#23 1.784 + markupsafe==3.0.2 -#23 1.784 + mcp==1.14.1 -#23 1.784 + mdurl==0.1.2 -#23 1.784 + more-itertools==10.8.0 -#23 1.784 + multidict==6.6.4 -#23 1.784 + narwhals==2.5.0 -#23 1.784 + nodeenv==1.9.1 -#23 1.784 + nodejs-wheel-binaries==22.19.0 -#23 1.784 + numpy==2.3.3 -#23 1.784 + oauthlib==3.3.1 
-#23 1.784 + ollama==0.5.4 -#23 1.784 + openai==1.108.1 -#23 1.784 + openapi-core==0.19.5 -#23 1.784 + openapi-pydantic==0.5.1 -#23 1.784 + openapi-schema-validator==0.6.3 -#23 1.784 + openapi-spec-validator==0.7.2 -#23 1.784 + openhands-agent-server==1.0.0 (from file:///agent-server/openhands/agent_server) -#23 1.784 + openhands-sdk==1.0.0 (from file:///agent-server/openhands/sdk) -#23 1.784 + openhands-tools==1.0.0 (from file:///agent-server/openhands/tools) -#23 1.784 + openhands-workspace==1.0.0 (from file:///agent-server/openhands/workspace) -#23 1.784 + packaging==25.0 -#23 1.784 + pandas==2.3.2 -#23 1.784 + parse==1.20.2 -#23 1.784 + pathable==0.4.4 -#23 1.784 + pillow==11.3.0 -#23 1.784 + platformdirs==4.4.0 -#23 1.784 + pluggy==1.6.0 -#23 1.784 + portalocker==2.10.1 -#23 1.784 + posthog==6.7.5 -#23 1.784 + pre-commit==4.3.0 -#23 1.784 + propcache==0.3.2 -#23 1.784 + proto-plus==1.26.1 -#23 1.784 + protobuf==6.32.1 -#23 1.784 + psutil==7.1.0 -#23 1.784 + py==1.11.0 -#23 1.785 + pyarrow==21.0.0 -#23 1.785 + pyasn1==0.6.1 -#23 1.785 + pyasn1-modules==0.4.2 -#23 1.785 + pycodestyle==2.14.0 -#23 1.785 + pycparser==2.23 -#23 1.785 + pydantic==2.11.9 -#23 1.785 + pydantic-core==2.33.2 -#23 1.785 + pydantic-settings==2.10.1 -#23 1.785 + pydeck==0.9.1 -#23 1.785 + pygments==2.19.2 -#23 1.785 + pyinstaller==6.16.0 -#23 1.785 + pyinstaller-hooks-contrib==2025.8 -#23 1.785 + pyotp==2.9.0 -#23 1.785 + pyparsing==3.2.4 -#23 1.785 + pypdf==6.0.0 -#23 1.785 + pyperclip==1.10.0 -#23 1.785 + pyright==1.1.405 -#23 1.785 + pytest==8.4.2 -#23 1.785 + pytest-asyncio==1.2.0 -#23 1.785 + pytest-cov==7.0.0 -#23 1.785 + pytest-forked==1.6.0 -#23 1.785 + pytest-timeout==2.4.0 -#23 1.785 + python-dateutil==2.9.0.post0 -#23 1.785 + python-dotenv==1.1.1 -#23 1.785 + python-frontmatter==1.1.0 -#23 1.785 + python-json-logger==3.3.0 -#23 1.785 + python-multipart==0.0.20 -#23 1.785 + pytz==2025.2 -#23 1.785 + pyyaml==6.0.2 -#23 1.785 + referencing==0.36.2 -#23 1.785 + regex==2025.9.18 -#23 1.785 + reportlab==4.4.4 -#23 1.785 + requests==2.32.5 -#23 1.785 + requests-oauthlib==2.0.0 -#23 1.785 + rfc3339-validator==0.1.4 -#23 1.785 + rich==14.1.0 -#23 1.785 + rich-rst==1.3.1 -#23 1.785 + rpds-py==0.27.1 -#23 1.785 + rsa==4.9.1 -#23 1.785 + ruff==0.13.1 -#23 1.785 + screeninfo==0.8.1 -#23 1.785 + setuptools==80.9.0 -#23 1.785 + six==1.17.0 -#23 1.785 + smmap==5.0.2 -#23 1.785 + sniffio==1.3.1 -#23 1.785 + sqlalchemy==2.0.43 -#23 1.785 + sse-starlette==3.0.2 -#23 1.785 + starlette==0.48.0 -#23 1.785 + streamlit==1.49.1 -#23 1.785 + tabulate==0.9.0 -#23 1.785 + tenacity==9.1.2 -#23 1.785 + tiktoken==0.11.0 -#23 1.785 + tokenizers==0.22.1 -#23 1.785 + toml==0.10.2 -#23 1.785 + tornado==6.5.2 -#23 1.785 + tqdm==4.67.1 -#23 1.785 + typing-extensions==4.15.0 -#23 1.785 + typing-inspection==0.4.1 -#23 1.785 + tzdata==2025.2 -#23 1.785 + uritemplate==4.2.0 -#23 1.785 + urllib3==2.5.0 -#23 1.785 + uuid7==0.1.0 -#23 1.785 + uvicorn==0.35.0 -#23 1.785 + virtualenv==20.34.0 -#23 1.785 + watchdog==6.0.0 -#23 1.785 + websockets==15.0.1 -#23 1.785 + werkzeug==3.1.1 -#23 1.785 + yarl==1.20.1 -#23 1.785 + zipp==3.23.0 -#23 DONE 2.4s - -#24 [base-image-minimal 3/4] COPY --from=ghcr.io/astral-sh/uv /uv /uvx /bin/ -#24 CACHED - -#25 [base-image 1/7] RUN set -eux; mkdir -p $(dirname /openhands/.openvscode-server); arch=$(uname -m); if [ "${arch}" = "x86_64" ]; then arch="x64"; elif [ "${arch}" = "aarch64" ]; then arch="arm64"; elif [ "${arch}" = "armv7l" ]; then arch="armhf"; fi; wget 
https://github.com/gitpod-io/openvscode-server/releases/download/openvscode-server-v1.98.2/openvscode-server-v1.98.2-linux-${arch}.tar.gz; tar -xzf openvscode-server-v1.98.2-linux-${arch}.tar.gz; if [ -d "/openhands/.openvscode-server" ]; then rm -rf "/openhands/.openvscode-server"; fi; mv openvscode-server-v1.98.2-linux-${arch} /openhands/.openvscode-server; cp /openhands/.openvscode-server/bin/remote-cli/openvscode-server /openhands/.openvscode-server/bin/remote-cli/code; rm -f openvscode-server-v1.98.2-linux-${arch}.tar.gz; chown -R openhands:openhands /openhands/.openvscode-server -#25 CACHED - -#26 [base-image 2/7] RUN set -eux; if grep -q "ubuntu" /etc/os-release; then install -m 0755 -d /etc/apt/keyrings; curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc; chmod a+r /etc/apt/keyrings/docker.asc; echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null; else install -m 0755 -d /etc/apt/keyrings; curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc; chmod a+r /etc/apt/keyrings/docker.asc; echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian bookworm stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null; fi; apt-get update; apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin; apt-get clean; rm -rf /var/lib/apt/lists/* -#26 CACHED - -#27 [base-image 3/7] RUN mkdir -p /etc/docker && echo '{"mtu": 1450}' > /etc/docker/daemon.json -#27 CACHED - -#28 [base-image 4/7] RUN set -eux; apt-get update; apt-get install -y --no-install-recommends tigervnc-standalone-server xfce4 dbus-x11 novnc websockify $(if grep -q "ubuntu" /etc/os-release; then echo "chromium-browser"; else echo "chromium"; fi); apt-get clean; rm -rf /var/lib/apt/lists/* -#28 CACHED - -#29 [base-image 5/7] RUN chown -R openhands:openhands /usr/share/novnc -#29 CACHED - -#30 [base-image-minimal 2/4] RUN set -eux; apt-get update; apt-get install -y --no-install-recommends ca-certificates curl wget sudo apt-utils git jq tmux build-essential coreutils util-linux procps findutils grep sed apt-transport-https gnupg lsb-release; (getent group 10001 || groupadd -g 10001 openhands); (id -u openhands >/dev/null 2>&1 || useradd -m -u 10001 -g 10001 -s /bin/bash openhands); usermod -aG sudo openhands; echo "openhands ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers; mkdir -p /workspace/project; chown -R openhands:openhands /workspace; rm -rf /var/lib/apt/lists/* -#30 CACHED - -#31 [base-image 6/7] COPY --chown=openhands:openhands openhands/agent_server/docker/wallpaper.svg /usr/share/backgrounds/xfce/xfce-shapes.svg -#31 CACHED - -#32 [source 1/2] COPY --chown=openhands:openhands --from=builder /agent-server /agent-server -#32 DONE 2.0s - -#33 [source 2/2] COPY --chown=openhands:openhands --from=builder /agent-server/openhands/agent_server/vscode_extensions /openhands/.openvscode-server/extensions -#33 DONE 0.1s - -#34 exporting to image -#34 exporting layers -#34 exporting layers 1.7s done -#34 writing image sha256:97a17ba72cf8e9a01177eaa430636ec6de8c86bd13da14ced864b4569a064d12 -#34 writing image sha256:97a17ba72cf8e9a01177eaa430636ec6de8c86bd13da14ced864b4569a064d12 done -#34 naming to ghcr.io/all-hands-ai/agent-server:71d1a9f-custom-dev done -#34 naming to 
ghcr.io/all-hands-ai/agent-server:v1.0.0_nikolaik_s_python-nodejs_tag_python3.12-nodejs22-dev done -#34 DONE 1.7s ------- - > importing cache manifest from ghcr.io/all-hands-ai/agent-server:buildcache-source-nikolaik_s_python-nodejs_tag_python3.12-nodejs22-vscode-settings-extension: ------- ------- - > importing cache manifest from ghcr.io/all-hands-ai/agent-server:buildcache-source-nikolaik_s_python-nodejs_tag_python3.12-nodejs22-main: ------- - -View build details: docker-desktop://dashboard/build/desktop-linux/desktop-linux/c52mydc2har9e73bfl9d6ipr1 -[build] Done. Tags: - - ghcr.io/all-hands-ai/agent-server:71d1a9f-custom-dev - - ghcr.io/all-hands-ai/agent-server:v1.0.0_nikolaik_s_python-nodejs_tag_python3.12-nodejs22-dev -BUILD DONE. -[10/12/25 23:34:14] INFO Using image: ghcr.io/all-hands-ai/agent-server:71d1a9f-custom-dev workspace.py:179 -[10/12/25 23:34:14] INFO $ docker run -d --platform linux/arm64 --rm --name command.py:27 - agent-server-9727b297-a0b7-4606-acc0-72d2df11fe35 -e - LLM_API_KEY=sk-7QlqTKmhtDfJfwQjwZZqPQ -p 8010:8000 -p 8011:8001 -p 8012:8002 - ghcr.io/all-hands-ai/agent-server:71d1a9f-custom-dev --host 0.0.0.0 --port 8000 -a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -[10/12/25 23:34:14] INFO Started container: workspace.py:326 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -[10/12/25 23:34:14] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[10/12/25 23:34:15] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[10/12/25 23:34:16] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[DOCKER] {"asctime": "2025-10-12 21:34:16,780", "levelname": "WARNING", "name": "uvicorn.error", "filename": "config.py", "lineno": 283, "message": "Current configuration will not reload as not all conditions are met, please refer to documentation."} -[DOCKER] /agent-server/.venv/lib/python3.12/site-packages/websockets/legacy/__init__.py:6: DeprecationWarning: websockets.legacy is deprecated; see https://websockets.readthedocs.io/en/stable/howto/upgrade.html for upgrade instructions -[DOCKER] warnings.warn( # deprecated in 14.0 - 2024-11-09 -[DOCKER] /agent-server/.venv/lib/python3.12/site-packages/uvicorn/protocols/websockets/websockets_impl.py:17: DeprecationWarning: websockets.server.WebSocketServerProtocol is deprecated -[DOCKER] from websockets.server import WebSocketServerProtocol -[10/12/25 23:34:17] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[DOCKER] {"asctime": "2025-10-12 21:34:17,932", "levelname": "INFO", "name": "uvicorn.error", "filename": "server.py", "lineno": 84, "message": "Started server process [1]", "color_message": "Started server process [\u001b[36m%d\u001b[0m]"} -[DOCKER] {"asctime": "2025-10-12 21:34:17,932", "levelname": "INFO", "name": "uvicorn.error", "filename": "on.py", "lineno": 48, "message": "Waiting for application startup."} -[DOCKER] {"asctime": "2025-10-12 21:34:18,177", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension sql build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:18,354", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": 
"vscode_service.py", "lineno": 220, "message": "Extension json build failed"} -[10/12/25 23:34:18] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[DOCKER] {"asctime": "2025-10-12 21:34:18,540", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension objective-c build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:18,720", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension julia build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:18,897", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension handlebars build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:19,074", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension docker build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:19,241", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension go build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:19,409", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension search-result build failed"} -[10/12/25 23:34:19] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[DOCKER] {"asctime": "2025-10-12 21:34:19,587", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension powershell build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:19,753", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension shellscript build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:19,918", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension gulp build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:20,104", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension groovy build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:20,319", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension csharp build failed"} -[10/12/25 23:34:20] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -[DOCKER] {"asctime": "2025-10-12 21:34:20,492", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension terminal-suggest build failed"} -true -[DOCKER] {"asctime": "2025-10-12 21:34:20,660", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension typescript-basics build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:20,826", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension 
php build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:20,992", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension github-authentication build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:21,161", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-monokai-dimmed build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:21,330", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension tunnel-forwarding build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:21,498", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-solarized-dark build failed"} -[10/12/25 23:34:21] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[DOCKER] {"asctime": "2025-10-12 21:34:21,665", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension ipynb build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:21,830", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension less build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:21,999", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension swift build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:22,165", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension perl build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:22,337", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension ruby build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:22,509", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension debug-auto-launch build failed"} -[10/12/25 23:34:22] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[DOCKER] {"asctime": "2025-10-12 21:34:22,684", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension log build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:22,866", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension python build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:23,042", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension jake build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:23,212", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension media-preview build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:23,380", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, 
"message": "Extension shaderlab build failed"} -[10/12/25 23:34:23] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -[DOCKER] {"asctime": "2025-10-12 21:34:23,547", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension rust build failed"} -true -[DOCKER] {"asctime": "2025-10-12 21:34:23,724", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension debug-server-ready build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:23,894", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension git build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:24,062", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension markdown-math build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:24,237", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension html-language-features build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:24,412", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension configuration-editing build failed"} -[10/12/25 23:34:24] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -[DOCKER] {"asctime": "2025-10-12 21:34:24,584", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension scss build failed"} -true -[DOCKER] {"asctime": "2025-10-12 21:34:24,755", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension ms-vscode.js-debug-companion build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:24,921", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension json-language-features build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:25,093", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension r build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:25,260", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension emmet build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:25,430", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-solarized-light build failed"} -[10/12/25 23:34:25] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -[DOCKER] {"asctime": "2025-10-12 21:34:25,610", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension ms-vscode.js-debug build failed"} -true -[DOCKER] {"asctime": "2025-10-12 21:34:25,790", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", 
"lineno": 220, "message": "Extension npm build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:25,959", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension make build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:26,129", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension css-language-features build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:26,297", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension hlsl build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:26,464", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension simple-browser build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:26,634", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension latex build failed"} -[10/12/25 23:34:26] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[DOCKER] {"asctime": "2025-10-12 21:34:26,807", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension git-base build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:26,977", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension javascript build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:27,143", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension yaml build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:27,309", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension markdown-basics build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:27,476", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension bat build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:27,646", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-kimbie-dark build failed"} -[10/12/25 23:34:27] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[DOCKER] {"asctime": "2025-10-12 21:34:27,816", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension ini build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:27,984", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension typescript-language-features build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:28,148", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-quietlight build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:28,313", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", 
"filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-monokai build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:28,480", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-seti build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:28,650", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-tomorrow-night-blue build failed"} -[10/12/25 23:34:28] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[DOCKER] {"asctime": "2025-10-12 21:34:28,819", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension vb build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:28,988", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension grunt build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:29,156", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension coffeescript build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:29,325", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension dart build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:29,495", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension github build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:29,666", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension cpp build failed"} -[10/12/25 23:34:29] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[DOCKER] {"asctime": "2025-10-12 21:34:29,858", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-defaults build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:30,029", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension clojure build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:30,200", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension fsharp build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:30,370", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-abyss build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:30,541", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension xml build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:30,713", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension pug build failed"} -[10/12/25 23:34:30] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - 
a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[DOCKER] {"asctime": "2025-10-12 21:34:30,885", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension lua build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:31,055", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension razor build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:31,224", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension extension-editing build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:31,392", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension diff build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:31,563", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension microsoft-authentication build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:31,732", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension java build failed"} -[10/12/25 23:34:31] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[DOCKER] {"asctime": "2025-10-12 21:34:31,907", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension css build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:32,077", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension markdown-language-features build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:32,247", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension html build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:32,419", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension theme-red build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:32,593", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension references-view build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:32,761", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension php-language-features build failed"} -[10/12/25 23:34:32] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[DOCKER] {"asctime": "2025-10-12 21:34:32,929", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension restructuredtext build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:33,103", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension merge-conflict build failed"} -[DOCKER] {"asctime": "2025-10-12 21:34:33,273", "levelname": "WARNING", "name": 
"openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension ms-vscode.vscode-js-profile-table build failed"} -[10/12/25 23:34:33] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[10/12/25 23:34:34] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[10/12/25 23:34:35] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[10/12/25 23:34:36] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[10/12/25 23:34:38] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[10/12/25 23:34:39] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[10/12/25 23:34:40] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[10/12/25 23:34:41] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[DOCKER] {"asctime": "2025-10-12 21:34:42,024", "levelname": "WARNING", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 220, "message": "Extension notebook-renderers build failed"} -[10/12/25 23:34:42] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[10/12/25 23:34:43] INFO $ docker inspect -f '{{.State.Running}}' command.py:27 - a22128f079805d2e9f280efdd0745b47e4ddffa42e1841a456705d1f936f0bea -true -[DOCKER] {"asctime": "2025-10-12 21:34:43,775", "levelname": "INFO", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 218, "message": "Extension openhands-settings built successfully"} -[DOCKER] {"asctime": "2025-10-12 21:34:43,886", "levelname": "INFO", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 191, "message": "VSCode server startup detected"} -[DOCKER] {"asctime": "2025-10-12 21:34:43,886", "levelname": "INFO", "name": "openhands.agent_server.vscode_service", "filename": "vscode_service.py", "lineno": 64, "message": "VSCode server started successfully on port 8001"} -[DOCKER] {"asctime": "2025-10-12 21:34:43,886", "levelname": "INFO", "name": "openhands.agent_server.api", "filename": "api.py", "lineno": 49, "message": "VSCode service started successfully"} -[DOCKER] {"asctime": "2025-10-12 21:34:43,893", "levelname": "INFO", "name": "openhands.agent_server.desktop_service", "filename": "desktop_service.py", "lineno": 78, "message": "Starting TigerVNC on :1 (1280x800)..."} -[DOCKER] Please be aware that you are exposing your VNC server to all users on the -[DOCKER] local machine. These users can access your server without authentication! -[DOCKER] /usr/bin/xauth: file /home/openhands/.Xauthority does not exist -[DOCKER] -[DOCKER] New Xtigervnc server 'a22128f07980:1 (openhands)' on port 5901 for display :1. -[DOCKER] Use xtigervncviewer -SecurityTypes None :1 to connect to the VNC server. 
-[DOCKER]
-[DOCKER] {"asctime": "2025-10-12 21:34:47,043", "levelname": "INFO", "name": "openhands.agent_server.desktop_service", "filename": "desktop_service.py", "lineno": 121, "message": "Starting noVNC proxy on 0.0.0.0:8002 -> 127.0.0.1:5901 ..."}
-[DOCKER] {"asctime": "2025-10-12 21:34:47,045", "levelname": "INFO", "name": "openhands.agent_server.desktop_service", "filename": "desktop_service.py", "lineno": 143, "message": "noVNC URL: http://localhost:8002/vnc.html?autoconnect=1&resize=remote"}
-[DOCKER] {"asctime": "2025-10-12 21:34:49,048", "levelname": "INFO", "name": "openhands.agent_server.desktop_service", "filename": "desktop_service.py", "lineno": 153, "message": "Desktop started successfully"}
-[DOCKER] {"asctime": "2025-10-12 21:34:49,048", "levelname": "INFO", "name": "openhands.agent_server.api", "filename": "api.py", "lineno": 59, "message": "Desktop service started successfully"}
-[DOCKER] {"asctime": "2025-10-12 21:34:49,245", "levelname": "INFO", "name": "uvicorn.error", "filename": "on.py", "lineno": 62, "message": "Application startup complete."}
-[DOCKER] {"asctime": "2025-10-12 21:34:49,245", "levelname": "INFO", "name": "uvicorn.error", "filename": "server.py", "lineno": 216, "message": "Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)"}
-[DOCKER] 🙌 Starting OpenHands Agent Server on 0.0.0.0:8000
-[10/12/25 23:34:49] INFO Docker workspace is ready at http://localhost:8010  workspace.py:343
-[DOCKER] 📖 API docs will be available at http://0.0.0.0:8000/docs
-[DOCKER] 🔄 Auto-reload: disabled
-[DOCKER] 🔒 DEBUG mode: DISABLED
-[DOCKER] {"asctime": "2025-10-12 21:35:02,330", "levelname": "INFO", "name": "openhands.sdk.conversation.state", "filename": "state.py", "lineno": 213, "message": "Created new conversation 98aa5ad0-f12d-44fa-94d8-b1152df5a9a1\nState: {'id': UUID('98aa5ad0-f12d-44fa-94d8-b1152df5a9a1'), 'workspace': {'kind': 'LocalWorkspace', 'working_dir': '/workspace'}, 'persistence_dir': 'workspace/conversations/event_service/98aa5ad0-f12d-44fa-94d8-b1152df5a9a1', 'max_iterations': 500, 'stuck_detection': True, 'confirmation_policy': {'kind': 'NeverConfirm'}, ...}\nAgent: {'kind': 'Agent', 'llm': {'model': 'litellm_proxy/anthropic/claude-sonnet-4-5-20250929', 'api_key': SecretStr('**********'), 'base_url': 'https://llm-proxy.eval.all-hands.dev', 'num_retries': 5, 'temperature': 0.0, 'max_input_tokens': 200000, 'max_output_tokens': 64000, 'caching_prompt': True, 'extended_thinking_budget': 200000, 'service_id': 'agent', ...}, 'tools': [{'name': 'BashTool'}, {'name': 'FileEditorTool'}, {'name': 'TaskTrackerTool'}], 'mcp_config': {'mcpServers': {'fetch': {'command': 'uvx', 'args': ['mcp-server-fetch']}, 'repomix': {'command': 'npx', 'args': ['-y', 'repomix@1.4.2', '--mcp']}}}, 'filter_tools_regex': '^(?!repomix)(.*)|^repomix.*pack_codebase.*$', 'system_prompt_filename': 'system_prompt.j2', 'system_prompt_kwargs': {'cli_mode': True}, 'security_analyzer': {'kind': 'LLMSecurityAnalyzer'}, 'condenser': {'kind': 'LLMSummarizingCondenser', 'llm': {... same LLM settings with 'service_id': 'condenser' ...}, 'max_size': 80, 'keep_first': 4}}"}
-[DOCKER] {"asctime": "2025-10-12 21:35:02,337", "levelname": "INFO", "name": "openhands.tools.execute_bash.terminal.factory", "filename": "factory.py", "lineno": 103, "message": "Auto-detected: Using TmuxTerminal (tmux available)"}
-[DOCKER] {"asctime": "2025-10-12 21:35:02,596", "levelname": "INFO", "name": "openhands.tools.execute_bash.impl", "filename": "impl.py", "lineno": 51, "message": "BashExecutor initialized with working_dir: /workspace, username: None, terminal_type: TerminalSession"}
-[DOCKER] {"asctime": "2025-10-12 21:35:02,598", "levelname": "INFO", "name": "openhands.tools.file_editor.editor", "filename": "editor.py", "lineno": 85, "message": "FileEditor initialized with cwd: /workspace"}
-[DOCKER] {"asctime": "2025-10-12 21:35:02,598", "levelname": "INFO", "name": "openhands.tools.task_tracker.definition", "filename": "definition.py", "lineno": 155, "message": "TaskTrackerExecutor initialized with save_dir: workspace/conversations/event_service/98aa5ad0-f12d-44fa-94d8-b1152df5a9a1"}
-[DOCKER] {"asctime": "2025-10-12 21:35:02,643", "levelname": "INFO", "name": "mcp.server.lowlevel.server", "filename": "server.py", "lineno": 623, "message": "Processing request of type ListToolsRequest"}
-[DOCKER] Downloading lxml (4.8MiB), pydantic-core (1.9MiB) ... Installed 40 packages in 10ms
-[DOCKER] {"asctime": "2025-10-12 21:35:09,628", "levelname": "INFO", "name": "openhands.sdk.mcp.utils", "filename": "utils.py", "lineno": 62, "message": "Created 8 MCP tools: ['fetch_fetch', 'repomix_pack_codebase', 'repomix_pack_remote_repository', 'repomix_attach_packed_output', 'repomix_read_repomix_output', 'repomix_grep_repomix_output', 'repomix_file_system_read_file', 'repomix_file_system_read_directory']"}
-[DOCKER] {"asctime": "2025-10-12 21:35:09,629", "levelname": "INFO", "name": "openhands.sdk.agent.base", "filename": "base.py", "lineno": 203, "message": "Loaded 11 tools from spec: ['execute_bash', 'str_replace_editor', 'task_tracker', 'fetch_fetch', 'repomix_pack_codebase', 'repomix_pack_remote_repository', 'repomix_attach_packed_output', 'repomix_read_repomix_output', 'repomix_grep_repomix_output', 'repomix_file_system_read_file', 'repomix_file_system_read_directory']"}
-[DOCKER] {"asctime": "2025-10-12 21:35:09,629", "levelname": "INFO", "name": "openhands.sdk.agent.base", "filename": "base.py", "lineno": 209, "message": "Filtered to 5 tools after applying regex filter: ['execute_bash', 'str_replace_editor', 'task_tracker', 'fetch_fetch', 'repomix_pack_codebase']"}
-[DOCKER] {"asctime": "2025-10-12 21:35:09,629", "levelname": "WARNING", "name": "openhands.sdk.agent.agent", "filename": "agent.py", "lineno": 98, "message": "LLM security analyzer is enabled but confirmation policy is set to NeverConfirm"}
-[DOCKER] {"asctime": "2025-10-12 21:35:11,716", "levelname": "INFO", "name": "openhands.sdk.llm.llm_registry", "filename": "llm_registry.py", "lineno": 81, "message": "[LLM registry 52817eb9-ce49-4a76-b3f1-f0fab7b9b69c]: Added LLM for service agent"}
-[DOCKER] {"asctime": "2025-10-12 21:35:13,319", "levelname": "INFO", "name": "openhands.sdk.llm.llm_registry", "filename": "llm_registry.py", "lineno": 81, "message": "[LLM registry 52817eb9-ce49-4a76-b3f1-f0fab7b9b69c]: Added LLM for service condenser"}
-[DOCKER] {"asctime": "2025-10-12 21:35:13,319", "levelname": "INFO", "name": "openhands.sdk.conversation.impl.local_conversation", "filename": "local_conversation.py", "lineno": 267, "message": "Confirmation policy set to: kind='NeverConfirm'"}
-[10/12/25 23:35:16] INFO 📋 Conversation ID: 98aa5ad0-f12d-44fa-94d8-b1152df5a9a1  04_vscode_with_docker_sandboxed_server.py:62
-[10/12/25 23:35:16] INFO 📝 Sending first message...  04_vscode_with_docker_sandboxed_server.py:63
-[DOCKER] {"asctime": "2025-10-12 21:35:16,235", "levelname": "INFO", "name": "uvicorn.access", ...}
-[DOCKER] /agent-server/.venv/lib/python3.12/site-packages/websockets/legacy/server.py:1178: DeprecationWarning: remove second argument of ws_handler
-[DOCKER] {"asctime": "2025-10-12 21:35:16,279", "levelname": "INFO", "name": "uvicorn.error", "filename": "websockets_impl.py", "lineno": 273, "message": "192.168.65.1:33098 - \"WebSocket /sockets/events/98aa5ad0-f12d-44fa-94d8-b1152df5a9a1\" [accepted]"}
-[DOCKER] {"asctime": "2025-10-12 21:35:22,243", "levelname": "INFO", "name": "uvicorn.error", "filename": "server.py", "lineno": 643, "message": "connection open"}
-[10/12/25 23:35:22] INFO 🔔 Callback received event: ConversationStateUpdateEvent
-                    ConversationStateUpdate(key=full_state, value={... the full conversation state shown above, now with
-                    stats: agent accumulated_cost 0.03233625, prompt_tokens 10, completion_tokens 229, cache_write_tokens
-                    7699, reasoning_tokens 75, response latency 5.99s for chatcmpl-66f905d5-adbd-4400-8af4-0f3212ca882f ...})
-[... the example's fallback renderer echoes every ConversationStateUpdateEvent payload a second time in an
-     "UNKNOWN Event: ConversationStateUpdateEvent" panel; those duplicates are omitted here ...]
-[10/12/25 23:35:22] INFO 🔔 Callback received event: ActionEvent
-                    Thought: I'll create a simple Python script that prints "Hello World" for you.
-                    Action: FileEditorAction (Predicted Security Risk: LOW)
-                    Arguments: command="create", path="/workspace/hello_world.py", file_text='print("Hello World")'
-[10/12/25 23:35:22] INFO 🔔 Callback received event: ObservationEvent
-                    Tool: str_replace_editor
-                    Result: File created successfully at: /workspace/hello_world.py
-[10/12/25 23:35:25] INFO 🔔 Callback received event: ActionEvent
-                    Thought: Now let me run the script to verify it works:
-                    Action: ExecuteBashAction (Predicted Security Risk: LOW)
-                    $ python /workspace/hello_world.py
-[DOCKER] /agent-server/.venv/lib/python3.12/site-packages/pydantic/main.py:463: UserWarning: Pydantic serializer warnings: PydanticSerializationUnexpectedValue(Expected 10 fields but got 7: Expected `Message` ...), PydanticSerializationUnexpectedValue(Expected `StreamingChoices` ...)
-[10/12/25 23:35:26] INFO 🔔 Callback received event: ObservationEvent
-                    Tool: execute_bash
-                    Result: Hello World
-                    [The command completed with exit code 0.]
-                    [Current working directory: /workspace]
-                    [Python interpreter: /usr/local/bin/python]
-[DOCKER] {"asctime": "2025-10-12 21:35:30,260", "levelname": "INFO", "name": "openhands.sdk.agent.agent", "filename": "agent.py", "lineno": 253, "message": "LLM produced a message response - awaits user input"}
-[10/12/25 23:35:30] INFO 🔔 Callback received event: ConversationStateUpdateEvent
-                    ConversationStateUpdate(key=agent_status, value=finished)
-[10/12/25 23:35:30] INFO run() triggered successfully  remote_conversation.py:489
-
-Because you've enabled extra_ports=True in DockerWorkspace, you can open VSCode Web to see the workspace.
-
-VSCode Link: http://localhost:8011
-(Check the agent server logs for the full URL with auth token)
-
-The VSCode should have the OpenHands settings extension installed:
- - Dark theme enabled
- - Auto-save enabled
- - Telemetry disabled
- - Auto-updates disabled
-
-Press 'y' and Enter to exit and terminate the workspace.
->> [10/12/25 23:35:30] INFO 🔔 Callback received event: MessageEvent
-                    MessageEvent (agent)
-                    assistant: Perfect! I've created a simple Python script called `hello_world.py` that prints
-                    "Hello World". The script has been successfully tested and works as expected. [Thinking blocks: 1]

diff --git a/previous.md b/previous.md
deleted file mode 100644
index d3f3efe469..0000000000
--- a/previous.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# VSCode Settings Extension Work
-
-## Branch
-`vscode-settings-extension`
-
-## What We've Done
-
-### 1. Created VSCode Extension
-- **Location**: `openhands/agent_server/vscode_extensions/openhands-settings/`
-- **Structure**:
-  - `src/extension.ts` - Main extension code that configures settings
-  - `package.json` - Extension metadata (activates on startup with `"*"`)
-  - `tsconfig.json` - TypeScript configuration
-
-### 2. Extension Settings Applied
-The extension automatically configures:
-- `workbench.colorTheme`: "Default Dark+"
-- `editor.fontSize`: 14
-- `editor.tabSize`: 4
-- `files.autoSave`: "afterDelay"
-- `files.autoSaveDelay`: 1000
-- `update.mode`: "none"
-- `telemetry.telemetryLevel`: "off"
-- `extensions.autoCheckUpdates`: false
-- `extensions.autoUpdate`: false
-
-### 3. Updated `vscode_service.py`
-- **Path**: `openhands/agent_server/vscode_service.py`
-- Extensions directory: `self.extensions_dir = self.openvscode_server_root / "extensions"`
-  - Points to `/openhands/.openvscode-server/extensions`
-- Added `_build_extensions()` method:
-  - Iterates through all extensions in the directory
-  - Runs `npm install && npm run compile` for each
-- Modified `_start_vscode_process()`:
-  - Conditionally passes `--extensions-dir` flag if directory exists
-
-### 4. Updated Dockerfile
-- **Path**: `openhands/agent_server/docker/Dockerfile`
-- Added COPY commands in both `source` and `binary` targets:
-  ```dockerfile
-  COPY --chown=${USERNAME}:${USERNAME} --from=builder /agent-server/openhands/agent_server/vscode_extensions /openhands/.openvscode-server/extensions
-  ```
-- Extensions are copied from source code into VSCode server's extensions directory
-
-### 5. Created Test Example
-- **Path**: `examples/02_remote_agent_server/04_vscode_with_docker_sandboxed_server.py`
-- Uses `DockerWorkspace` with `extra_ports=True`
-- Exposes VSCode on port 8011
-- Instructions for checking the extension settings
-
-## Architecture Pattern
-Following V0 approach:
-- Extensions live in VSCode server's own directory: `/openhands/.openvscode-server/extensions/`
-- Extensions are built at runtime when service starts
-- No `.vsix` packaging needed - direct source copy
-
-## Next Steps
-1. Test the extension by running the example script
-2. Verify settings are applied in VSCode Web
-3. Check extension build logs in agent server output
-
-## Testing Command
-```bash
-export LLM_API_KEY=your_key_here
-uv run python examples/02_remote_agent_server/04_vscode_with_docker_sandboxed_server.py
-```
-
-Then access VSCode at: http://localhost:8011

From 5efdaeee851d24223f91600a9e2fb7fc4e5e7a96 Mon Sep 17 00:00:00 2001
From: Engel Nyst
Date: Sat, 18 Oct 2025 17:14:30 +0200
Subject: [PATCH 05/37] chore: only ignore bead databases

Co-authored-by: openhands
---
 .gitignore | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index 3299c4f6b0..21cfd96675 100644
--- a/.gitignore
+++ b/.gitignore
@@ -204,7 +204,7 @@ cache
 openapi.json
 .client/
 # Ignore local runtime and developer files
-.beads/
+.beads/*.db
 *.db
 .worktrees/
 *.code-workspace

From 9cbf67f7c5c98b36e0be728414b26ae816c360b6 Mon Sep 17 00:00:00 2001
From: Engel Nyst
Date: Sat, 18 Oct 2025 17:28:31 +0200
Subject: [PATCH 06/37] test: cover llm profile manager

Co-authored-by: openhands
---
 tests/sdk/llm/test_profile_manager.py | 70 +++++++++++++++++++++++++++
 1 file changed, 70 insertions(+)
 create mode 100644 tests/sdk/llm/test_profile_manager.py

diff --git a/tests/sdk/llm/test_profile_manager.py b/tests/sdk/llm/test_profile_manager.py
new file mode 100644
index 0000000000..6bda7646ce
--- /dev/null
+++ b/tests/sdk/llm/test_profile_manager.py
@@ -0,0 +1,70 @@
+import json
+
+from pydantic import SecretStr
+
+from openhands.sdk.llm.llm import LLM
+from openhands.sdk.llm.llm_registry import LLMRegistry
+from openhands.sdk.llm.profile_manager import ProfileManager
+
+
+def test_list_profiles_returns_sorted_names(tmp_path):
+    manager = ProfileManager(base_dir=tmp_path)
+    (tmp_path / "b.json").write_text("{}", encoding="utf-8")
+    (tmp_path / "a.json").write_text("{}", encoding="utf-8")
+
+    assert manager.list_profiles() == ["a", "b"]
+
+
+def test_save_profile_excludes_secret_fields(tmp_path):
+    manager = ProfileManager(base_dir=tmp_path)
+    llm = LLM(
+        model="gpt-4o-mini",
+        service_id="service",
+        api_key=SecretStr("secret"),
+        aws_access_key_id=SecretStr("id"),
+        aws_secret_access_key=SecretStr("value"),
+    )
+
+    path = manager.save_profile("sample", llm)
+    data = json.loads(path.read_text(encoding="utf-8"))
+
+    assert data["profile_id"] == "sample"
+    assert data["service_id"] == "service"
+    assert "api_key" not in data
+    assert "aws_access_key_id" not in data
+    assert "aws_secret_access_key" not in data
+
+
+def test_load_profile_assigns_profile_id_when_missing(tmp_path):
+    manager = ProfileManager(base_dir=tmp_path)
+    profile_path = tmp_path / "foo.json"
+    profile_path.write_text(
+        json.dumps({"model": "gpt-4o-mini", "service_id": "svc"}),
+        encoding="utf-8",
+    )
+
+    llm = manager.load_profile("foo")
+
+    assert llm.profile_id == "foo"
+    assert llm.service_id == "svc"
+
+
+def test_register_all_skips_invalid_and_duplicate_profiles(tmp_path):
+    manager = ProfileManager(base_dir=tmp_path)
+    registry = LLMRegistry()
+
+    llm = LLM(model="gpt-4o-mini", service_id="shared")
+    manager.save_profile("alpha", llm)
+
+    duplicate_data = llm.model_dump(exclude_none=True)
+    duplicate_data["profile_id"] = "beta"
+    (tmp_path / "beta.json").write_text(
+        json.dumps(duplicate_data),
+        encoding="utf-8",
+    )
+
+    (tmp_path / "gamma.json").write_text("{", encoding="utf-8")
+
+    manager.register_all(registry)
+
+    assert registry.list_services() == ["shared"]

From dfab517416339aa9b99173d8b2d40f4919ed89d3 Mon Sep 17 00:00:00 2001
From: Engel Nyst
Date: Sat, 18 Oct 2025 22:01:59 +0200
Subject: [PATCH 07/37] Update .gitignore
.gitignore --- .gitignore | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.gitignore b/.gitignore index 21cfd96675..4011780b8f 100644 --- a/.gitignore +++ b/.gitignore @@ -208,10 +208,3 @@ openapi.json *.db .worktrees/ *.code-workspace -log.txt -previous.md -AGENTS.md -CLAUDE.md -docs/agent-sdk.workspace.code-workspace -docs/llm-model-info-and-caps.md -docs/llm-refactor.md From 441eb25a86833c20c0a3327cc2f4942479e1014c Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Sat, 18 Oct 2025 22:57:10 +0200 Subject: [PATCH 08/37] Improve LLM profile manager persistence Co-authored-by: openhands --- openhands/sdk/llm/profile_manager.py | 99 +++++++++------- scripts/worktree.sh | 158 ++++++++++++++++++++++++++ tests/sdk/llm/test_profile_manager.py | 30 +++++ 3 files changed, 248 insertions(+), 39 deletions(-) create mode 100755 scripts/worktree.sh diff --git a/openhands/sdk/llm/profile_manager.py b/openhands/sdk/llm/profile_manager.py index 7fadc9700c..f2d63463f3 100644 --- a/openhands/sdk/llm/profile_manager.py +++ b/openhands/sdk/llm/profile_manager.py @@ -2,7 +2,11 @@ import json import logging +from collections.abc import Mapping from pathlib import Path +from typing import Any + +from pydantic import SecretStr, ValidationError from openhands.sdk.llm.llm import LLM from openhands.sdk.llm.llm_registry import LLMRegistry @@ -10,15 +14,21 @@ logger = logging.getLogger(__name__) +_SECRET_FIELDS: tuple[str, ...] = ( + "api_key", + "aws_access_key_id", + "aws_secret_access_key", +) + class ProfileManager: """Manage LLM profile files on disk. - Profiles are stored as JSON files using the existing LLM schema, typically - at ~/.openhands/llm-profiles/.json. + Profiles are stored as JSON files using the existing LLM schema. By default + they live under ``~/.openhands/llm-profiles/.json``. 
""" - def __init__(self, base_dir: str | Path | None = None): + def __init__(self, base_dir: str | Path | None = None) -> None: if base_dir is None: self.base_dir = Path.home() / ".openhands" / "llm-profiles" else: @@ -26,54 +36,65 @@ def __init__(self, base_dir: str | Path | None = None): self.base_dir.mkdir(parents=True, exist_ok=True) def list_profiles(self) -> list[str]: - return sorted([p.stem for p in self.base_dir.glob("*.json")]) + return sorted([path.stem for path in self.base_dir.glob("*.json")]) def get_profile_path(self, name: str) -> Path: return self.base_dir / f"{name}.json" def load_profile(self, name: str) -> LLM: - p = self.get_profile_path(name) - if not p.exists(): - raise FileNotFoundError(f"Profile not found: {name} -> {p}") - # Use LLM.load_from_json to leverage pydantic validation - llm = LLM.load_from_json(str(p)) - # Ensure profile_id is present on loaded LLM - if getattr(llm, "profile_id", None) is None: - try: - llm = llm.model_copy(update={"profile_id": name}) - except Exception: - # Old pydantic versions might not have model_copy; fallback - llm.profile_id = name # type: ignore[attr-defined] - return llm + path = self.get_profile_path(name) + if not path.exists(): + raise FileNotFoundError(f"Profile not found: {name} -> {path}") + return self._load_profile_from_path(path, name) def save_profile(self, name: str, llm: LLM, include_secrets: bool = False) -> Path: - p = self.get_profile_path(name) - # Dump model to dict and ensure profile_id is set + path = self.get_profile_path(name) data = llm.model_dump(exclude_none=True) data["profile_id"] = name - # Remove secret fields unless explicitly requested if not include_secrets: - for secret_field in ( - "api_key", - "aws_access_key_id", - "aws_secret_access_key", - ): - if secret_field in data: - data.pop(secret_field, None) - # Write to file - with open(p, "w", encoding="utf-8") as f: - json.dump(data, f, indent=2, ensure_ascii=False) - logger.info(f"Saved profile {name} -> {p}") - return p + for secret_field in _SECRET_FIELDS: + data.pop(secret_field, None) + else: + for secret_field in _SECRET_FIELDS: + value = data.get(secret_field) + if isinstance(value, SecretStr): + data[secret_field] = value.get_secret_value() + with path.open("w", encoding="utf-8") as file: + json.dump(data, file, indent=2, ensure_ascii=False) + logger.info("Saved profile %s -> %s", name, path) + return path def register_all(self, registry: LLMRegistry) -> None: - # Load and attempt to register all profiles. Skip duplicates. 
for name in self.list_profiles(): try: llm = self.load_profile(name) - try: - registry.add(llm) - except Exception as e: - logger.info(f"Skipping profile {name}: registry.add failed: {e}") - except Exception as e: - logger.warning(f"Failed to load profile {name}: {e}") + except Exception as exc: # noqa: BLE001 - log and continue + logger.warning("Failed to load profile %s: %s", name, exc) + continue + try: + registry.add(llm) + except Exception as exc: # noqa: BLE001 - registry enforces its own invariants + logger.info("Skipping profile %s: registry.add failed: %s", name, exc) + + def validate_profile(self, data: Mapping[str, Any]) -> tuple[bool, list[str]]: + try: + LLM.model_validate(dict(data)) + except ValidationError as exc: + messages: list[str] = [] + for error in exc.errors(): + loc = ".".join(str(piece) for piece in error.get("loc", ())) + if loc: + messages.append(f"{loc}: {error.get('msg')}") + else: + messages.append(error.get("msg", "Unknown validation error")) + return False, messages + return True, [] + + def _load_profile_from_path(self, path: Path, name: str) -> LLM: + llm = LLM.load_from_json(str(path)) + if getattr(llm, "profile_id", None) != name: + try: + llm = llm.model_copy(update={"profile_id": name}) + except Exception: + llm.profile_id = name # type: ignore[attr-defined] + return llm diff --git a/scripts/worktree.sh b/scripts/worktree.sh new file mode 100755 index 0000000000..2b517a1ce3 --- /dev/null +++ b/scripts/worktree.sh @@ -0,0 +1,158 @@ +#!/usr/bin/env bash + +set -euo pipefail + +usage() { + cat <<'EOF' +Usage: $(basename "$0") [options] + +Commands: + list Show all worktrees managed by this repository + path Print the filesystem path for a worktree branch + create [start] Create a new worktree in ${WORKTREES_DIR:-.worktrees}/ + remove Remove the worktree for + prune Run 'git worktree prune' + open Open the worktree in VS Code (requires 'code' on PATH) + +Environment variables: + WORKTREES_DIR Override worktree base directory (default: /.worktrees) + DEFAULT_BASE Default branch/commit used when creating a new branch (default: origin/main) +EOF +} + +err() { + printf 'Error: %s\n' "$1" >&2 + exit 1 +} + +require_branch_name() { + if [[ -z "${1:-}" ]]; then + err "missing branch name" + fi +} + +resolve_repo_root() { + git rev-parse --show-toplevel 2>/dev/null || err "not inside a git repository" +} + +worktree_path_for_branch() { + local branch=$1 + printf '%s/%s' "$WORKTREES_DIR" "$branch" +} + +branch_exists() { + git rev-parse --verify --quiet "refs/heads/$1" >/dev/null +} + +branch_checked_out_elsewhere() { + local branch=$1 + git worktree list --porcelain | awk -v b="refs/heads/$branch" ' + $1 == "branch" && $2 == b { found = 1 } + END { exit found ? 0 : 1 } + ' +} + +create_worktree() { + local branch=$1 + local start_ref=${2:-$DEFAULT_BASE} + local path + path=$(worktree_path_for_branch "$branch") + + if [[ -d "$path" ]]; then + err "target path $path already exists" + fi + + if branch_checked_out_elsewhere "$branch"; then + err "branch $branch is already checked out in another worktree" + fi + + mkdir -p "$WORKTREES_DIR" + + if branch_exists "$branch"; then + git worktree add "$path" "$branch" + else + git worktree add -b "$branch" "$path" "$start_ref" + fi +} + +remove_worktree() { + local branch=$1 + local path + path=$(worktree_path_for_branch "$branch") + + if [[ ! 
-d "$path" ]]; then + err "no worktree directory found for branch $branch at $path" + fi + + git worktree remove "$path" +} + +open_in_vscode() { + local branch=$1 + local path + path=$(worktree_path_for_branch "$branch") + + if [[ ! -d "$path" ]]; then + err "no worktree directory found for branch $branch at $path" + fi + + if ! command -v code >/dev/null 2>&1; then + err "'code' executable not found on PATH" + fi + + code "$path" +} + +list_worktrees() { + git worktree list +} + +main() { + local repo_root + repo_root=$(resolve_repo_root) + cd "$repo_root" + + WORKTREES_DIR=${WORKTREES_DIR:-"$repo_root/.worktrees"} + DEFAULT_BASE=${DEFAULT_BASE:-origin/main} + + local command=${1:-} + + case "$command" in + list) + shift + list_worktrees "$@" + ;; + path) + shift + require_branch_name "${1:-}" + worktree_path_for_branch "$1" + ;; + create) + shift + require_branch_name "${1:-}" + create_worktree "$1" "${2:-}" + ;; + remove) + shift + require_branch_name "${1:-}" + remove_worktree "$1" + ;; + prune) + shift + git worktree prune "$@" + ;; + open) + shift + require_branch_name "${1:-}" + open_in_vscode "$1" + ;; + -h|--help|help|"") + usage + ;; + *) + err "unknown command: $command" + ;; + esac +} + +main "$@" diff --git a/tests/sdk/llm/test_profile_manager.py b/tests/sdk/llm/test_profile_manager.py index 6bda7646ce..0ff8aa918c 100644 --- a/tests/sdk/llm/test_profile_manager.py +++ b/tests/sdk/llm/test_profile_manager.py @@ -35,6 +35,24 @@ def test_save_profile_excludes_secret_fields(tmp_path): assert "aws_secret_access_key" not in data +def test_save_profile_can_include_secret_fields(tmp_path): + manager = ProfileManager(base_dir=tmp_path) + llm = LLM( + model="gpt-4o-mini", + service_id="service", + api_key=SecretStr("secret"), + aws_access_key_id=SecretStr("id"), + aws_secret_access_key=SecretStr("value"), + ) + + path = manager.save_profile("sample", llm, include_secrets=True) + data = json.loads(path.read_text(encoding="utf-8")) + + assert data["api_key"] == "secret" + assert data["aws_access_key_id"] == "id" + assert data["aws_secret_access_key"] == "value" + + def test_load_profile_assigns_profile_id_when_missing(tmp_path): manager = ProfileManager(base_dir=tmp_path) profile_path = tmp_path / "foo.json" @@ -68,3 +86,15 @@ def test_register_all_skips_invalid_and_duplicate_profiles(tmp_path): manager.register_all(registry) assert registry.list_services() == ["shared"] + + +def test_validate_profile_reports_errors(tmp_path): + manager = ProfileManager(base_dir=tmp_path) + + ok, errors = manager.validate_profile({"model": "gpt-4o-mini", "service_id": "svc"}) + assert ok + assert errors == [] + + ok, errors = manager.validate_profile({"service_id": "svc"}) + assert not ok + assert any("model" in message for message in errors) From e7cd0398bed73442a7ea46bd9ea6cded59e89970 Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Sat, 18 Oct 2025 23:28:38 +0200 Subject: [PATCH 09/37] Add example for managing LLM profiles Co-authored-by: openhands --- examples/01_standalone_sdk/25_llm_profiles.py | 76 +++++++++++++++++++ examples/llm-profiles/gpt-5-mini.json | 11 +++ 2 files changed, 87 insertions(+) create mode 100644 examples/01_standalone_sdk/25_llm_profiles.py create mode 100644 examples/llm-profiles/gpt-5-mini.json diff --git a/examples/01_standalone_sdk/25_llm_profiles.py b/examples/01_standalone_sdk/25_llm_profiles.py new file mode 100644 index 0000000000..c9870ec17a --- /dev/null +++ b/examples/01_standalone_sdk/25_llm_profiles.py @@ -0,0 +1,76 @@ +"""Create and use an LLM profile with 
:class:`ProfileManager`. + +Run with:: + + uv run python examples/01_standalone_sdk/25_llm_profiles.py + +Profiles are stored under ``~/.openhands/llm-profiles/.json`` by default. +Set ``LLM_PROFILE_NAME`` to pick a profile and ``LLM_API_KEY`` to supply +credentials when the profile omits secrets. +""" + +import os + +from pydantic import SecretStr + +from openhands.sdk import Agent, Conversation +from openhands.sdk.llm.llm import LLM +from openhands.sdk.llm.profile_manager import ProfileManager +from openhands.sdk.tool import Tool, register_tool +from openhands.tools.execute_bash import BashTool + + +DEFAULT_PROFILE_NAME = "gpt-5-mini" +PROFILE_NAME = os.getenv("LLM_PROFILE_NAME", DEFAULT_PROFILE_NAME) + + +def ensure_profile_exists(manager: ProfileManager, name: str) -> None: + """Create a starter profile in the default directory when missing.""" + + if name in manager.list_profiles(): + return + + profile_defaults = LLM( + model="litellm_proxy/openai/gpt-5-mini", + base_url="https://llm-proxy.eval.all-hands.dev", + temperature=0.2, + max_output_tokens=4096, + service_id="agent", + metadata={ + "profile_description": "Sample GPT-5 Mini profile created by example 25.", + }, + ) + path = manager.save_profile(name, profile_defaults) + print(f"Created profile '{name}' at {path}") + + +def load_profile(manager: ProfileManager, name: str) -> LLM: + llm = manager.load_profile(name) + if llm.api_key is None: + api_key = os.getenv("LLM_API_KEY") + if api_key is None: + raise RuntimeError( + "Set LLM_API_KEY to authenticate, or save the profile with " + "include_secrets=True." + ) + llm = llm.model_copy(update={"api_key": SecretStr(api_key)}) + return llm + + +def main() -> None: + manager = ProfileManager() + ensure_profile_exists(manager, PROFILE_NAME) + + llm = load_profile(manager, PROFILE_NAME) + + register_tool("BashTool", BashTool) + tools = [Tool(name="BashTool")] + agent = Agent(llm=llm, tools=tools) + + conversation = Conversation(agent=agent, workspace=os.getcwd()) + conversation.send_message("Print 'Profile created successfully.'") + conversation.run() + + +if __name__ == "__main__": # pragma: no cover + main() diff --git a/examples/llm-profiles/gpt-5-mini.json b/examples/llm-profiles/gpt-5-mini.json new file mode 100644 index 0000000000..906389c7cf --- /dev/null +++ b/examples/llm-profiles/gpt-5-mini.json @@ -0,0 +1,11 @@ +{ + "model": "litellm_proxy/openai/gpt-5-mini", + "base_url": "https://llm-proxy.eval.all-hands.dev", + "api_key": null, + "temperature": 0.2, + "max_output_tokens": 4096, + "service_id": "agent", + "metadata": { + "profile_description": "Sample configuration for the GPT-5 Mini profile managed by ProfileManager." + } +} From 269610a68054c22f3c884dc1ef12401dc8bb8efb Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Sat, 18 Oct 2025 23:34:46 +0200 Subject: [PATCH 10/37] Document plan for profile references Co-authored-by: openhands --- docs/llm_profiles.md | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/docs/llm_profiles.md b/docs/llm_profiles.md index e87b2ab6d2..e5964d469b 100644 --- a/docs/llm_profiles.md +++ b/docs/llm_profiles.md @@ -40,3 +40,46 @@ Notes on service_id rename - There is an ongoing discussion about renaming `LLM.service_id` to a clearer name (e.g., `usage_id` or `token_tracking_id`) because `service_id` is overloaded. We will not rename immediately; agent-sdk-23 will investigate the migration and impact. 
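To make the `service_id` / `profile_id` split concrete, here is a minimal sketch. It is not part of the patch; it assumes the ProfileManager from the earlier commits and a saved `gpt-5-mini` profile whose file stores `service_id: "agent"`:

```python
# Illustrative only: one stored profile can back several runtime usages.
# profile_id records which file the config came from; service_id stays a
# runtime usage slot ("agent", "condenser", ...).
from openhands.sdk.llm.profile_manager import ProfileManager

manager = ProfileManager()
agent_llm = manager.load_profile("gpt-5-mini")  # service_id comes from the file
condenser_llm = agent_llm.model_copy(update={"service_id": "condenser"})

assert agent_llm.profile_id == condenser_llm.profile_id == "gpt-5-mini"
assert {agent_llm.service_id, condenser_llm.service_id} == {"agent", "condenser"}
```
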
+ +## Proposed changes for agent-sdk-19 (profile references in persistence) + +### Goals +- Allow agent settings and conversation snapshots to reference stored LLM profiles by name instead of embedding full JSON payloads. +- Maintain backward compatibility with existing inline configurations. +- Enable a migration path so that users can opt in to profiles without losing existing data. + +### Persistence format updates +- **Agent settings (`~/.openhands/agent_settings.json`)** + - Add an optional `profile_id` (or `llm_profile`) field wherever an LLM is configured (agent, condenser, router, etc.). + - When `profile_id` is present, omit the inline LLM payload in favor of the reference. + - Continue accepting inline definitions when `profile_id` is absent. +- **Conversation base state (`~/.openhands/conversations//base_state.json`)** + - Store `profile_id` for any LLM that originated from a profile when the conversation was created. + - Inline the full LLM payload only when no profile reference exists. + +### Loader behavior +- On startup, configuration loaders must detect `profile_id` and load the corresponding LLM via `ProfileManager.load_profile(profile_id)`. +- If the referenced profile cannot be found, fall back to existing inline data (if available) and surface a clear warning. +- Inject secrets after loading (same flow used today when constructing LLM instances). + +### Writer behavior +- When persisting updated agent settings or conversation snapshots, write back the `profile_id` whenever the active LLM was sourced from a profile. +- Only write the raw LLM configuration for ad-hoc instances (no associated profile), preserving current behavior. + +### Migration helper +- Provide a utility (script or CLI command) that: + 1. Scans existing agent settings and conversation base states for inline LLM configs. + 2. Uses `ProfileManager.save_profile` to serialize them into `~/.openhands/llm-profiles/.json`. + 3. Rewrites the source files to reference the new profiles via `profile_id`. +- Keep the migration opt-in and idempotent so users can review changes before adopting profiles. + +### Testing & validation +- Extend persistence tests to cover: + - Loading agent settings with `profile_id` only. + - Mixed scenarios (profile reference plus inline fallback). + - Conversation snapshots that retain profile references across reloads. +- Add regression tests ensuring legacy inline-only configurations continue to work. + +### Follow-up coordination +- Subsequent tasks (agent-sdk-20/21/22) will build on this foundation to expose CLI flags, update documentation, and improve secrets handling. 
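As a rough sketch of the two payload shapes this plan distinguishes, the snippet below uses a hypothetical `resolve_llm` helper to stand in for the loader behavior described above; the real helper names land in a later commit:

```python
# Hedged sketch; resolve_llm is illustrative, not the final API.
inline_payload = {"agent": {"llm": {"model": "gpt-4o-mini", "service_id": "agent"}}}
reference_payload = {"agent": {"llm": {"profile_id": "gpt-5-mini"}}}


def resolve_llm(entry: dict, manager) -> dict:
    """Expand a profile reference; pass inline data through unchanged."""
    if "profile_id" in entry and "model" not in entry:
        # May raise FileNotFoundError; the plan calls for a warning plus
        # fallback to inline data when both are present.
        llm = manager.load_profile(entry["profile_id"])
        return llm.model_dump(exclude_none=True)
    return entry
```
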
+ From d0ab9524c32012e9fc1ac67f8daba203997d69f0 Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Sun, 19 Oct 2025 11:42:31 +0200 Subject: [PATCH 11/37] Integrate profile-aware persistence Co-authored-by: openhands --- docs/llm_profiles.md | 1 + examples/llm-profiles/example.json | 11 -- .../sdk/conversation/persistence_utils.py | 114 ++++++++++++++++++ openhands/sdk/conversation/state.py | 18 ++- openhands/sdk/utils/agent_settings.py | 51 ++++++++ .../local/test_state_serialization.py | 106 +++++++++++++++- tests/sdk/utils/test_agent_settings.py | 50 ++++++++ 7 files changed, 333 insertions(+), 18 deletions(-) delete mode 100644 examples/llm-profiles/example.json create mode 100644 openhands/sdk/conversation/persistence_utils.py create mode 100644 openhands/sdk/utils/agent_settings.py create mode 100644 tests/sdk/utils/test_agent_settings.py diff --git a/docs/llm_profiles.md b/docs/llm_profiles.md index e5964d469b..c64bb61847 100644 --- a/docs/llm_profiles.md +++ b/docs/llm_profiles.md @@ -65,6 +65,7 @@ Notes on service_id rename ### Writer behavior - When persisting updated agent settings or conversation snapshots, write back the `profile_id` whenever the active LLM was sourced from a profile. - Only write the raw LLM configuration for ad-hoc instances (no associated profile), preserving current behavior. +- Respect the `OPENHANDS_INLINE_CONVERSATIONS` flag (default: true for reproducibility). When enabled, always inline full LLM payloads—even if `profile_id` exists—and surface an error if a conversation only contains `profile_id` entries. ### Migration helper - Provide a utility (script or CLI command) that: diff --git a/examples/llm-profiles/example.json b/examples/llm-profiles/example.json deleted file mode 100644 index c1ff2e0537..0000000000 --- a/examples/llm-profiles/example.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "model": "gpt-4o-mini", - "base_url": "https://api.openai.com/v1", - "api_key": null, - "temperature": 0.0, - "max_output_tokens": 1024, - "service_id": "agent", - "metadata": { - "profile_description": "Example profile for local testing (no api_key stored)." - } -} diff --git a/openhands/sdk/conversation/persistence_utils.py b/openhands/sdk/conversation/persistence_utils.py new file mode 100644 index 0000000000..bf92d25040 --- /dev/null +++ b/openhands/sdk/conversation/persistence_utils.py @@ -0,0 +1,114 @@ +"""Helpers for serializing and deserializing persisted conversation data.""" + +from __future__ import annotations + +import os +from collections.abc import Mapping +from typing import Any + +from openhands.sdk.llm.profile_manager import ProfileManager + + +_INLINE_ENV_VAR = "OPENHANDS_INLINE_CONVERSATIONS" +_FALSE_VALUES = {"0", "false", "no"} + + +def should_inline_conversations() -> bool: + """Return True when conversations should be persisted with inline LLM payloads.""" + + value = os.getenv(_INLINE_ENV_VAR, "true").strip().lower() + return value not in _FALSE_VALUES + + +def prepare_payload_for_persistence( + payload: Mapping[str, Any], *, inline: bool | None = None +) -> dict[str, Any]: + """Return a payload ready to be written to disk. + + When ``inline`` is False and an LLM dict contains ``profile_id``, the body is + replaced with ``{"profile_id": }``. Otherwise the payload is left intact. 
+ """ + + inline = should_inline_conversations() if inline is None else inline + return _transform(payload, inline=inline, deserialize=False, profile_manager=None) + + +def expand_profiles_in_payload( + payload: Mapping[str, Any], + *, + inline: bool | None = None, + profile_manager: ProfileManager | None = None, +) -> dict[str, Any]: + """Expand persisted payload back into inline LLM dictionaries.""" + + inline = should_inline_conversations() if inline is None else inline + manager = profile_manager or ProfileManager() + return _transform(payload, inline=inline, deserialize=True, profile_manager=manager) + + +def _transform( + payload: Mapping[str, Any] | list[Any], + *, + inline: bool, + deserialize: bool, + profile_manager: ProfileManager | None, +) -> Any: + if isinstance(payload, Mapping): + data = { + key: _transform( + value, + inline=inline, + deserialize=deserialize, + profile_manager=profile_manager, + ) + for key, value in payload.items() + } + + if deserialize: + if _is_profile_reference(data): + if inline: + profile_id = data["profile_id"] + raise ValueError( + "Encountered profile reference for LLM while " + "OPENHANDS_INLINE_CONVERSATIONS is enabled. " + "Inline the profile or set " + "OPENHANDS_INLINE_CONVERSATIONS=false." + ) + assert profile_manager is not None + profile_id = data["profile_id"] + llm = profile_manager.load_profile(profile_id) + llm_dict = llm.model_dump(exclude_none=True) + llm_dict["profile_id"] = profile_id + return _transform( + llm_dict, + inline=inline, + deserialize=True, + profile_manager=profile_manager, + ) + else: + if not inline and _is_llm_dict(data): + profile_id = data.get("profile_id") + if profile_id: + return {"profile_id": profile_id} + return data + + if isinstance(payload, list): + return [ + _transform( + item, + inline=inline, + deserialize=deserialize, + profile_manager=profile_manager, + ) + for item in payload + ] + + return payload + + +def _is_llm_dict(value: Mapping[str, Any]) -> bool: + return "model" in value and "service_id" in value + + +def _is_profile_reference(value: Mapping[str, Any]) -> bool: + return "profile_id" in value and "model" not in value diff --git a/openhands/sdk/conversation/state.py b/openhands/sdk/conversation/state.py index 1ac0428b57..7958e402f2 100644 --- a/openhands/sdk/conversation/state.py +++ b/openhands/sdk/conversation/state.py @@ -11,6 +11,11 @@ from openhands.sdk.conversation.event_store import EventLog from openhands.sdk.conversation.fifo_lock import FIFOLock from openhands.sdk.conversation.persistence_const import BASE_STATE, EVENTS_DIR +from openhands.sdk.conversation.persistence_utils import ( + expand_profiles_in_payload, + prepare_payload_for_persistence, + should_inline_conversations, +) from openhands.sdk.conversation.secrets_manager import SecretsManager from openhands.sdk.conversation.types import ConversationCallbackType, ConversationID from openhands.sdk.event import ActionEvent, ObservationEvent, UserRejectObservation @@ -133,8 +138,11 @@ def _save_base_state(self, fs: FileStore) -> None: """ Persist base state snapshot (no events; events are file-backed). 
""" - payload = self.model_dump_json(exclude_none=True) - fs.write(BASE_STATE, payload) + inline = should_inline_conversations() + payload = prepare_payload_for_persistence( + self.model_dump(mode="json", exclude_none=True), inline=inline + ) + fs.write(BASE_STATE, json.dumps(payload)) # ===== Factory: open-or-create (no load/save methods needed) ===== @classmethod @@ -161,9 +169,13 @@ def create( except FileNotFoundError: base_text = None + inline_mode = should_inline_conversations() + # ---- Resume path ---- if base_text: - state = cls.model_validate(json.loads(base_text)) + raw_payload = json.loads(base_text) + payload = expand_profiles_in_payload(raw_payload, inline=inline_mode) + state = cls.model_validate(payload) # Enforce conversation id match if state.id != id: diff --git a/openhands/sdk/utils/agent_settings.py b/openhands/sdk/utils/agent_settings.py new file mode 100644 index 0000000000..3047b8cd58 --- /dev/null +++ b/openhands/sdk/utils/agent_settings.py @@ -0,0 +1,51 @@ +"""Utilities for reading and writing agent_settings.json.""" + +from __future__ import annotations + +import json +from collections.abc import Mapping +from pathlib import Path +from typing import Any + +from openhands.sdk.conversation.persistence_utils import ( + expand_profiles_in_payload, + prepare_payload_for_persistence, +) +from openhands.sdk.llm.profile_manager import ProfileManager + + +DEFAULT_AGENT_SETTINGS_PATH = Path.home() / ".openhands" / "agent_settings.json" + + +def load_agent_settings( + path: Path | str | None = None, + *, + inline: bool | None = None, + profile_manager: ProfileManager | None = None, +) -> dict[str, Any]: + """Load agent settings from ``path`` applying profile expansion.""" + + settings_path = Path(path) if path is not None else DEFAULT_AGENT_SETTINGS_PATH + with settings_path.open("r", encoding="utf-8") as fh: + payload = json.load(fh) + return expand_profiles_in_payload( + payload, + inline=inline, + profile_manager=profile_manager, + ) + + +def save_agent_settings( + settings: Mapping[str, Any], + path: Path | str | None = None, + *, + inline: bool | None = None, +) -> Path: + """Persist ``settings`` to disk, returning the destination path.""" + + settings_path = Path(path) if path is not None else DEFAULT_AGENT_SETTINGS_PATH + settings_path.parent.mkdir(parents=True, exist_ok=True) + payload = prepare_payload_for_persistence(settings, inline=inline) + with settings_path.open("w", encoding="utf-8") as fh: + json.dump(payload, fh, indent=2, ensure_ascii=False) + return settings_path diff --git a/tests/sdk/conversation/local/test_state_serialization.py b/tests/sdk/conversation/local/test_state_serialization.py index 3e0489bb8f..c4954e408c 100644 --- a/tests/sdk/conversation/local/test_state_serialization.py +++ b/tests/sdk/conversation/local/test_state_serialization.py @@ -15,6 +15,7 @@ from openhands.sdk.event.llm_convertible import MessageEvent, SystemPromptEvent from openhands.sdk.llm import LLM, Message, TextContent from openhands.sdk.llm.llm_registry import RegistryEvent +from openhands.sdk.llm.profile_manager import ProfileManager from openhands.sdk.security.confirmation_policy import AlwaysConfirm from openhands.sdk.workspace import LocalWorkspace @@ -125,13 +126,100 @@ def test_conversation_state_persistence_save_load(): assert isinstance(loaded_state.events[1], MessageEvent) assert loaded_state.agent.llm.model == agent.llm.model assert loaded_state.agent.__class__ == agent.__class__ - # Test model_dump equality - assert loaded_state.model_dump(mode="json") == 
state.model_dump(mode="json") + # Test model_dump equality ignoring any additional runtime stats + loaded_dump = loaded_state.model_dump(mode="json") + original_dump = state.model_dump(mode="json") + loaded_stats = loaded_dump.pop("stats", None) + original_stats = original_dump.pop("stats", None) + assert loaded_dump == original_dump + if original_stats is not None: + assert loaded_stats is not None + loaded_metrics = loaded_stats.get("service_to_metrics", {}) + for key, metric in original_stats.get("service_to_metrics", {}).items(): + assert key in loaded_metrics + assert loaded_metrics[key] == metric # Also verify key fields are preserved assert loaded_state.id == state.id assert len(loaded_state.events) == len(state.events) +def test_conversation_state_profile_reference_mode(tmp_path, monkeypatch): + """When inline persistence is disabled we store profile references.""" + + home_dir = tmp_path / "home" + monkeypatch.setenv("HOME", str(home_dir)) + monkeypatch.setenv("OPENHANDS_INLINE_CONVERSATIONS", "false") + + manager = ProfileManager() + llm = LLM(model="litellm_proxy/openai/gpt-5-mini", service_id="agent") + manager.save_profile("profile-tests", llm) + + agent = Agent(llm=manager.load_profile("profile-tests"), tools=[]) + conv_id = uuid.UUID("12345678-1234-5678-9abc-1234567890ff") + persistence_root = tmp_path / "conv" + persistence_dir = LocalConversation.get_persistence_dir(persistence_root, conv_id) + + ConversationState.create( + workspace=LocalWorkspace(working_dir="/tmp"), + persistence_dir=persistence_dir, + agent=agent, + id=conv_id, + ) + + base_state = json.loads((Path(persistence_dir) / "base_state.json").read_text()) + assert base_state["agent"]["llm"] == {"profile_id": "profile-tests"} + + conversation = Conversation( + agent=agent, + persistence_dir=persistence_root, + workspace=LocalWorkspace(working_dir="/tmp"), + conversation_id=conv_id, + ) + + loaded_state = conversation.state + assert loaded_state.agent.llm.profile_id == "profile-tests" + assert loaded_state.agent.llm.model == llm.model + + +def test_conversation_state_inline_mode_errors_on_profile_reference( + tmp_path, monkeypatch +): + """Inline mode raises when encountering a persisted profile reference.""" + + home_dir = tmp_path / "home" + monkeypatch.setenv("HOME", str(home_dir)) + monkeypatch.setenv("OPENHANDS_INLINE_CONVERSATIONS", "false") + + manager = ProfileManager() + llm = LLM(model="litellm_proxy/openai/gpt-5-mini", service_id="agent") + manager.save_profile("profile-inline", llm) + agent = Agent(llm=manager.load_profile("profile-inline"), tools=[]) + + conv_id = uuid.UUID("12345678-1234-5678-9abc-1234567890aa") + persistence_root = tmp_path / "conv" + persistence_dir = LocalConversation.get_persistence_dir(persistence_root, conv_id) + + ConversationState.create( + workspace=LocalWorkspace(working_dir="/tmp"), + persistence_dir=persistence_dir, + agent=agent, + id=conv_id, + ) + + # Switch env back to inline mode and expect a failure on reload + monkeypatch.setenv("OPENHANDS_INLINE_CONVERSATIONS", "true") + + with pytest.raises(ValueError) as exc: + Conversation( + agent=agent, + persistence_dir=persistence_root, + workspace=LocalWorkspace(working_dir="/tmp"), + conversation_id=conv_id, + ) + + assert "OPENHANDS_INLINE_CONVERSATIONS" in str(exc.value) + + def test_conversation_state_incremental_save(): """Test that ConversationState saves events incrementally.""" with tempfile.TemporaryDirectory() as temp_dir: @@ -184,8 +272,18 @@ def test_conversation_state_incremental_save(): assert 
conversation.state.persistence_dir == persist_path_for_state loaded_state = conversation._state assert len(loaded_state.events) == 2 - # Test model_dump equality - assert loaded_state.model_dump(mode="json") == state.model_dump(mode="json") + # Test model_dump equality ignoring any additional runtime stats + loaded_dump = loaded_state.model_dump(mode="json") + original_dump = state.model_dump(mode="json") + loaded_stats = loaded_dump.pop("stats", None) + original_stats = original_dump.pop("stats", None) + assert loaded_dump == original_dump + if original_stats is not None: + assert loaded_stats is not None + loaded_metrics = loaded_stats.get("service_to_metrics", {}) + for key, metric in original_stats.get("service_to_metrics", {}).items(): + assert key in loaded_metrics + assert loaded_metrics[key] == metric def test_conversation_state_event_file_scanning(): diff --git a/tests/sdk/utils/test_agent_settings.py b/tests/sdk/utils/test_agent_settings.py new file mode 100644 index 0000000000..353aa0aff2 --- /dev/null +++ b/tests/sdk/utils/test_agent_settings.py @@ -0,0 +1,50 @@ +"""Tests for agent settings helpers.""" + +from __future__ import annotations + +import json + +from openhands.sdk.llm import LLM +from openhands.sdk.llm.profile_manager import ProfileManager +from openhands.sdk.utils.agent_settings import load_agent_settings, save_agent_settings + + +def test_agent_settings_inline_default(tmp_path, monkeypatch): + monkeypatch.setenv("HOME", str(tmp_path / "home")) + settings_path = tmp_path / "agent_settings.json" + + llm = LLM(model="gpt-4o-mini", service_id="agent") + settings = {"agent": {"llm": llm.model_dump(exclude_none=True)}} + + save_agent_settings(settings, settings_path) + + stored = json.loads(settings_path.read_text(encoding="utf-8")) + assert stored["agent"]["llm"]["model"] == "gpt-4o-mini" + + loaded = load_agent_settings(settings_path) + assert loaded["agent"]["llm"]["model"] == "gpt-4o-mini" + + +def test_agent_settings_profile_reference_mode(tmp_path, monkeypatch): + home_dir = tmp_path / "home" + monkeypatch.setenv("HOME", str(home_dir)) + monkeypatch.setenv("OPENHANDS_INLINE_CONVERSATIONS", "false") + + manager = ProfileManager() + profile_name = "settings-profile" + manager.save_profile( + profile_name, LLM(model="litellm_proxy/openai/gpt-5-mini", service_id="agent") + ) + + llm = manager.load_profile(profile_name) + settings_path = tmp_path / "agent_settings.json" + settings = {"agent": {"llm": llm.model_dump(exclude_none=True)}} + + save_agent_settings(settings, settings_path) + + stored = json.loads(settings_path.read_text(encoding="utf-8")) + assert stored["agent"]["llm"] == {"profile_id": profile_name} + + loaded = load_agent_settings(settings_path) + assert loaded["agent"]["llm"]["profile_id"] == profile_name + assert loaded["agent"]["llm"]["model"] == "litellm_proxy/openai/gpt-5-mini" From f74d0509f48742529991238227c329489fb388db Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Sun, 19 Oct 2025 11:49:40 +0200 Subject: [PATCH 12/37] Simplify profile registration logging Co-authored-by: openhands --- .../sdk/conversation/impl/local_conversation.py | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/openhands/sdk/conversation/impl/local_conversation.py b/openhands/sdk/conversation/impl/local_conversation.py index 045eb89528..0c2109a179 100644 --- a/openhands/sdk/conversation/impl/local_conversation.py +++ b/openhands/sdk/conversation/impl/local_conversation.py @@ -17,6 +17,7 @@ ) from openhands.sdk.llm import LLM, Message, 
TextContent from openhands.sdk.llm.llm_registry import LLMRegistry +from openhands.sdk.llm.profile_manager import ProfileManager from openhands.sdk.logger import get_logger from openhands.sdk.security.confirmation_policy import ( ConfirmationPolicyBase, @@ -110,19 +111,11 @@ def _default_callback(e): for llm in list(self.agent.get_all_llms()): self.llm_registry.add(llm) - # Eagerly discover and register LLM profiles from disk so they are - # available through the registry (profiles are stored under - # ~/.openhands/llm-profiles/*.json). This keeps behavior backward - # compatible while making named profiles discoverable to the runtime. + # Eagerly register LLM profiles from disk. try: - from openhands.sdk.llm.profile_manager import ProfileManager - ProfileManager().register_all(self.llm_registry) except Exception: - # Do not fail conversation initialization if profile loading has problems - logger.debug( - "No LLM profiles registered or failed to load profiles", exc_info=True - ) + logger.debug("No LLM profiles registered") # Initialize secrets if provided if secrets: From df308fb991d1a9401c0f50cda63079e98057f47e Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Sun, 19 Oct 2025 11:54:43 +0200 Subject: [PATCH 13/37] Normalize inline_mode naming Co-authored-by: openhands --- openhands/sdk/conversation/persistence_utils.py | 12 ++++++++---- openhands/sdk/conversation/state.py | 4 ++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/openhands/sdk/conversation/persistence_utils.py b/openhands/sdk/conversation/persistence_utils.py index bf92d25040..11e3b5573c 100644 --- a/openhands/sdk/conversation/persistence_utils.py +++ b/openhands/sdk/conversation/persistence_utils.py @@ -29,8 +29,10 @@ def prepare_payload_for_persistence( replaced with ``{"profile_id": }``. Otherwise the payload is left intact. """ - inline = should_inline_conversations() if inline is None else inline - return _transform(payload, inline=inline, deserialize=False, profile_manager=None) + inline_mode = should_inline_conversations() if inline is None else inline + return _transform( + payload, inline=inline_mode, deserialize=False, profile_manager=None + ) def expand_profiles_in_payload( @@ -41,9 +43,11 @@ def expand_profiles_in_payload( ) -> dict[str, Any]: """Expand persisted payload back into inline LLM dictionaries.""" - inline = should_inline_conversations() if inline is None else inline + inline_mode = should_inline_conversations() if inline is None else inline manager = profile_manager or ProfileManager() - return _transform(payload, inline=inline, deserialize=True, profile_manager=manager) + return _transform( + payload, inline=inline_mode, deserialize=True, profile_manager=manager + ) def _transform( diff --git a/openhands/sdk/conversation/state.py b/openhands/sdk/conversation/state.py index 7958e402f2..6671ad9446 100644 --- a/openhands/sdk/conversation/state.py +++ b/openhands/sdk/conversation/state.py @@ -138,9 +138,9 @@ def _save_base_state(self, fs: FileStore) -> None: """ Persist base state snapshot (no events; events are file-backed). 
""" - inline = should_inline_conversations() + inline_mode = should_inline_conversations() payload = prepare_payload_for_persistence( - self.model_dump(mode="json", exclude_none=True), inline=inline + self.model_dump(mode="json", exclude_none=True), inline=inline_mode ) fs.write(BASE_STATE, json.dumps(payload)) From 4d293db45de870ad00dbe46d26ac2a4191e54828 Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Sun, 19 Oct 2025 12:01:48 +0200 Subject: [PATCH 14/37] Simplify profile_id sync in ProfileManager Co-authored-by: openhands --- openhands/sdk/llm/profile_manager.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/openhands/sdk/llm/profile_manager.py b/openhands/sdk/llm/profile_manager.py index f2d63463f3..7f10bd6f8b 100644 --- a/openhands/sdk/llm/profile_manager.py +++ b/openhands/sdk/llm/profile_manager.py @@ -92,9 +92,6 @@ def validate_profile(self, data: Mapping[str, Any]) -> tuple[bool, list[str]]: def _load_profile_from_path(self, path: Path, name: str) -> LLM: llm = LLM.load_from_json(str(path)) - if getattr(llm, "profile_id", None) != name: - try: - llm = llm.model_copy(update={"profile_id": name}) - except Exception: - llm.profile_id = name # type: ignore[attr-defined] + if llm.profile_id != name: + llm = llm.model_copy(update={"profile_id": name}) return llm From 7d1a525292500d965c9ccb3490ed7dda22197142 Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Sun, 19 Oct 2025 12:06:26 +0200 Subject: [PATCH 15/37] Rename profile sync helper Co-authored-by: openhands --- openhands/sdk/llm/profile_manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/openhands/sdk/llm/profile_manager.py b/openhands/sdk/llm/profile_manager.py index 7f10bd6f8b..b4a987c14e 100644 --- a/openhands/sdk/llm/profile_manager.py +++ b/openhands/sdk/llm/profile_manager.py @@ -45,7 +45,7 @@ def load_profile(self, name: str) -> LLM: path = self.get_profile_path(name) if not path.exists(): raise FileNotFoundError(f"Profile not found: {name} -> {path}") - return self._load_profile_from_path(path, name) + return self._load_profile_with_synced_id(path, name) def save_profile(self, name: str, llm: LLM, include_secrets: bool = False) -> Path: path = self.get_profile_path(name) @@ -90,7 +90,7 @@ def validate_profile(self, data: Mapping[str, Any]) -> tuple[bool, list[str]]: return False, messages return True, [] - def _load_profile_from_path(self, path: Path, name: str) -> LLM: + def _load_profile_with_synced_id(self, path: Path, name: str) -> LLM: llm = LLM.load_from_json(str(path)) if llm.profile_id != name: llm = llm.model_copy(update={"profile_id": name}) From ec45ed563676c9e9dd8a8a3c495332ab244691c3 Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Sun, 19 Oct 2025 13:43:41 +0200 Subject: [PATCH 16/37] LLMRegistry handles profile management - embed profile lifecycle APIs into the registry - update persistence helpers, docs, and examples to use registry - replace profile manager tests with registry profile coverage Co-authored-by: openhands --- docs/llm_profiles.md | 10 +- examples/01_standalone_sdk/25_llm_profiles.py | 20 +-- examples/llm-profiles/gpt-5-mini.json | 2 +- .../conversation/impl/local_conversation.py | 3 +- .../sdk/conversation/persistence_utils.py | 24 ++- openhands/sdk/llm/llm_registry.py | 150 ++++++++++++++---- openhands/sdk/llm/profile_manager.py | 97 ----------- openhands/sdk/utils/agent_settings.py | 6 +- .../local/test_state_serialization.py | 15 +- ...nager.py => test_llm_registry_profiles.py} | 34 ++-- tests/sdk/utils/test_agent_settings.py | 8 +- 11 
files changed, 182 insertions(+), 187 deletions(-) delete mode 100644 openhands/sdk/llm/profile_manager.py rename tests/sdk/llm/{test_profile_manager.py => test_llm_registry_profiles.py} (71%) diff --git a/docs/llm_profiles.md b/docs/llm_profiles.md index c64bb61847..012e5197c5 100644 --- a/docs/llm_profiles.md +++ b/docs/llm_profiles.md @@ -9,15 +9,15 @@ Key decisions - Reuse the existing LLM Pydantic model schema. A profile file is simply the JSON dump of an LLM instance (the same shape produced by LLM.model_dump(exclude_none=True) or LLM.load_from_json). - Storage location: ~/.openhands/llm-profiles/.json. The profile_name is the filename (no extension) used to refer to the profile. - Do not change ConversationState or Agent serialization format for now. Profiles are a convenience for creating LLM instances and registering them in the runtime LLMRegistry. -- Secrets: do NOT store plaintext API keys in profile files by default. Prefer storing the env var name in the LLM.api_key (via LLM.load_from_env) or keep the API key in runtime SecretsManager. The ProfileManager.save_profile API will expose an include_secrets flag; default False. +- Secrets: do NOT store plaintext API keys in profile files by default. Prefer storing the env var name in the LLM.api_key (via LLM.load_from_env) or keep the API key in runtime SecretsManager. The LLMRegistry.save_profile API exposes an include_secrets flag; default False. - LLM.service_id semantics: keep current behavior (a small set of runtime "usage" identifiers such as 'agent', 'condenser', 'title-gen', etc.). Do not use service_id as the profile name. We will evaluate a rename (service_id -> usage_id) in a separate task (see agent-sdk-23). -ProfileManager API (summary) +LLMRegistry profile API (summary) - list_profiles() -> list[str] - load_profile(name: str) -> LLM - save_profile(name: str, llm: LLM, include_secrets: bool = False) -> str (path) -- register_all(registry: LLMRegistry) -> None +- register_profiles(profile_ids: Iterable[str] | None = None) -> None Implementation notes @@ -58,7 +58,7 @@ Notes on service_id rename - Inline the full LLM payload only when no profile reference exists. ### Loader behavior -- On startup, configuration loaders must detect `profile_id` and load the corresponding LLM via `ProfileManager.load_profile(profile_id)`. +- On startup, configuration loaders must detect `profile_id` and load the corresponding LLM via `LLMRegistry.load_profile(profile_id)`. - If the referenced profile cannot be found, fall back to existing inline data (if available) and surface a clear warning. - Inject secrets after loading (same flow used today when constructing LLM instances). @@ -70,7 +70,7 @@ Notes on service_id rename ### Migration helper - Provide a utility (script or CLI command) that: 1. Scans existing agent settings and conversation base states for inline LLM configs. - 2. Uses `ProfileManager.save_profile` to serialize them into `~/.openhands/llm-profiles/.json`. + 2. Uses `LLMRegistry.save_profile` to serialize them into `~/.openhands/llm-profiles/.json`. 3. Rewrites the source files to reference the new profiles via `profile_id`. - Keep the migration opt-in and idempotent so users can review changes before adopting profiles. 
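For reference, a quick usage sketch of the consolidated registry API this commit introduces (the `profile_dir` argument is shown for clarity; it is also the default location):

```python
from openhands.sdk.llm.llm import LLM
from openhands.sdk.llm.llm_registry import LLMRegistry

registry = LLMRegistry(profile_dir="~/.openhands/llm-profiles")
registry.save_profile(
    "gpt-5-mini",
    LLM(model="litellm_proxy/openai/gpt-5-mini", service_id="agent"),
)

print(registry.list_profiles())            # ["gpt-5-mini"]
llm = registry.load_profile("gpt-5-mini")  # profile_id synced to the filename stem
registry.register_profiles()               # register every stored profile in memory
```
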
diff --git a/examples/01_standalone_sdk/25_llm_profiles.py b/examples/01_standalone_sdk/25_llm_profiles.py index c9870ec17a..f5a32274e9 100644 --- a/examples/01_standalone_sdk/25_llm_profiles.py +++ b/examples/01_standalone_sdk/25_llm_profiles.py @@ -1,4 +1,4 @@ -"""Create and use an LLM profile with :class:`ProfileManager`. +"""Create and use an LLM profile with :class:`LLMRegistry`. Run with:: @@ -15,7 +15,7 @@ from openhands.sdk import Agent, Conversation from openhands.sdk.llm.llm import LLM -from openhands.sdk.llm.profile_manager import ProfileManager +from openhands.sdk.llm.llm_registry import LLMRegistry from openhands.sdk.tool import Tool, register_tool from openhands.tools.execute_bash import BashTool @@ -24,10 +24,10 @@ PROFILE_NAME = os.getenv("LLM_PROFILE_NAME", DEFAULT_PROFILE_NAME) -def ensure_profile_exists(manager: ProfileManager, name: str) -> None: +def ensure_profile_exists(registry: LLMRegistry, name: str) -> None: """Create a starter profile in the default directory when missing.""" - if name in manager.list_profiles(): + if name in registry.list_profiles(): return profile_defaults = LLM( @@ -40,12 +40,12 @@ def ensure_profile_exists(manager: ProfileManager, name: str) -> None: "profile_description": "Sample GPT-5 Mini profile created by example 25.", }, ) - path = manager.save_profile(name, profile_defaults) + path = registry.save_profile(name, profile_defaults) print(f"Created profile '{name}' at {path}") -def load_profile(manager: ProfileManager, name: str) -> LLM: - llm = manager.load_profile(name) +def load_profile(registry: LLMRegistry, name: str) -> LLM: + llm = registry.load_profile(name) if llm.api_key is None: api_key = os.getenv("LLM_API_KEY") if api_key is None: @@ -58,10 +58,10 @@ def load_profile(manager: ProfileManager, name: str) -> LLM: def main() -> None: - manager = ProfileManager() - ensure_profile_exists(manager, PROFILE_NAME) + registry = LLMRegistry() + ensure_profile_exists(registry, PROFILE_NAME) - llm = load_profile(manager, PROFILE_NAME) + llm = load_profile(registry, PROFILE_NAME) register_tool("BashTool", BashTool) tools = [Tool(name="BashTool")] diff --git a/examples/llm-profiles/gpt-5-mini.json b/examples/llm-profiles/gpt-5-mini.json index 906389c7cf..e87d1ada8d 100644 --- a/examples/llm-profiles/gpt-5-mini.json +++ b/examples/llm-profiles/gpt-5-mini.json @@ -6,6 +6,6 @@ "max_output_tokens": 4096, "service_id": "agent", "metadata": { - "profile_description": "Sample configuration for the GPT-5 Mini profile managed by ProfileManager." + "profile_description": "Sample configuration for the GPT-5 Mini profile managed by the LLM registry." } } diff --git a/openhands/sdk/conversation/impl/local_conversation.py b/openhands/sdk/conversation/impl/local_conversation.py index 0c2109a179..d75c38e891 100644 --- a/openhands/sdk/conversation/impl/local_conversation.py +++ b/openhands/sdk/conversation/impl/local_conversation.py @@ -17,7 +17,6 @@ ) from openhands.sdk.llm import LLM, Message, TextContent from openhands.sdk.llm.llm_registry import LLMRegistry -from openhands.sdk.llm.profile_manager import ProfileManager from openhands.sdk.logger import get_logger from openhands.sdk.security.confirmation_policy import ( ConfirmationPolicyBase, @@ -113,7 +112,7 @@ def _default_callback(e): # Eagerly register LLM profiles from disk. 
try: - ProfileManager().register_all(self.llm_registry) + self.llm_registry.register_profiles() except Exception: logger.debug("No LLM profiles registered") diff --git a/openhands/sdk/conversation/persistence_utils.py b/openhands/sdk/conversation/persistence_utils.py index 11e3b5573c..0014e5cfa5 100644 --- a/openhands/sdk/conversation/persistence_utils.py +++ b/openhands/sdk/conversation/persistence_utils.py @@ -6,7 +6,7 @@ from collections.abc import Mapping from typing import Any -from openhands.sdk.llm.profile_manager import ProfileManager +from openhands.sdk.llm.llm_registry import LLMRegistry _INLINE_ENV_VAR = "OPENHANDS_INLINE_CONVERSATIONS" @@ -30,23 +30,21 @@ def prepare_payload_for_persistence( """ inline_mode = should_inline_conversations() if inline is None else inline - return _transform( - payload, inline=inline_mode, deserialize=False, profile_manager=None - ) + return _transform(payload, inline=inline_mode, deserialize=False, llm_registry=None) def expand_profiles_in_payload( payload: Mapping[str, Any], *, inline: bool | None = None, - profile_manager: ProfileManager | None = None, + llm_registry: LLMRegistry | None = None, ) -> dict[str, Any]: """Expand persisted payload back into inline LLM dictionaries.""" inline_mode = should_inline_conversations() if inline is None else inline - manager = profile_manager or ProfileManager() + registry = llm_registry or LLMRegistry() return _transform( - payload, inline=inline_mode, deserialize=True, profile_manager=manager + payload, inline=inline_mode, deserialize=True, llm_registry=registry ) @@ -55,7 +53,7 @@ def _transform( *, inline: bool, deserialize: bool, - profile_manager: ProfileManager | None, + llm_registry: LLMRegistry | None, ) -> Any: if isinstance(payload, Mapping): data = { @@ -63,7 +61,7 @@ def _transform( value, inline=inline, deserialize=deserialize, - profile_manager=profile_manager, + llm_registry=llm_registry, ) for key, value in payload.items() } @@ -78,16 +76,16 @@ def _transform( "Inline the profile or set " "OPENHANDS_INLINE_CONVERSATIONS=false." ) - assert profile_manager is not None + assert llm_registry is not None profile_id = data["profile_id"] - llm = profile_manager.load_profile(profile_id) + llm = llm_registry.load_profile(profile_id) llm_dict = llm.model_dump(exclude_none=True) llm_dict["profile_id"] = profile_id return _transform( llm_dict, inline=inline, deserialize=True, - profile_manager=profile_manager, + llm_registry=llm_registry, ) else: if not inline and _is_llm_dict(data): @@ -102,7 +100,7 @@ def _transform( item, inline=inline, deserialize=deserialize, - profile_manager=profile_manager, + llm_registry=llm_registry, ) for item in payload ] diff --git a/openhands/sdk/llm/llm_registry.py b/openhands/sdk/llm/llm_registry.py index 63a8e68791..6c9ef104cb 100644 --- a/openhands/sdk/llm/llm_registry.py +++ b/openhands/sdk/llm/llm_registry.py @@ -1,7 +1,12 @@ -from collections.abc import Callable +from __future__ import annotations + +import json +from collections.abc import Callable, Iterable, Mapping +from pathlib import Path +from typing import Any from uuid import uuid4 -from pydantic import BaseModel, ConfigDict +from pydantic import BaseModel, ConfigDict, SecretStr, ValidationError from openhands.sdk.llm.llm import LLM from openhands.sdk.logger import get_logger @@ -9,6 +14,13 @@ logger = get_logger(__name__) +_SECRET_FIELDS: tuple[str, ...] 
= ( + "api_key", + "aws_access_key_id", + "aws_secret_access_key", +) +_DEFAULT_PROFILE_DIR = Path.home() / ".openhands" / "llm-profiles" + class RegistryEvent(BaseModel): llm: LLM @@ -19,25 +31,25 @@ class RegistryEvent(BaseModel): class LLMRegistry: - """A minimal LLM registry for managing LLM instances by service ID. - - This registry provides a simple way to manage multiple LLM instances, - avoiding the need to recreate LLMs with the same configuration. - """ + """Manage in-memory LLM instances and their on-disk profiles.""" def __init__( self, retry_listener: Callable[[int, int], None] | None = None, + profile_dir: str | Path | None = None, ): """Initialize the LLM registry. Args: retry_listener: Optional callback for retry events. + profile_dir: Directory where LLM profiles are persisted. Defaults to + ``~/.openhands/llm-profiles`` when not provided. """ self.registry_id = str(uuid4()) self.retry_listener = retry_listener self.service_to_llm: dict[str, LLM] = {} self.subscriber: Callable[[RegistryEvent], None] | None = None + self.profile_dir = self._resolve_profile_dir(profile_dir) def subscribe(self, callback: Callable[[RegistryEvent], None]) -> None: """Subscribe to registry events. @@ -56,8 +68,8 @@ def notify(self, event: RegistryEvent) -> None: if self.subscriber: try: self.subscriber(event) - except Exception as e: - logger.warning(f"Failed to emit event: {e}") + except Exception as exc: # noqa: BLE001 + logger.warning("Failed to emit event: %s", exc) def add(self, llm: LLM) -> None: """Add an LLM instance to the registry. @@ -66,7 +78,7 @@ def add(self, llm: LLM) -> None: llm: The LLM instance to register. Raises: - ValueError: If llm.service_id already exists in the registry. + ValueError: If ``llm.service_id`` already exists in the registry. """ service_id = llm.service_id if service_id in self.service_to_llm: @@ -79,21 +91,11 @@ def add(self, llm: LLM) -> None: self.service_to_llm[service_id] = llm self.notify(RegistryEvent(llm=llm)) logger.info( - f"[LLM registry {self.registry_id}]: Added LLM for service {service_id}" + "[LLM registry %s]: Added LLM for service %s", self.registry_id, service_id ) def get(self, service_id: str) -> LLM: - """Get an LLM instance from the registry. - - Args: - service_id: Unique identifier for the LLM service. - - Returns: - The LLM instance. - - Raises: - KeyError: If service_id is not found in the registry. - """ + """Get an LLM instance from the registry.""" if service_id not in self.service_to_llm: raise KeyError( f"Service ID '{service_id}' not found in registry. " @@ -101,14 +103,108 @@ def get(self, service_id: str) -> LLM: ) logger.info( - f"[LLM registry {self.registry_id}]: Retrieved LLM for service {service_id}" + "[LLM registry %s]: Retrieved LLM for service %s", + self.registry_id, + service_id, ) return self.service_to_llm[service_id] def list_services(self) -> list[str]: - """List all registered service IDs. 
+ """Return all registered service IDs.""" + return list(self.service_to_llm.keys()) + + # ------------------------------------------------------------------ + # Profile management helpers + # ------------------------------------------------------------------ + def list_profiles(self) -> list[str]: + """List all profile IDs stored on disk.""" + return sorted(path.stem for path in self.profile_dir.glob("*.json")) + + def get_profile_path(self, profile_id: str) -> Path: + """Return the path where ``profile_id`` is stored.""" + return self.profile_dir / f"{profile_id}.json" + + def load_profile(self, profile_id: str) -> LLM: + """Load ``profile_id`` from disk and return an :class:`LLM`.""" + path = self.get_profile_path(profile_id) + if not path.exists(): + raise FileNotFoundError(f"Profile not found: {profile_id} -> {path}") + return self._load_profile_with_synced_id(path, profile_id) + + def save_profile( + self, profile_id: str, llm: LLM, include_secrets: bool = False + ) -> Path: + """Persist ``llm`` under ``profile_id``. - Returns: - List of service IDs currently in the registry. + Args: + profile_id: Destination identifier (filename stem). + llm: Instance to serialize. + include_secrets: When True, persist secret values instead of omitting + them from the stored payload. """ - return list(self.service_to_llm.keys()) + path = self.get_profile_path(profile_id) + data = llm.model_dump(exclude_none=True) + data["profile_id"] = profile_id + if not include_secrets: + for secret_field in _SECRET_FIELDS: + data.pop(secret_field, None) + else: + for secret_field in _SECRET_FIELDS: + value = data.get(secret_field) + if isinstance(value, SecretStr): + data[secret_field] = value.get_secret_value() + + with path.open("w", encoding="utf-8") as handle: + json.dump(data, handle, indent=2, ensure_ascii=False) + logger.info("Saved profile %s -> %s", profile_id, path) + return path + + def register_profiles(self, profile_ids: Iterable[str] | None = None) -> None: + """Register profiles from disk into the in-memory registry.""" + candidates = profile_ids if profile_ids is not None else self.list_profiles() + for profile_id in candidates: + try: + llm = self.load_profile(profile_id) + except Exception as exc: # noqa: BLE001 + logger.warning("Failed to load profile %s: %s", profile_id, exc) + continue + + try: + self.add(llm) + except Exception as exc: # noqa: BLE001 + logger.info( + "Skipping profile %s: registry.add failed: %s", profile_id, exc + ) + + def validate_profile(self, data: Mapping[str, Any]) -> tuple[bool, list[str]]: + """Return (is_valid, errors) after validating a profile payload.""" + try: + LLM.model_validate(dict(data)) + except ValidationError as exc: + messages: list[str] = [] + for error in exc.errors(): + loc = ".".join(str(piece) for piece in error.get("loc", ())) + if loc: + messages.append(f"{loc}: {error.get('msg')}") + else: + messages.append(error.get("msg", "Unknown validation error")) + return False, messages + return True, [] + + # ------------------------------------------------------------------ + # Internal helper methods + # ------------------------------------------------------------------ + def _resolve_profile_dir(self, profile_dir: str | Path | None) -> Path: + directory = ( + Path(profile_dir).expanduser() + if profile_dir is not None + else _DEFAULT_PROFILE_DIR + ) + directory.mkdir(parents=True, exist_ok=True) + return directory + + def _load_profile_with_synced_id(self, path: Path, profile_id: str) -> LLM: + llm = LLM.load_from_json(str(path)) + if llm.profile_id 
!= profile_id: + llm = llm.model_copy(update={"profile_id": profile_id}) + return llm diff --git a/openhands/sdk/llm/profile_manager.py b/openhands/sdk/llm/profile_manager.py deleted file mode 100644 index b4a987c14e..0000000000 --- a/openhands/sdk/llm/profile_manager.py +++ /dev/null @@ -1,97 +0,0 @@ -from __future__ import annotations - -import json -import logging -from collections.abc import Mapping -from pathlib import Path -from typing import Any - -from pydantic import SecretStr, ValidationError - -from openhands.sdk.llm.llm import LLM -from openhands.sdk.llm.llm_registry import LLMRegistry - - -logger = logging.getLogger(__name__) - -_SECRET_FIELDS: tuple[str, ...] = ( - "api_key", - "aws_access_key_id", - "aws_secret_access_key", -) - - -class ProfileManager: - """Manage LLM profile files on disk. - - Profiles are stored as JSON files using the existing LLM schema. By default - they live under ``~/.openhands/llm-profiles/.json``. - """ - - def __init__(self, base_dir: str | Path | None = None) -> None: - if base_dir is None: - self.base_dir = Path.home() / ".openhands" / "llm-profiles" - else: - self.base_dir = Path(base_dir).expanduser() - self.base_dir.mkdir(parents=True, exist_ok=True) - - def list_profiles(self) -> list[str]: - return sorted([path.stem for path in self.base_dir.glob("*.json")]) - - def get_profile_path(self, name: str) -> Path: - return self.base_dir / f"{name}.json" - - def load_profile(self, name: str) -> LLM: - path = self.get_profile_path(name) - if not path.exists(): - raise FileNotFoundError(f"Profile not found: {name} -> {path}") - return self._load_profile_with_synced_id(path, name) - - def save_profile(self, name: str, llm: LLM, include_secrets: bool = False) -> Path: - path = self.get_profile_path(name) - data = llm.model_dump(exclude_none=True) - data["profile_id"] = name - if not include_secrets: - for secret_field in _SECRET_FIELDS: - data.pop(secret_field, None) - else: - for secret_field in _SECRET_FIELDS: - value = data.get(secret_field) - if isinstance(value, SecretStr): - data[secret_field] = value.get_secret_value() - with path.open("w", encoding="utf-8") as file: - json.dump(data, file, indent=2, ensure_ascii=False) - logger.info("Saved profile %s -> %s", name, path) - return path - - def register_all(self, registry: LLMRegistry) -> None: - for name in self.list_profiles(): - try: - llm = self.load_profile(name) - except Exception as exc: # noqa: BLE001 - log and continue - logger.warning("Failed to load profile %s: %s", name, exc) - continue - try: - registry.add(llm) - except Exception as exc: # noqa: BLE001 - registry enforces its own invariants - logger.info("Skipping profile %s: registry.add failed: %s", name, exc) - - def validate_profile(self, data: Mapping[str, Any]) -> tuple[bool, list[str]]: - try: - LLM.model_validate(dict(data)) - except ValidationError as exc: - messages: list[str] = [] - for error in exc.errors(): - loc = ".".join(str(piece) for piece in error.get("loc", ())) - if loc: - messages.append(f"{loc}: {error.get('msg')}") - else: - messages.append(error.get("msg", "Unknown validation error")) - return False, messages - return True, [] - - def _load_profile_with_synced_id(self, path: Path, name: str) -> LLM: - llm = LLM.load_from_json(str(path)) - if llm.profile_id != name: - llm = llm.model_copy(update={"profile_id": name}) - return llm diff --git a/openhands/sdk/utils/agent_settings.py b/openhands/sdk/utils/agent_settings.py index 3047b8cd58..a1213f94e5 100644 --- a/openhands/sdk/utils/agent_settings.py +++ 
b/openhands/sdk/utils/agent_settings.py @@ -11,7 +11,7 @@ expand_profiles_in_payload, prepare_payload_for_persistence, ) -from openhands.sdk.llm.profile_manager import ProfileManager +from openhands.sdk.llm.llm_registry import LLMRegistry DEFAULT_AGENT_SETTINGS_PATH = Path.home() / ".openhands" / "agent_settings.json" @@ -21,7 +21,7 @@ def load_agent_settings( path: Path | str | None = None, *, inline: bool | None = None, - profile_manager: ProfileManager | None = None, + llm_registry: LLMRegistry | None = None, ) -> dict[str, Any]: """Load agent settings from ``path`` applying profile expansion.""" @@ -31,7 +31,7 @@ def load_agent_settings( return expand_profiles_in_payload( payload, inline=inline, - profile_manager=profile_manager, + llm_registry=llm_registry, ) diff --git a/tests/sdk/conversation/local/test_state_serialization.py b/tests/sdk/conversation/local/test_state_serialization.py index c4954e408c..b236c248c2 100644 --- a/tests/sdk/conversation/local/test_state_serialization.py +++ b/tests/sdk/conversation/local/test_state_serialization.py @@ -14,8 +14,7 @@ from openhands.sdk.conversation.state import AgentExecutionStatus, ConversationState from openhands.sdk.event.llm_convertible import MessageEvent, SystemPromptEvent from openhands.sdk.llm import LLM, Message, TextContent -from openhands.sdk.llm.llm_registry import RegistryEvent -from openhands.sdk.llm.profile_manager import ProfileManager +from openhands.sdk.llm.llm_registry import LLMRegistry, RegistryEvent from openhands.sdk.security.confirmation_policy import AlwaysConfirm from openhands.sdk.workspace import LocalWorkspace @@ -150,11 +149,11 @@ def test_conversation_state_profile_reference_mode(tmp_path, monkeypatch): monkeypatch.setenv("HOME", str(home_dir)) monkeypatch.setenv("OPENHANDS_INLINE_CONVERSATIONS", "false") - manager = ProfileManager() + registry = LLMRegistry() llm = LLM(model="litellm_proxy/openai/gpt-5-mini", service_id="agent") - manager.save_profile("profile-tests", llm) + registry.save_profile("profile-tests", llm) - agent = Agent(llm=manager.load_profile("profile-tests"), tools=[]) + agent = Agent(llm=registry.load_profile("profile-tests"), tools=[]) conv_id = uuid.UUID("12345678-1234-5678-9abc-1234567890ff") persistence_root = tmp_path / "conv" persistence_dir = LocalConversation.get_persistence_dir(persistence_root, conv_id) @@ -190,10 +189,10 @@ def test_conversation_state_inline_mode_errors_on_profile_reference( monkeypatch.setenv("HOME", str(home_dir)) monkeypatch.setenv("OPENHANDS_INLINE_CONVERSATIONS", "false") - manager = ProfileManager() + registry = LLMRegistry() llm = LLM(model="litellm_proxy/openai/gpt-5-mini", service_id="agent") - manager.save_profile("profile-inline", llm) - agent = Agent(llm=manager.load_profile("profile-inline"), tools=[]) + registry.save_profile("profile-inline", llm) + agent = Agent(llm=registry.load_profile("profile-inline"), tools=[]) conv_id = uuid.UUID("12345678-1234-5678-9abc-1234567890aa") persistence_root = tmp_path / "conv" diff --git a/tests/sdk/llm/test_profile_manager.py b/tests/sdk/llm/test_llm_registry_profiles.py similarity index 71% rename from tests/sdk/llm/test_profile_manager.py rename to tests/sdk/llm/test_llm_registry_profiles.py index 0ff8aa918c..cbecb8ae51 100644 --- a/tests/sdk/llm/test_profile_manager.py +++ b/tests/sdk/llm/test_llm_registry_profiles.py @@ -4,19 +4,18 @@ from openhands.sdk.llm.llm import LLM from openhands.sdk.llm.llm_registry import LLMRegistry -from openhands.sdk.llm.profile_manager import ProfileManager def 
test_list_profiles_returns_sorted_names(tmp_path): - manager = ProfileManager(base_dir=tmp_path) + registry = LLMRegistry(profile_dir=tmp_path) (tmp_path / "b.json").write_text("{}", encoding="utf-8") (tmp_path / "a.json").write_text("{}", encoding="utf-8") - assert manager.list_profiles() == ["a", "b"] + assert registry.list_profiles() == ["a", "b"] def test_save_profile_excludes_secret_fields(tmp_path): - manager = ProfileManager(base_dir=tmp_path) + registry = LLMRegistry(profile_dir=tmp_path) llm = LLM( model="gpt-4o-mini", service_id="service", @@ -25,7 +24,7 @@ def test_save_profile_excludes_secret_fields(tmp_path): aws_secret_access_key=SecretStr("value"), ) - path = manager.save_profile("sample", llm) + path = registry.save_profile("sample", llm) data = json.loads(path.read_text(encoding="utf-8")) assert data["profile_id"] == "sample" @@ -36,7 +35,7 @@ def test_save_profile_excludes_secret_fields(tmp_path): def test_save_profile_can_include_secret_fields(tmp_path): - manager = ProfileManager(base_dir=tmp_path) + registry = LLMRegistry(profile_dir=tmp_path) llm = LLM( model="gpt-4o-mini", service_id="service", @@ -45,7 +44,7 @@ def test_save_profile_can_include_secret_fields(tmp_path): aws_secret_access_key=SecretStr("value"), ) - path = manager.save_profile("sample", llm, include_secrets=True) + path = registry.save_profile("sample", llm, include_secrets=True) data = json.loads(path.read_text(encoding="utf-8")) assert data["api_key"] == "secret" @@ -54,25 +53,24 @@ def test_save_profile_can_include_secret_fields(tmp_path): def test_load_profile_assigns_profile_id_when_missing(tmp_path): - manager = ProfileManager(base_dir=tmp_path) + registry = LLMRegistry(profile_dir=tmp_path) profile_path = tmp_path / "foo.json" profile_path.write_text( json.dumps({"model": "gpt-4o-mini", "service_id": "svc"}), encoding="utf-8", ) - llm = manager.load_profile("foo") + llm = registry.load_profile("foo") assert llm.profile_id == "foo" assert llm.service_id == "svc" -def test_register_all_skips_invalid_and_duplicate_profiles(tmp_path): - manager = ProfileManager(base_dir=tmp_path) - registry = LLMRegistry() +def test_register_profiles_skips_invalid_and_duplicate_profiles(tmp_path): + registry = LLMRegistry(profile_dir=tmp_path) llm = LLM(model="gpt-4o-mini", service_id="shared") - manager.save_profile("alpha", llm) + registry.save_profile("alpha", llm) duplicate_data = llm.model_dump(exclude_none=True) duplicate_data["profile_id"] = "beta" @@ -83,18 +81,20 @@ def test_register_all_skips_invalid_and_duplicate_profiles(tmp_path): (tmp_path / "gamma.json").write_text("{", encoding="utf-8") - manager.register_all(registry) + registry.register_profiles() assert registry.list_services() == ["shared"] def test_validate_profile_reports_errors(tmp_path): - manager = ProfileManager(base_dir=tmp_path) + registry = LLMRegistry(profile_dir=tmp_path) - ok, errors = manager.validate_profile({"model": "gpt-4o-mini", "service_id": "svc"}) + ok, errors = registry.validate_profile( + {"model": "gpt-4o-mini", "service_id": "svc"} + ) assert ok assert errors == [] - ok, errors = manager.validate_profile({"service_id": "svc"}) + ok, errors = registry.validate_profile({"service_id": "svc"}) assert not ok assert any("model" in message for message in errors) diff --git a/tests/sdk/utils/test_agent_settings.py b/tests/sdk/utils/test_agent_settings.py index 353aa0aff2..bebe1e531b 100644 --- a/tests/sdk/utils/test_agent_settings.py +++ b/tests/sdk/utils/test_agent_settings.py @@ -5,7 +5,7 @@ import json from openhands.sdk.llm 
import LLM -from openhands.sdk.llm.profile_manager import ProfileManager +from openhands.sdk.llm.llm_registry import LLMRegistry from openhands.sdk.utils.agent_settings import load_agent_settings, save_agent_settings @@ -30,13 +30,13 @@ def test_agent_settings_profile_reference_mode(tmp_path, monkeypatch): monkeypatch.setenv("HOME", str(home_dir)) monkeypatch.setenv("OPENHANDS_INLINE_CONVERSATIONS", "false") - manager = ProfileManager() + registry = LLMRegistry() profile_name = "settings-profile" - manager.save_profile( + registry.save_profile( profile_name, LLM(model="litellm_proxy/openai/gpt-5-mini", service_id="agent") ) - llm = manager.load_profile(profile_name) + llm = registry.load_profile(profile_name) settings_path = tmp_path / "agent_settings.json" settings = {"agent": {"llm": llm.model_dump(exclude_none=True)}} From 1566df45f1b397d8ba43e14fd546dbb0971932a9 Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Sun, 19 Oct 2025 17:40:22 +0200 Subject: [PATCH 17/37] docs: clarify LLMRegistry profile guidance - note that LLMRegistry is the unified entry point for disk and runtime profiles - mention how to override the profile directory when embedding the SDK Co-authored-by: openhands --- docs/llm_profiles.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/llm_profiles.md b/docs/llm_profiles.md index 012e5197c5..299df3e202 100644 --- a/docs/llm_profiles.md +++ b/docs/llm_profiles.md @@ -21,6 +21,7 @@ LLMRegistry profile API (summary) Implementation notes +- LLMRegistry is the single entry point for both in-memory registration and on-disk profile persistence. Pass ``profile_dir`` to the constructor to override the default location when embedding the SDK. - Use LLM.load_from_json(path) for loading and llm.model_dump(exclude_none=True) for saving. - Default directory: os.path.expanduser('~/.openhands/llm-profiles/') - When loading, do not inject secrets. The runtime should reconcile secrets via ConversationState/Agent resolve_diff_from_deserialized or via SecretsManager. From 8f8b5b9bf607243f459d38b2b87d717a99d047c6 Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Sun, 19 Oct 2025 18:55:48 +0200 Subject: [PATCH 18/37] refactor: rename profile persistence helpers - rename payload helpers to resolve_llm_profiles/compact_llm_profiles - update conversation state to use clearer helper names - drop the optional agent_settings convenience module and its tests Co-authored-by: openhands --- .../sdk/conversation/persistence_utils.py | 46 ++++++++--------- openhands/sdk/conversation/state.py | 8 +-- openhands/sdk/utils/agent_settings.py | 51 ------------------- tests/sdk/utils/test_agent_settings.py | 50 ------------------ 4 files changed, 26 insertions(+), 129 deletions(-) delete mode 100644 openhands/sdk/utils/agent_settings.py delete mode 100644 tests/sdk/utils/test_agent_settings.py diff --git a/openhands/sdk/conversation/persistence_utils.py b/openhands/sdk/conversation/persistence_utils.py index 0014e5cfa5..bb2affbc2b 100644 --- a/openhands/sdk/conversation/persistence_utils.py +++ b/openhands/sdk/conversation/persistence_utils.py @@ -20,56 +20,54 @@ def should_inline_conversations() -> bool: return value not in _FALSE_VALUES -def prepare_payload_for_persistence( - payload: Mapping[str, Any], *, inline: bool | None = None +def compact_llm_profiles( + data: Mapping[str, Any], *, inline: bool | None = None ) -> dict[str, Any]: - """Return a payload ready to be written to disk. + """Return a mapping ready to be persisted to disk. 
When ``inline`` is False and an LLM dict contains ``profile_id``, the body is
-    replaced with ``{"profile_id": <profile_id>}``. Otherwise the payload is left intact.
+    replaced with ``{"profile_id": <profile_id>}``. Otherwise the structure is left intact.
     """
 
     inline_mode = should_inline_conversations() if inline is None else inline
-    return _transform(payload, inline=inline_mode, deserialize=False, llm_registry=None)
+    return _transform(data, inline=inline_mode, deserialize=False, llm_registry=None)
 
 
-def expand_profiles_in_payload(
-    payload: Mapping[str, Any],
+def resolve_llm_profiles(
+    data: Mapping[str, Any],
     *,
     inline: bool | None = None,
     llm_registry: LLMRegistry | None = None,
 ) -> dict[str, Any]:
-    """Expand persisted payload back into inline LLM dictionaries."""
+    """Expand stored profile references back into inline LLM dictionaries."""
 
     inline_mode = should_inline_conversations() if inline is None else inline
     registry = llm_registry or LLMRegistry()
-    return _transform(
-        payload, inline=inline_mode, deserialize=True, llm_registry=registry
-    )
+    return _transform(data, inline=inline_mode, deserialize=True, llm_registry=registry)
 
 
 def _transform(
-    payload: Mapping[str, Any] | list[Any],
+    data: Mapping[str, Any] | list[Any],
     *,
     inline: bool,
     deserialize: bool,
     llm_registry: LLMRegistry | None,
 ) -> Any:
-    if isinstance(payload, Mapping):
-        data = {
+    if isinstance(data, Mapping):
+        expanded = {
             key: _transform(
                 value,
                 inline=inline,
                 deserialize=deserialize,
                 llm_registry=llm_registry,
             )
-            for key, value in payload.items()
+            for key, value in data.items()
         }
 
         if deserialize:
-            if _is_profile_reference(data):
+            if _is_profile_reference(expanded):
                 if inline:
-                    profile_id = data["profile_id"]
+                    profile_id = expanded["profile_id"]
                     raise ValueError(
                         "Encountered profile reference for LLM while "
                         "OPENHANDS_INLINE_CONVERSATIONS is enabled. "
                         "Inline the profile or set "
                         "OPENHANDS_INLINE_CONVERSATIONS=false."
                     )
                 assert llm_registry is not None
-                profile_id = data["profile_id"]
+                profile_id = expanded["profile_id"]
                 llm = llm_registry.load_profile(profile_id)
                 llm_dict = llm.model_dump(exclude_none=True)
                 llm_dict["profile_id"] = profile_id
                 return _transform(
                     llm_dict,
                     inline=inline,
                     deserialize=True,
                     llm_registry=llm_registry,
                 )
         else:
-            if not inline and _is_llm_dict(data):
-                profile_id = data.get("profile_id")
+            if not inline and _is_llm_dict(expanded):
+                profile_id = expanded.get("profile_id")
                 if profile_id:
                     return {"profile_id": profile_id}
-        return data
+        return expanded
 
-    if isinstance(payload, list):
+    if isinstance(data, list):
         return [
             _transform(
                 item,
                 inline=inline,
                 deserialize=deserialize,
                 llm_registry=llm_registry,
             )
-            for item in payload
+            for item in data
         ]
 
-    return payload
+    return data
 
 
 def _is_llm_dict(value: Mapping[str, Any]) -> bool:
diff --git a/openhands/sdk/conversation/state.py b/openhands/sdk/conversation/state.py
index 6671ad9446..25454e16fc 100644
--- a/openhands/sdk/conversation/state.py
+++ b/openhands/sdk/conversation/state.py
@@ -12,8 +12,8 @@
 from openhands.sdk.conversation.fifo_lock import FIFOLock
 from openhands.sdk.conversation.persistence_const import BASE_STATE, EVENTS_DIR
 from openhands.sdk.conversation.persistence_utils import (
-    expand_profiles_in_payload,
-    prepare_payload_for_persistence,
+    compact_llm_profiles,
+    resolve_llm_profiles,
     should_inline_conversations,
 )
 from openhands.sdk.conversation.secrets_manager import SecretsManager
@@ -139,7 +139,7 @@ def _save_base_state(self, fs: FileStore) -> None:
         Persist base state snapshot (no events; events are file-backed).
""" inline_mode = should_inline_conversations() - payload = prepare_payload_for_persistence( + payload = compact_llm_profiles( self.model_dump(mode="json", exclude_none=True), inline=inline_mode ) fs.write(BASE_STATE, json.dumps(payload)) @@ -174,7 +174,7 @@ def create( # ---- Resume path ---- if base_text: raw_payload = json.loads(base_text) - payload = expand_profiles_in_payload(raw_payload, inline=inline_mode) + payload = resolve_llm_profiles(raw_payload, inline=inline_mode) state = cls.model_validate(payload) # Enforce conversation id match diff --git a/openhands/sdk/utils/agent_settings.py b/openhands/sdk/utils/agent_settings.py deleted file mode 100644 index a1213f94e5..0000000000 --- a/openhands/sdk/utils/agent_settings.py +++ /dev/null @@ -1,51 +0,0 @@ -"""Utilities for reading and writing agent_settings.json.""" - -from __future__ import annotations - -import json -from collections.abc import Mapping -from pathlib import Path -from typing import Any - -from openhands.sdk.conversation.persistence_utils import ( - expand_profiles_in_payload, - prepare_payload_for_persistence, -) -from openhands.sdk.llm.llm_registry import LLMRegistry - - -DEFAULT_AGENT_SETTINGS_PATH = Path.home() / ".openhands" / "agent_settings.json" - - -def load_agent_settings( - path: Path | str | None = None, - *, - inline: bool | None = None, - llm_registry: LLMRegistry | None = None, -) -> dict[str, Any]: - """Load agent settings from ``path`` applying profile expansion.""" - - settings_path = Path(path) if path is not None else DEFAULT_AGENT_SETTINGS_PATH - with settings_path.open("r", encoding="utf-8") as fh: - payload = json.load(fh) - return expand_profiles_in_payload( - payload, - inline=inline, - llm_registry=llm_registry, - ) - - -def save_agent_settings( - settings: Mapping[str, Any], - path: Path | str | None = None, - *, - inline: bool | None = None, -) -> Path: - """Persist ``settings`` to disk, returning the destination path.""" - - settings_path = Path(path) if path is not None else DEFAULT_AGENT_SETTINGS_PATH - settings_path.parent.mkdir(parents=True, exist_ok=True) - payload = prepare_payload_for_persistence(settings, inline=inline) - with settings_path.open("w", encoding="utf-8") as fh: - json.dump(payload, fh, indent=2, ensure_ascii=False) - return settings_path diff --git a/tests/sdk/utils/test_agent_settings.py b/tests/sdk/utils/test_agent_settings.py deleted file mode 100644 index bebe1e531b..0000000000 --- a/tests/sdk/utils/test_agent_settings.py +++ /dev/null @@ -1,50 +0,0 @@ -"""Tests for agent settings helpers.""" - -from __future__ import annotations - -import json - -from openhands.sdk.llm import LLM -from openhands.sdk.llm.llm_registry import LLMRegistry -from openhands.sdk.utils.agent_settings import load_agent_settings, save_agent_settings - - -def test_agent_settings_inline_default(tmp_path, monkeypatch): - monkeypatch.setenv("HOME", str(tmp_path / "home")) - settings_path = tmp_path / "agent_settings.json" - - llm = LLM(model="gpt-4o-mini", service_id="agent") - settings = {"agent": {"llm": llm.model_dump(exclude_none=True)}} - - save_agent_settings(settings, settings_path) - - stored = json.loads(settings_path.read_text(encoding="utf-8")) - assert stored["agent"]["llm"]["model"] == "gpt-4o-mini" - - loaded = load_agent_settings(settings_path) - assert loaded["agent"]["llm"]["model"] == "gpt-4o-mini" - - -def test_agent_settings_profile_reference_mode(tmp_path, monkeypatch): - home_dir = tmp_path / "home" - monkeypatch.setenv("HOME", str(home_dir)) - 
monkeypatch.setenv("OPENHANDS_INLINE_CONVERSATIONS", "false") - - registry = LLMRegistry() - profile_name = "settings-profile" - registry.save_profile( - profile_name, LLM(model="litellm_proxy/openai/gpt-5-mini", service_id="agent") - ) - - llm = registry.load_profile(profile_name) - settings_path = tmp_path / "agent_settings.json" - settings = {"agent": {"llm": llm.model_dump(exclude_none=True)}} - - save_agent_settings(settings, settings_path) - - stored = json.loads(settings_path.read_text(encoding="utf-8")) - assert stored["agent"]["llm"] == {"profile_id": profile_name} - - loaded = load_agent_settings(settings_path) - assert loaded["agent"]["llm"]["profile_id"] == profile_name - assert loaded["agent"]["llm"]["model"] == "litellm_proxy/openai/gpt-5-mini" From a3efa6e74daaeabe5798fd184962969b54cb5c41 Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Sun, 19 Oct 2025 19:03:31 +0200 Subject: [PATCH 19/37] refactor: split profile transform helpers - replace the _transform flag with dedicated _compact/_resolve helpers - make compact_llm_profiles/resolve_llm_profiles easier to follow by delegating to the new helpers Co-authored-by: openhands --- .../sdk/conversation/persistence_utils.py | 86 +++++++++---------- 1 file changed, 39 insertions(+), 47 deletions(-) diff --git a/openhands/sdk/conversation/persistence_utils.py b/openhands/sdk/conversation/persistence_utils.py index bb2affbc2b..f8b5ad2178 100644 --- a/openhands/sdk/conversation/persistence_utils.py +++ b/openhands/sdk/conversation/persistence_utils.py @@ -30,7 +30,7 @@ def compact_llm_profiles( """ inline_mode = should_inline_conversations() if inline is None else inline - return _transform(data, inline=inline_mode, deserialize=False, llm_registry=None) + return _compact(data, inline=inline_mode) def resolve_llm_profiles( @@ -43,67 +43,59 @@ def resolve_llm_profiles( inline_mode = should_inline_conversations() if inline is None else inline registry = llm_registry or LLMRegistry() - return _transform(data, inline=inline_mode, deserialize=True, llm_registry=registry) + return _resolve(data, inline=inline_mode, llm_registry=registry) -def _transform( - data: Mapping[str, Any] | list[Any], +def _compact(value: Mapping[str, Any] | list[Any] | Any, *, inline: bool) -> Any: + if isinstance(value, Mapping): + compacted = {key: _compact(item, inline=inline) for key, item in value.items()} + if not inline and _is_llm_dict(compacted): + profile_id = compacted.get("profile_id") + if profile_id: + return {"profile_id": profile_id} + return compacted + + if isinstance(value, list): + return [_compact(item, inline=inline) for item in value] + + return value + + +def _resolve( + value: Mapping[str, Any] | list[Any] | Any, *, inline: bool, - deserialize: bool, - llm_registry: LLMRegistry | None, + llm_registry: LLMRegistry, ) -> Any: - if isinstance(data, Mapping): + if isinstance(value, Mapping): expanded = { - key: _transform( - value, - inline=inline, - deserialize=deserialize, - llm_registry=llm_registry, - ) - for key, value in data.items() + key: _resolve(item, inline=inline, llm_registry=llm_registry) + for key, item in value.items() } - if deserialize: - if _is_profile_reference(expanded): - if inline: - profile_id = expanded["profile_id"] - raise ValueError( - "Encountered profile reference for LLM while " - "OPENHANDS_INLINE_CONVERSATIONS is enabled. " - "Inline the profile or set " - "OPENHANDS_INLINE_CONVERSATIONS=false." 
- ) - assert llm_registry is not None + if _is_profile_reference(expanded): + if inline: profile_id = expanded["profile_id"] - llm = llm_registry.load_profile(profile_id) - llm_dict = llm.model_dump(exclude_none=True) - llm_dict["profile_id"] = profile_id - return _transform( - llm_dict, - inline=inline, - deserialize=True, - llm_registry=llm_registry, + raise ValueError( + "Encountered profile reference for LLM while " + "OPENHANDS_INLINE_CONVERSATIONS is enabled. " + "Inline the profile or set " + "OPENHANDS_INLINE_CONVERSATIONS=false." ) - else: - if not inline and _is_llm_dict(expanded): - profile_id = expanded.get("profile_id") - if profile_id: - return {"profile_id": profile_id} + profile_id = expanded["profile_id"] + llm = llm_registry.load_profile(profile_id) + llm_dict = llm.model_dump(exclude_none=True) + llm_dict["profile_id"] = profile_id + return _resolve(llm_dict, inline=inline, llm_registry=llm_registry) + return expanded - if isinstance(data, list): + if isinstance(value, list): return [ - _transform( - item, - inline=inline, - deserialize=deserialize, - llm_registry=llm_registry, - ) - for item in data + _resolve(item, inline=inline, llm_registry=llm_registry) for item in value ] - return data + return value def _is_llm_dict(value: Mapping[str, Any]) -> bool: From 17617aace287f42a9dcd2380cf0cc103f8d05be7 Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Sun, 19 Oct 2025 19:07:42 +0200 Subject: [PATCH 20/37] style: use f-strings in LLMRegistry logging --- openhands/sdk/llm/llm_registry.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/openhands/sdk/llm/llm_registry.py b/openhands/sdk/llm/llm_registry.py index 6c9ef104cb..31c6e9b15b 100644 --- a/openhands/sdk/llm/llm_registry.py +++ b/openhands/sdk/llm/llm_registry.py @@ -91,7 +91,7 @@ def add(self, llm: LLM) -> None: self.service_to_llm[service_id] = llm self.notify(RegistryEvent(llm=llm)) logger.info( - "[LLM registry %s]: Added LLM for service %s", self.registry_id, service_id + f"[LLM registry {self.registry_id}]: Added LLM for service {service_id}" ) def get(self, service_id: str) -> LLM: @@ -103,9 +103,7 @@ def get(self, service_id: str) -> LLM: ) logger.info( - "[LLM registry %s]: Retrieved LLM for service %s", - self.registry_id, - service_id, + f"[LLM registry {self.registry_id}]: Retrieved LLM for service {service_id}" ) return self.service_to_llm[service_id] @@ -156,7 +154,7 @@ def save_profile( with path.open("w", encoding="utf-8") as handle: json.dump(data, handle, indent=2, ensure_ascii=False) - logger.info("Saved profile %s -> %s", profile_id, path) + logger.info(f"Saved profile {profile_id} -> {path}") return path def register_profiles(self, profile_ids: Iterable[str] | None = None) -> None: @@ -166,14 +164,14 @@ def register_profiles(self, profile_ids: Iterable[str] | None = None) -> None: try: llm = self.load_profile(profile_id) except Exception as exc: # noqa: BLE001 - logger.warning("Failed to load profile %s: %s", profile_id, exc) + logger.warning(f"Failed to load profile {profile_id}: {exc}") continue try: self.add(llm) except Exception as exc: # noqa: BLE001 logger.info( - "Skipping profile %s: registry.add failed: %s", profile_id, exc + f"Skipping profile {profile_id}: registry.add failed: {exc}" ) def validate_profile(self, data: Mapping[str, Any]) -> tuple[bool, list[str]]: From 9134aa1c3243dadbee54b5151b1e6a0c6f463d15 Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Sun, 19 Oct 2025 19:30:06 +0200 Subject: [PATCH 21/37] Update openhands/sdk/llm/llm_registry.py --- 
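Note: `_load_profile_with_synced_id` means the filename stem always wins over
whatever `profile_id` is stored inside the JSON payload, so renaming a profile
file on disk effectively renames the profile. A minimal sketch of the behavior
the new comment documents (directory and names are illustrative):

    from openhands.sdk.llm.llm import LLM
    from openhands.sdk.llm.llm_registry import LLMRegistry

    registry = LLMRegistry(profile_dir="/tmp/profiles")
    path = registry.save_profile("staging", LLM(model="gpt-4o-mini", service_id="agent"))

    # The stored payload still says "staging", but the filename stem wins on load.
    path = path.rename(path.with_stem("prod"))
    assert registry.load_profile("prod").profile_id == "prod"
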
openhands/sdk/llm/llm_registry.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/openhands/sdk/llm/llm_registry.py b/openhands/sdk/llm/llm_registry.py index 31c6e9b15b..4e5a5bc983 100644 --- a/openhands/sdk/llm/llm_registry.py +++ b/openhands/sdk/llm/llm_registry.py @@ -204,5 +204,7 @@ def _resolve_profile_dir(self, profile_dir: str | Path | None) -> Path: def _load_profile_with_synced_id(self, path: Path, profile_id: str) -> LLM: llm = LLM.load_from_json(str(path)) if llm.profile_id != profile_id: + # force llm.profile_id to carry the correct name + # (the name of the .json we loaded it from) llm = llm.model_copy(update={"profile_id": profile_id}) return llm From 36ab58049e23a85773394393edd4f65b3633532c Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Sun, 19 Oct 2025 20:30:32 +0200 Subject: [PATCH 22/37] chore: stop tracking scripts/worktree.sh --- .gitignore | 1 + scripts/worktree.sh | 158 -------------------------------------------- 2 files changed, 1 insertion(+), 158 deletions(-) delete mode 100755 scripts/worktree.sh diff --git a/.gitignore b/.gitignore index 4011780b8f..399a0fc176 100644 --- a/.gitignore +++ b/.gitignore @@ -208,3 +208,4 @@ openapi.json *.db .worktrees/ *.code-workspace +scripts/worktree.sh diff --git a/scripts/worktree.sh b/scripts/worktree.sh deleted file mode 100755 index 2b517a1ce3..0000000000 --- a/scripts/worktree.sh +++ /dev/null @@ -1,158 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -usage() { - cat <<'EOF' -Usage: $(basename "$0") [options] - -Commands: - list Show all worktrees managed by this repository - path Print the filesystem path for a worktree branch - create [start] Create a new worktree in ${WORKTREES_DIR:-.worktrees}/ - remove Remove the worktree for - prune Run 'git worktree prune' - open Open the worktree in VS Code (requires 'code' on PATH) - -Environment variables: - WORKTREES_DIR Override worktree base directory (default: /.worktrees) - DEFAULT_BASE Default branch/commit used when creating a new branch (default: origin/main) -EOF -} - -err() { - printf 'Error: %s\n' "$1" >&2 - exit 1 -} - -require_branch_name() { - if [[ -z "${1:-}" ]]; then - err "missing branch name" - fi -} - -resolve_repo_root() { - git rev-parse --show-toplevel 2>/dev/null || err "not inside a git repository" -} - -worktree_path_for_branch() { - local branch=$1 - printf '%s/%s' "$WORKTREES_DIR" "$branch" -} - -branch_exists() { - git rev-parse --verify --quiet "refs/heads/$1" >/dev/null -} - -branch_checked_out_elsewhere() { - local branch=$1 - git worktree list --porcelain | awk -v b="refs/heads/$branch" ' - $1 == "branch" && $2 == b { found = 1 } - END { exit found ? 0 : 1 } - ' -} - -create_worktree() { - local branch=$1 - local start_ref=${2:-$DEFAULT_BASE} - local path - path=$(worktree_path_for_branch "$branch") - - if [[ -d "$path" ]]; then - err "target path $path already exists" - fi - - if branch_checked_out_elsewhere "$branch"; then - err "branch $branch is already checked out in another worktree" - fi - - mkdir -p "$WORKTREES_DIR" - - if branch_exists "$branch"; then - git worktree add "$path" "$branch" - else - git worktree add -b "$branch" "$path" "$start_ref" - fi -} - -remove_worktree() { - local branch=$1 - local path - path=$(worktree_path_for_branch "$branch") - - if [[ ! -d "$path" ]]; then - err "no worktree directory found for branch $branch at $path" - fi - - git worktree remove "$path" -} - -open_in_vscode() { - local branch=$1 - local path - path=$(worktree_path_for_branch "$branch") - - if [[ ! 
-d "$path" ]]; then - err "no worktree directory found for branch $branch at $path" - fi - - if ! command -v code >/dev/null 2>&1; then - err "'code' executable not found on PATH" - fi - - code "$path" -} - -list_worktrees() { - git worktree list -} - -main() { - local repo_root - repo_root=$(resolve_repo_root) - cd "$repo_root" - - WORKTREES_DIR=${WORKTREES_DIR:-"$repo_root/.worktrees"} - DEFAULT_BASE=${DEFAULT_BASE:-origin/main} - - local command=${1:-} - - case "$command" in - list) - shift - list_worktrees "$@" - ;; - path) - shift - require_branch_name "${1:-}" - worktree_path_for_branch "$1" - ;; - create) - shift - require_branch_name "${1:-}" - create_worktree "$1" "${2:-}" - ;; - remove) - shift - require_branch_name "${1:-}" - remove_worktree "$1" - ;; - prune) - shift - git worktree prune "$@" - ;; - open) - shift - require_branch_name "${1:-}" - open_in_vscode "$1" - ;; - -h|--help|help|"") - usage - ;; - *) - err "unknown command: $command" - ;; - esac -} - -main "$@" From 12eec55938f3aa139154d0f6157d80f32aac604a Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Tue, 21 Oct 2025 22:34:49 +0200 Subject: [PATCH 23/37] fix: remove runtime llm switching Revert the in-progress switch_llm helpers and tests; agent-sdk-18 branch now only contains LLM profile persistence. --- openhands-sdk/openhands/sdk/agent/base.py | 7 -- .../conversation/impl/local_conversation.py | 12 --- .../openhands/sdk/conversation/state.py | 25 ----- .../openhands/sdk/llm/llm_registry.py | 22 ---- .../local/test_state_serialization.py | 101 ------------------ tests/sdk/llm/test_llm_registry_profiles.py | 39 ------- 6 files changed, 206 deletions(-) diff --git a/openhands-sdk/openhands/sdk/agent/base.py b/openhands-sdk/openhands/sdk/agent/base.py index 609793c873..9a2d13a2d0 100644 --- a/openhands-sdk/openhands/sdk/agent/base.py +++ b/openhands-sdk/openhands/sdk/agent/base.py @@ -327,13 +327,6 @@ def model_dump_succint(self, **kwargs): dumped["tools"] = list(dumped["tools"].keys()) return dumped - def _clone_with_llm(self, llm: LLM) -> "AgentBase": - """Return a copy of this agent with ``llm`` swapped in.""" - - clone = self.model_copy(update={"llm": llm}) - clone._tools = dict(self._tools) - return clone - def get_all_llms(self) -> Generator[LLM, None, None]: """Recursively yield unique *base-class* LLM objects reachable from `self`. diff --git a/openhands-sdk/openhands/sdk/conversation/impl/local_conversation.py b/openhands-sdk/openhands/sdk/conversation/impl/local_conversation.py index aefb905d1e..a4c4c76752 100644 --- a/openhands-sdk/openhands/sdk/conversation/impl/local_conversation.py +++ b/openhands-sdk/openhands/sdk/conversation/impl/local_conversation.py @@ -160,18 +160,6 @@ def stuck_detector(self) -> StuckDetector | None: """Get the stuck detector instance if enabled.""" return self._stuck_detector - def switch_llm(self, profile_id: str) -> None: - """Switch the active agent LLM to ``profile_id`` at runtime.""" - - with self._state: - self._state.switch_agent_llm(profile_id, registry=self.llm_registry) - self.agent = self._state.agent - logger.info( - "Switched conversation %s to profile %s", - self._state.id, - profile_id, - ) - def send_message(self, message: str | Message) -> None: """Send a message to the agent. 
diff --git a/openhands-sdk/openhands/sdk/conversation/state.py b/openhands-sdk/openhands/sdk/conversation/state.py index bd70a792a0..d900d92d2c 100644 --- a/openhands-sdk/openhands/sdk/conversation/state.py +++ b/openhands-sdk/openhands/sdk/conversation/state.py @@ -21,7 +21,6 @@ from openhands.sdk.event import ActionEvent, ObservationEvent, UserRejectObservation from openhands.sdk.event.base import Event from openhands.sdk.io import FileStore, InMemoryFileStore, LocalFileStore -from openhands.sdk.llm.llm_registry import LLMRegistry from openhands.sdk.logger import get_logger from openhands.sdk.security.confirmation_policy import ( ConfirmationPolicyBase, @@ -273,30 +272,6 @@ def __setattr__(self, name, value): f"State change callback failed for field {name}", exc_info=True ) - def switch_agent_llm(self, profile_id: str, *, registry: LLMRegistry) -> None: - """Swap the agent's primary LLM to ``profile_id`` using ``registry``.""" - - if should_inline_conversations(): - raise RuntimeError( - "LLM switching requires OPENHANDS_INLINE_CONVERSATIONS to be false." - ) - - if self.agent_status not in ( - AgentExecutionStatus.IDLE, - AgentExecutionStatus.FINISHED, - ): - raise RuntimeError("Agent must be idle before switching LLM profiles.") - - usage_id = self.agent.llm.usage_id - try: - new_llm = registry.switch_profile(usage_id, profile_id) - except (FileNotFoundError, KeyError) as exc: - raise ValueError(str(exc)) from exc - - self.agent = self.agent._clone_with_llm(new_llm) - if self.agent_status == AgentExecutionStatus.FINISHED: - self.agent_status = AgentExecutionStatus.IDLE - @staticmethod def get_unmatched_actions(events: Sequence[Event]) -> list[ActionEvent]: """Find actions in the event history that don't have matching observations. diff --git a/openhands-sdk/openhands/sdk/llm/llm_registry.py b/openhands-sdk/openhands/sdk/llm/llm_registry.py index ff91d226ce..fcef27007e 100644 --- a/openhands-sdk/openhands/sdk/llm/llm_registry.py +++ b/openhands-sdk/openhands/sdk/llm/llm_registry.py @@ -183,28 +183,6 @@ def register_profiles(self, profile_ids: Iterable[str] | None = None) -> None: f"Skipping profile {profile_id}: registry.add failed: {exc}" ) - def switch_profile(self, usage_id: str, profile_id: str) -> LLM: - """Replace ``usage_id``'s active LLM with ``profile_id`` and return it.""" - - if usage_id not in self._usage_to_llm: - raise KeyError(f"Usage ID '{usage_id}' not found in registry") - - current_llm = self._usage_to_llm[usage_id] - if current_llm.profile_id == profile_id: - return current_llm - - llm = self.load_profile(profile_id) - llm = llm.model_copy(update={"usage_id": usage_id}) - self._usage_to_llm[usage_id] = llm - self.notify(RegistryEvent(llm=llm)) - logger.info( - "[LLM registry %s]: Switched usage %s to profile %s", - self.registry_id, - usage_id, - profile_id, - ) - return llm - def validate_profile(self, data: Mapping[str, Any]) -> tuple[bool, list[str]]: """Return (is_valid, errors) after validating a profile payload.""" diff --git a/tests/sdk/conversation/local/test_state_serialization.py b/tests/sdk/conversation/local/test_state_serialization.py index 3370d31951..e5be434326 100644 --- a/tests/sdk/conversation/local/test_state_serialization.py +++ b/tests/sdk/conversation/local/test_state_serialization.py @@ -638,104 +638,3 @@ def test_conversation_with_agent_different_llm_config(): # Test that the core state structure is preserved (excluding agent differences) new_dump = new_conversation._state.model_dump(mode="json", exclude={"agent"}) assert new_dump == 
original_state_dump - - -def test_local_conversation_switch_llm_persists_profile(tmp_path, monkeypatch): - home_dir = tmp_path / "home" - home_dir.mkdir() - monkeypatch.setenv("HOME", str(home_dir)) - monkeypatch.setenv("OPENHANDS_INLINE_CONVERSATIONS", "false") - - registry = LLMRegistry() - base_llm = LLM(model="gpt-4o-mini", usage_id="test-llm") - registry.save_profile("base", base_llm) - alt_llm = LLM(model="gpt-4o", usage_id="alternate", temperature=0.4) - registry.save_profile("alt", alt_llm) - - agent = Agent(llm=registry.load_profile("base"), tools=[]) - workspace_dir = tmp_path / "workspace" - persistence_dir = tmp_path / "persist" - - conversation = Conversation( - agent=agent, - workspace=str(workspace_dir), - persistence_dir=str(persistence_dir), - visualize=False, - ) - assert isinstance(conversation, LocalConversation) - - conversation.switch_llm("alt") - - assert conversation.agent.llm.profile_id == "alt" - assert conversation.state.agent.llm.profile_id == "alt" - assert conversation.agent.llm.usage_id == "test-llm" - assert conversation.llm_registry.get("test-llm").model == alt_llm.model - - persistence_path = conversation.state.persistence_dir - assert persistence_path is not None - base_state_path = Path(persistence_path) / "base_state.json" - data = json.loads(base_state_path.read_text()) - assert data["agent"]["llm"] == {"profile_id": "alt"} - - reloaded_agent = Agent(llm=registry.load_profile("alt"), tools=[]) - reloaded = Conversation( - agent=reloaded_agent, - workspace=str(workspace_dir), - persistence_dir=str(persistence_dir), - conversation_id=conversation.id, - visualize=False, - ) - assert isinstance(reloaded, LocalConversation) - assert reloaded.state.agent.llm.profile_id == "alt" - assert reloaded.state.agent.llm.usage_id == "test-llm" - - -def test_local_conversation_switch_llm_inline_mode_rejected(tmp_path, monkeypatch): - home_dir = tmp_path / "home" - home_dir.mkdir() - monkeypatch.setenv("HOME", str(home_dir)) - monkeypatch.setenv("OPENHANDS_INLINE_CONVERSATIONS", "true") - - registry = LLMRegistry() - base_llm = LLM(model="gpt-4o-mini", usage_id="test-llm") - registry.save_profile("base", base_llm) - registry.save_profile("alt", LLM(model="gpt-4o", usage_id="alternate")) - - agent = Agent(llm=registry.load_profile("base"), tools=[]) - conversation = Conversation( - agent=agent, - workspace=str(tmp_path / "workspace"), - persistence_dir=str(tmp_path / "persist"), - visualize=False, - ) - assert isinstance(conversation, LocalConversation) - - with pytest.raises(RuntimeError, match="OPENHANDS_INLINE_CONVERSATIONS"): - conversation.switch_llm("alt") - - -def test_local_conversation_switch_llm_requires_idle(tmp_path, monkeypatch): - home_dir = tmp_path / "home" - home_dir.mkdir() - monkeypatch.setenv("HOME", str(home_dir)) - monkeypatch.setenv("OPENHANDS_INLINE_CONVERSATIONS", "false") - - registry = LLMRegistry() - base_llm = LLM(model="gpt-4o-mini", usage_id="test-llm") - registry.save_profile("base", base_llm) - registry.save_profile("alt", LLM(model="gpt-4o", usage_id="alternate")) - - agent = Agent(llm=registry.load_profile("base"), tools=[]) - conversation = Conversation( - agent=agent, - workspace=str(tmp_path / "workspace"), - persistence_dir=str(tmp_path / "persist"), - visualize=False, - ) - assert isinstance(conversation, LocalConversation) - - with conversation.state: - conversation.state.agent_status = AgentExecutionStatus.RUNNING - - with pytest.raises(RuntimeError, match="Agent must be idle"): - conversation.switch_llm("alt") diff --git 
a/tests/sdk/llm/test_llm_registry_profiles.py b/tests/sdk/llm/test_llm_registry_profiles.py index fb91d70dd8..519af5babf 100644 --- a/tests/sdk/llm/test_llm_registry_profiles.py +++ b/tests/sdk/llm/test_llm_registry_profiles.py @@ -1,6 +1,5 @@ import json -import pytest from pydantic import SecretStr from openhands.sdk.llm.llm import LLM @@ -97,41 +96,3 @@ def test_validate_profile_reports_errors(tmp_path): ok, errors = registry.validate_profile({"usage_id": "svc"}) assert not ok assert any("model" in message for message in errors) - - -def test_switch_profile_replaces_active_llm(tmp_path): - registry = LLMRegistry(profile_dir=tmp_path) - base_llm = LLM(model="gpt-4o-mini", usage_id="service") - registry.add(base_llm) - registry.save_profile("alternate", LLM(model="gpt-4o", usage_id="alternate")) - - events: list = [] - registry.subscribe(events.append) - - switched = registry.switch_profile("service", "alternate") - - assert switched.profile_id == "alternate" - assert switched.usage_id == "service" - assert registry.get("service") is switched - assert switched.model == "gpt-4o" - assert len(events) == 1 - assert events[0].llm is switched - - # switching to the same profile should be a no-op - again = registry.switch_profile("service", "alternate") - assert again is switched - assert len(events) == 1 - - -def test_switch_profile_unknown_usage(tmp_path): - registry = LLMRegistry(profile_dir=tmp_path) - with pytest.raises(KeyError): - registry.switch_profile("missing", "profile") - - -def test_switch_profile_missing_profile(tmp_path): - registry = LLMRegistry(profile_dir=tmp_path) - registry.add(LLM(model="gpt-4o-mini", usage_id="service")) - - with pytest.raises(FileNotFoundError): - registry.switch_profile("service", "does-not-exist") From 03b4600141390ff20494bf2f4083d5f7622d47b1 Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Tue, 21 Oct 2025 22:44:52 +0200 Subject: [PATCH 24/37] style: use f-string for registry logging --- openhands-sdk/openhands/sdk/llm/llm_registry.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/openhands-sdk/openhands/sdk/llm/llm_registry.py b/openhands-sdk/openhands/sdk/llm/llm_registry.py index fcef27007e..0557cfef14 100644 --- a/openhands-sdk/openhands/sdk/llm/llm_registry.py +++ b/openhands-sdk/openhands/sdk/llm/llm_registry.py @@ -117,9 +117,7 @@ def add(self, llm: LLM) -> None: self._usage_to_llm[usage_id] = llm self.notify(RegistryEvent(llm=llm)) logger.info( - "[LLM registry %s]: Added LLM for usage %s", - self.registry_id, - usage_id, + f"[LLM registry {self.registry_id}]: Added LLM for usage {usage_id}" ) # ------------------------------------------------------------------ From acf67e35876c33c3ec22be09cb5321f662baa138 Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Tue, 21 Oct 2025 22:57:00 +0200 Subject: [PATCH 25/37] docs: expand LLM profile example Example 25 now performs a read/write/delete workflow and verifies the persisted profile reference. --- examples/01_standalone_sdk/25_llm_profiles.py | 55 ++++++++++++++++++- 1 file changed, 52 insertions(+), 3 deletions(-) diff --git a/examples/01_standalone_sdk/25_llm_profiles.py b/examples/01_standalone_sdk/25_llm_profiles.py index f5a32274e9..c3c9e4479e 100644 --- a/examples/01_standalone_sdk/25_llm_profiles.py +++ b/examples/01_standalone_sdk/25_llm_profiles.py @@ -9,7 +9,9 @@ credentials when the profile omits secrets. 
""" +import json import os +from pathlib import Path from pydantic import SecretStr @@ -35,7 +37,7 @@ def ensure_profile_exists(registry: LLMRegistry, name: str) -> None: base_url="https://llm-proxy.eval.all-hands.dev", temperature=0.2, max_output_tokens=4096, - service_id="agent", + usage_id="agent", metadata={ "profile_description": "Sample GPT-5 Mini profile created by example 25.", }, @@ -67,10 +69,57 @@ def main() -> None: tools = [Tool(name="BashTool")] agent = Agent(llm=llm, tools=tools) - conversation = Conversation(agent=agent, workspace=os.getcwd()) - conversation.send_message("Print 'Profile created successfully.'") + workspace_dir = Path(os.getcwd()) + summary_path = workspace_dir / "summary_readme.md" + if summary_path.exists(): + summary_path.unlink() + + persistence_root = workspace_dir / ".conversations_llm_profiles" + conversation = Conversation( + agent=agent, + workspace=str(workspace_dir), + persistence_dir=str(persistence_root), + visualize=False, + ) + + conversation.send_message( + "Read README.md in this workspace, create a concise summary in " + "summary_readme.md (overwrite it if it exists), and respond with " + "SUMMARY_READY when the file is written." + ) + conversation.run() + + if summary_path.exists(): + print(f"summary_readme.md written to {summary_path}") + else: + print("summary_readme.md not found after first run") + + conversation.send_message( + "Thanks! Delete summary_readme.md from the workspace and respond with " + "SUMMARY_REMOVED once it is gone." + ) conversation.run() + if summary_path.exists(): + print("summary_readme.md still present after deletion request") + else: + print("summary_readme.md removed") + + persistence_dir = conversation.state.persistence_dir + if persistence_dir is None: + raise RuntimeError("Conversation did not persist base state to disk") + + base_state_path = Path(persistence_dir) / "base_state.json" + state_payload = json.loads(base_state_path.read_text()) + llm_entry = state_payload.get("agent", {}).get("llm", {}) + profile_in_state = llm_entry.get("profile_id") + print(f"Profile recorded in base_state.json: {profile_in_state}") + if profile_in_state != PROFILE_NAME: + print( + "Warning: profile_id in base_state.json does not match the profile " + "used at runtime." 
+        )
+

if __name__ == "__main__":  # pragma: no cover
    main()

From 218728ef14e7b04093858a5e818e97758520fba2 Mon Sep 17 00:00:00 2001
From: Engel Nyst
Date: Tue, 21 Oct 2025 23:58:16 +0200
Subject: [PATCH 26/37] Refine LLM profile persistence

- move inline/profile compaction into LLM serializer/validator
- use model_dump_json context in ConversationState persistence
- add persistence settings module and cover profile reference tests
- document persistence comparison and recommendations
---
 docs/llm_profiles.md                          |  19 ++++
 .../sdk/conversation/persistence_utils.py     | 106 ------------------
 .../openhands/sdk/conversation/state.py       |  22 ++--
 openhands-sdk/openhands/sdk/llm/llm.py        |  59 +++++++++-
 .../openhands/sdk/persistence/__init__.py     |  10 ++
 .../openhands/sdk/persistence/settings.py     |  17 +++
 tests/sdk/llm/test_llm_registry_profiles.py   |  27 +++++
 7 files changed, 139 insertions(+), 121 deletions(-)
 delete mode 100644 openhands-sdk/openhands/sdk/conversation/persistence_utils.py
 create mode 100644 openhands-sdk/openhands/sdk/persistence/__init__.py
 create mode 100644 openhands-sdk/openhands/sdk/persistence/settings.py

diff --git a/docs/llm_profiles.md b/docs/llm_profiles.md
index 299df3e202..b2ec2ceaf7 100644
--- a/docs/llm_profiles.md
+++ b/docs/llm_profiles.md
@@ -85,3 +85,22 @@ Notes on service_id rename
 ### Follow-up coordination

 - Subsequent tasks (agent-sdk-20/21/22) will build on this foundation to expose CLI flags, update documentation, and improve secrets handling.
+
+## Persistence integration review
+
+### Conversation snapshots vs. profile-aware serialization
+- **Caller experience:** Conversations that opt into profile references should behave the same as the legacy inline flow. Callers still receive fully expanded `LLM` payloads when they work with `ConversationState` objects or remote conversation APIs. The only observable change is that persisted `base_state.json` files can shrink to `{ "profile_id": "<profile_id>" }` instead of storing every field.
+- **Inline vs. referenced storage:** Conversation persistence previously delegated everything to Pydantic (`model_dump_json` / `model_validate`). The draft implementation added a recursive helper (`compact_llm_profiles` / `resolve_llm_profiles`) that walked arbitrary dictionaries and manually replaced or expanded embedded LLMs. This duplication diverged from the rest of the SDK, where polymorphic models rely on validators and discriminators to control serialization.
+- **Relationship to `DiscriminatedUnionMixin`:** That mixin exists so we can ship objects across process boundaries (e.g., remote conversations) without bespoke traversal code. Keeping serialization rules on the models themselves, rather than sprinkling special cases in persistence helpers, lets us benefit from the same rebuild/validation pipeline.
+
+### Remote conversation compatibility
+- The agent server still exposes fully inlined LLM payloads to remote clients. Because the manual compaction was only invoked when writing `base_state.json`, remote APIs were unaffected. We need to preserve that behaviour so remote callers do not have to resolve profiles themselves.
+- When a conversation is restored on the server (or locally), any profile references in `base_state.json` must be expanded **before** the state is materialised; otherwise, components that expect a concrete `LLM` instance (e.g., secret reconciliation, spend tracking) will break (see the round-trip sketch below).
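+
+### Round-trip sketch
+A condensed version of the flow the new tests exercise. `INLINE_CONTEXT_KEY` and the `llm_registry` context key are the names this patch introduces; the profile name and directory are illustrative:
+
+    from openhands.sdk.llm import LLM
+    from openhands.sdk.llm.llm_registry import LLMRegistry
+    from openhands.sdk.persistence.settings import INLINE_CONTEXT_KEY
+
+    registry = LLMRegistry(profile_dir="/tmp/profiles")
+    registry.save_profile("sample", LLM(model="gpt-4o-mini", usage_id="agent"))
+    llm = registry.load_profile("sample")
+
+    # Persisting with inlining disabled emits a compact stub.
+    stub = llm.model_dump(mode="json", context={INLINE_CONTEXT_KEY: False})
+    assert stub == {"profile_id": "sample"}
+
+    # Resuming expands the stub back through the registry.
+    restored = LLM.model_validate(
+        stub, context={INLINE_CONTEXT_KEY: False, "llm_registry": registry}
+    )
+    assert restored.model == "gpt-4o-mini"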
+ +### Recommendation +- Move profile resolution/compaction into the `LLM` model: + - A `model_validator(mode="before")` can load `{ "profile_id": ... }` payloads with the `LLMRegistry`, while respecting `OPENHANDS_INLINE_CONVERSATIONS` (raise when inline mode is enforced but only a profile reference is available). + - A `model_serializer(mode="json")` can honour the same inline flag via `model_dump(..., context={"inline_llm_persistence": bool})`, returning either the full inline payload or a `{ "profile_id": ... }` stub. Callers that do not provide explicit context will continue to receive inline payloads by default. +- Have `ConversationState._save_base_state` call `model_dump_json` with the appropriate context instead of the bespoke traversal helpers. This keeps persistence logic co-located with the models, reduces drift, and keeps remote conversations working without additional glue. +- With this approach we still support inline overrides (`OPENHANDS_INLINE_CONVERSATIONS=true`), profile-backed storage, and remote access with no behavioural changes for callers. + diff --git a/openhands-sdk/openhands/sdk/conversation/persistence_utils.py b/openhands-sdk/openhands/sdk/conversation/persistence_utils.py deleted file mode 100644 index 9193d31d3d..0000000000 --- a/openhands-sdk/openhands/sdk/conversation/persistence_utils.py +++ /dev/null @@ -1,106 +0,0 @@ -"""Helpers for serializing and deserializing persisted conversation data.""" - -from __future__ import annotations - -import os -from collections.abc import Mapping -from typing import Any - -from openhands.sdk.llm.llm_registry import LLMRegistry - - -_INLINE_ENV_VAR = "OPENHANDS_INLINE_CONVERSATIONS" -_FALSE_VALUES = {"0", "false", "no"} - - -def should_inline_conversations() -> bool: - """Return True when conversations should be persisted with inline LLM payloads.""" - - value = os.getenv(_INLINE_ENV_VAR, "true").strip().lower() - return value not in _FALSE_VALUES - - -def compact_llm_profiles( - data: Mapping[str, Any], *, inline: bool | None = None -) -> dict[str, Any]: - """Return a mapping ready to be persisted to disk. - - When ``inline`` is False and an LLM dict contains ``profile_id``, the body is - replaced with ``{"profile_id": }``. Otherwise the structure is left intact. 
- """ - - inline_mode = should_inline_conversations() if inline is None else inline - return _compact(data, inline=inline_mode) - - -def resolve_llm_profiles( - data: Mapping[str, Any], - *, - inline: bool | None = None, - llm_registry: LLMRegistry | None = None, -) -> dict[str, Any]: - """Expand stored profile references back into inline LLM dictionaries.""" - - inline_mode = should_inline_conversations() if inline is None else inline - registry = llm_registry or LLMRegistry() - return _resolve(data, inline=inline_mode, llm_registry=registry) - - -def _compact(value: Mapping[str, Any] | list[Any] | Any, *, inline: bool) -> Any: - if isinstance(value, Mapping): - compacted = {key: _compact(item, inline=inline) for key, item in value.items()} - if not inline and _is_llm_dict(compacted): - profile_id = compacted.get("profile_id") - if profile_id: - return {"profile_id": profile_id} - return compacted - - if isinstance(value, list): - return [_compact(item, inline=inline) for item in value] - - return value - - -def _resolve( - value: Mapping[str, Any] | list[Any] | Any, - *, - inline: bool, - llm_registry: LLMRegistry, -) -> Any: - if isinstance(value, Mapping): - expanded = { - key: _resolve(item, inline=inline, llm_registry=llm_registry) - for key, item in value.items() - } - - if _is_profile_reference(expanded): - if inline: - profile_id = expanded["profile_id"] - raise ValueError( - "Encountered profile reference for LLM while " - "OPENHANDS_INLINE_CONVERSATIONS is enabled. " - "Inline the profile or set " - "OPENHANDS_INLINE_CONVERSATIONS=false." - ) - profile_id = expanded["profile_id"] - llm = llm_registry.load_profile(profile_id) - llm_dict = llm.model_dump(exclude_none=True) - llm_dict["profile_id"] = profile_id - return _resolve(llm_dict, inline=inline, llm_registry=llm_registry) - - return expanded - - if isinstance(value, list): - return [ - _resolve(item, inline=inline, llm_registry=llm_registry) for item in value - ] - - return value - - -def _is_llm_dict(value: Mapping[str, Any]) -> bool: - return "model" in value and ("usage_id" in value or "service_id" in value) - - -def _is_profile_reference(value: Mapping[str, Any]) -> bool: - return "profile_id" in value and "model" not in value diff --git a/openhands-sdk/openhands/sdk/conversation/state.py b/openhands-sdk/openhands/sdk/conversation/state.py index d900d92d2c..f61c3dd097 100644 --- a/openhands-sdk/openhands/sdk/conversation/state.py +++ b/openhands-sdk/openhands/sdk/conversation/state.py @@ -1,5 +1,4 @@ # state.py -import json from collections.abc import Sequence from enum import Enum from typing import TYPE_CHECKING, Any, Self @@ -11,17 +10,16 @@ from openhands.sdk.conversation.event_store import EventLog from openhands.sdk.conversation.fifo_lock import FIFOLock from openhands.sdk.conversation.persistence_const import BASE_STATE, EVENTS_DIR -from openhands.sdk.conversation.persistence_utils import ( - compact_llm_profiles, - resolve_llm_profiles, - should_inline_conversations, -) from openhands.sdk.conversation.secrets_manager import SecretsManager from openhands.sdk.conversation.types import ConversationCallbackType, ConversationID from openhands.sdk.event import ActionEvent, ObservationEvent, UserRejectObservation from openhands.sdk.event.base import Event from openhands.sdk.io import FileStore, InMemoryFileStore, LocalFileStore from openhands.sdk.logger import get_logger +from openhands.sdk.persistence.settings import ( + INLINE_CONTEXT_KEY, + should_inline_conversations, +) from 
openhands.sdk.security.confirmation_policy import ( ConfirmationPolicyBase, NeverConfirm, @@ -139,10 +137,11 @@ def _save_base_state(self, fs: FileStore) -> None: Persist base state snapshot (no events; events are file-backed). """ inline_mode = should_inline_conversations() - payload = compact_llm_profiles( - self.model_dump(mode="json", exclude_none=True), inline=inline_mode + payload = self.model_dump_json( + exclude_none=True, + context={INLINE_CONTEXT_KEY: inline_mode}, ) - fs.write(BASE_STATE, json.dumps(payload)) + fs.write(BASE_STATE, payload) # ===== Factory: open-or-create (no load/save methods needed) ===== @classmethod @@ -170,12 +169,11 @@ def create( base_text = None inline_mode = should_inline_conversations() + context = {INLINE_CONTEXT_KEY: inline_mode} # ---- Resume path ---- if base_text: - raw_payload = json.loads(base_text) - payload = resolve_llm_profiles(raw_payload, inline=inline_mode) - state = cls.model_validate(payload) + state = cls.model_validate_json(base_text, context=context) # Enforce conversation id match if state.id != id: diff --git a/openhands-sdk/openhands/sdk/llm/llm.py b/openhands-sdk/openhands/sdk/llm/llm.py index 58aa117f78..4086988d4a 100644 --- a/openhands-sdk/openhands/sdk/llm/llm.py +++ b/openhands-sdk/openhands/sdk/llm/llm.py @@ -4,7 +4,7 @@ import json import os import warnings -from collections.abc import Callable, Sequence +from collections.abc import Callable, Mapping, Sequence from contextlib import contextmanager from typing import TYPE_CHECKING, Any, ClassVar, Literal, get_args, get_origin @@ -16,8 +16,12 @@ Field, PrivateAttr, SecretStr, + SerializationInfo, + SerializerFunctionWrapHandler, + ValidationInfo, field_serializer, field_validator, + model_serializer, model_validator, ) from pydantic.json_schema import SkipJsonSchema @@ -75,6 +79,10 @@ from openhands.sdk.llm.utils.retry_mixin import RetryMixin from openhands.sdk.llm.utils.telemetry import Telemetry from openhands.sdk.logger import ENV_LOG_DIR, get_logger +from openhands.sdk.persistence.settings import ( + INLINE_CONTEXT_KEY, + should_inline_conversations, +) logger = get_logger(__name__) @@ -267,6 +275,22 @@ class LLM(BaseModel, RetryMixin, NonNativeToolCallingMixin): extra="forbid", arbitrary_types_allowed=True ) + @model_serializer(mode="wrap", when_used="json") + def _serialize_with_profiles( + self, handler: SerializerFunctionWrapHandler, info: SerializationInfo + ) -> Mapping[str, Any]: + inline_pref = None + if info.context is not None and INLINE_CONTEXT_KEY in info.context: + inline_pref = info.context[INLINE_CONTEXT_KEY] + if inline_pref is None: + inline_pref = True + + data = handler(self) + profile_id = data.get("profile_id") if isinstance(data, dict) else None + if not inline_pref and profile_id: + return {"profile_id": profile_id} + return data + # ========================================================================= # Validators # ========================================================================= @@ -291,11 +315,40 @@ def _validate_api_key(cls, v): @model_validator(mode="before") @classmethod - def _coerce_inputs(cls, data): - if not isinstance(data, dict): + def _coerce_inputs(cls, data: Any, info: ValidationInfo): + if not isinstance(data, Mapping): return data d = dict(data) + profile_id = d.get("profile_id") + if profile_id and "model" not in d: + inline_pref = None + if info.context is not None and INLINE_CONTEXT_KEY in info.context: + inline_pref = info.context[INLINE_CONTEXT_KEY] + if inline_pref is None: + inline_pref = 
should_inline_conversations() + + if inline_pref: + raise ValueError( + "Encountered profile reference for LLM while " + "OPENHANDS_INLINE_CONVERSATIONS is enabled. " + "Inline the profile or set " + "OPENHANDS_INLINE_CONVERSATIONS=false." + ) + + registry = None + if info.context is not None: + registry = info.context.get("llm_registry") + if registry is None: + from openhands.sdk.llm.llm_registry import LLMRegistry + + registry = LLMRegistry() + + llm = registry.load_profile(profile_id) + expanded = llm.model_dump(exclude_none=True) + expanded["profile_id"] = profile_id + d.update(expanded) + if "service_id" in d and "usage_id" not in d: warnings.warn( SERVICE_ID_DEPRECATION_MSG, diff --git a/openhands-sdk/openhands/sdk/persistence/__init__.py b/openhands-sdk/openhands/sdk/persistence/__init__.py new file mode 100644 index 0000000000..700174f618 --- /dev/null +++ b/openhands-sdk/openhands/sdk/persistence/__init__.py @@ -0,0 +1,10 @@ +"""Persistence configuration helpers.""" + +from .settings import INLINE_CONTEXT_KEY, INLINE_ENV_VAR, should_inline_conversations + + +__all__ = [ + "INLINE_CONTEXT_KEY", + "INLINE_ENV_VAR", + "should_inline_conversations", +] diff --git a/openhands-sdk/openhands/sdk/persistence/settings.py b/openhands-sdk/openhands/sdk/persistence/settings.py new file mode 100644 index 0000000000..bd8f51e490 --- /dev/null +++ b/openhands-sdk/openhands/sdk/persistence/settings.py @@ -0,0 +1,17 @@ +"""Shared helpers for SDK persistence configuration.""" + +from __future__ import annotations + +import os + + +INLINE_ENV_VAR = "OPENHANDS_INLINE_CONVERSATIONS" +INLINE_CONTEXT_KEY = "inline_llm_persistence" +_FALSE_VALUES = {"0", "false", "no"} + + +def should_inline_conversations() -> bool: + """Return True when conversations should be persisted with inline LLM payloads.""" + + value = os.getenv(INLINE_ENV_VAR, "true").strip().lower() + return value not in _FALSE_VALUES diff --git a/tests/sdk/llm/test_llm_registry_profiles.py b/tests/sdk/llm/test_llm_registry_profiles.py index 519af5babf..948366df75 100644 --- a/tests/sdk/llm/test_llm_registry_profiles.py +++ b/tests/sdk/llm/test_llm_registry_profiles.py @@ -4,6 +4,7 @@ from openhands.sdk.llm.llm import LLM from openhands.sdk.llm.llm_registry import LLMRegistry +from openhands.sdk.persistence.settings import INLINE_CONTEXT_KEY def test_list_profiles_returns_sorted_names(tmp_path): @@ -86,6 +87,32 @@ def test_register_profiles_skips_invalid_and_duplicate_profiles(tmp_path): assert registry.list_usage_ids() == ["shared"] +def test_llm_serializer_respects_inline_context(): + llm = LLM(model="gpt-4o-mini", usage_id="service", profile_id="sample") + + inline_payload = llm.model_dump(mode="json") + assert inline_payload["model"] == "gpt-4o-mini" + + referenced = llm.model_dump(mode="json", context={INLINE_CONTEXT_KEY: False}) + assert referenced == {"profile_id": "sample"} + + +def test_llm_validator_loads_profile_reference(tmp_path, monkeypatch): + monkeypatch.setenv("OPENHANDS_INLINE_CONVERSATIONS", "false") + registry = LLMRegistry(profile_dir=tmp_path) + source_llm = LLM(model="gpt-4o-mini", usage_id="service") + registry.save_profile("profile-tests", source_llm) + + parsed = LLM.model_validate( + {"profile_id": "profile-tests"}, + context={INLINE_CONTEXT_KEY: False, "llm_registry": registry}, + ) + + assert parsed.model == source_llm.model + assert parsed.profile_id == "profile-tests" + assert parsed.usage_id == source_llm.usage_id + + def test_validate_profile_reports_errors(tmp_path): registry = 
LLMRegistry(profile_dir=tmp_path) From 75e8ecd02e1d935d1dad866ffb040a7fafc69291 Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Wed, 22 Oct 2025 12:56:57 +0200 Subject: [PATCH 27/37] Update LLM profile docs for usage_id semantics --- docs/llm_profiles.md | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/docs/llm_profiles.md b/docs/llm_profiles.md index b2ec2ceaf7..0eeede6d1b 100644 --- a/docs/llm_profiles.md +++ b/docs/llm_profiles.md @@ -10,7 +10,7 @@ Key decisions - Storage location: ~/.openhands/llm-profiles/.json. The profile_name is the filename (no extension) used to refer to the profile. - Do not change ConversationState or Agent serialization format for now. Profiles are a convenience for creating LLM instances and registering them in the runtime LLMRegistry. - Secrets: do NOT store plaintext API keys in profile files by default. Prefer storing the env var name in the LLM.api_key (via LLM.load_from_env) or keep the API key in runtime SecretsManager. The LLMRegistry.save_profile API exposes an include_secrets flag; default False. -- LLM.service_id semantics: keep current behavior (a small set of runtime "usage" identifiers such as 'agent', 'condenser', 'title-gen', etc.). Do not use service_id as the profile name. We will evaluate a rename (service_id -> usage_id) in a separate task (see agent-sdk-23). +- LLM.usage_id semantics: keep current behavior (a small set of runtime identifiers such as 'agent', 'condenser', 'title-gen', etc.). Do not use usage_id as the profile name. LLMRegistry profile API (summary) @@ -37,11 +37,6 @@ Migration - Migration from inline configs to profiles: provide a migration helper script to extract inline LLMs from ~/.openhands/agent_settings.json and conversation base_state.json into ~/.openhands/llm-profiles/.json and update references (manual opt-in by user). -Notes on service_id rename - -- There is an ongoing discussion about renaming `LLM.service_id` to a clearer name (e.g., `usage_id` or `token_tracking_id`) because `service_id` is overloaded. We will not rename immediately; agent-sdk-23 will investigate the migration and impact. - - ## Proposed changes for agent-sdk-19 (profile references in persistence) ### Goals From 142faee71f9661fcd64f467096daa9633c4fcd71 Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Sat, 25 Oct 2025 16:02:55 +0200 Subject: [PATCH 28/37] fix LLM mutation for profiles to respect immutability; add docstring; add vscode microagent --- .openhands/microagents/vscode.md | 48 +++++++++++++++++++ examples/01_standalone_sdk/25_llm_profiles.py | 3 +- .../openhands/sdk/llm/llm_registry.py | 25 ++++++++-- 3 files changed, 71 insertions(+), 5 deletions(-) create mode 100644 .openhands/microagents/vscode.md diff --git a/.openhands/microagents/vscode.md b/.openhands/microagents/vscode.md new file mode 100644 index 0000000000..a5436b98ac --- /dev/null +++ b/.openhands/microagents/vscode.md @@ -0,0 +1,48 @@ +--- +name: vscode +version: 1.1.0 +agent: CodeActAgent +triggers: + - vscode +--- + +# VSCode Quick Start for Agent SDK Repo + +## Open the project in a fresh VSCode window +```bash +code -n /Users/enyst/repos/agent-sdk-clone +``` + +If `code` is not on PATH, launch VSCode manually, then **File → Open...** and select the repository root. 
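+If the `code` CLI is missing on macOS, open the Command Palette (**Cmd+Shift+P**) and run **Shell Command: Install 'code' command in PATH**; most Linux packages register the command automatically. Verify with `code --version`.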
+ +## Use the repo virtual environment +The workspace sets the interpreter automatically via `.vscode/settings.json`: +```json +{ + "python.defaultInterpreterPath": "${workspaceFolder}/.venv/bin/python", + "python.terminal.activateEnvironment": true, + "python.envFile": "${workspaceFolder}/.env" +} +``` +Verify inside VSCode with **Python: Select Interpreter → agent-sdk-clone/.venv**. + +## Run / debug example 25 (LLM profiles) +Launch configuration lives in `.vscode/launch.json`: +```json +{ + "name": "Example 25 – Debug LLM Profiles", + "type": "python", + "request": "launch", + "python": "${workspaceFolder}/.venv/bin/python", + "program": "${workspaceFolder}/examples/01_standalone_sdk/25_llm_profiles.py", + "console": "integratedTerminal", + "justMyCode": false, + "envFile": "${workspaceFolder}/.env" +} +``` +Steps: +1. Ensure `.env` contains your `LLM_API_KEY` (and optional `LLM_PROFILE_NAME`). +2. In VSCode, open the **Run and Debug** view. +3. Choose **Example 25 – Debug LLM Profiles** and press **Start Debugging** (F5). + +This will start the script under debugpy with the repo’s virtualenv, attach breakpoints as needed, and reuse environment variables from `.env`. diff --git a/examples/01_standalone_sdk/25_llm_profiles.py b/examples/01_standalone_sdk/25_llm_profiles.py index c3c9e4479e..8ef133a48b 100644 --- a/examples/01_standalone_sdk/25_llm_profiles.py +++ b/examples/01_standalone_sdk/25_llm_profiles.py @@ -22,8 +22,7 @@ from openhands.tools.execute_bash import BashTool -DEFAULT_PROFILE_NAME = "gpt-5-mini" -PROFILE_NAME = os.getenv("LLM_PROFILE_NAME", DEFAULT_PROFILE_NAME) +PROFILE_NAME = os.getenv("LLM_PROFILE_NAME", "gpt-5-mini") def ensure_profile_exists(registry: LLMRegistry, name: str) -> None: diff --git a/openhands-sdk/openhands/sdk/llm/llm_registry.py b/openhands-sdk/openhands/sdk/llm/llm_registry.py index 0557cfef14..7e4b411abc 100644 --- a/openhands-sdk/openhands/sdk/llm/llm_registry.py +++ b/openhands-sdk/openhands/sdk/llm/llm_registry.py @@ -9,6 +9,7 @@ from openhands.sdk.llm.llm import LLM from openhands.sdk.logger import get_logger +from openhands.sdk.persistence.settings import should_inline_conversations logger = get_logger(__name__) @@ -129,12 +130,12 @@ def list_profiles(self) -> list[str]: return sorted(path.stem for path in self.profile_dir.glob("*.json")) def get_profile_path(self, profile_id: str) -> Path: - """Return the path where ``profile_id`` is stored.""" + """Return the path where profile_id is stored.""" return self.profile_dir / f"{profile_id}.json" def load_profile(self, profile_id: str) -> LLM: - """Load ``profile_id`` from disk and return an :class:`LLM`.""" + """Load profile_id from disk and return an LLM.""" path = self.get_profile_path(profile_id) if not path.exists(): @@ -210,9 +211,27 @@ def _resolve_profile_dir(self, profile_dir: str | Path | None) -> Path: return directory def _load_profile_with_synced_id(self, path: Path, profile_id: str) -> LLM: + """Load an LLM profile while keeping profile metadata aligned. + + Most callers expect the loaded LLM to reflect the profile file name so the + client apps can surface the active profile (e.g., in conversation history or CLI + prompts). We construct a *new* ``LLM`` via :meth:`model_copy` instead of + mutating the loaded instance to respect the SDK's immutability + conventions. + + When ``OPENHANDS_INLINE_CONVERSATIONS`` is enabled (the default for + reproducible evaluations) we skip the metadata normalization entirely so + inline persistence sees the profile file exactly as stored on disk. 
Set + ``OPENHANDS_INLINE_CONVERSATIONS=false`` to restore the UX-oriented + normalization. + """ + llm = LLM.load_from_json(str(path)) + if should_inline_conversations(): + return llm + if getattr(llm, "profile_id", None) != profile_id: - llm = llm.model_copy(update={"profile_id": profile_id}) + return llm.model_copy(update={"profile_id": profile_id}) return llm def get(self, usage_id: str) -> LLM: From 82138dd8711f742a7a4f47d2e062e0e1ad5ec29e Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Sat, 25 Oct 2025 19:14:19 +0200 Subject: [PATCH 29/37] refactor: keep LLM profile expansion at persistence layer --- .../conversation/impl/local_conversation.py | 5 +- .../openhands/sdk/conversation/state.py | 66 ++++++++++++++++++- openhands-sdk/openhands/sdk/llm/llm.py | 48 ++++---------- 3 files changed, 81 insertions(+), 38 deletions(-) diff --git a/openhands-sdk/openhands/sdk/conversation/impl/local_conversation.py b/openhands-sdk/openhands/sdk/conversation/impl/local_conversation.py index c8e460e6cf..5983bdd74a 100644 --- a/openhands-sdk/openhands/sdk/conversation/impl/local_conversation.py +++ b/openhands-sdk/openhands/sdk/conversation/impl/local_conversation.py @@ -70,6 +70,9 @@ def __init__( application to provide visualization through callbacks. stuck_detection: Whether to enable stuck detection """ + # Initialize the registry early so profile references resolve during resume. + self.llm_registry = LLMRegistry() + self.agent = agent if isinstance(workspace, str): workspace = LocalWorkspace(working_dir=workspace) @@ -91,6 +94,7 @@ def __init__( else None, max_iterations=max_iteration_per_run, stuck_detection=stuck_detection, + llm_registry=self.llm_registry, ) # Default callback: persist every event to state @@ -118,7 +122,6 @@ def _default_callback(e): self.agent.init_state(self._state, on_event=self._on_event) # Register existing llms in agent - self.llm_registry = LLMRegistry() self.llm_registry.subscribe(self._state.stats.register_llm) for llm in list(self.agent.get_all_llms()): self.llm_registry.add(llm) diff --git a/openhands-sdk/openhands/sdk/conversation/state.py b/openhands-sdk/openhands/sdk/conversation/state.py index f61c3dd097..d6ec4b2e1b 100644 --- a/openhands-sdk/openhands/sdk/conversation/state.py +++ b/openhands-sdk/openhands/sdk/conversation/state.py @@ -1,4 +1,5 @@ # state.py +import json from collections.abc import Sequence from enum import Enum from typing import TYPE_CHECKING, Any, Self @@ -16,6 +17,12 @@ from openhands.sdk.event.base import Event from openhands.sdk.io import FileStore, InMemoryFileStore, LocalFileStore from openhands.sdk.logger import get_logger + + +if TYPE_CHECKING: + from openhands.sdk.llm.llm_registry import LLMRegistry + + from openhands.sdk.persistence.settings import ( INLINE_CONTEXT_KEY, should_inline_conversations, @@ -137,6 +144,8 @@ def _save_base_state(self, fs: FileStore) -> None: Persist base state snapshot (no events; events are file-backed). """ inline_mode = should_inline_conversations() + # Pass the inline preference down so LLM serialization knows whether to + # inline credentials or persist a profile reference. payload = self.model_dump_json( exclude_none=True, context={INLINE_CONTEXT_KEY: inline_mode}, @@ -153,11 +162,16 @@ def create( persistence_dir: str | None = None, max_iterations: int = 500, stuck_detection: bool = True, + llm_registry: "LLMRegistry | None" = None, ) -> "ConversationState": """ If base_state.json exists: resume (attach EventLog, reconcile agent, enforce id). 
Else: create fresh (agent required), persist base, and return. + + Args: + llm_registry: Optional registry used to expand profile references when + conversations persist profile IDs instead of inline credentials. """ file_store = ( LocalFileStore(persistence_dir) if persistence_dir else InMemoryFileStore() @@ -169,11 +183,28 @@ def create( base_text = None inline_mode = should_inline_conversations() + # Keep validation and serialization in sync when loading previously + # persisted state. context = {INLINE_CONTEXT_KEY: inline_mode} # ---- Resume path ---- if base_text: - state = cls.model_validate_json(base_text, context=context) + base_payload = json.loads(base_text) + if inline_mode: + if _contains_profile_reference(base_payload): + raise ValueError( + "Persisted base state contains LLM profile references but " + "OPENHANDS_INLINE_CONVERSATIONS is enabled." + ) + else: + registry = llm_registry + if registry is None: + from openhands.sdk.llm.llm_registry import LLMRegistry + + registry = LLMRegistry() + _expand_profile_references(base_payload, registry) + + state = cls.model_validate(base_payload, context=context) # Enforce conversation id match if state.id != id: @@ -344,3 +375,36 @@ def owned(self) -> bool: Return True if the lock is currently held by the calling thread. """ return self._lock.owned() + + +def _contains_profile_reference(node: Any) -> bool: + """Return True if ``node`` contains an LLM profile reference payload.""" + + if isinstance(node, dict): + if "profile_id" in node and "model" not in node: + return True + return any(_contains_profile_reference(value) for value in node.values()) + + if isinstance(node, list): + return any(_contains_profile_reference(item) for item in node) + + return False + + +def _expand_profile_references(node: Any, registry: "LLMRegistry") -> None: + """Inline LLM payloads for any profile references contained in ``node``.""" + + if isinstance(node, dict): + if "profile_id" in node and "model" not in node: + profile_id = node["profile_id"] + llm = registry.load_profile(profile_id) + expanded = llm.model_dump(exclude_none=True) + expanded["profile_id"] = profile_id + node.clear() + node.update(expanded) + return + for value in node.values(): + _expand_profile_references(value, registry) + elif isinstance(node, list): + for item in node: + _expand_profile_references(item, registry) diff --git a/openhands-sdk/openhands/sdk/llm/llm.py b/openhands-sdk/openhands/sdk/llm/llm.py index 4086988d4a..bfca38a35f 100644 --- a/openhands-sdk/openhands/sdk/llm/llm.py +++ b/openhands-sdk/openhands/sdk/llm/llm.py @@ -18,7 +18,6 @@ SecretStr, SerializationInfo, SerializerFunctionWrapHandler, - ValidationInfo, field_serializer, field_validator, model_serializer, @@ -79,10 +78,7 @@ from openhands.sdk.llm.utils.retry_mixin import RetryMixin from openhands.sdk.llm.utils.telemetry import Telemetry from openhands.sdk.logger import ENV_LOG_DIR, get_logger -from openhands.sdk.persistence.settings import ( - INLINE_CONTEXT_KEY, - should_inline_conversations, -) +from openhands.sdk.persistence.settings import INLINE_CONTEXT_KEY logger = get_logger(__name__) @@ -279,6 +275,15 @@ class LLM(BaseModel, RetryMixin, NonNativeToolCallingMixin): def _serialize_with_profiles( self, handler: SerializerFunctionWrapHandler, info: SerializationInfo ) -> Mapping[str, Any]: + """Scope LLM serialization to either inline payloads or profile refs. 
+ + We default to inlining the full LLM payload, but when the persistence + layer explicitly opts out (by passing ``inline_llm_persistence=False`` in + ``context``) we strip the payload down to just ``{"profile_id": ...}`` so + the conversation state can round-trip a profile reference without + exposing secrets. + """ + inline_pref = None if info.context is not None and INLINE_CONTEXT_KEY in info.context: inline_pref = info.context[INLINE_CONTEXT_KEY] @@ -315,40 +320,11 @@ def _validate_api_key(cls, v): @model_validator(mode="before") @classmethod - def _coerce_inputs(cls, data: Any, info: ValidationInfo): - if not isinstance(data, Mapping): + def _coerce_inputs(cls, data): + if not isinstance(data, dict): return data d = dict(data) - profile_id = d.get("profile_id") - if profile_id and "model" not in d: - inline_pref = None - if info.context is not None and INLINE_CONTEXT_KEY in info.context: - inline_pref = info.context[INLINE_CONTEXT_KEY] - if inline_pref is None: - inline_pref = should_inline_conversations() - - if inline_pref: - raise ValueError( - "Encountered profile reference for LLM while " - "OPENHANDS_INLINE_CONVERSATIONS is enabled. " - "Inline the profile or set " - "OPENHANDS_INLINE_CONVERSATIONS=false." - ) - - registry = None - if info.context is not None: - registry = info.context.get("llm_registry") - if registry is None: - from openhands.sdk.llm.llm_registry import LLMRegistry - - registry = LLMRegistry() - - llm = registry.load_profile(profile_id) - expanded = llm.model_dump(exclude_none=True) - expanded["profile_id"] = profile_id - d.update(expanded) - if "service_id" in d and "usage_id" not in d: warnings.warn( SERVICE_ID_DEPRECATION_MSG, From f5404b60d2eace036fff29cd1c58a34c3ecf9ebf Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Sun, 26 Oct 2025 18:23:34 +0100 Subject: [PATCH 30/37] fix: restore LLM profile validation behavior --- openhands-sdk/openhands/sdk/llm/llm.py | 37 +++++++++++++++++-- .../openhands/sdk/llm/llm_registry.py | 12 ++---- 2 files changed, 37 insertions(+), 12 deletions(-) diff --git a/openhands-sdk/openhands/sdk/llm/llm.py b/openhands-sdk/openhands/sdk/llm/llm.py index bfca38a35f..c690ad24f3 100644 --- a/openhands-sdk/openhands/sdk/llm/llm.py +++ b/openhands-sdk/openhands/sdk/llm/llm.py @@ -18,6 +18,7 @@ SecretStr, SerializationInfo, SerializerFunctionWrapHandler, + ValidationInfo, field_serializer, field_validator, model_serializer, @@ -78,7 +79,10 @@ from openhands.sdk.llm.utils.retry_mixin import RetryMixin from openhands.sdk.llm.utils.telemetry import Telemetry from openhands.sdk.logger import ENV_LOG_DIR, get_logger -from openhands.sdk.persistence.settings import INLINE_CONTEXT_KEY +from openhands.sdk.persistence.settings import ( + INLINE_CONTEXT_KEY, + should_inline_conversations, +) logger = get_logger(__name__) @@ -320,11 +324,38 @@ def _validate_api_key(cls, v): @model_validator(mode="before") @classmethod - def _coerce_inputs(cls, data): - if not isinstance(data, dict): + def _coerce_inputs(cls, data: Any, info: ValidationInfo): + if not isinstance(data, Mapping): return data d = dict(data) + profile_id = d.get("profile_id") + if profile_id and "model" not in d: + inline_pref = None + if info.context is not None and INLINE_CONTEXT_KEY in info.context: + inline_pref = info.context[INLINE_CONTEXT_KEY] + if inline_pref is None: + inline_pref = should_inline_conversations() + + if inline_pref: + raise ValueError( + "Encountered profile reference for LLM while " + "OPENHANDS_INLINE_CONVERSATIONS is enabled. 
" + "Inline the profile or set " + "OPENHANDS_INLINE_CONVERSATIONS=false." + ) + + if info.context is None or "llm_registry" not in info.context: + raise ValueError( + "LLM registry required in context to load profile references." + ) + + registry = info.context["llm_registry"] + llm = registry.load_profile(profile_id) + expanded = llm.model_dump(exclude_none=True) + expanded["profile_id"] = profile_id + d.update(expanded) + if "service_id" in d and "usage_id" not in d: warnings.warn( SERVICE_ID_DEPRECATION_MSG, diff --git a/openhands-sdk/openhands/sdk/llm/llm_registry.py b/openhands-sdk/openhands/sdk/llm/llm_registry.py index 7e4b411abc..63087c69e0 100644 --- a/openhands-sdk/openhands/sdk/llm/llm_registry.py +++ b/openhands-sdk/openhands/sdk/llm/llm_registry.py @@ -9,7 +9,6 @@ from openhands.sdk.llm.llm import LLM from openhands.sdk.logger import get_logger -from openhands.sdk.persistence.settings import should_inline_conversations logger = get_logger(__name__) @@ -219,17 +218,12 @@ def _load_profile_with_synced_id(self, path: Path, profile_id: str) -> LLM: mutating the loaded instance to respect the SDK's immutability conventions. - When ``OPENHANDS_INLINE_CONVERSATIONS`` is enabled (the default for - reproducible evaluations) we skip the metadata normalization entirely so - inline persistence sees the profile file exactly as stored on disk. Set - ``OPENHANDS_INLINE_CONVERSATIONS=false`` to restore the UX-oriented - normalization. + We always align ``profile_id`` with the filename so callers get a precise + view of which profile is active without mutating the on-disk payload. This + mirrors previous behavior while avoiding in-place mutation. """ llm = LLM.load_from_json(str(path)) - if should_inline_conversations(): - return llm - if getattr(llm, "profile_id", None) != profile_id: return llm.model_copy(update={"profile_id": profile_id}) return llm From ba4bd501c7a0ab4bcaa13e9e912c1234e6d196fb Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Sun, 26 Oct 2025 21:51:30 +0100 Subject: [PATCH 31/37] harden profile handling - validate profile identifiers before touching disk and reuse safe ids - surface real secrets only when requested and cover traversal with a test - refresh docs and sample assets to avoid hardcoded local details --- .openhands/microagents/vscode.md | 2 +- examples/llm-profiles/gpt-5-mini.json | 2 +- .../openhands/sdk/llm/llm_registry.py | 50 +++++++++++++------ tests/sdk/llm/test_llm_registry_profiles.py | 7 +++ 4 files changed, 43 insertions(+), 18 deletions(-) diff --git a/.openhands/microagents/vscode.md b/.openhands/microagents/vscode.md index a5436b98ac..0c39f03528 100644 --- a/.openhands/microagents/vscode.md +++ b/.openhands/microagents/vscode.md @@ -10,7 +10,7 @@ triggers: ## Open the project in a fresh VSCode window ```bash -code -n /Users/enyst/repos/agent-sdk-clone +code -n ``` If `code` is not on PATH, launch VSCode manually, then **File → Open...** and select the repository root. diff --git a/examples/llm-profiles/gpt-5-mini.json b/examples/llm-profiles/gpt-5-mini.json index e87d1ada8d..78113638fb 100644 --- a/examples/llm-profiles/gpt-5-mini.json +++ b/examples/llm-profiles/gpt-5-mini.json @@ -4,7 +4,7 @@ "api_key": null, "temperature": 0.2, "max_output_tokens": 4096, - "service_id": "agent", + "usage_id": "agent", "metadata": { "profile_description": "Sample configuration for the GPT-5 Mini profile managed by the LLM registry." 
} diff --git a/openhands-sdk/openhands/sdk/llm/llm_registry.py b/openhands-sdk/openhands/sdk/llm/llm_registry.py index 63087c69e0..2a986d3e58 100644 --- a/openhands-sdk/openhands/sdk/llm/llm_registry.py +++ b/openhands-sdk/openhands/sdk/llm/llm_registry.py @@ -1,11 +1,12 @@ import json +import re import warnings from collections.abc import Callable, Iterable, Mapping from pathlib import Path from typing import Any, ClassVar from uuid import uuid4 -from pydantic import BaseModel, ConfigDict, SecretStr, ValidationError +from pydantic import BaseModel, ConfigDict, ValidationError from openhands.sdk.llm.llm import LLM from openhands.sdk.logger import get_logger @@ -21,6 +22,8 @@ ) _DEFAULT_PROFILE_DIR = Path.home() / ".openhands" / "llm-profiles" +_PROFILE_ID_PATTERN = re.compile(r"^[A-Za-z0-9._-]+$") + SERVICE_TO_LLM_DEPRECATION_MSG = ( "LLMRegistry.service_to_llm is deprecated and will be removed in a future " "release; use usage_to_llm instead." @@ -120,6 +123,17 @@ def add(self, llm: LLM) -> None: f"[LLM registry {self.registry_id}]: Added LLM for usage {usage_id}" ) + def _ensure_safe_profile_id(self, profile_id: str) -> str: + if not profile_id or profile_id in {".", ".."}: + raise ValueError("Invalid profile ID.") + if Path(profile_id).name != profile_id: + raise ValueError("Profile IDs cannot contain path separators.") + if not _PROFILE_ID_PATTERN.fullmatch(profile_id): + raise ValueError( + "Profile IDs may only contain alphanumerics, '.', '_', or '-'." + ) + return profile_id + # ------------------------------------------------------------------ # Profile management helpers # ------------------------------------------------------------------ @@ -131,7 +145,8 @@ def list_profiles(self) -> list[str]: def get_profile_path(self, profile_id: str) -> Path: """Return the path where profile_id is stored.""" - return self.profile_dir / f"{profile_id}.json" + safe_id = self._ensure_safe_profile_id(profile_id) + return self.profile_dir / f"{safe_id}.json" def load_profile(self, profile_id: str) -> LLM: """Load profile_id from disk and return an LLM.""" @@ -146,21 +161,20 @@ def save_profile( ) -> Path: """Persist ``llm`` under ``profile_id``.""" - path = self.get_profile_path(profile_id) - data = llm.model_dump(exclude_none=True) - data["profile_id"] = profile_id + safe_id = self._ensure_safe_profile_id(profile_id) + path = self.get_profile_path(safe_id) + data = llm.model_dump( + exclude_none=True, + context={"expose_secrets": include_secrets}, + ) + data["profile_id"] = safe_id if not include_secrets: for secret_field in _SECRET_FIELDS: data.pop(secret_field, None) - else: - for secret_field in _SECRET_FIELDS: - value = data.get(secret_field) - if isinstance(value, SecretStr): - data[secret_field] = value.get_secret_value() with path.open("w", encoding="utf-8") as handle: json.dump(data, handle, indent=2, ensure_ascii=False) - logger.info(f"Saved profile {profile_id} -> {path}") + logger.info(f"Saved profile {safe_id} -> {path}") return path def register_profiles(self, profile_ids: Iterable[str] | None = None) -> None: @@ -169,17 +183,21 @@ def register_profiles(self, profile_ids: Iterable[str] | None = None) -> None: candidates = profile_ids if profile_ids is not None else self.list_profiles() for profile_id in candidates: try: - llm = self.load_profile(profile_id) + safe_id = self._ensure_safe_profile_id(profile_id) + except ValueError as exc: + logger.warning(f"Skipping profile {profile_id}: {exc}") + continue + + try: + llm = self.load_profile(safe_id) except Exception as exc: # noqa: 
BLE001 logger.warning(f"Failed to load profile {safe_id}: {exc}") continue try: self.add(llm) except Exception as exc: # noqa: BLE001 - logger.info( - f"Skipping profile {profile_id}: registry.add failed: {exc}" - ) + logger.info(f"Skipping profile {safe_id}: registry.add failed: {exc}") def validate_profile(self, data: Mapping[str, Any]) -> tuple[bool, list[str]]: """Return (is_valid, errors) after validating a profile payload.""" diff --git a/tests/sdk/llm/test_llm_registry_profiles.py b/tests/sdk/llm/test_llm_registry_profiles.py index 948366df75..b9aefb723f 100644 --- a/tests/sdk/llm/test_llm_registry_profiles.py +++ b/tests/sdk/llm/test_llm_registry_profiles.py @@ -1,5 +1,6 @@ import json +import pytest from pydantic import SecretStr from openhands.sdk.llm.llm import LLM @@ -123,3 +124,9 @@ def test_validate_profile_reports_errors(tmp_path): ok, errors = registry.validate_profile({"usage_id": "svc"}) assert not ok assert any("model" in message for message in errors) + + +def test_get_profile_path_rejects_traversal(tmp_path): + registry = LLMRegistry(profile_dir=tmp_path) + with pytest.raises(ValueError): + registry.get_profile_path("../secret") From cbf886e139da4394f593872b22b8d3a639fd6167 Mon Sep 17 00:00:00 2001 From: Engel Nyst <enyst@users.noreply.github.com> Date: Tue, 21 Oct 2025 00:43:45 +0200 Subject: [PATCH 32/37] docs: capture runtime LLM switching investigation --- docs/llm_runtime_switch_investigation.md | 68 ++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 docs/llm_runtime_switch_investigation.md diff --git a/docs/llm_runtime_switch_investigation.md b/docs/llm_runtime_switch_investigation.md new file mode 100644 index 0000000000..0a358959e4 --- /dev/null +++ b/docs/llm_runtime_switch_investigation.md @@ -0,0 +1,68 @@ +# Runtime LLM Profile Switching – Investigation (agent-sdk-24) + +## Current architecture + +### LLMRegistry +- Keeps an in-memory mapping `service_to_llm: dict[str, LLM]`. +- Loads/saves JSON profiles under `~/.openhands/llm-profiles` (or a custom directory) via: + - `list_profiles()` / `get_profile_path()` + - `save_profile(profile_id, llm)` – strips secret fields unless explicitly asked not to. + - `load_profile(profile_id)` – rehydrates an `LLM`, ensuring the runtime instance’s `profile_id` matches the file stem via `_load_profile_with_synced_id`. + - `register_profiles(profile_ids=None)` – iterates `list_profiles()`, calling `load_profile` then `add` for each profile; skips invalid payloads or duplicates. + - `validate_profile(data)` – wraps `LLM.model_validate` to report pydantic errors as strings. +- `add(llm)` publishes a `RegistryEvent` to the optional subscriber and records the LLM in `service_to_llm` keyed by `llm.service_id`. +- Currently assumes a one-to-one mapping of service_id ↔ active LLM instance. + +### Agent & LLM ownership +- `AgentBase.llm` is a (frozen) `LLM` BaseModel. Agents may also own other LLMs (e.g., condensers) discovered via `AgentBase.get_all_llms()`. +- `AgentBase.resolve_diff_from_deserialized(persisted)` reconciles a persisted agent with the runtime agent: + - Calls `self.llm.resolve_diff_from_deserialized(persisted.llm)`; this only permits differences in fields listed in `LLM.OVERRIDE_ON_SERIALIZE` (api keys, AWS secrets, etc.). Any other field diff raises (see the sketch below). + - Ensures tool names match and the rest of the agent models are identical.
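+  A minimal sketch of that reconciliation rule, for orientation only (the secret-field names below are assumptions; the authoritative list lives in `LLM.OVERRIDE_ON_SERIALIZE`):
+  ```python
+  OVERRIDE_ON_SERIALIZE = ("api_key", "aws_access_key_id", "aws_secret_access_key")
+
+  def resolve_diff(runtime_llm, persisted_llm):
+      # Dump both models, drop the fields that are allowed to differ,
+      # and reject any remaining mismatch.
+      a = runtime_llm.model_dump(exclude_none=True)
+      b = persisted_llm.model_dump(exclude_none=True)
+      for field in OVERRIDE_ON_SERIALIZE:
+          a.pop(field, None)
+          b.pop(field, None)
+      if a != b:
+          raise ValueError("Persisted LLM differs beyond secret overrides")
+      return runtime_llm
+  ```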
+- `LLM.resolve_diff_from_deserialized(persisted)` compares `model_dump(exclude_none=True)` between runtime and persisted objects, allowing overrides only for secret fields. Any other difference triggers a `ValueError`. + +### Conversation persistence +- `ConversationState._save_base_state()` -> `compact_llm_profiles(...)` when `OPENHANDS_INLINE_CONVERSATIONS` is false, replacing inline LLM dicts with `{"profile_id": id}` entries. +- `ConversationState.create()` -> `resolve_llm_profiles(...)` prior to validation, so profile references become concrete LLM dicts loaded from `LLMRegistry`. +- When inline mode is enabled (`OPENHANDS_INLINE_CONVERSATIONS=true`), profiles are fully embedded and *any* LLM diff is rejected by the reconciliation flow above. + +### Conversation bootstrapping +- `LocalConversation.__init__()` adds all LLMs from the agent to the registry and eagerly calls `register_profiles()` (errors logged at DEBUG level). This ensures the in-memory registry is primed with persisted profiles before a conversation resumes. + +## Implications for runtime switching + +1. **Registry as switch authority** + - Registry already centralizes active LLM instances and profile management, so introducing a “switch-to-profile” operation belongs here. That operation will need to: + - Load the target profile (if not already loaded). + - Update `service_to_llm` (and notify subscribers) atomically. + - Return the new `LLM` so callers can update their Agent / Conversation state. + +2. **Agent/LLM reconciliation barriers** + - Current `resolve_diff_from_deserialized` logic rejects *any* non-secret field change. A runtime profile swap would alter at least `LLM.model`, maybe provider-specific params. We therefore need a sanctioned path that: + - Skips reconciliation when conversations are persisted with profile references (i.e., inline mode disabled). + - Refuses to switch when inline mode is required (e.g., evals with `OPENHANDS_INLINE_CONVERSATIONS=true`). Switching in inline mode would otherwise break diff validation. + - This aligns with the instruction to “REJECT SWITCH for eval mode,” but “JUST SWITCH” when persistence is profile-based. + +3. **State & metrics consistency** + - After a switch we must ensure: + - `ConversationState.agent.llm` points at the new object (and any secondary LLM references, e.g., condensers, are updated if needed). + - `ConversationState.stats.service_to_metrics` either resets or continues per usage_id; we must decide what data should carry over when the service swaps to a different profile. + - Event persistence continues to work: future saves should store the new profile ID, and reloads should retrieve the same profile in the registry. + +4. **Runtime API surface** + - Need an ergonomic call for agents/conversations to request a new profile by name (manual selection or automated policy). Potential entry points: + - `LLMRegistry.switch_profile(service_id, profile_id)` returning the active `LLM`. + - Conversation-level helper (e.g., `LocalConversation.switch_llm(profile_id)`) that coordinates registry + agent updates + persistence. + +5. **Observer / callback considerations** + - Registry already has a single `subscriber`. If multiple components need to react to switches, we might extend this to a small pub/sub mechanism. Otherwise we can keep a single callback and have the conversation install its own handler. + +## Open questions / risks +- What happens to in-flight operations when the switch occurs? (For initial implementation we can require the agent to be idle; a minimal guard is sketched below.)
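+  A minimal guard for that initial restriction (illustrative; the shipped check compares `ConversationExecutionStatus` members rather than raw strings):
+  ```python
+  def ensure_idle_before_switch(status: str) -> None:
+      # Reject a profile switch while the agent is mid-run; idle and finished
+      # conversations are safe to reconfigure.
+      if status not in {"idle", "finished"}:
+          raise RuntimeError("Agent must be idle before switching LLM profiles.")
+  ```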
+- How should token metrics roll over? We likely reset or create a new entry keyed by the new profile. +- Tool / condenser LLMs: do we switch only the primary agent LLM, or should condensers also reference profiles? (Out of scope unless required by the plan.) +- Tests must cover: successful switch, rejected switch in inline mode, persistence after switch, registry events. + +## Next steps +1. Capture the desired UX/API in the follow-up planning issue (agent-sdk-25). +2. Decide how to bypass reconciliation safely when profile references are used. +3. Define exact testing matrix (registry unit tests, conversation integration tests, persistence roundtrip). From af1fd40fe2ee2df316012cf91fb82df95c05983b Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Tue, 21 Oct 2025 00:46:33 +0200 Subject: [PATCH 33/37] docs: outline runtime LLM switching plan --- docs/llm_runtime_switch_plan.md | 92 +++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) create mode 100644 docs/llm_runtime_switch_plan.md diff --git a/docs/llm_runtime_switch_plan.md b/docs/llm_runtime_switch_plan.md new file mode 100644 index 0000000000..2895be6ae7 --- /dev/null +++ b/docs/llm_runtime_switch_plan.md @@ -0,0 +1,92 @@ +# Runtime LLM Profile Switching – Implementation Plan (agent-sdk-25) + +This plan builds on the investigation captured in `docs/llm_runtime_switch_investigation.md` and outlines the work required to let callers swap an agent’s primary LLM to another persisted profile at runtime. + +## Goals + +1. Allow manual or automated selection of a persisted LLM profile while a conversation is active. +2. Keep the LLMRegistry as the single source of truth for active LLM instances and profile knowledge. +3. Respect existing persistence behaviour: + - Profile-reference mode (`OPENHANDS_INLINE_CONVERSATIONS=false`) → switching is supported. + - Inline mode (`OPENHANDS_INLINE_CONVERSATIONS=true`) → switching is rejected early with a clear error. +4. Maintain or improve unit/integration test coverage. + +## Proposed architecture changes + +### 1. Extend LLMRegistry + +Add a `switch_profile(service_id: str, profile_id: str) -> LLM` method that: +- Loads `profile_id` via existing helpers (re-using `_load_profile_with_synced_id`). +- Registers the loaded LLM (using `add` semantics) **replacing** the previous instance for `service_id`. +- Publishes a `RegistryEvent` (re-using the existing subscriber hook) so listeners can update state. +- Returns the new `LLM` instance so callers can synchronously update their agent/state. + +Implementation notes: +- If the profile is already active, short-circuit and return the existing LLM. +- Raise a descriptive error when the profile is missing or when the service ID is unknown. + +### 2. Conversation-level coordination + +Introduce a method on `ConversationState` (and corresponding `LocalConversation`) such as `switch_agent_llm(profile_id: str)` which orchestrates the swap: + +1. Check `should_inline_conversations()`; if inline mode is enabled, raise an `LLMSwitchError` instructing the caller to disable inline persistence. +2. Resolve the agent’s `service_id` (current LLM) and call `LLMRegistry.switch_profile(...)`. +3. Update `state.agent` with a new agent object whose `.llm` is the returned instance. + - To bypass the strict `resolve_diff_from_deserialized` diff, introduce an internal helper on `AgentBase`, e.g. `_with_swapped_llm(new_llm: LLM)`, that clones the agent via `model_copy(update={"llm": new_llm})`. 
This avoids running `resolve_diff_from_deserialized`, which would otherwise reject the change. + - Apply the same logic to any condensers or secondary LLMs once the need arises (out of scope for v1). +4. Update `ConversationState.stats` bookkeeping: + - Either reset the metrics entry for the service to zero or continue accumulating under the same key (decision: start a fresh metrics entry to avoid mixing usage between profiles). +5. Persist immediately by calling existing save hooks (`_save_base_state`) to ensure the new profile ID is captured. + +A convenience method on `LocalConversation` (e.g. `switch_llm(profile_id: str)`) will forward to the state method and surface the error if inline mode blocks the operation. + +### 3. User-facing API + +Depending on SDK ergonomics, expose the feature through: +- A direct conversation method (`conversation.switch_llm("profile-a")`). +- CLI / higher-level integration left for a follow-up once core switching works. + +### 4. Prevent mid-operation switches (initial scope) + +For the first iteration we can require `state.agent_status == AgentExecutionStatus.IDLE` before switching. If the agent is mid-run we raise an error to avoid edge cases with in-flight steps. Future work can relax this once step coordination is available. + +## Testing strategy + +1. **Registry unit tests** + - New suite in `tests/sdk/llm/test_llm_registry_profiles.py` for `switch_profile` covering: + - Successful swap (existing service → new profile). + - Swap to the same profile is a no-op. + - Unknown profile → raises. + - Unknown service → raises. + - Subscriber notifications fire. + +2. **Conversation integration tests** (extend `tests/sdk/conversation/local/test_state_serialization.py` or create new module): + - Switching succeeds in profile-reference mode, updating `ConversationState.agent.llm.profile_id` and saving the new profile ID to disk. + - Switching persists across reloads (start a conversation, switch, re-open conversation, verify new profile in state and registry). + - Switching is rejected when `OPENHANDS_INLINE_CONVERSATIONS=true`. + - Switching while the agent is not idle (if enforced) raises the appropriate error. + +3. **Metrics tests** + - Verify that stats either reset or continue according to the chosen policy by inspecting `ConversationState.stats.service_to_metrics` before/after the swap. + +4. **Serialization tests** + - Ensure `compact_llm_profiles` writes the new `profile_id` after a switch, and `resolve_llm_profiles` rehydrates it successfully on reload. + +5. **Subscriber behaviour** + - If we extend beyond a single subscriber, add tests for multiple listeners; otherwise ensure the existing single-subscriber path still works. + +## Rollout steps + +1. Implement `LLMRegistry.switch_profile` plus targeted unit tests. +2. Wire conversation-level orchestration (`ConversationState` and `LocalConversation`). +3. Expose public API and update documentation (including usage notes and inline-mode restriction). +4. Add/cherry-pick helper utilities (e.g., agent cloning) as required. +5. Update CLI or higher-level integrations if time permits (optional follow-up). +6. Ensure documentation references the new feature in the LLM profiles guide. + +## Risks & mitigations + +- **Strict diff enforcement**: bypassed via controlled cloning instead of calling `resolve_diff_from_deserialized` when profile references are in use. +- **Persistence consistency**: immediate re-save after switching guarantees the new profile ID lands on disk. 
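+  A quick way to verify that guarantee after a switch; this mirrors the assertions in the tests added later in this series (the `base_state.json` layout is taken from those tests):
+  ```python
+  import json
+  from pathlib import Path
+
+  def assert_profile_persisted(persistence_dir: str, profile_id: str) -> None:
+      # In profile-reference mode the snapshot stores only the reference,
+      # never the inline LLM payload.
+      payload = json.loads((Path(persistence_dir) / "base_state.json").read_text())
+      assert payload["agent"]["llm"] == {"profile_id": profile_id}
+  ```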
+- **Concurrent use**: initial restriction to idle state avoids race conditions; revisit later if needed. +- **Backward compatibility**: existing code paths remain untouched unless `switch_llm(...)` is invoked. From 2ec0f9f5812c737f06e6edfb931c154c4cdf8816 Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Tue, 21 Oct 2025 01:07:17 +0200 Subject: [PATCH 34/37] feat: allow switching runtime LLM profiles --- openhands-sdk/openhands/sdk/agent/base.py | 7 ++ .../conversation/impl/local_conversation.py | 12 +++ .../openhands/sdk/conversation/state.py | 28 ++++- .../openhands/sdk/llm/llm_registry.py | 21 ++++ .../local/test_state_serialization.py | 100 ++++++++++++++++++ tests/sdk/llm/test_llm_registry_profiles.py | 38 +++++++ 6 files changed, 205 insertions(+), 1 deletion(-) diff --git a/openhands-sdk/openhands/sdk/agent/base.py b/openhands-sdk/openhands/sdk/agent/base.py index 8cc31e16db..f8f1cd1ed6 100644 --- a/openhands-sdk/openhands/sdk/agent/base.py +++ b/openhands-sdk/openhands/sdk/agent/base.py @@ -329,6 +329,13 @@ def resolve_diff_from_deserialized(self, persisted: "AgentBase") -> "AgentBase": ) return reconciled + def _clone_with_llm(self, llm: LLM) -> "AgentBase": + """Return a copy of this agent with ``llm`` swapped in.""" + + clone = self.model_copy(update={"llm": llm}) + clone._tools = dict(self._tools) + return clone + def model_dump_succint(self, **kwargs): """Like model_dump, but excludes None fields by default.""" if "exclude_none" not in kwargs: diff --git a/openhands-sdk/openhands/sdk/conversation/impl/local_conversation.py b/openhands-sdk/openhands/sdk/conversation/impl/local_conversation.py index d308429d20..3f8edd46e8 100644 --- a/openhands-sdk/openhands/sdk/conversation/impl/local_conversation.py +++ b/openhands-sdk/openhands/sdk/conversation/impl/local_conversation.py @@ -178,6 +178,18 @@ def stuck_detector(self) -> StuckDetector | None: """Get the stuck detector instance if enabled.""" return self._stuck_detector + def switch_llm(self, profile_id: str) -> None: + """Switch the active agent LLM to ``profile_id`` at runtime.""" + + with self._state: + self._state.switch_agent_llm(profile_id, registry=self.llm_registry) + self.agent = self._state.agent + logger.info( + "Switched conversation %s to profile %s", + self._state.id, + profile_id, + ) + @observe(name="conversation.send_message") def send_message(self, message: str | Message) -> None: """Send a message to the agent. diff --git a/openhands-sdk/openhands/sdk/conversation/state.py b/openhands-sdk/openhands/sdk/conversation/state.py index 6b24c78d12..d68167fb05 100644 --- a/openhands-sdk/openhands/sdk/conversation/state.py +++ b/openhands-sdk/openhands/sdk/conversation/state.py @@ -2,7 +2,7 @@ import json from collections.abc import Sequence from enum import Enum -from typing import Any, Self +from typing import TYPE_CHECKING, Any, Self from pydantic import AliasChoices, Field, PrivateAttr @@ -301,6 +301,32 @@ def __setattr__(self, name, value): f"State change callback failed for field {name}", exc_info=True ) + def switch_agent_llm(self, profile_id: str, *, registry: "LLMRegistry") -> None: + """Swap the agent's primary LLM to ``profile_id`` using ``registry``.""" + + if should_inline_conversations(): + raise RuntimeError( + "LLM switching requires OPENHANDS_INLINE_CONVERSATIONS to be false." 
+ ) + + if self.execution_status not in ( + ConversationExecutionStatus.IDLE, + ConversationExecutionStatus.FINISHED, + ): + raise RuntimeError("Agent must be idle before switching LLM profiles.") + + usage_id = self.agent.llm.usage_id + try: + new_llm = registry.switch_profile(usage_id, profile_id) + except FileNotFoundError as exc: + raise ValueError(str(exc)) from exc + except KeyError as exc: + raise ValueError(str(exc)) from exc + + self.agent = self.agent._clone_with_llm(new_llm) + if self.execution_status == ConversationExecutionStatus.FINISHED: + self.execution_status = ConversationExecutionStatus.IDLE + @staticmethod def get_unmatched_actions(events: Sequence[Event]) -> list[ActionEvent]: """Find actions in the event history that don't have matching observations. diff --git a/openhands-sdk/openhands/sdk/llm/llm_registry.py b/openhands-sdk/openhands/sdk/llm/llm_registry.py index 2a986d3e58..16907f2c38 100644 --- a/openhands-sdk/openhands/sdk/llm/llm_registry.py +++ b/openhands-sdk/openhands/sdk/llm/llm_registry.py @@ -148,6 +148,27 @@ def get_profile_path(self, profile_id: str) -> Path: safe_id = self._ensure_safe_profile_id(profile_id) return self.profile_dir / f"{safe_id}.json" + def switch_profile(self, usage_id: str, profile_id: str) -> LLM: + """Replace ``usage_id``'s active LLM with ``profile_id`` and return it.""" + + if usage_id not in self._usage_to_llm: + raise KeyError(f"Usage ID '{usage_id}' not found in registry") + + current_llm = self._usage_to_llm[usage_id] + safe_id = self._ensure_safe_profile_id(profile_id) + if getattr(current_llm, "profile_id", None) == safe_id: + return current_llm + + llm = self.load_profile(safe_id) + llm = llm.model_copy(update={"usage_id": usage_id}) + self._usage_to_llm[usage_id] = llm + self.notify(RegistryEvent(llm=llm)) + logger.info( + f"[LLM registry {self.registry_id}]: Switched usage {usage_id} " + f"to profile {safe_id}" + ) + return llm + def load_profile(self, profile_id: str) -> LLM: """Load profile_id from disk and return an LLM.""" diff --git a/tests/sdk/conversation/local/test_state_serialization.py b/tests/sdk/conversation/local/test_state_serialization.py index 8827a45003..e6b499956c 100644 --- a/tests/sdk/conversation/local/test_state_serialization.py +++ b/tests/sdk/conversation/local/test_state_serialization.py @@ -642,3 +642,103 @@ def test_conversation_with_agent_different_llm_config(): # Test that the core state structure is preserved (excluding agent differences) new_dump = new_conversation._state.model_dump(mode="json", exclude={"agent"}) assert new_dump == original_state_dump + + +def test_local_conversation_switch_llm_persists_profile(tmp_path, monkeypatch): + home_dir = tmp_path / "home" + home_dir.mkdir() + monkeypatch.setenv("HOME", str(home_dir)) + monkeypatch.setenv("OPENHANDS_INLINE_CONVERSATIONS", "false") + + registry = LLMRegistry() + base_llm = LLM(model="gpt-4o-mini", usage_id="test-llm") + registry.save_profile("base", base_llm) + alt_llm = LLM(model="gpt-4o", usage_id="alternate", temperature=0.4) + registry.save_profile("alt", alt_llm) + + agent = Agent(llm=registry.load_profile("base"), tools=[]) + workspace_dir = tmp_path / "workspace" + persistence_dir = tmp_path / "persist" + + conversation = Conversation( + agent=agent, + workspace=str(workspace_dir), + persistence_dir=str(persistence_dir), + visualize=False, + ) + assert isinstance(conversation, LocalConversation) + + conversation.switch_llm("alt") + + assert conversation.agent.llm.profile_id == "alt" + assert 
conversation.state.agent.llm.profile_id == "alt" + assert conversation.agent.llm.usage_id == "test-llm" + assert conversation.llm_registry.get("test-llm").model == alt_llm.model + + persistence_path = conversation.state.persistence_dir + assert persistence_path is not None + base_state_path = Path(persistence_path) / "base_state.json" + data = json.loads(base_state_path.read_text()) + assert data["agent"]["llm"] == {"profile_id": "alt"} + + reloaded_agent = Agent(llm=registry.load_profile("alt"), tools=[]) + reloaded = Conversation( + agent=reloaded_agent, + workspace=str(workspace_dir), + persistence_dir=str(persistence_dir), + conversation_id=conversation.id, + visualize=False, + ) + assert isinstance(reloaded, LocalConversation) + assert reloaded.state.agent.llm.profile_id == "alt" + + +def test_local_conversation_switch_llm_inline_mode_rejected(tmp_path, monkeypatch): + home_dir = tmp_path / "home" + home_dir.mkdir() + monkeypatch.setenv("HOME", str(home_dir)) + monkeypatch.setenv("OPENHANDS_INLINE_CONVERSATIONS", "true") + + registry = LLMRegistry() + base_llm = LLM(model="gpt-4o-mini", usage_id="test-llm") + registry.save_profile("base", base_llm) + registry.save_profile("alt", LLM(model="gpt-4o", usage_id="alternate")) + + agent = Agent(llm=registry.load_profile("base"), tools=[]) + conversation = Conversation( + agent=agent, + workspace=str(tmp_path / "workspace"), + persistence_dir=str(tmp_path / "persist"), + visualize=False, + ) + assert isinstance(conversation, LocalConversation) + + with pytest.raises(RuntimeError, match="OPENHANDS_INLINE_CONVERSATIONS"): + conversation.switch_llm("alt") + + +def test_local_conversation_switch_llm_requires_idle(tmp_path, monkeypatch): + home_dir = tmp_path / "home" + home_dir.mkdir() + monkeypatch.setenv("HOME", str(home_dir)) + monkeypatch.setenv("OPENHANDS_INLINE_CONVERSATIONS", "false") + + registry = LLMRegistry() + base_llm = LLM(model="gpt-4o-mini", usage_id="test-llm") + registry.save_profile("base", base_llm) + registry.save_profile("alt", LLM(model="gpt-4o", usage_id="alternate")) + + agent = Agent(llm=registry.load_profile("base"), tools=[]) + conversation = Conversation( + agent=agent, + workspace=str(tmp_path / "workspace"), + persistence_dir=str(tmp_path / "persist"), + visualize=False, + ) + assert isinstance(conversation, LocalConversation) + + with conversation.state: + conversation.state.execution_status = ConversationExecutionStatus.RUNNING + + with pytest.raises(RuntimeError, match="Agent must be idle"): + conversation.switch_llm("alt") diff --git a/tests/sdk/llm/test_llm_registry_profiles.py b/tests/sdk/llm/test_llm_registry_profiles.py index b9aefb723f..2c06a283c9 100644 --- a/tests/sdk/llm/test_llm_registry_profiles.py +++ b/tests/sdk/llm/test_llm_registry_profiles.py @@ -130,3 +130,41 @@ def test_get_profile_path_rejects_traversal(tmp_path): registry = LLMRegistry(profile_dir=tmp_path) with pytest.raises(ValueError): registry.get_profile_path("../secret") + + +def test_switch_profile_replaces_active_llm(tmp_path): + registry = LLMRegistry(profile_dir=tmp_path) + base_llm = LLM(model="gpt-4o-mini", usage_id="primary") + registry.add(base_llm) + registry.save_profile("alternate", LLM(model="gpt-4o", usage_id="alternate")) + + events: list = [] + registry.subscribe(events.append) + + switched = registry.switch_profile("primary", "alternate") + + assert switched.profile_id == "alternate" + assert switched.usage_id == "primary" + assert registry.get("primary") is switched + assert switched.model == "gpt-4o" + assert 
len(events) == 1 + assert events[0].llm is switched + + # switching to the same profile should be a no-op + again = registry.switch_profile("primary", "alternate") + assert again is switched + assert len(events) == 1 + + +def test_switch_profile_unknown_usage(tmp_path): + registry = LLMRegistry(profile_dir=tmp_path) + with pytest.raises(KeyError): + registry.switch_profile("missing", "profile") + + +def test_switch_profile_missing_profile(tmp_path): + registry = LLMRegistry(profile_dir=tmp_path) + registry.add(LLM(model="gpt-4o-mini", usage_id="primary")) + + with pytest.raises(FileNotFoundError): + registry.switch_profile("primary", "does-not-exist") From 3bf69db8502a2ba468e4c3c7fa565bd86889fc4e Mon Sep 17 00:00:00 2001 From: Engel Nyst Date: Tue, 21 Oct 2025 10:57:55 +0200 Subject: [PATCH 35/37] docs: add runtime LLM switch example --- .../26_runtime_llm_switch.py | 125 ++++++++++++++++++ 1 file changed, 125 insertions(+) create mode 100644 examples/01_standalone_sdk/26_runtime_llm_switch.py diff --git a/examples/01_standalone_sdk/26_runtime_llm_switch.py b/examples/01_standalone_sdk/26_runtime_llm_switch.py new file mode 100644 index 0000000000..4e185d5505 --- /dev/null +++ b/examples/01_standalone_sdk/26_runtime_llm_switch.py @@ -0,0 +1,125 @@ +"""Demonstrate switching LLM profiles at runtime and persisting the result.""" + +from __future__ import annotations + +import json +import os +import uuid +from pathlib import Path + +from pydantic import SecretStr + +from openhands.sdk import LLM, Agent, Conversation, LLMRegistry, Message, TextContent +from openhands.sdk.logger import get_logger + + +logger = get_logger(__name__) + +# --------------------------------------------------------------------------- +# Prerequisites +# --------------------------------------------------------------------------- +# 1. Configure the API key for the provider you want to use +api_key = os.getenv("LLM_API_KEY") +assert api_key is not None, "LLM_API_KEY environment variable is not set." + +# 2. Disable inline conversations so profile references are stored instead +os.environ.setdefault("OPENHANDS_INLINE_CONVERSATIONS", "false") + +# 3. Profiles live under ~/.openhands/llm-profiles by default. We create two +# variants that share the same service_id so they can be swapped at runtime. 
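+#    Tip (optional): to keep demo profiles out of your real home directory,
+#    LLMRegistry also accepts a custom location, e.g.
+#    LLMRegistry(profile_dir="./.profiles-demo"); the registry tests construct
+#    it the same way with tmp_path.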
+registry = LLMRegistry()
+usage_id = "support-agent"
+
+base_profile_id = "support-mini"
+alt_profile_id = "support-pro"
+
+base_llm = LLM(
+    usage_id=usage_id,
+    model="litellm_proxy/anthropic/claude-sonnet-4-5-20250929",
+    base_url="https://llm-proxy.eval.all-hands.dev",
+    api_key=SecretStr(api_key),
+    temperature=0.0,
+)
+alt_llm = base_llm.model_copy(
+    update={
+        "model": "litellm_proxy/anthropic/claude-3-5-sonnet-20240620",
+        "temperature": 0.4,
+    }
+)
+
+registry.save_profile(base_profile_id, base_llm)
+registry.save_profile(alt_profile_id, alt_llm)
+
+logger.info("Saved profiles %s and %s", base_profile_id, alt_profile_id)
+
+# ---------------------------------------------------------------------------
+# Start a conversation with the base profile and persist it to disk
+# ---------------------------------------------------------------------------
+conversation_id = uuid.uuid4()
+persistence_dir = Path("./.conversations_switch_demo").resolve()
+workspace_dir = Path.cwd()
+
+agent = Agent(llm=registry.load_profile(base_profile_id), tools=[])
+conversation = Conversation(
+    agent=agent,
+    workspace=str(workspace_dir),
+    persistence_dir=str(persistence_dir),
+    conversation_id=conversation_id,
+    visualize=False,
+)
+
+conversation.send_message(
+    Message(
+        role="user",
+        content=[TextContent(text="What model are you using? Keep it short.")],
+    )
+)
+conversation.run()
+
+print("First run finished with profile:", conversation.agent.llm.profile_id)
+
+# ---------------------------------------------------------------------------
+# Switch to the alternate profile while the conversation is idle
+# ---------------------------------------------------------------------------
+conversation.switch_llm(alt_profile_id)
+print("Switched runtime profile to:", conversation.agent.llm.profile_id)
+
+conversation.send_message(
+    Message(
+        role="user", content=[TextContent(text="Now say hello using the new profile.")]
+    )
+)
+conversation.run()
+
+print("Second run finished with profile:", conversation.agent.llm.profile_id)
+
+# Verify the persistence artefacts mention the new profile
+base_state_path = Path(conversation.state.persistence_dir or ".") / "base_state.json"
+print("base_state.json saved to:", base_state_path)
+state_payload = json.loads(base_state_path.read_text())
+print("Persisted profile entry:", state_payload["agent"]["llm"])
+
+# ---------------------------------------------------------------------------
+# Delete the in-memory conversation and reload from disk
+# ---------------------------------------------------------------------------
+print("\nReloading conversation from disk...")
+del conversation
+
+reloaded_agent = Agent(llm=registry.load_profile(alt_profile_id), tools=[])
+reloaded = Conversation(
+    agent=reloaded_agent,
+    workspace=str(workspace_dir),
+    persistence_dir=str(persistence_dir),
+    conversation_id=conversation_id,
+    visualize=False,
+)
+
+print("Reloaded conversation is using profile:", reloaded.state.agent.llm.profile_id)
+print("Active LLM model:", reloaded.state.agent.llm.model)
+
+reloaded.send_message(
+    Message(role="user", content=[TextContent(text="Confirm you survived a reload.")])
+)
+reloaded.run()
+
+print("Reloaded run finished with profile:", reloaded.state.agent.llm.profile_id)

From 12d7264d07b59244ccf8502e4a46df6805f4599f Mon Sep 17 00:00:00 2001
From: Engel Nyst
Date: Tue, 21 Oct 2025 11:28:27 +0200
Subject: [PATCH 36/37] docs: document inline mode switch rejection

---
 .../26_runtime_llm_switch.py | 26 +++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/examples/01_standalone_sdk/26_runtime_llm_switch.py b/examples/01_standalone_sdk/26_runtime_llm_switch.py
index 4e185d5505..47a5be59a9 100644
--- a/examples/01_standalone_sdk/26_runtime_llm_switch.py
+++ b/examples/01_standalone_sdk/26_runtime_llm_switch.py
@@ -123,3 +123,29 @@
 reloaded.run()
 
 print("Reloaded run finished with profile:", reloaded.state.agent.llm.profile_id)
+
+# ---------------------------------------------------------------------------
+# Part 2: Inline persistence rejects runtime switching
+# ---------------------------------------------------------------------------
+# When OPENHANDS_INLINE_CONVERSATIONS is true the conversation persists full
+# LLM payloads instead of profile references. Switching profiles would break
+# the diff reconciliation step, so the SDK deliberately rejects it with a
+# RuntimeError. We demonstrate that behaviour below.
+os.environ["OPENHANDS_INLINE_CONVERSATIONS"] = "true"
+
+inline_persistence_dir = Path("./.conversations_switch_demo_inline").resolve()
+inline_agent = Agent(llm=registry.load_profile(base_profile_id), tools=[])
+inline_conversation = Conversation(
+    agent=inline_agent,
+    workspace=str(workspace_dir),
+    persistence_dir=str(inline_persistence_dir),
+    conversation_id=uuid.uuid4(),
+    visualize=False,
+)
+
+try:
+    inline_conversation.switch_llm(alt_profile_id)
+except RuntimeError as exc:
+    print("Inline mode switch attempt rejected as expected:", exc)
+else:
+    raise AssertionError("Inline mode should have rejected the LLM switch")

From 0b075c9224b1b8b13dd6133972e2a5f5895c4fa7 Mon Sep 17 00:00:00 2001
From: Engel Nyst
Date: Thu, 6 Nov 2025 03:32:05 +0100
Subject: [PATCH 37/37] Delete .openhands/microagents/vscode.md

---
 .openhands/microagents/vscode.md | 48 --------------------------------
 1 file changed, 48 deletions(-)
 delete mode 100644 .openhands/microagents/vscode.md

diff --git a/.openhands/microagents/vscode.md b/.openhands/microagents/vscode.md
deleted file mode 100644
index 0c39f03528..0000000000
--- a/.openhands/microagents/vscode.md
+++ /dev/null
@@ -1,48 +0,0 @@
----
-name: vscode
-version: 1.1.0
-agent: CodeActAgent
-triggers:
-  - vscode
----
-
-# VSCode Quick Start for Agent SDK Repo
-
-## Open the project in a fresh VSCode window
-```bash
-code -n
-```
-
-If `code` is not on PATH, launch VSCode manually, then **File → Open...** and select the repository root.
-
-## Use the repo virtual environment
-The workspace sets the interpreter automatically via `.vscode/settings.json`:
-```json
-{
-  "python.defaultInterpreterPath": "${workspaceFolder}/.venv/bin/python",
-  "python.terminal.activateEnvironment": true,
-  "python.envFile": "${workspaceFolder}/.env"
-}
-```
-Verify inside VSCode with **Python: Select Interpreter → agent-sdk-clone/.venv**.
-
-## Run / debug example 25 (LLM profiles)
-Launch configuration lives in `.vscode/launch.json`:
-```json
-{
-  "name": "Example 25 – Debug LLM Profiles",
-  "type": "python",
-  "request": "launch",
-  "python": "${workspaceFolder}/.venv/bin/python",
-  "program": "${workspaceFolder}/examples/01_standalone_sdk/25_llm_profiles.py",
-  "console": "integratedTerminal",
-  "justMyCode": false,
-  "envFile": "${workspaceFolder}/.env"
-}
-```
-Steps:
-1. Ensure `.env` contains your `LLM_API_KEY` (and optional `LLM_PROFILE_NAME`).
-2. In VSCode, open the **Run and Debug** view.
-3. Choose **Example 25 – Debug LLM Profiles** and press **Start Debugging** (F5).
-
-This will start the script under debugpy with the repo’s virtualenv, attach breakpoints as needed, and reuse environment variables from `.env`.
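
Taken together, the PATCH 34 tests pin down the registry-side contract for runtime switching: `switch_profile(usage_id, profile_id)` keeps the usage key stable while swapping the underlying profile, emits exactly one subscriber event per effective switch, treats a repeat switch as a no-op, and raises `KeyError` for an unknown usage key or `FileNotFoundError` for a missing profile file. The sketch below illustrates that contract under stated assumptions: `ProfileSwitchSketch`, `RegistryEvent`, and the internal `_usage_to_llm` map are hypothetical names, the save/load calls follow the design doc's `model_dump`/`LLM.load_from_json` convention, and the real `LLMRegistry` implementation is not shown in this excerpt.

```python
from collections.abc import Callable
from dataclasses import dataclass
from pathlib import Path

from openhands.sdk import LLM


@dataclass
class RegistryEvent:
    """Assumed event shape: the tests above only read `event.llm`."""

    llm: LLM


class ProfileSwitchSketch:
    """Illustrative stand-in for the registry behaviour under test."""

    def __init__(self, profile_dir: Path) -> None:
        self.profile_dir = profile_dir
        # Hypothetical internal map from usage key to the active LLM.
        self._usage_to_llm: dict[str, LLM] = {}
        self._subscribers: list[Callable[[RegistryEvent], None]] = []

    def add(self, llm: LLM) -> None:
        self._usage_to_llm[llm.usage_id] = llm

    def get(self, usage_id: str) -> LLM:
        return self._usage_to_llm[usage_id]

    def subscribe(self, callback: Callable[[RegistryEvent], None]) -> None:
        self._subscribers.append(callback)

    def save_profile(self, profile_id: str, llm: LLM) -> Path:
        # Mirrors the design doc: plain JSON dump, secrets omitted by default.
        path = self.profile_dir / f"{profile_id}.json"
        path.write_text(llm.model_dump_json(exclude_none=True))
        return path

    def switch_profile(self, usage_id: str, profile_id: str) -> LLM:
        if usage_id not in self._usage_to_llm:
            raise KeyError(usage_id)  # unknown usage key
        current = self._usage_to_llm[usage_id]
        if current.profile_id == profile_id:
            return current  # repeat switch: no-op, no event
        path = self.profile_dir / f"{profile_id}.json"
        if not path.exists():
            raise FileNotFoundError(str(path))  # profile file must exist
        # Load the stored profile, then pin the runtime identity back to the
        # usage key so registry lookups and token accounting stay stable.
        switched = LLM.load_from_json(str(path)).model_copy(
            update={"usage_id": usage_id, "profile_id": profile_id}
        )
        self._usage_to_llm[usage_id] = switched
        for notify in self._subscribers:  # one event per effective switch
            notify(RegistryEvent(llm=switched))
        return switched
```

Pointed at a temporary profile directory, this sketch satisfies the same assertions as `test_switch_profile_replaces_active_llm`, `test_switch_profile_unknown_usage`, and `test_switch_profile_missing_profile` above.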