Conversation
| let result = match cmd { | ||
| Command::Initialize(args) => self.handle_initialize(req, args), | ||
| Command::Attach(args) => self.handle_attach(req, args), | ||
| Command::Disconnect(args) => self.handle_disconnect(req, args), | ||
| Command::Restart(args) => self.handle_restart(req, args), | ||
| Command::Threads => self.handle_threads(req), | ||
| Command::SetBreakpoints(args) => self.handle_set_breakpoints(req, args), | ||
| Command::SetExceptionBreakpoints(args) => { | ||
| self.handle_set_exception_breakpoints(req, args) | ||
| }, | ||
| Command::StackTrace(args) => self.handle_stacktrace(req, args), | ||
| Command::Source(args) => self.handle_source(req, args), | ||
| Command::Scopes(args) => self.handle_scopes(req, args), | ||
| Command::Variables(args) => self.handle_variables(req, args), | ||
| Command::Evaluate(args) => self.handle_evaluate(req, args), | ||
| Command::Initialize(args) => self.handle_initialize(args), |
There was a problem hiding this comment.
I would be extremely tempted to leave Command::Evaluate(args) in here with a specific panic of
panic!("Expected `Evaluate` to be handled via transport-specific delivery: {args}")
Which gets to take the place of your doc comment in a more meaningful way
There was a problem hiding this comment.
Even if we added "fill-in" support to the Jupyter handler right away, I would be very uneasy about panicking here.
I think returning a DAP error makes more sense and is more defensive.
| let server = Server::new(reader, writer); | ||
| let output = server.output.clone(); |
There was a problem hiding this comment.
Why is the output held separately? 🤔
There was a problem hiding this comment.
This is pre-existing. It's a shareable handle that can be handed off separately. E.g. signature:
pub struct DapServer<R: Read, W: Write> {
    server: Server<R, W>,
    pub output: Arc<Mutex<ServerOutput<W>>>,
}
| fn deliver(&mut self, output: DapOutput) -> bool { | ||
| if self.respond(output.response).log_err().is_none() { | ||
| return false; | ||
| } | ||
| let rsp = req.success(ResponseBody::SetExceptionBreakpoints( | ||
| SetExceptionBreakpointsResponse { | ||
| // This field is only useful for reporting problems with | ||
| // individual filters. Since we always accept all filters, | ||
| // `None` is fine here. | ||
| breakpoints: None, | ||
| }, | ||
| )); | ||
| self.respond(rsp) | ||
| } | ||
|
|
||
| fn handle_stacktrace( | ||
| &mut self, | ||
| req: Request, | ||
| args: StackTraceArguments, | ||
| ) -> Result<(), ServerError> { | ||
| let stack = { | ||
| let state = self.state.lock().unwrap(); | ||
| let fallback_sources = &state.fallback_sources; | ||
| match &state.stack { | ||
| Some(stack) => stack | ||
| .iter() | ||
| .map(|frame| into_dap_frame(frame, fallback_sources)) | ||
| .collect(), | ||
| _ => vec![], | ||
| for event in output.dap_events { | ||
| if self.send_event(event).log_err().is_none() { | ||
| return false; | ||
| } | ||
| }; | ||
|
|
||
| // Slice the stack as requested | ||
| let n_usize = stack.len(); | ||
|
|
||
| let start_frame = args.start_frame.unwrap_or(0); | ||
| let Ok(start) = usize::try_from(start_frame) else { | ||
| let rsp = req.error(&format!("Invalid start_frame: {start_frame}")); | ||
| return self.respond(rsp); | ||
| }; | ||
| let start = std::cmp::min(start, n_usize); | ||
|
|
||
| let end = if let Some(levels) = args.levels { | ||
| let Ok(levels) = usize::try_from(levels) else { | ||
| let rsp = req.error(&format!("Invalid levels: {levels}")); | ||
| return self.respond(rsp); | ||
| }; | ||
| std::cmp::min(start.saturating_add(levels), n_usize) | ||
| } else { | ||
| n_usize | ||
| }; | ||
|
|
||
| let Ok(total_frames) = i64::try_from(n_usize) else { | ||
| let rsp = req.error(&format!("Stack frame count overflows i64: {n_usize}")); | ||
| return self.respond(rsp); | ||
| }; | ||
| let stack = stack[start..end].to_vec(); | ||
| } | ||
|
|
||
| let rsp = req.success(ResponseBody::StackTrace(StackTraceResponse { | ||
| stack_frames: stack, | ||
| total_frames: Some(total_frames), | ||
| })); | ||
| for event in output.console_events { | ||
| self.handle_console_event(event); | ||
| } | ||
|
|
||
| self.respond(rsp) | ||
| true | ||
| } |
There was a problem hiding this comment.
So an inability to `respond()` or `send_event()` is an immediate cause for `serve()` to also bail and return false, causing the whole DAP to disconnect. I think that feels right.
There was a problem hiding this comment.
But notably, `handle_console_event` could currently fail without bringing down the whole DAP.
37d682d to
e9eef70
Compare
Progress towards posit-dev/positron#12104
We're going to have two handlers for DAP messages, one through the TCP connection for Console debugging, and one through a Jupyter connection (Control socket) for Notebook debugging.
This PR abstracts out DAP handling to make it agnostic to the transport: a
`DapHandler` takes a DAP message, handles it, and returns a `Result<DapHandlerOutput>`. `dispatch()` is in charge of transforming error cases into a DAP response (success or error). The caller (the transport layer) then propagates it via the appropriate transport. `Evaluate` remains tied to the TCP transport because it needs to run R code asynchronously, and Jupyter's Control socket requires quick synchronous replies.
This PR is pure reorganisation, no functional difference.