fix: moved select_buffer out of client

due to instance mutexing it held the lock while awaiting events,
which is undesirable!
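
For context on the bug: a tokio::sync::Mutex guard that stays alive across an .await keeps every other task locked out of the client until the awaited event fires. A minimal sketch of that failure mode, with hypothetical names (not this crate's actual types):

use std::sync::Arc;
use tokio::sync::Mutex;

struct ClientState;

impl ClientState {
	// stand-in for select_buffer(): an await that may block indefinitely
	async fn select_buffer(&self) {
		std::future::pending::<()>().await;
	}
}

// hypothetical shared handle, mirroring the instance mutexing described above
struct SharedClient(Arc<Mutex<ClientState>>);

impl SharedClient {
	async fn select_buffer(&self) {
		let guard = self.0.lock().await;
		// the guard lives across this await point: every other lock() caller
		// is stuck until a buffer event arrives, which may be never
		guard.select_buffer().await;
	}
}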
əlemi 2023-11-28 02:22:21 +01:00
parent 1265f716d6
commit d562e406f2
4 changed files with 38 additions and 56 deletions

@@ -150,46 +150,4 @@ impl Client {
			Err(Error::InvalidState { msg: "join a workspace first".into() })
		}
	}

	/// invoke .poll() on all buffer controllers and wait; returns the name of the first one ready
	///
	/// this spawns a task for each buffer controller, each blocked in a poll() call. as soon as
	/// one finishes, all other tasks are canceled and the name of the ready controller is
	/// returned. then just call client.get_buffer(name).try_recv()
	///
	/// this is not super efficient as of now but has room for improvement. using this API may
	/// provide significant improvements on the editor side
	pub async fn select_buffer(&self) -> crate::Result<String> {
		match &self.workspace {
			None => Err(Error::InvalidState { msg: "join workspace first".into() }),
			Some(workspace) => {
				let (tx, mut rx) = mpsc::unbounded_channel();
				let mut tasks = Vec::new();
				for (id, buffer) in workspace.buffers.iter() {
					let _tx = tx.clone();
					let _id = id.clone();
					let _buffer = buffer.clone();
					tasks.push(tokio::spawn(async move {
						match _buffer.poll().await {
							Ok(()) => _tx.send(Ok(_id)),
							Err(_) => _tx.send(Err(Error::Channel { send: true })),
						}
					}))
				}
				loop {
					match rx.recv().await {
						None => return Err(Error::Channel { send: false }),
						Some(Err(_)) => continue, // TODO log errors
						Some(Ok(x)) => {
							for t in tasks {
								t.abort();
							}
							return Ok(x.clone());
						},
					}
				}
			}
		}
	}
}
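
The doc comment above already flags the spawn-and-abort scheme as inefficient. One possible refinement, sketched here under the assumptions that the futures crate is available and that poll() keeps its crate::Result<()> signature (this is not part of the commit), would be to race the polls directly so the losing futures are simply dropped:

use futures::future::select_all;

pub async fn select_buffer_racing(
	buffers: &[std::sync::Arc<crate::buffer::Controller>],
) -> crate::Result<String> {
	// select_all panics on an empty iterator, so bail out early
	if buffers.is_empty() {
		return Err(crate::Error::Channel { send: false });
	}
	let polls = buffers.iter().map(|b| {
		let b = b.clone();
		Box::pin(async move {
			b.poll().await
				.map(|()| b.name.clone())
				.map_err(|_| crate::Error::Channel { send: true })
		})
	});
	// the first future to complete wins; the rest are dropped on return,
	// so no explicit abort() bookkeeping is needed
	let (first, _index, _rest) = select_all(polls).await;
	first
}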

@@ -120,16 +120,6 @@ pub mod a_sync {
				.disconnect_buffer(path);
			Ok(res)
		}

		pub async fn select_buffer(&self) -> crate::Result<String> {
			let res = self.client
				.lock().await
				.as_ref()
				.ok_or(Error::InvalidState { msg: "connect first".into() })?
				.select_buffer()
				.await?;
			Ok(res)
		}
	}
}
@@ -218,9 +208,5 @@ pub mod sync {
		pub fn disconnect_buffer(&self, path: &str) -> crate::Result<bool> {
			self.if_client(|c| c.disconnect_buffer(path))
		}

		pub fn select_buffer(&self) -> crate::Result<String> {
			self.if_client(|c| self.rt().block_on(c.select_buffer()))?
		}
	}
}
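
With both wrappers gone, a call site that owns the instance mutex is expected to clone the controller handles inside a short lock scope and only then await. A hedged sketch of what that might look like (the buffers map accessor and the Option-shaped fields are assumptions read off the surrounding diff):

// hypothetical call site: hold the lock only long enough to clone the
// Arc handles, then poll without it
let buffers: Vec<_> = {
	let client = self.client.lock().await;
	client
		.as_ref()
		.ok_or(Error::InvalidState { msg: "connect first".into() })?
		.workspace
		.as_ref()
		.ok_or(Error::InvalidState { msg: "join workspace first".into() })?
		.buffers
		.values()
		.cloned()
		.collect()
}; // guard dropped here, before any long await
let ready = crate::tools::select_buffer(&buffers).await?;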

@@ -148,6 +148,8 @@ pub mod errors;
#[cfg(feature = "client")]
pub mod client;
pub mod tools;
/// client wrapper to handle memory persistence
#[cfg(feature = "client")]
pub mod instance;

src/tools.rs (new file, +36)

@@ -0,0 +1,36 @@
use crate::{Error, api::Controller};

/// invoke .poll() on all buffer controllers and wait; returns the name of the first one ready
///
/// this spawns a task for each buffer controller, each blocked in a poll() call. as soon as
/// one finishes, all other tasks are canceled and the name of the ready controller is
/// returned. then just call client.get_buffer(name).try_recv()
///
/// this is not super efficient as of now but has room for improvement. using this API may
/// provide significant improvements on the editor side
pub async fn select_buffer(buffers: &[std::sync::Arc<crate::buffer::Controller>]) -> crate::Result<String> {
	let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel();
	let mut tasks = Vec::new();
	for buffer in buffers {
		let _tx = tx.clone();
		let _buffer = buffer.clone();
		tasks.push(tokio::spawn(async move {
			match _buffer.poll().await {
				Ok(()) => _tx.send(Ok(_buffer.name.clone())),
				Err(_) => _tx.send(Err(Error::Channel { send: true })),
			}
		}))
	}
	loop {
		match rx.recv().await {
			None => return Err(Error::Channel { send: false }),
			Some(Err(_)) => continue, // TODO log errors maybe?
			Some(Ok(x)) => {
				for t in tasks {
					t.abort();
				}
				return Ok(x.clone());
			},
		}
	}
}
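
To see the pattern in isolation, here is a self-contained demo of the same first-ready-wins scheme using only tokio, with sleeps standing in for poll() (the demo names are made up, not part of the crate):

use tokio::sync::mpsc;
use tokio::time::{sleep, Duration};

#[tokio::main]
async fn main() {
	// three fake buffers that become "ready" after different delays
	let sources = vec![("alpha", 300u64), ("beta", 50), ("gamma", 200)];
	let (tx, mut rx) = mpsc::unbounded_channel();
	let mut tasks = Vec::new();
	for (name, delay_ms) in sources {
		let tx = tx.clone();
		tasks.push(tokio::spawn(async move {
			sleep(Duration::from_millis(delay_ms)).await; // stand-in for poll()
			let _ = tx.send(name);
		}));
	}
	// first sender to fire wins; abort the losers like select_buffer does
	let first = rx.recv().await.expect("all senders dropped");
	for t in tasks {
		t.abort();
	}
	println!("first ready: {first}"); // prints "first ready: beta"
}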