Mirror of https://github.com/rustdesk/hbb_common.git, synced 2025-07-04 00:45:32 +00:00
Merge pull request #24 from fufesou/refact/optimize_preload_peers
refact: optimize, preload peers
This commit is contained in: commit 0d7f746291

Changed file: src/config.rs (137 lines changed)
@@ -1214,7 +1214,7 @@ impl PeerConfig {
                     }
                 }
                 if store {
-                    config.store(id);
+                    config.store_(id);
                 }
                 config
             }
@@ -1232,6 +1232,10 @@ impl PeerConfig {

     pub fn store(&self, id: &str) {
         let _lock = CONFIG.read().unwrap();
+        self.store_(id);
+    }
+
+    fn store_(&self, id: &str) {
         let mut config = self.clone();
         config.password =
             encrypt_vec_or_original(&config.password, PASSWORD_ENC_VERSION, ENCRYPT_MAX_LEN);
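
The two hunks above split the old store into a public store(), which takes the global CONFIG read lock, and a private store_(), which does the actual serialization. Call sites that already hold the lock, such as load() in the first hunk, now call store_() directly instead of re-acquiring it. A minimal sketch of the pattern, with a hypothetical Settings type and LOCK static standing in for PeerConfig and CONFIG; it is an illustration, not code from this commit:

use std::sync::RwLock;

// Stand-in for the crate's global CONFIG lock; illustrative only.
static LOCK: RwLock<()> = RwLock::new(());

struct Settings;

impl Settings {
    // Public entry point: take the shared lock, then delegate to the worker.
    pub fn store(&self, id: &str) {
        let _lock = LOCK.read().unwrap();
        self.store_(id);
    }

    // Private worker: code paths that already hold the lock call this
    // directly instead of acquiring it a second time.
    fn store_(&self, id: &str) {
        let _ = id; // encrypt fields and write the per-peer file here
    }
}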
@@ -1269,17 +1273,33 @@ impl PeerConfig {
         Config::with_extension(Config::path(path))
     }

-    pub fn peers(id_filters: Option<Vec<String>>) -> Vec<(String, SystemTime, PeerConfig)> {
+    #[inline]
+    // The number of peers to load in the first round when showing the peers card list in the main window.
+    // When there are too many peers, loading all of them at once will take a long time.
+    // We can load them in two rounds: the first round loads the first 100 peers, and the second round loads the rest.
+    // Then the UI will show the first 100 peers first, and the rest will be loaded and shown later.
+    pub fn get_loading_batch_count() -> usize {
+        100
+    }
+
+    pub fn get_vec_id_modified_time_path(
+        id_filters: &Option<Vec<String>>,
+    ) -> Vec<(String, SystemTime, PathBuf)> {
         if let Ok(peers) = Config::path(PEERS).read_dir() {
-            if let Ok(peers) = peers
-                .map(|res| res.map(|e| e.path()))
-                .collect::<Result<Vec<_>, _>>()
-            {
-                let mut peers: Vec<_> = peers
-                    .iter()
-                    .filter(|p| {
-                        p.is_file()
+            let mut vec_id_modified_time_path = peers
+                .into_iter()
+                .filter_map(|res| match res {
+                    Ok(res) => {
+                        let p = res.path();
+                        if p.is_file()
                             && p.extension().map(|p| p.to_str().unwrap_or("")) == Some("toml")
+                        {
+                            Some(p)
+                        } else {
+                            None
+                        }
+                    }
+                    _ => None,
+                })
                 .map(|p| {
                     let id = p
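
The comments above describe the two-round loading strategy: the list of peer files is sorted by modification time, the UI first renders the newest get_loading_batch_count() peers, and the rest are loaded afterwards. A sketch of how a caller could drive this with get_vec_id_modified_time_path() and batch_peers() (added further down in this commit); load_peers_in_two_rounds and render_peer_cards are hypothetical names that only illustrate the intended call sequence:

use std::time::SystemTime;

use hbb_common::config::PeerConfig;

fn load_peers_in_two_rounds(render_peer_cards: impl Fn(&[(String, SystemTime, PeerConfig)])) {
    // One cheap directory scan, newest peers first.
    let all = PeerConfig::get_vec_id_modified_time_path(&None);

    // Round 1: `to = None` makes batch_peers stop after get_loading_batch_count()
    // entries, so the card list appears quickly even with thousands of peer files.
    let (head, next) = PeerConfig::batch_peers(&all, 0, None);
    render_peer_cards(&head);

    // Round 2: load and show everything that is left.
    let (rest, _) = PeerConfig::batch_peers(&all, next, Some(all.len()));
    render_peer_cards(&rest);
}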
@@ -1289,8 +1309,8 @@ impl PeerConfig {
                         .to_owned();

                     let id_decoded_string = if id.starts_with("base64_") && id.len() != 7 {
-                        let id_decoded = base64::decode(&id[7..], base64::Variant::Original)
-                            .unwrap_or_default();
+                        let id_decoded =
+                            base64::decode(&id[7..], base64::Variant::Original).unwrap_or_default();
                         String::from_utf8_lossy(&id_decoded).as_ref().to_owned()
                     } else {
                         id
@@ -1298,26 +1318,103 @@ impl PeerConfig {
                     (id_decoded_string, p)
                 })
                 .filter(|(id, _)| {
-                    let Some(filters) = &id_filters else {
+                    let Some(filters) = id_filters else {
                         return true;
                     };
                     filters.contains(id)
                 })
                 .map(|(id, p)| {
-                    let t = crate::get_modified_time(p);
+                    let t = crate::get_modified_time(&p);
+                    (id, t, p)
+                })
+                .collect::<Vec<_>>();
+            vec_id_modified_time_path.sort_unstable_by(|a, b| b.1.cmp(&a.1));
+            vec_id_modified_time_path
+        } else {
+            vec![]
+        }
+    }
+
+    #[inline]
+    async fn preload_file_async(path: PathBuf) {
+        let _ = tokio::fs::File::open(path).await;
+    }
+
+    #[tokio::main(flavor = "current_thread")]
+    async fn preload_peers_async() {
+        let now = std::time::Instant::now();
+        let vec_id_modified_time_path = Self::get_vec_id_modified_time_path(&None);
+        let batch_count = 300;
+        let total_count = vec_id_modified_time_path.len();
+        let mut futs = vec![];
+        for (_, _, path) in vec_id_modified_time_path.into_iter() {
+            futs.push(Self::preload_file_async(path));
+            if futs.len() >= batch_count {
+                futures::future::join_all(futs).await;
+                futs = vec![];
+            }
+        }
+        if !futs.is_empty() {
+            futures::future::join_all(futs).await;
+        }
+        log::info!(
+            "Preload peers done in {:?}, batch_count: {}, total: {}",
+            now.elapsed(),
+            batch_count,
+            total_count
+        );
+    }
+
+    // We have to preload all peers in a background thread.
+    // Because we find that opening files for the first time after the system (Windows) boots can be very slow, up to 200~400 ms.
+    // The reason is that Windows has the "Microsoft Defender Antivirus Service" running in the background, which scans a file when it is opened for the first time.
+    // So we preload all peers in a background thread to avoid the delay when the files are first opened.
+    // We can temporarily stop the "Microsoft Defender Antivirus Service" or add the folder to the whitelist to verify this. But don't do this in the release version.
+    pub fn preload_peers() {
+        std::thread::spawn(|| {
+            Self::preload_peers_async();
+        });
+    }
+
+    pub fn peers(id_filters: Option<Vec<String>>) -> Vec<(String, SystemTime, PeerConfig)> {
+        let vec_id_modified_time_path = Self::get_vec_id_modified_time_path(&id_filters);
+        Self::batch_peers(
+            &vec_id_modified_time_path,
+            0,
+            Some(vec_id_modified_time_path.len()),
+        )
+        .0
+    }
+
+    pub fn batch_peers(
+        all: &Vec<(String, SystemTime, PathBuf)>,
+        from: usize,
+        to: Option<usize>,
+    ) -> (Vec<(String, SystemTime, PeerConfig)>, usize) {
+        if from >= all.len() {
+            return (vec![], 0);
+        }
+
+        let to = match to {
+            Some(to) => to.min(all.len()),
+            None => {
+                let batch_count = Self::get_loading_batch_count();
+                (from + batch_count).min(all.len())
+            }
+        };
+
+        let peers: Vec<_> = all[from..to]
+            .iter()
+            .map(|(id, t, p)| {
                 let c = PeerConfig::load(&id);
                 if c.info.platform.is_empty() {
                     fs::remove_file(p).ok();
                 }
-                (id, t, c)
+                (id.clone(), t.clone(), c)
             })
             .filter(|p| !p.2.info.platform.is_empty())
             .collect();
-            peers.sort_unstable_by(|a, b| b.1.cmp(&a.1));
-            return peers;
-            }
-        }
-        Default::default()
+        (peers, to)
     }

     pub fn exists(id: &str) -> bool {
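
The comment block in the last hunk explains the motivation for preload_peers(): on Windows, the first open of each peer file after boot can take 200~400 ms while Microsoft Defender scans it, so the files are opened once in a background thread ahead of time. A sketch of where an application might call it during startup; the main() shown is illustrative and not part of this commit:

use hbb_common::config::PeerConfig;

fn main() {
    // Kick off the warm-up as early as possible. preload_peers() spawns its own
    // thread and returns immediately, so startup is not blocked.
    PeerConfig::preload_peers();

    // ... continue with normal startup; later calls to PeerConfig::peers() or
    // PeerConfig::batch_peers() then open files that have already been scanned.
}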