Move to Rocket!

2024-10-03 10:34:56 +02:00
parent 04aea8558f
commit 25df2642e9
5 changed files with 293 additions and 276 deletions

View File

@@ -38,17 +38,19 @@ members = [
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 [dependencies]
-ws2 = "0.2.5"
-log2 = "0.1.11"
-actix-web = "4"
-actix-files = "0.6.6"
+rocket = { git = "https://github.com/rwf2/Rocket/", rev = "3bf9ef02d6e803fe9f753777f5a829dda6d2453d"}
+# contrib/db_pools
+rocket_db_pools = { git = "https://github.com/rwf2/Rocket/", rev = "3bf9ef02d6e803fe9f753777f5a829dda6d2453d", default-features = false, features = ["diesel_mysql"] }
 # mariadb-dev on Alpine
 # "mysqlclient-src" "mysql_backend"
-diesel = { version = "2.2.0", default-features = false, features = ["mysql", "chrono", "uuid", "r2d2"] }
+diesel = { version = "^2", default-features = false, features = ["mysql", "chrono", "uuid"] }
+ws = { package = "rocket_ws", version = "0.1.1" }
 dns-ptr-resolver = {git = "https://github.com/wdes/dns-ptr-resolver.git"}
 hickory-resolver = { version = "0.24.1", default-features = false, features = ["tokio-runtime", "dns-over-h3", "dns-over-https", "dns-over-quic"]}
 chrono = "0.4.38"
 uuid = { version = "1.10.0", default-features = false, features = ["v7", "serde", "std"] }
-cidr = "0.2.2"
-serde = "1.0.210"
+cidr = "0.3.0"
+serde = { version = "1.0.210", features = ["derive"] }
 serde_json = "1.0.128"
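
For context, the rocket + rocket_db_pools + diesel_mysql combination pulled in above is typically wired up along the lines of the sketch below. This is a minimal illustration, not this project's actual code; the SnowDb and snow_scanner_db names are the ones this commit introduces further down, and the database URL is expected to come from Rocket's figment configuration (for example a [default.databases.snow_scanner_db] entry in Rocket.toml, or the programmatic merge shown later in this diff).

#[macro_use]
extern crate rocket;

use rocket_db_pools::diesel::MysqlPool;
use rocket_db_pools::{Connection, Database};

// A named pool: the "snow_scanner_db" key ties this struct to the
// matching entry in Rocket's `databases` configuration.
#[derive(Database)]
#[database("snow_scanner_db")]
struct SnowDb(MysqlPool);

// `Connection<SnowDb>` is a request guard that checks a connection out of
// the pool for the duration of the request.
#[get("/ping")]
async fn ping(_db: Connection<SnowDb>) -> &'static str {
    "pong"
}

#[launch]
fn rocket() -> _ {
    rocket::build().attach(SnowDb::init()).mount("/", routes![ping])
}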

View File

@@ -1,28 +1,38 @@
-use actix_files::NamedFile;
-use actix_web::error::ErrorInternalServerError;
-use actix_web::http::header::ContentType;
-use actix_web::{web, App, HttpRequest, HttpResponse, HttpServer};
-use log2::*;
 use chrono::{NaiveDateTime, Utc};
-use diesel::deserialize::{self};
-use diesel::mysql::{Mysql, MysqlValue};
-use diesel::sql_types::Text;
-use diesel::r2d2::ConnectionManager;
-use diesel::r2d2::Pool;
+
+#[macro_use]
+extern crate rocket;
+
+use rocket::{fairing::AdHoc, Build, Rocket, State};
+use rocket_db_pools::{
+    rocket::{
+        figment::{
+            util::map,
+            value::{Map, Value},
+        },
+        form::Form,
+        fs::NamedFile,
+        launch, Responder,
+    },
+    Connection,
+};
+use rocket_db_pools::diesel::mysql::{Mysql, MysqlValue};
+use rocket_db_pools::diesel::serialize::IsNull;
+use rocket_db_pools::diesel::sql_types::Text;
+use rocket_db_pools::diesel::{deserialize, serialize};
+use rocket_db_pools::diesel::MysqlPool;
+use rocket_db_pools::Database;
 use worker::detection::{detect_scanner, get_dns_client, Scanners};

-use std::collections::HashMap;
-use std::io::Write;
+use std::{io::Write, net::SocketAddr};
 use std::path::PathBuf;
 use std::{env, fmt};
 use uuid::Uuid;
 use serde::{Deserialize, Deserializer, Serialize};
-use diesel::serialize::IsNull;
-use diesel::{serialize, MysqlConnection};
 use dns_ptr_resolver::{get_ptr, ResolvedResult};

 pub mod models;
@@ -31,10 +41,12 @@ pub mod server;
 pub mod worker;

 use crate::models::*;
-use crate::server::Server;

-/// Short-hand for the database pool type to use throughout the app.
-type DbPool = Pool<ConnectionManager<MysqlConnection>>;
+#[derive(Database)]
+#[database("snow_scanner_db")]
+pub struct SnowDb(MysqlPool);
+
+type DbConn = Connection<SnowDb>;

 trait IsStatic {
     fn is_static(self: &Self) -> bool;
@@ -103,16 +115,29 @@ impl serialize::ToSql<Text, Mysql> for Scanners {
 impl deserialize::FromSql<Text, Mysql> for Scanners {
     fn from_sql(bytes: MysqlValue) -> deserialize::Result<Self> {
         let value = <String as deserialize::FromSql<Text, Mysql>>::from_sql(bytes)?;
-        match &value as &str {
-            "stretchoid" => Ok(Scanners::Stretchoid),
-            "binaryedge" => Ok(Scanners::Binaryedge),
-            "internet-measurement.com" => Ok(Scanners::InternetMeasurement),
-            _ => Err("Unrecognized enum variant".into()),
+        let value = &value as &str;
+        let value: Result<Scanners, String> = value.try_into();
+        match value {
+            Ok(d) => Ok(d),
+            Err(err) => Err(err.into()),
         }
     }
 }

-async fn handle_ip(pool: web::Data<DbPool>, ip: String) -> Result<Scanner, Option<ResolvedResult>> {
+impl TryInto<Scanners> for &str {
+    type Error = String;
+
+    fn try_into(self) -> Result<Scanners, Self::Error> {
+        match self {
+            "stretchoid" => Ok(Scanners::Stretchoid),
+            "binaryedge" => Ok(Scanners::Binaryedge),
+            "internet-measurement.com" => Ok(Scanners::InternetMeasurement),
+            value => Err(format!("Invalid value: {value}")),
+        }
+    }
+}
+
+async fn handle_ip(mut conn: DbConn, ip: String) -> Result<Scanner, Option<ResolvedResult>> {
     let query_address = ip.parse().expect("To parse");

     let ptr_result: Result<ResolvedResult, ()> = std::thread::spawn(move || {
@@ -135,17 +160,11 @@ async fn handle_ip(pool: web::Data<DbPool>, ip: String) -> Result<Scanner, Optio
     match detect_scanner(&result) {
         Ok(Some(scanner_type)) => {
-            // use web::block to offload blocking Diesel queries without blocking server thread
-            web::block(move || {
-                // note that obtaining a connection from the pool is also potentially blocking
-                let conn = &mut pool.get().unwrap();
-                match Scanner::find_or_new(query_address, scanner_type, result.result, conn) {
-                    Ok(scanner) => Ok(scanner),
-                    Err(_) => Err(None),
-                }
-            })
-            .await
-            .unwrap()
+            match Scanner::find_or_new(query_address, scanner_type, result.result, &mut conn).await
+            {
+                Ok(scanner) => Ok(scanner),
+                Err(_) => Err(None),
+            }
         }

         Ok(None) => Err(None),
@@ -176,55 +195,63 @@ static FORM: &str = r#"
 </html>
 "#;

-#[derive(Serialize, Deserialize)]
-pub struct ScanParams {
-    username: String,
-    ips: String,
+#[derive(FromForm, Serialize, Deserialize)]
+pub struct ScanParams<'r> {
+    username: &'r str,
+    ips: &'r str,
 }

-async fn handle_scan(pool: web::Data<DbPool>, params: web::Form<ScanParams>) -> HttpResponse {
-    if params.username.len() < 4 {
-        return plain_contents("Invalid username".to_string());
+#[derive(Responder)]
+enum MultiReply {
+    #[response(status = 500, content_type = "text")]
+    Error(ServerError),
+    #[response(status = 422)]
+    FormError(PlainText),
+    #[response(status = 404)]
+    NotFound(String),
+    #[response(status = 200)]
+    Content(HtmlContents),
+    #[response(status = 200)]
+    FileContents(NamedFile),
+}
+
+#[post("/scan", data = "<form>")]
+async fn handle_scan(mut db: DbConn, form: Form<ScanParams<'_>>) -> MultiReply {
+    if form.username.len() < 4 {
+        return MultiReply::FormError(PlainText("Invalid username".to_string()));
     }

     let task_group_id: Uuid = Uuid::now_v7();

-    // use web::block to offload blocking Diesel queries without blocking server thread
-    let _ = web::block(move || {
-        // note that obtaining a connection from the pool is also potentially blocking
-        let conn = &mut pool.get().unwrap();
-        for ip in params.ips.lines() {
-            let scan_task = ScanTask {
-                task_group_id: task_group_id.to_string(),
-                cidr: ip.to_string(),
-                created_by_username: params.username.clone(),
-                created_at: Utc::now().naive_utc(),
-                updated_at: None,
-                started_at: None,
-                still_processing_at: None,
-                ended_at: None,
-            };
-            match scan_task.save(conn) {
-                Ok(_) => error!("Added {}", ip.to_string()),
-                Err(err) => error!("Not added: {:?}", err),
-            }
-        }
-    })
-    .await
-    // map diesel query errors to a 500 error response
-    .map_err(|err| ErrorInternalServerError(err));
+    for ip in form.ips.lines() {
+        let scan_task = ScanTask {
+            task_group_id: task_group_id.to_string(),
+            cidr: ip.to_string(),
+            created_by_username: form.username.to_string(),
+            created_at: Utc::now().naive_utc(),
+            updated_at: None,
+            started_at: None,
+            still_processing_at: None,
+            ended_at: None,
+        };
+        match scan_task.save(&mut db).await {
+            Ok(_) => error!("Added {}", ip.to_string()),
+            Err(err) => error!("Not added: {:?}", err),
+        }
+    }

-    html_contents(format!("New task added: {} !", task_group_id))
+    MultiReply::Content(HtmlContents(format!("New task added: {} !", task_group_id)))
 }

-#[derive(Serialize, Deserialize)]
+#[derive(FromForm, Serialize, Deserialize)]
 pub struct ReportParams {
     ip: String,
 }

-async fn handle_report(pool: web::Data<DbPool>, params: web::Form<ReportParams>) -> HttpResponse {
-    match handle_ip(pool, params.ip.clone()).await {
-        Ok(scanner) => html_contents(match scanner.scanner_name {
+#[post("/report", data = "<form>")]
+async fn handle_report(db: DbConn, form: Form<ReportParams>) -> HtmlContents {
+    match handle_ip(db, form.ip.clone()).await {
+        Ok(scanner) => HtmlContents(match scanner.scanner_name {
             Scanners::Binaryedge => match scanner.last_checked_at {
                 Some(date) => format!(
                     "Reported a binaryedge ninja! <b>{}</b> known as {} since {date}.",
@@ -252,9 +279,9 @@ async fn handle_report(pool: web::Data<DbPool>, params: web::Form<ReportParams>)
             _ => format!("Not supported"),
         }),
-        Err(ptr_result) => html_contents(format!(
+        Err(ptr_result) => HtmlContents(format!(
             "The IP <b>{}</a> resolved as {:?} did not match known scanners patterns.",
-            params.ip,
+            form.ip,
             match ptr_result {
                 Some(res) => res.result,
                 None => None,
@@ -267,87 +294,86 @@ struct SecurePath {
     pub data: String,
 }

-impl<'de> Deserialize<'de> for SecurePath {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        let s = <String>::deserialize(deserializer)?;
+impl TryInto<SecurePath> for &str {
+    type Error = String;
+
+    fn try_into(self) -> Result<SecurePath, Self::Error> {
         // A-Z a-z 0-9
         // . - _
-        if s.chars()
+        if self
+            .chars()
             .all(|c| c.is_ascii_alphanumeric() || c == '.' || c == '-' || c == '_')
         {
-            return Ok(SecurePath { data: s });
+            return Ok(SecurePath {
+                data: self.to_string(),
+            });
         }
-        Err(serde::de::Error::custom(format!(
-            "Invalid value: {}",
-            s.to_string()
-        )))
+        Err(format!("Invalid value: {}", self.to_string()))
     }
 }

+#[get("/collections/<vendor_name>/<file_name>")]
 async fn handle_get_collection(
-    path: web::Path<(SecurePath, SecurePath)>,
-    req: HttpRequest,
-    static_data_dir: actix_web::web::Data<String>,
-) -> actix_web::Result<HttpResponse> {
-    let (vendor_name, file_name) = path.into_inner();
+    vendor_name: &str,
+    file_name: &str,
+    app_configs: &State<AppConfigs>,
+) -> MultiReply {
+    let vendor_name: Result<SecurePath, String> = vendor_name.try_into();
+    let vendor_name = match vendor_name {
+        Ok(secure_path) => secure_path.data,
+        Err(err) => return MultiReply::FormError(PlainText(err.to_string())),
+    };
+    let file_name: Result<SecurePath, String> = file_name.try_into();
+    let file_name = match file_name {
+        Ok(secure_path) => secure_path.data,
+        Err(err) => return MultiReply::FormError(PlainText(err.to_string())),
+    };
     let mut path: PathBuf = PathBuf::new();
-    let static_data_dir: String = static_data_dir.into_inner().to_string();
+    let static_data_dir: String = app_configs.static_data_dir.clone();
     path.push(static_data_dir);
     path.push("collections");
-    path.push(vendor_name.data);
-    path.push(file_name.data);
-    match NamedFile::open(path) {
-        Ok(file) => Ok(file.into_response(&req)),
-        Err(err) => Ok(HttpResponse::NotFound()
-            .content_type(ContentType::plaintext())
-            .body(format!("File not found: {}.\n", err))),
+    path.push(vendor_name);
+    path.push(file_name);
+    match NamedFile::open(path).await {
+        Ok(file) => MultiReply::FileContents(file),
+        Err(err) => MultiReply::NotFound(err.to_string()),
     }
 }

+#[get("/scanners/<scanner_name>")]
 async fn handle_list_scanners(
-    pool: web::Data<DbPool>,
-    path: web::Path<Scanners>,
-    req: HttpRequest,
-    static_data_dir: actix_web::web::Data<String>,
-) -> actix_web::Result<HttpResponse> {
-    let scanner_name = path.into_inner();
-    let static_data_dir: String = static_data_dir.into_inner().to_string();
+    mut db: DbConn,
+    scanner_name: &str,
+    app_configs: &State<AppConfigs>,
+) -> MultiReply {
+    let static_data_dir: String = app_configs.static_data_dir.clone();
+    let scanner_name: Result<Scanners, String> = scanner_name.try_into();
+    let scanner_name = match scanner_name {
+        Ok(scanner_name) => scanner_name,
+        Err(err) => return MultiReply::FormError(PlainText(err.to_string())),
+    };
     if scanner_name.is_static() {
         let mut path: PathBuf = PathBuf::new();
         path.push(static_data_dir);
         path.push("scanners");
         path.push(scanner_name.to_string());

-        return match NamedFile::open(path) {
-            Ok(file) => Ok(file.into_response(&req)),
-            Err(err) => Ok(HttpResponse::NotFound()
-                .content_type(ContentType::plaintext())
-                .body(format!("File not found: {}.\n", err))),
+        return match NamedFile::open(path).await {
+            Ok(file) => MultiReply::FileContents(file),
+            Err(err) => MultiReply::NotFound(err.to_string()),
         };
     }

-    // use web::block to offload blocking Diesel queries without blocking server thread
-    let scanners_list = web::block(move || {
-        // note that obtaining a connection from the pool is also potentially blocking
-        let conn = &mut pool.get().unwrap();
-        match Scanner::list_names(scanner_name, conn) {
-            Ok(data) => Ok(data),
-            Err(err) => Err(err),
-        }
-    })
-    .await
-    // map diesel query errors to a 500 error response
-    .map_err(|err| ErrorInternalServerError(err))
-    .unwrap();
+    let scanners_list = match Scanner::list_names(scanner_name, &mut db).await {
+        Ok(data) => Ok(data),
+        Err(err) => Err(err),
+    };

     if let Ok(scanners) = scanners_list {
-        Ok(html_contents(scanners.join("\n")))
+        MultiReply::Content(HtmlContents(scanners.join("\n")))
     } else {
-        Ok(server_error("Unable to list scanners".to_string()))
+        MultiReply::Error(ServerError("Unable to list scanners".to_string()))
     }
 }
@@ -376,23 +402,16 @@ static SCAN_TASKS_FOOT: &str = r#"
 </html>
 "#;

-async fn handle_list_scan_tasks(pool: web::Data<DbPool>) -> HttpResponse {
+#[get("/scan/tasks")]
+async fn handle_list_scan_tasks(mut db: Connection<SnowDb>) -> MultiReply {
     let mut html_data: Vec<String> = vec![SCAN_TASKS_HEAD.to_string()];

-    // use web::block to offload blocking Diesel queries without blocking server thread
-    let scan_tasks_list = web::block(move || {
-        // note that obtaining a connection from the pool is also potentially blocking
-        let conn = &mut pool.get().unwrap();
-        match ScanTask::list(conn) {
-            Ok(data) => Ok(data),
-            Err(err) => Err(err),
-        }
-    })
-    .await
-    // map diesel query errors to a 500 error response
-    .map_err(|err| ErrorInternalServerError(err));
+    let scan_tasks_list = match ScanTask::list(&mut db).await {
+        Ok(data) => Ok(data),
+        Err(err) => Err(err),
+    };

-    if let Ok(scan_tasks) = scan_tasks_list.unwrap() {
+    if let Ok(scan_tasks) = scan_tasks_list {
         for row in scan_tasks {
             let cidr: String = row.cidr;
             let started_at: Option<NaiveDateTime> = row.started_at;
@@ -413,69 +432,81 @@ async fn handle_list_scan_tasks(pool: web::Data<DbPool>) -> HttpResponse {
         html_data.push(SCAN_TASKS_FOOT.to_string());

-        html_contents(html_data.join("\n"))
+        MultiReply::Content(HtmlContents(html_data.join("\n")))
     } else {
-        return server_error("Unable to list scan tasks".to_string());
+        return MultiReply::Error(ServerError("Unable to list scan tasks".to_string()));
     }
 }

-fn get_connection(database_url: &str) -> DbPool {
-    let manager = ConnectionManager::<MysqlConnection>::new(database_url);
-    // Refer to the `r2d2` documentation for more methods to use
-    // when building a connection pool
-    Pool::builder()
-        .max_size(5)
-        .test_on_check_out(true)
-        .build(manager)
-        .expect("Could not build connection pool")
-}
-
-fn plain_contents(data: String) -> HttpResponse {
-    HttpResponse::Ok()
-        .content_type(ContentType::plaintext())
-        .body(data)
-}
-
-fn html_contents(data: String) -> HttpResponse {
-    HttpResponse::Ok()
-        .content_type(ContentType::html())
-        .body(data)
-}
-
-fn server_error(data: String) -> HttpResponse {
-    HttpResponse::InternalServerError()
-        .content_type(ContentType::html())
-        .body(data)
-}
-
-async fn index() -> HttpResponse {
-    html_contents(FORM.to_string())
-}
-
-async fn pong() -> HttpResponse {
-    plain_contents("pong".to_string())
-}
-
-#[actix_web::main]
-async fn main() -> std::io::Result<()> {
-    let _log2 = log2::stdout()
-        .module(false)
-        .level(match env::var("RUST_LOG") {
-            Ok(level) => level,
-            Err(_) => "debug".to_string(),
-        })
-        .start();
-
-    let server_address: String = if let Ok(env) = env::var("SERVER_ADDRESS") {
-        env
+#[derive(Responder)]
+#[response(status = 200, content_type = "text")]
+pub struct PlainText(String);
+
+#[derive(Responder)]
+#[response(status = 200, content_type = "html")]
+pub struct HtmlContents(String);
+
+#[derive(Responder)]
+#[response(status = 500, content_type = "html")]
+pub struct ServerError(String);
+
+#[get("/")]
+async fn index() -> HtmlContents {
+    HtmlContents(FORM.to_string())
+}
+
+#[get("/ping")]
+async fn pong() -> PlainText {
+    PlainText("pong".to_string())
+}
+
+#[get("/ws")]
+pub async fn ws() -> PlainText {
+    info!("establish_ws_connection");
+    PlainText("ok".to_string())
+    // Ok(HttpResponse::Unauthorized().json(e))
+    /*
+    match result {
+        Ok(response) => Ok(response.into()),
+        Err(e) => {
+            error!("ws connection error: {:?}", e);
+            Err(e)
+        },
+    }*/
+}
+
+struct AppConfigs {
+    static_data_dir: String,
+}
+
+async fn report_counts(rocket: Rocket<Build>) -> Rocket<Build> {
+    use rocket_db_pools::diesel::AsyncConnectionWrapper;
+
+    let conn = SnowDb::fetch(&rocket)
+        .expect("database is attached")
+        .get().await
+        .unwrap_or_else(|e| {
+            span_error!("failed to connect to MySQL database" => error!("{e}"));
+            panic!("aborting launch");
+        });
+    let _: AsyncConnectionWrapper<_> = conn.into();
+    info!("Connected to the DB");
+    rocket
+}
+
+#[launch]
+fn rocket() -> _ {
+    let server_address: SocketAddr = if let Ok(env) = env::var("SERVER_ADDRESS") {
+        env.parse().expect("The ENV SERVER_ADDRESS should be a valid socket address (address:port)")
     } else {
-        "127.0.0.1:8000".to_string()
+        "127.0.0.1:8000".parse().expect("The default address should be valid")
     };
-    let worker_server_address: String = if let Ok(env) = env::var("WORKER_SERVER_ADDRESS") {
-        env
-    } else {
-        "127.0.0.1:8800".to_string()
+    let static_data_dir: String = match env::var("STATIC_DATA_DIR") {
+        Ok(val) => val,
+        Err(_) => "../data/".to_string(),
     };
     let db_url: String = if let Ok(env) = env::var("DB_URL") {
@@ -485,40 +516,36 @@ async fn main() -> std::io::Result<()> {
         "mysql://localhost".to_string()
     };

-    let pool = get_connection(db_url.as_str());
-
-    // note that obtaining a connection from the pool is also potentially blocking
-    let conn = &mut pool.get().unwrap();
-    let names = Scanner::list_names(Scanners::Stretchoid, conn);
-    match names {
-        Ok(names) => info!("Found {} Stretchoid scanners", names.len()),
-        Err(err) => error!("Unable to get names: {}", err),
+    let db: Map<_, Value> = map! {
+        "url" => db_url.into(),
+        "pool_size" => 10.into(),
+        "timeout" => 5.into(),
     };

-    let server = HttpServer::new(move || {
-        let static_data_dir: String = match env::var("STATIC_DATA_DIR") {
-            Ok(val) => val,
-            Err(_) => "../data/".to_string(),
-        };
-        App::new()
-            .app_data(web::Data::new(pool.clone()))
-            .app_data(actix_web::web::Data::new(static_data_dir))
-            .route("/", web::get().to(index))
-            .route("/ping", web::get().to(pong))
-            .route("/report", web::post().to(handle_report))
-            .route("/scan", web::post().to(handle_scan))
-            .route("/scan/tasks", web::get().to(handle_list_scan_tasks))
-            .route(
-                "/scanners/{scanner_name}",
-                web::get().to(handle_list_scanners),
-            )
-            .route(
-                "/collections/{vendor_name}/{file_name}",
-                web::get().to(handle_get_collection),
-            )
-    })
-    .bind(&server_address);
+    let config_figment = rocket::Config::figment()
+        .merge(("address", server_address.ip().to_string()))
+        .merge(("port", server_address.port()))
+        .merge(("databases", map!["snow_scanner_db" => db]));
+
+    rocket::custom(config_figment)
+        .attach(SnowDb::init())
+        .attach(AdHoc::on_ignite("Report counts", report_counts))
+        .manage(AppConfigs { static_data_dir })
+        .mount(
+            "/",
+            routes![
+                index,
+                pong,
+                handle_report,
+                handle_scan,
+                handle_list_scan_tasks,
+                handle_list_scanners,
+                handle_get_collection,
+                ws,
+            ],
+        )
+    /*

     match server {
         Ok(server) => {
             match ws2::listen(worker_server_address.as_str()) {
@@ -544,13 +571,5 @@ async fn main() -> std::io::Result<()> {
                 }
                 Err(err) => error!("Unable to listen on {worker_server_address}: {err}"),
             };
-            info!("Now listening on {}", server_address);
-            server.run().await
-        }
-        Err(err) => {
-            error!("Could not bind the server to {}", server_address);
-            Err(err)
-        }
-    }
+    }*/
 }

View File

@@ -1,11 +1,9 @@
 use std::net::IpAddr;

-use crate::Scanners;
+use crate::{DbConn, Scanners};
 use chrono::{NaiveDateTime, Utc};
-use diesel::dsl::insert_into;
-use diesel::prelude::*;
-use diesel::result::Error as DieselError;
 use hickory_resolver::Name;
+use rocket_db_pools::diesel::{dsl::insert_into, prelude::*, result::Error as DieselError};

 use crate::schema::scan_tasks::dsl::scan_tasks;
 use crate::schema::scanners::dsl::scanners;
@@ -25,14 +23,14 @@ pub struct Scanner {
 }

 impl Scanner {
-    pub fn find_or_new(
+    pub async fn find_or_new(
         query_address: IpAddr,
         scanner_name: Scanners,
         ptr: Option<Name>,
-        conn: &mut MysqlConnection,
+        conn: &mut DbConn,
     ) -> Result<Scanner, ()> {
         let ip_type = if query_address.is_ipv6() { 6 } else { 4 };
-        let scanner_row_result = Scanner::find(query_address.to_string(), ip_type, conn);
+        let scanner_row_result = Scanner::find(query_address.to_string(), ip_type, conn).await;
         let scanner_row = match scanner_row_result {
             Ok(scanner_row) => scanner_row,
             Err(_) => return Err(()),
@@ -58,31 +56,31 @@ impl Scanner {
                 last_checked_at: None,
             }
         };
-        match scanner.save(conn) {
+        match scanner.save(conn).await {
             Ok(scanner) => Ok(scanner),
             Err(_) => Err(()),
         }
     }

-    pub fn find(
+    pub async fn find(
         ip_address: String,
         ip_type: u8,
-        conn: &mut MysqlConnection,
+        conn: &mut DbConn,
     ) -> Result<Option<Scanner>, DieselError> {
         use crate::schema::scanners;

         scanners
             .select(Scanner::as_select())
             .filter(scanners::ip.eq(ip_address))
             .filter(scanners::ip_type.eq(ip_type))
             .order((scanners::ip_type.desc(), scanners::created_at.desc()))
             .first(conn)
+            .await
             .optional()
     }

-    pub fn list_names(
+    pub async fn list_names(
         scanner_name: Scanners,
-        conn: &mut MysqlConnection,
+        conn: &mut DbConn,
     ) -> Result<Vec<String>, DieselError> {
         use crate::schema::scanners;
         use crate::schema::scanners::ip;
@@ -92,16 +90,20 @@ impl Scanner {
             .filter(scanners::scanner_name.eq(scanner_name.to_string()))
             .order((scanners::ip_type.desc(), scanners::created_at.desc()))
             .load::<String>(conn)
+            .await
     }

-    pub fn save(self: Scanner, conn: &mut MysqlConnection) -> Result<Scanner, DieselError> {
-        let new_scanner = NewScanner::from_scanner(&self);
-        match insert_into(scanners)
+    pub async fn save(self: Scanner, conn: &mut DbConn) -> Result<Scanner, DieselError> {
+        use crate::schema::scanners;
+
+        let new_scanner: NewScanner = NewScanner::from_scanner(&self).await;
+        match insert_into(scanners::table)
             .values(&new_scanner)
             .on_conflict(diesel::dsl::DuplicatedKeys)
             .do_update()
             .set(&new_scanner)
             .execute(conn)
+            .await
         {
             Ok(_) => Ok(self),
             Err(err) => Err(err),
@@ -124,7 +126,7 @@ pub struct NewScanner {
 }

 impl NewScanner {
-    pub fn from_scanner<'x>(scanner: &Scanner) -> NewScanner {
+    pub async fn from_scanner<'x>(scanner: &Scanner) -> NewScanner {
         NewScanner {
             ip: scanner.ip.to_string(),
             ip_type: scanner.ip_type,
@@ -165,21 +167,22 @@ pub struct ScanTaskitem {
 }

 impl ScanTask {
-    pub fn list_not_started(conn: &mut MysqlConnection) -> Result<Vec<ScanTaskitem>, DieselError> {
+    pub async fn list_not_started(mut conn: DbConn) -> Result<Vec<ScanTaskitem>, DieselError> {
         use crate::schema::scan_tasks;

         let res = scan_tasks
             .select(ScanTaskitem::as_select())
             .filter(scan_tasks::started_at.is_null())
             .order((scan_tasks::created_at.asc(),))
-            .load::<ScanTaskitem>(conn);
+            .load::<ScanTaskitem>(&mut conn)
+            .await;
         match res {
             Ok(rows) => Ok(rows),
             Err(err) => Err(err),
         }
     }

-    pub fn list(conn: &mut MysqlConnection) -> Result<Vec<ScanTaskitem>, DieselError> {
+    pub async fn list(conn: &mut DbConn) -> Result<Vec<ScanTaskitem>, DieselError> {
         use crate::schema::scan_tasks;

         let res = scan_tasks
@@ -188,21 +191,26 @@ impl ScanTask {
                 scan_tasks::created_at.desc(),
                 scan_tasks::task_group_id.asc(),
             ))
-            .load::<ScanTaskitem>(conn);
+            .load::<ScanTaskitem>(conn)
+            .await;
         match res {
             Ok(rows) => Ok(rows),
             Err(err) => Err(err),
         }
     }

-    pub fn save(self: &ScanTask, conn: &mut MysqlConnection) -> Result<(), DieselError> {
-        let new_scan_task = NewScanTask::from_scan_task(self);
-        match insert_into(scan_tasks)
+    pub async fn save(self: &ScanTask, conn: &mut DbConn) -> Result<(), DieselError> {
+        use crate::schema::scan_tasks;
+
+        let new_scan_task: NewScanTask = NewScanTask::from_scan_task(self).await;
+        match insert_into(scan_tasks::table)
             .values(&new_scan_task)
             .on_conflict(diesel::dsl::DuplicatedKeys)
             .do_update()
             .set(&new_scan_task)
             .execute(conn)
+            .await
         {
             Ok(_) => Ok(()),
             Err(err) => Err(err),
@@ -225,7 +233,7 @@ pub struct NewScanTask {
 }

 impl NewScanTask {
-    pub fn from_scan_task<'x>(scan_task: &ScanTask) -> NewScanTask {
+    pub async fn from_scan_task<'x>(scan_task: &ScanTask) -> NewScanTask {
         NewScanTask {
             task_group_id: scan_task.task_group_id.to_string(),
             cidr: scan_task.cidr.to_owned(),

View File

@@ -1,17 +1,7 @@
-use cidr::IpCidr;
-use diesel::MysqlConnection;
 use hickory_resolver::Name;
-use log2::*;
 use std::{collections::HashMap, net::IpAddr, str::FromStr};
-use ws2::{Pod, WebSocket};

-use crate::{
-    worker::{
-        detection::detect_scanner_from_name,
-        modules::{Network, WorkerMessages},
-    },
-    DbPool, Scanner,
-};
+use crate::{worker::detection::detect_scanner_from_name, DbConn, Scanner};

 pub struct Server {
     pub clients: HashMap<u32, Worker>,
@@ -19,12 +9,7 @@ pub struct Server {
 }

 impl Server {
-    pub fn cleanup(&self, _: &ws2::Server) -> &Server {
-        // TODO: implement check not logged in
-        &self
-    }
-
-    pub fn commit(&mut self, conn: &mut MysqlConnection) -> &Server {
+    pub async fn commit(&mut self, conn: &mut DbConn) -> &Server {
         for (name, query_address) in self.new_scanners.clone() {
             let scanner_name = Name::from_str(name.as_str()).unwrap();
@@ -35,7 +20,9 @@ impl Server {
                 scanner_type,
                 Some(scanner_name),
                 conn,
-            ) {
+            )
+            .await
+            {
                 Ok(scanner) => {
                     // Got saved
                     self.new_scanners.remove(&name);
@@ -92,6 +79,7 @@ impl Worker {
     }
 }

+/*
 impl ws2::Handler for Server {
     fn on_open(&mut self, ws: &WebSocket) -> Pod {
         info!("New client: {ws}");
@@ -165,4 +153,4 @@ impl ws2::Handler for Server {
         }
         result
     }
-}
+}*/
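
The ws2::Handler implementation above is parked inside a comment for now, and the new /ws route earlier in this commit only answers with a plain "ok"; the ws = { package = "rocket_ws" } dependency added in Cargo.toml points at the intended replacement. For reference, a rocket_ws handler has roughly this shape (an echo sketch following rocket_ws's documented channel API, not code from this commit):

use rocket::get;
use rocket_ws::{Channel, WebSocket};

// Upgrades the request to a WebSocket and echoes every message back,
// ending the task when the client disconnects or an error occurs.
#[get("/ws")]
fn ws_echo(ws: WebSocket) -> Channel<'static> {
    use rocket::futures::{SinkExt, StreamExt};

    ws.channel(move |mut stream| {
        Box::pin(async move {
            while let Some(message) = stream.next().await {
                let _ = stream.send(message?).await;
            }
            Ok(())
        })
    })
}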

View File

@@ -15,7 +15,7 @@ path = "worker.rs"
 [dependencies]
 tungstenite = { version = "0.24.0", default-features = true, features = ["native-tls"] }
 log2 = "0.1.11"
-diesel = { version = "2.2.0", default-features = false, features = [] }
+diesel = { version = "2", default-features = false, features = [] }
 dns-ptr-resolver = {git = "https://github.com/wdes/dns-ptr-resolver.git"}
 hickory-resolver = { version = "0.24.1", default-features = false, features = ["tokio-runtime", "dns-over-h3", "dns-over-https", "dns-over-quic"]}
 chrono = "0.4.38"