28 Commits

Author SHA1 Message Date
4d13491415 Document binding port 80 2025-03-01 15:50:32 +01:00
605c4d3256 Add an upload script 2025-03-01 11:14:33 +01:00
3461c11ede Bump to 1.0.0 2025-03-01 11:08:10 +01:00
36efcf03ac Fix #248 - Make a Debian package 2025-03-01 11:07:52 +01:00
b331b598aa Ban more IPs that abuse the IPs 2025-01-04 21:45:31 +01:00
3d0f58dd07 Also ban the network 2025-01-04 20:35:41 +01:00
ef19081638 Add new bad IPs trying to relay emails 2025-01-04 20:34:58 +01:00
5717f0ad85 Swap out blocked from France DNS servers
Some checks failed
Build IP lists / build-aws-cloudfront (push) Has been cancelled
2024-10-10 15:57:59 +02:00
1495950484 Re-work crate management 2024-10-10 15:09:16 +02:00
565e268d01 Fixup build 2024-10-10 12:08:33 +02:00
6b0c5467b6 Some re-working, adding security and fix handling shadowserver 2024-10-10 11:50:48 +02:00
8acf084467 Implement shadowserver.org 2024-10-10 10:30:01 +02:00
1783fe5c93 Improve the CIDR parsing before starting the task 2024-10-08 01:50:53 +02:00
dbbbdc4818 Fix the path to static scanners
Some checks failed
Build IP lists / Build scanners list (binaryedge) (push) Failing after -2m31s
Build IP lists / Build scanners list (stretchoid) (push) Failing after -2m33s
Build IP lists / build-aws-cloudfront (push) Failing after -2m32s
2024-10-08 01:30:45 +02:00
75dc88bcc1 Simplify and fixup implementations for paths 2024-10-08 01:18:26 +02:00
bc3f3fe34c Validate IP addresses before insert 2024-10-08 00:03:03 +02:00
32d1abdcee Implement saving the received results 2024-10-07 23:31:33 +02:00
f589d4c11e Broadcast work requests to each node 2024-10-07 00:15:29 +02:00
e5c3b38121 Finally make it capable of talking to all hosts 2024-10-06 23:56:09 +02:00
2a7ea4c969 Implement broadcast to all nodes 2024-10-03 21:22:36 +02:00
8bf201b3e5 Better websocket processing 2024-10-03 13:56:06 +02:00
fd4d43596f Re-implement the ws client 2024-10-03 12:49:33 +02:00
25df2642e9 Move to Rocket ! 2024-10-03 10:34:56 +02:00
04aea8558f Move worker to tungstenite and add WORKER_NAME ENV 2024-09-28 01:09:48 +02:00
cad1073448 Apply formatting 2024-09-27 22:28:54 +02:00
c01177e4c8 Add tasks not started query 2024-09-27 22:28:40 +02:00
d84918851b Split the worker to a sub package 2024-09-27 22:28:23 +02:00
36468e4baf Support ENV WORKER_URL for CLI 2024-09-27 21:02:49 +02:00
20 changed files with 1744 additions and 645 deletions

View File

@ -11,79 +11,6 @@ on:
- cron: "30 0 */5 * *"
jobs:
build-scanners-list:
name: Build scanners list
environment:
name: sudo-bot
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
type: ["stretchoid", "binaryedge"]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Cache cargo binaries
uses: actions/cache@v4
id: cache-dns-ptr-resolver
with:
path: ~/.cargo/bin/dns-ptr-resolver
key: ${{ runner.os }}-cargo-bin-dns-ptr-resolver-1.1.0
- name: Set up toolchain
if: steps.cache-dns-ptr-resolver.outputs.cache-hit != 'true'
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: 1.67
override: true
- name: Install dns-ptr-resolver
if: steps.cache-dns-ptr-resolver.outputs.cache-hit != 'true'
run: cargo install dns-ptr-resolver@1.1.0
- name: Build the ${{ matrix.type }} list
run: ./make-${{ matrix.type }}.sh
- name: Post the summary
run: |
git add -A
printf '### Diff\n```diff\n%s\n```\n' "$(git diff --staged)" >> $GITHUB_STEP_SUMMARY
- name: Extract secrets
run: |
printf '%s' "${{ secrets.GH_APP_JWT_PRIV_PEM_CONTENTS }}" > ${HOME}/.secret_jwt.pem
printf '%s' "${{ secrets.GPG_PRIVATE_KEY }}" > ${HOME}/.private-key.asc
- uses: actions/setup-node@v4
with:
node-version: 18
- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "dir=$(yarn cache dir)" >> $GITHUB_OUTPUT
- name: yarn cache
uses: actions/cache@v4
with:
path: ${{ steps.yarn-cache-dir-path.outputs.dir }}
key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn-
- name: Install sudo-bot
run: yarn global add sudo-bot
- name: Run sudo-bot
run: |
sudo-bot --verbose \
--jwt-file="${HOME}/.secret_jwt.pem" \
--gh-app-id='17453' \
--installation-id="${{ secrets.INSTALLATION_ID }}" \
--repository-slug='wdes/security' \
--target-branch='main' \
--assign='williamdes' \
--commit-author-email='sudo-bot@wdes.fr' \
--commit-author-name='Sudo Bot' \
--gpg-private-key-file="${HOME}/.private-key.asc" \
--template="$GITHUB_WORKSPACE/.github/sudo-bot-template.js" \
--gpg-private-key-passphrase="${{ secrets.GPG_PASSPHRASE }}"
- name: Purge secrets
if: always()
run: |
rm -v ${HOME}/.secret_jwt.pem
rm -v ${HOME}/.private-key.asc
build-aws-cloudfront:
runs-on: ubuntu-latest
steps:

View File

@ -6,6 +6,7 @@
- `https://security.wdes.eu/scanners/stretchoid.txt` (List of all known stretchoid IPs)
- `https://security.wdes.eu/scanners/binaryedge.txt` (List of all known binaryedge IPs)
- `https://security.wdes.eu/scanners/shadowserver.txt` (List of all known shadowserver IPs)
- `https://security.wdes.eu/scanners/censys.txt` (List of all IPs declared by censys scanner on their [FAQ](https://support.censys.io/hc/en-us/articles/360043177092-Opt-Out-of-Data-Collection))
- `https://security.wdes.eu/scanners/internet-measurement.com.txt` (List of all IPs declared by internet-measurement.com on [their website](https://internet-measurement.com/#ips))

View File

@ -153,3 +153,11 @@
185.242.226.41
162.216.18.113
59.110.115.16
87.120.120.31
87.120.120.39
87.120.120.50
87.120.120.57
45.143.95.76
185.28.39.97
202.131.82.140
78.153.140.123

View File

@ -15,3 +15,4 @@
217.169.88.0/21
205.210.31.0/24
94.102.61.0/24
87.120.120.0/23

View File

@ -1,9 +1,9 @@
[package]
name = "snow-scanner"
version = "0.1.0"
version = "1.0.0"
authors = ["William Desportes <williamdes@wdes.fr>"]
edition = "2021"
rust-version = "1.78.0" # MSRV
rust-version = "1.81.0" # MSRV
description = "A program to scan internet and find scanners"
homepage = "https://github.com/wdes/snow-scanner/tree/v1.2.0-dev#readme"
repository = "https://github.com/wdes/snow-scanner"
@ -29,24 +29,78 @@ maintenance = { status = "passively-maintained" }
name = "snow-scanner"
path = "src/main.rs"
[[bin]]
name = "snow-scanner-worker"
path = "src/worker/worker.rs"
[workspace]
members = [
"src/worker"
]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
# Enable unstable features, requires nightly
# Currently only used to enable rusts official ip support
unstable = []
[dependencies]
log2 = "0.1.11"
ws2 = "0.2.5"
actix-web = "4"
actix-files = "0.6.6"
rocket_db_pools = { git = "https://github.com/rwf2/Rocket/", rev = "3bf9ef02d6e803fe9f753777f5a829dda6d2453d", default-features = false, features = ["diesel_mysql"] }
snow-scanner-worker = {path = "./src/worker"}
diesel.workspace = true
dns-ptr-resolver.workspace = true
hickory-resolver.workspace = true
uuid.workspace = true
rocket.workspace = true
rocket_ws.workspace = true
ws.workspace = true
chrono.workspace = true
serde.workspace = true
serde_json.workspace = true
cidr.workspace = true
weighted-rs.workspace = true
[workspace.dependencies]
# mariadb-dev on Alpine
# "mysqlclient-src" "mysql_backend"
diesel = { version = "2.2.0", default-features = false, features = ["mysql", "chrono", "uuid", "r2d2"] }
diesel = { version = "^2", default-features = false, features = ["mysql", "chrono", "uuid"] }
ws = { package = "rocket_ws", version = "0.1.1" }
dns-ptr-resolver = {git = "https://github.com/wdes/dns-ptr-resolver.git"}
hickory-resolver = { version = "0.24.1", default-features = false, features = ["tokio-runtime", "dns-over-h3", "dns-over-https", "dns-over-quic"]}
rocket = { git = "https://github.com/rwf2/Rocket/", rev = "3bf9ef02d6e803fe9f753777f5a829dda6d2453d"}
rocket_ws = { git = "https://github.com/rwf2/Rocket/", rev = "3bf9ef02d6e803fe9f753777f5a829dda6d2453d"}
chrono = "0.4.38"
uuid = { version = "1.10.0", default-features = false, features = ["v7", "serde", "std"] }
cidr = "0.2.2"
serde = "1.0.210"
cidr = "0.3.0"
serde = { version = "1.0.210", features = ["derive"] }
serde_json = "1.0.128"
weighted-rs = "0.1.3"
[package.metadata.deb]
maintainer = "William Desportes <williamdes@wdes.fr>"
copyright = "2022-2025, William Desportes <williamdes@wdes.fr>"
license-file = ["../LICENSE"]
extended-description = """\
Find hidden IPs in the internet snow."""
depends = "$auto"
section = "rust"
priority = "optional"
assets = [
{ source = "README.md", dest = "usr/share/doc/snow-scanner/README", mode = "644"},
{ source = "../data/collections/*/*", dest = "usr/share/snow-scanner/data/collections", mode = "644"},
{ source = "../data/scanners/*", dest = "usr/share/snow-scanner/data/scanners", mode = "644"},
{ source = "target/release/snow-scanner", dest = "usr/bin/snow-scanner", mode = "777"},
{ source = "target/release/snow-scanner-worker", dest = "usr/bin/snow-scanner-worker", mode = "777"},
]
maintainer-scripts = "debian/"
systemd-units = [
{ unit-name = "snow-scanner", enable = true, start = false, restart-after-upgrade = true, stop-on-upgrade = true },
{ unit-name = "snow-scanner-worker", enable = true, start = false, restart-after-upgrade = true, stop-on-upgrade = true }
]
conf-files = [
"/etc/snow-scanner/.env",
"/etc/snow-scanner/worker.env"
]

View File

@ -0,0 +1,80 @@
[Unit]
Description=Snow scanner worker
After=network.target

[Service]
Type=simple
User=snow-scanner
Group=snow-scanner
EnvironmentFile=/etc/snow-scanner/worker.env

# --- Sandboxing / hardening ---
# See: https://www.opensourcerers.org/2022/04/25/optimizing-a-systemd-service-for-security/
# Audit with: systemd-analyze security snow-scanner-worker
RemoveIPC=true
ProtectHome=true
NoNewPrivileges=true
PrivateTmp=false
ProtectSystem=strict
ProtectControlGroups=true
ProtectKernelModules=true
ProtectKernelTunables=true
RestrictAddressFamilies=AF_INET AF_INET6
RestrictNamespaces=true
RestrictRealtime=true
RestrictSUIDSGID=true
MemoryDenyWriteExecute=true
LockPersonality=true
# Sets up a new /dev/ mount for the executed processes and only adds API pseudo devices
# such as /dev/null, /dev/zero or /dev/random to it, but no physical devices such as
# /dev/sda, system memory /dev/mem, system ports /dev/port and others.
# This is useful to turn off physical device access by the executed process.
# (This directive and DevicePolicy= were previously declared twice; deduplicated.)
PrivateDevices=true
# Allows access to standard pseudo devices including /dev/null, /dev/zero, /dev/full,
# /dev/random, and /dev/urandom.
DevicePolicy=closed
# No devices (except clock: ProtectClock)
# See: https://github.com/systemd/systemd/issues/23185
DeviceAllow=
ProtectProc=invisible
ProtectClock=true
ProcSubset=pid
ProtectHostname=true
ProtectKernelLogs=true
# This will fail icmp pingers if set to true
PrivateUsers=false
SystemCallFilter=~@clock @cpu-emulation @debug @module @mount @obsolete
SystemCallFilter=~@privileged @raw-io @reboot @resources @swap @keyring
SystemCallFilter=~@pkey @ipc
# Return EPERM when the system call filter configured with SystemCallFilter= is
# triggered, instead of terminating the process immediately.
SystemCallErrorNumber=EPERM
SystemCallArchitectures=native
# Add these for ports < 1024
#CapabilityBoundingSet=CAP_NET_BIND_SERVICE
#AmbientCapabilities=CAP_NET_BIND_SERVICE
# Allow icmp
#CapabilityBoundingSet=CAP_NET_RAW
#AmbientCapabilities=CAP_NET_RAW
BindReadOnlyPaths=/usr/share/snow-scanner

ExecStart=/usr/bin/snow-scanner-worker
Restart=on-failure
LimitNOFILE=infinity

[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,80 @@
[Unit]
Description=Snow scanner server
After=network.target

[Service]
Type=simple
User=snow-scanner
Group=snow-scanner
EnvironmentFile=/etc/snow-scanner/.env

# --- Sandboxing / hardening ---
# See: https://www.opensourcerers.org/2022/04/25/optimizing-a-systemd-service-for-security/
# Audit with: systemd-analyze security snow-scanner
RemoveIPC=true
ProtectHome=true
NoNewPrivileges=true
PrivateTmp=false
ProtectSystem=strict
ProtectControlGroups=true
ProtectKernelModules=true
ProtectKernelTunables=true
RestrictAddressFamilies=AF_INET AF_INET6
RestrictNamespaces=true
RestrictRealtime=true
RestrictSUIDSGID=true
MemoryDenyWriteExecute=true
LockPersonality=true
# Sets up a new /dev/ mount for the executed processes and only adds API pseudo devices
# such as /dev/null, /dev/zero or /dev/random to it, but no physical devices such as
# /dev/sda, system memory /dev/mem, system ports /dev/port and others.
# This is useful to turn off physical device access by the executed process.
# (This directive and DevicePolicy= were previously declared twice; deduplicated.)
PrivateDevices=true
# Allows access to standard pseudo devices including /dev/null, /dev/zero, /dev/full,
# /dev/random, and /dev/urandom.
DevicePolicy=closed
# No devices (except clock: ProtectClock)
# See: https://github.com/systemd/systemd/issues/23185
DeviceAllow=
ProtectProc=invisible
ProtectClock=true
ProcSubset=pid
ProtectHostname=true
ProtectKernelLogs=true
# This will fail icmp pingers if set to true
PrivateUsers=false
SystemCallFilter=~@clock @cpu-emulation @debug @module @mount @obsolete
SystemCallFilter=~@privileged @raw-io @reboot @resources @swap @keyring
SystemCallFilter=~@pkey @ipc
# Return EPERM when the system call filter configured with SystemCallFilter= is
# triggered, instead of terminating the process immediately.
SystemCallErrorNumber=EPERM
SystemCallArchitectures=native
# Add these for ports < 1024
#CapabilityBoundingSet=CAP_NET_BIND_SERVICE
#AmbientCapabilities=CAP_NET_BIND_SERVICE
# Allow icmp
#CapabilityBoundingSet=CAP_NET_RAW
#AmbientCapabilities=CAP_NET_RAW
BindReadOnlyPaths=/usr/share/snow-scanner

ExecStart=/usr/bin/snow-scanner
Restart=on-failure
LimitNOFILE=infinity

[Install]
WantedBy=multi-user.target

26
snow-scanner/debian/upload.sh Executable file
View File

@ -0,0 +1,26 @@
#!/bin/sh
# Upload a .deb package ($1) to the wdes Debian package registry.
# Credentials come from git config: github.user and github.token.

user=$(git config github.user)
token=$(git config github.token)

if [ -z "$token" ]; then
    echo 'Token is empty. Please run git config --add github.token "ChangeMe"';
    exit 1;
fi

if [ -z "$user" ]; then
    echo 'User is empty. Please run git config --add github.user "ChangeMe"';
    exit 1;
fi

if [ -z "$1" ]; then
    echo 'Package file is missing, please provide a .deb file';
    exit 1;
fi

# Quote the credentials so user names/tokens containing shell-special
# characters survive word splitting. Do NOT pass -v: verbose mode prints the
# request headers (including the Authorization header, i.e. the token) to
# stderr, leaking the secret into terminals and CI logs.
curl --user "$user:$token" \
    --upload-file "$1" \
    -# \
    https://git.wdes.eu/api/packages/wdes/debian/pool/bookworm/main/upload

View File

@ -0,0 +1,135 @@
use std::{net::IpAddr, str::FromStr};
use crate::{DbConnection, SnowDb};
use hickory_resolver::Name;
use rocket::futures::channel::mpsc as rocket_mpsc;
use rocket::futures::StreamExt;
use rocket::tokio;
use snow_scanner_worker::detection::{detect_scanner_from_name, validate_ip};
use crate::Scanner;
/// Handles all the raw events being streamed from balancers and parses and filters them into only the events we care about.
pub struct EventBus {
    /// Receiving end of the input channel; drained by `run`.
    events_rx: rocket_mpsc::Receiver<EventBusWriterEvent>,
    /// Sender side of the input channel, handed out to producers via `writer()`.
    events_tx: rocket_mpsc::Sender<EventBusWriterEvent>,
    /// Broadcast side: fans messages out to all subscribers created via `subscriber()`.
    bus_tx: tokio::sync::broadcast::Sender<EventBusEvent>,
}
impl EventBus {
pub fn new() -> Self {
let (events_tx, events_rx) = rocket_mpsc::channel(100);
let (bus_tx, _) = tokio::sync::broadcast::channel(100);
Self {
events_rx,
events_tx,
bus_tx,
}
}
// db: &Connection<SnowDb>
pub async fn run(&mut self, mut conn: DbConnection<SnowDb>) {
info!("EventBus started");
loop {
tokio::select! {
Some(event) = self.events_rx.next() => {
self.handle_event(event, &mut conn).await;
}
else => {
warn!("EventBus stopped");
break;
}
}
}
}
async fn handle_event(&self, event: EventBusWriterEvent, db: &mut DbConnection<SnowDb>) {
info!("Received event");
if self.bus_tx.receiver_count() == 0 {
return;
}
match event {
EventBusWriterEvent::ScannerFoundResponse { name, address } => {
let ip: IpAddr = address.into();
if !validate_ip(ip) {
error!("Invalid IP address: {ip}");
return;
}
let name = Name::from_str(name.as_str()).unwrap();
match detect_scanner_from_name(&name) {
Ok(Some(scanner_type)) => {
match Scanner::find_or_new(ip, scanner_type, Some(name), db).await {
Ok(scanner) => {
let _ = scanner.save(db).await;
}
Err(err) => {
error!("Error find or save: {:?}", err);
}
}
}
Ok(None) => {
error!("No name detected for: {:?}", name);
}
Err(err) => {
error!("No name detected error: {:?}", err);
}
};
}
EventBusWriterEvent::BroadcastMessage(msg) => match self.bus_tx.send(msg) {
Ok(count) => {
info!("Event sent to {count} subscribers");
}
Err(err) => {
error!("Error sending event to subscribers: {}", err);
}
},
}
}
pub fn subscriber(&self) -> EventBusSubscriber {
EventBusSubscriber::new(self.bus_tx.clone())
}
pub fn writer(&self) -> EventBusWriter {
EventBusWriter::new(self.events_tx.clone())
}
}
/// Message type fanned out to broadcast subscribers (websocket frames).
pub type EventBusEvent = rocket_ws::Message;

/// Enables subscriptions to the event bus
pub struct EventBusSubscriber {
    /// Broadcast handle used to create new receivers via `subscribe()`.
    bus_tx: tokio::sync::broadcast::Sender<EventBusEvent>,
}

/// Enables writing events to the event bus
pub struct EventBusWriter {
    /// Sender side of the `EventBus` input channel.
    bus_tx: rocket_mpsc::Sender<EventBusWriterEvent>,
}

/// Events accepted on the `EventBus` input channel.
pub enum EventBusWriterEvent {
    /// Forward a websocket message to every broadcast subscriber.
    BroadcastMessage(rocket_ws::Message),
    /// A resolved PTR `name` for `address`; persisted if it matches a known scanner.
    ScannerFoundResponse { name: String, address: IpAddr },
}
impl EventBusWriter {
pub fn new(bus_tx: rocket_mpsc::Sender<EventBusWriterEvent>) -> Self {
Self { bus_tx }
}
pub fn write(&self) -> rocket_mpsc::Sender<EventBusWriterEvent> {
self.bus_tx.clone()
}
}
impl EventBusSubscriber {
pub fn new(bus_tx: tokio::sync::broadcast::Sender<EventBusEvent>) -> Self {
Self { bus_tx }
}
pub fn subscribe(&self) -> tokio::sync::broadcast::Receiver<EventBusEvent> {
self.bus_tx.subscribe()
}
}

View File

@ -1,122 +1,134 @@
use actix_files::NamedFile;
use actix_web::error::ErrorInternalServerError;
use actix_web::http::header::ContentType;
use actix_web::{web, App, HttpRequest, HttpResponse, HttpServer};
use log2::*;
use chrono::{NaiveDateTime, Utc};
use diesel::deserialize::{self};
use diesel::mysql::{Mysql, MysqlValue};
use diesel::sql_types::Text;
use diesel::r2d2::ConnectionManager;
use diesel::r2d2::Pool;
use worker::detection::{detect_scanner, get_dns_client, Scanners};
#[macro_use]
extern crate rocket;
use std::collections::HashMap;
use std::io::Write;
use std::path::PathBuf;
use std::{env, fmt};
use cidr::IpCidr;
use event_bus::{EventBusSubscriber, EventBusWriter, EventBusWriterEvent};
use rocket::{
fairing::AdHoc,
form::FromFormField,
futures::SinkExt,
http::Status,
request::{FromParam, FromRequest, Outcome, Request},
trace::error,
Rocket, State,
};
use rocket_db_pools::{
rocket::{
figment::{
util::map,
value::{Map, Value},
},
form::Form,
fs::NamedFile,
Responder,
},
Connection, Pool,
};
use rocket_db_pools::diesel::MysqlPool;
use rocket_db_pools::Database;
use rocket_ws::WebSocket;
use server::Server;
use weighted_rs::Weight;
use snow_scanner_worker::detection::{
detect_scanner, get_dns_client, get_dns_server_config, validate_ip,
};
use snow_scanner_worker::modules::{Network, WorkerMessages};
use snow_scanner_worker::scanners::IsStatic;
use snow_scanner_worker::scanners::Scanners;
use snow_scanner_worker::utils::get_dns_rr;
use std::net::SocketAddr;
use std::{
env,
net::IpAddr,
ops::{Deref, DerefMut},
};
use std::{path::PathBuf, str::FromStr};
use uuid::Uuid;
use serde::{Deserialize, Deserializer, Serialize};
use serde::{Deserialize, Serialize};
use diesel::serialize::IsNull;
use diesel::{serialize, MysqlConnection};
use dns_ptr_resolver::{get_ptr, ResolvedResult};
pub mod event_bus;
pub mod models;
pub mod schema;
pub mod server;
pub mod worker;
use crate::models::*;
use crate::server::Server;
/// Short-hand for the database pool type to use throughout the app.
type DbPool = Pool<ConnectionManager<MysqlConnection>>;
#[derive(Database, Clone)]
#[database("snow_scanner_db")]
pub struct SnowDb(MysqlPool);
trait IsStatic {
fn is_static(self: &Self) -> bool;
}
pub type ReqDbConn = Connection<SnowDb>;
pub type DbConn = DbConnection<SnowDb>;
impl IsStatic for Scanners {
fn is_static(self: &Self) -> bool {
match self {
Scanners::Censys => true,
Scanners::InternetMeasurement => true,
_ => false,
#[rocket::async_trait]
impl<'r, D: Database> FromRequest<'r> for DbConnection<D> {
type Error = Option<<D::Pool as Pool>::Error>;
async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
match D::fetch(req.rocket()) {
Some(db) => match db.get().await {
Ok(conn) => Outcome::Success(DbConnection(conn)),
Err(e) => Outcome::Error((Status::ServiceUnavailable, Some(e))),
},
None => Outcome::Error((Status::InternalServerError, None)),
}
}
}
impl<'de> Deserialize<'de> for Scanners {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let s = <Vec<String>>::deserialize(deserializer)?;
let k: &str = s[0].as_str();
match k {
"stretchoid" => Ok(Scanners::Stretchoid),
"binaryedge" => Ok(Scanners::Binaryedge),
"stretchoid.txt" => Ok(Scanners::Stretchoid),
"binaryedge.txt" => Ok(Scanners::Binaryedge),
"censys.txt" => Ok(Scanners::Censys),
"internet-measurement.com.txt" => Ok(Scanners::InternetMeasurement),
v => Err(serde::de::Error::custom(format!(
"Unknown value: {}",
v.to_string()
))),
}
pub struct DbConnection<D: Database>(pub <D::Pool as Pool>::Connection);
impl<D: Database> Deref for DbConnection<D> {
type Target = <D::Pool as Pool>::Connection;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl fmt::Display for Scanners {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}",
match self {
Self::Stretchoid => "stretchoid",
Self::Binaryedge => "binaryedge",
Self::Censys => "censys",
Self::InternetMeasurement => "internet-measurement.com",
impl<D: Database> DerefMut for DbConnection<D> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// An IP address accepted from a form field.
///
/// NOTE(review): only the `FromFormField` impl below runs `validate_ip`; the
/// serde `Deserialize` path does not re-validate — confirm that all
/// construction sites go through the form guard.
#[derive(serde::Deserialize, Clone)]
struct SafeIpAddr {
    /// The parsed address.
    pub addr: IpAddr,
}
impl FromFormField<'_> for SafeIpAddr {
fn from_value(field: rocket::form::ValueField<'_>) -> rocket::form::Result<'_, Self> {
let ip = field.value;
let query_address = IpAddr::from_str(ip);
match query_address {
Ok(ip) => {
if !validate_ip(ip) {
return Err(rocket::form::Error::validation(format!(
"Invalid IP address: {ip}"
))
.into());
}
Ok(SafeIpAddr { addr: ip })
}
)
}
}
impl serialize::ToSql<Text, Mysql> for Scanners {
fn to_sql(&self, out: &mut serialize::Output<Mysql>) -> serialize::Result {
match *self {
Self::Stretchoid => out.write_all(b"stretchoid")?,
Self::Binaryedge => out.write_all(b"binaryedge")?,
Self::Censys => out.write_all(b"censys")?,
Self::InternetMeasurement => out.write_all(b"internet-measurement.com")?,
};
Ok(IsNull::No)
}
}
impl deserialize::FromSql<Text, Mysql> for Scanners {
fn from_sql(bytes: MysqlValue) -> deserialize::Result<Self> {
let value = <String as deserialize::FromSql<Text, Mysql>>::from_sql(bytes)?;
match &value as &str {
"stretchoid" => Ok(Scanners::Stretchoid),
"binaryedge" => Ok(Scanners::Binaryedge),
"internet-measurement.com" => Ok(Scanners::InternetMeasurement),
_ => Err("Unrecognized enum variant".into()),
Err(err) => Err(rocket::form::Error::validation(format!("Invalid IP: {err}")).into()),
}
}
}
async fn handle_ip(pool: web::Data<DbPool>, ip: String) -> Result<Scanner, Option<ResolvedResult>> {
let query_address = ip.parse().expect("To parse");
async fn handle_ip(
query_address: IpAddr,
) -> Result<(IpAddr, Option<Scanners>, ResolvedResult), ()> {
let ptr_result: Result<ResolvedResult, ()> = std::thread::spawn(move || {
let client = get_dns_client();
let mut rr_dns_servers = get_dns_rr();
let client = get_dns_client(&get_dns_server_config(&rr_dns_servers.next().unwrap()));
let ptr_result: ResolvedResult = if let Ok(res) = get_ptr(query_address, client) {
res
} else {
@ -127,29 +139,19 @@ async fn handle_ip(pool: web::Data<DbPool>, ip: String) -> Result<Scanner, Optio
.join()
.unwrap();
if ptr_result.is_err() {
return Err(None);
}
let result = ptr_result.unwrap();
match detect_scanner(&result) {
Ok(Some(scanner_type)) => {
// use web::block to offload blocking Diesel queries without blocking server thread
web::block(move || {
// note that obtaining a connection from the pool is also potentially blocking
let conn = &mut pool.get().unwrap();
match Scanner::find_or_new(query_address, scanner_type, result.result, conn) {
Ok(scanner) => Ok(scanner),
Err(_) => Err(None),
match ptr_result {
Ok(result) => match detect_scanner(&result) {
Ok(Some(scanner_type)) => {
if !validate_ip(query_address) {
error!("Invalid IP address: {query_address}");
return Err(());
}
})
.await
.unwrap()
}
Ok(None) => Err(None),
Err(_) => Err(Some(result)),
Ok((query_address, Some(scanner_type), result))
}
Ok(None) => Ok((query_address, None, result)),
Err(err) => Err(err),
},
Err(err) => Err(err),
}
}
@ -176,90 +178,168 @@ static FORM: &str = r#"
</html>
"#;
#[derive(Serialize, Deserialize)]
pub struct ScanParams {
username: String,
ips: String,
/// Form payload for POST /scan: a reporter name plus a newline-separated CIDR list.
#[derive(FromForm, Serialize, Deserialize)]
pub struct ScanParams<'r> {
    /// Reporter name; the handler rejects values shorter than 4 characters.
    username: &'r str,
    /// Newline-separated list of CIDRs, each parsed with `IpCidr::from_str`.
    ips: &'r str,
}
async fn handle_scan(pool: web::Data<DbPool>, params: web::Form<ScanParams>) -> HttpResponse {
if params.username.len() < 4 {
return plain_contents("Invalid username".to_string());
/// All response shapes the HTTP handlers can return, mapped to status codes
/// by Rocket's `Responder` derive.
#[derive(Responder)]
enum MultiReply {
    /// Server-side failure, plain text body (500).
    #[response(status = 500, content_type = "text")]
    Error(ServerError),
    /// Invalid form input, plain-text message (422).
    #[response(status = 422)]
    FormError(PlainText),
    /// Invalid form input, HTML message (422).
    #[response(status = 422)]
    HtmlFormError(HtmlContents),
    /// Requested resource does not exist (404).
    #[response(status = 404)]
    NotFound(String),
    /// Successful HTML response (200).
    #[response(status = 200)]
    Content(HtmlContents),
    /// Successful plain-text response (200).
    #[response(status = 200)]
    TextContent(PlainText),
    /// Successful raw file response (200).
    #[response(status = 200)]
    FileContents(NamedFile),
}
#[post("/scan", data = "<form>")]
async fn handle_scan(
mut db: DbConn,
form: Form<ScanParams<'_>>,
event_bus_writer: &State<EventBusWriter>,
) -> MultiReply {
if form.username.len() < 4 {
return MultiReply::FormError(PlainText("Invalid username".to_string()));
}
let mut cidrs: Vec<IpCidr> = vec![];
for line in form.ips.lines() {
cidrs.push(match IpCidr::from_str(line.trim()) {
Ok(data) => data,
Err(err) => {
return MultiReply::FormError(PlainText(format!("Invalid value: {line}: {err}")))
}
});
}
let task_group_id: Uuid = Uuid::now_v7();
// use web::block to offload blocking Diesel queries without blocking server thread
let _ = web::block(move || {
// note that obtaining a connection from the pool is also potentially blocking
let conn = &mut pool.get().unwrap();
for ip in params.ips.lines() {
let scan_task = ScanTask {
task_group_id: task_group_id.to_string(),
cidr: ip.to_string(),
created_by_username: params.username.clone(),
created_at: Utc::now().naive_utc(),
updated_at: None,
started_at: None,
still_processing_at: None,
ended_at: None,
};
match scan_task.save(conn) {
Ok(_) => error!("Added {}", ip.to_string()),
Err(err) => error!("Not added: {:?}", err),
for cidr in cidrs {
let scan_task = ScanTask {
task_group_id: task_group_id.to_string(),
cidr: cidr.to_string(),
created_by_username: form.username.to_string(),
created_at: Utc::now().naive_utc(),
updated_at: None,
started_at: None,
still_processing_at: None,
ended_at: None,
};
let mut bus_tx = event_bus_writer.write();
match scan_task.save(&mut db).await {
Ok(_) => {
info!("Added {}", cidr.to_string());
let msg = EventBusWriterEvent::BroadcastMessage(
WorkerMessages::DoWorkRequest {
neworks: vec![Network(cidr)],
}
.into(),
);
let _ = bus_tx.send(msg).await;
}
Err(err) => error!("Not added: {:?}", err),
}
})
.await
// map diesel query errors to a 500 error response
.map_err(|err| ErrorInternalServerError(err));
}
html_contents(format!("New task added: {} !", task_group_id))
MultiReply::Content(HtmlContents(format!("New task added: {} !", task_group_id)))
}
#[derive(Serialize, Deserialize)]
#[derive(FromForm, Deserialize)]
pub struct ReportParams {
ip: String,
ip: SafeIpAddr,
}
async fn handle_report(pool: web::Data<DbPool>, params: web::Form<ReportParams>) -> HttpResponse {
match handle_ip(pool, params.ip.clone()).await {
Ok(scanner) => html_contents(match scanner.scanner_name {
Scanners::Binaryedge => match scanner.last_checked_at {
Some(date) => format!(
"Reported a binaryedge ninja! <b>{}</b> known as {} since {date}.",
scanner.ip,
scanner.ip_ptr.unwrap_or("".to_string())
),
None => format!(
"Reported a binaryedge ninja! <b>{}</b> known as {}.",
scanner.ip,
scanner.ip_ptr.unwrap_or("".to_string())
),
},
Scanners::Stretchoid => match scanner.last_checked_at {
Some(date) => format!(
"Reported a stretchoid agent! <b>{}</b> known as {} since {date}.",
scanner.ip,
scanner.ip_ptr.unwrap_or("".to_string())
),
None => format!(
"Reported a stretchoid agent! <b>{}</b> known as {}.",
scanner.ip,
scanner.ip_ptr.unwrap_or("".to_string())
),
},
_ => format!("Not supported"),
}),
/// Build the HTML snippet shown to a reporter when the submitted IP matched a
/// known scanner.
///
/// The phrase depends on the scanner family, and the "since {date}" suffix is
/// only present when `last_checked_at` is known. Families without a dedicated
/// message render as "Not supported". Output strings are byte-identical to the
/// previous per-variant duplicated formatting.
fn reply_contents_for_scanner_found(scanner: Scanner) -> HtmlContents {
    // One phrase per scanner family; `None` means "no dedicated message".
    // The space before the exclamation mark in the shadowserver phrase is
    // intentional (matches the historical output).
    let phrase = match scanner.scanner_name {
        Scanners::Binaryedge => Some("a binaryedge ninja!"),
        Scanners::Stretchoid => Some("a stretchoid agent!"),
        Scanners::Shadowserver => Some("a cloudy shadowserver !"),
        _ => None,
    };
    HtmlContents(match phrase {
        Some(phrase) => {
            // PTR name, or empty string when the reverse lookup had no result.
            let ptr = scanner.ip_ptr.unwrap_or_default();
            match scanner.last_checked_at {
                Some(date) => format!(
                    "Reported {phrase} <b>{}</b> known as {} since {date}.",
                    scanner.ip, ptr
                ),
                None => format!("Reported {phrase} <b>{}</b> known as {}.", scanner.ip, ptr),
            }
        }
        // `format!("Not supported")` triggered clippy::useless_format.
        None => "Not supported".to_string(),
    })
}
Err(ptr_result) => html_contents(format!(
"The IP <b>{}</a> resolved as {:?} did not match known scanners patterns.",
params.ip,
match ptr_result {
Some(res) => res.result,
None => None,
}
)),
/// POST /report — check a single IP and report whether it belongs to a known
/// scanner.
///
/// The IP has already been syntax- and range-checked by the `SafeIpAddr` form
/// guard. The handler resolves its PTR record via `handle_ip`, and:
/// - scanner matched and saved       -> 200 with the scanner details,
/// - scanner matched but save failed -> 500 with the DB error,
/// - PTR resolved but no match       -> 422 with the resolved name,
/// - PTR resolution failed           -> 500.
#[post("/report", data = "<form>")]
async fn handle_report(mut db: DbConn, form: Form<ReportParams>) -> MultiReply {
    match handle_ip(form.ip.addr).await {
        Ok((query_address, scanner_type, result)) => match scanner_type {
            Some(scanner_type) => match Scanner::find_or_new(
                query_address,
                scanner_type,
                result.result.clone(),
                &mut db,
            )
            .await
            {
                Ok(scanner) => MultiReply::Content(reply_contents_for_scanner_found(scanner)),
                Err(err) => MultiReply::Error(ServerError(format!(
                    "The IP {} resolved as {} could not be saved, server error: {err}.",
                    form.ip.addr,
                    match result.result {
                        Some(res) => res.to_string(),
                        None => "No value".to_string(),
                    }
                ))),
            },
            // Fixed: the <b> element used to be closed with </a>.
            None => MultiReply::HtmlFormError(HtmlContents(format!(
                "The IP <b>{}</b> resolved as {:?} did not match known scanners patterns.",
                form.ip.addr,
                match result.result {
                    Some(res) => res.to_string(),
                    None => "No value".to_string(),
                }
            ))),
        },
        // PTR resolution failed entirely.
        // Fixed: mismatched </a> closing tag and the "en error" typo.
        Err(_) => MultiReply::Error(ServerError(format!(
            "The IP <b>{}</b> did encounter an error at resolve time.",
            form.ip.addr
        ))),
    }
}
@ -267,87 +347,79 @@ struct SecurePath {
pub data: String,
}
impl<'de> Deserialize<'de> for SecurePath {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let s = <String>::deserialize(deserializer)?;
impl FromParam<'_> for SecurePath {
type Error = String;
fn from_param(param: &'_ str) -> Result<Self, Self::Error> {
// A-Z a-z 0-9
// . - _
if s.chars()
if param
.chars()
.all(|c| c.is_ascii_alphanumeric() || c == '.' || c == '-' || c == '_')
{
return Ok(SecurePath { data: s });
return Ok(SecurePath {
data: param.to_string(),
});
}
Err(serde::de::Error::custom(format!(
"Invalid value: {}",
s.to_string()
)))
Err(format!(
"Invalid path value (forbidden chars): {}",
param.to_string()
))
}
}
#[get("/collections/<vendor_name>/<file_name>")]
async fn handle_get_collection(
path: web::Path<(SecurePath, SecurePath)>,
req: HttpRequest,
static_data_dir: actix_web::web::Data<String>,
) -> actix_web::Result<HttpResponse> {
let (vendor_name, file_name) = path.into_inner();
vendor_name: SecurePath,
file_name: SecurePath,
app_configs: &State<AppConfigs>,
) -> MultiReply {
let mut path: PathBuf = PathBuf::new();
let static_data_dir: String = static_data_dir.into_inner().to_string();
let static_data_dir: String = app_configs.static_data_dir.clone();
path.push(static_data_dir);
path.push("collections");
path.push(vendor_name.data);
path.push(file_name.data);
match NamedFile::open(path) {
Ok(file) => Ok(file.into_response(&req)),
Err(err) => Ok(HttpResponse::NotFound()
.content_type(ContentType::plaintext())
.body(format!("File not found: {}.\n", err))),
match NamedFile::open(path).await {
Ok(file) => MultiReply::FileContents(file),
Err(err) => MultiReply::NotFound(err.to_string()),
}
}
#[get("/scanners/<scanner_name>")]
async fn handle_list_scanners(
pool: web::Data<DbPool>,
path: web::Path<Scanners>,
req: HttpRequest,
static_data_dir: actix_web::web::Data<String>,
) -> actix_web::Result<HttpResponse> {
let scanner_name = path.into_inner();
let static_data_dir: String = static_data_dir.into_inner().to_string();
mut db: DbConn,
scanner_name: Scanners,
app_configs: &State<AppConfigs>,
) -> MultiReply {
let static_data_dir: String = app_configs.static_data_dir.clone();
if scanner_name.is_static() {
let mut path: PathBuf = PathBuf::new();
path.push(static_data_dir);
path.push("scanners");
path.push(scanner_name.to_string());
path.push(match scanner_name {
Scanners::Stretchoid | Scanners::Binaryedge | Scanners::Shadowserver => {
panic!("This should not happen")
}
Scanners::Censys => "censys.txt".to_string(),
Scanners::InternetMeasurement => "internet-measurement.com.txt".to_string(),
});
return match NamedFile::open(path) {
Ok(file) => Ok(file.into_response(&req)),
Err(err) => Ok(HttpResponse::NotFound()
.content_type(ContentType::plaintext())
.body(format!("File not found: {}.\n", err))),
return match NamedFile::open(path).await {
Ok(file) => MultiReply::FileContents(file),
Err(err) => MultiReply::NotFound(err.to_string()),
};
}
// use web::block to offload blocking Diesel queries without blocking server thread
let scanners_list = web::block(move || {
// note that obtaining a connection from the pool is also potentially blocking
let conn = &mut pool.get().unwrap();
match Scanner::list_names(scanner_name, conn) {
Ok(data) => Ok(data),
Err(err) => Err(err),
}
})
.await
// map diesel query errors to a 500 error response
.map_err(|err| ErrorInternalServerError(err))
.unwrap();
let scanners_list = match Scanner::list_names(scanner_name, &mut db).await {
Ok(data) => Ok(data),
Err(err) => Err(err),
};
if let Ok(scanners) = scanners_list {
Ok(html_contents(scanners.join("\n")))
MultiReply::TextContent(PlainText(scanners.join("\n")))
} else {
Ok(server_error("Unable to list scanners".to_string()))
MultiReply::Error(ServerError("Unable to list scanners".to_string()))
}
}
@ -376,23 +448,16 @@ static SCAN_TASKS_FOOT: &str = r#"
</html>
"#;
async fn handle_list_scan_tasks(pool: web::Data<DbPool>) -> HttpResponse {
#[get("/scan/tasks")]
async fn handle_list_scan_tasks(mut db: DbConn) -> MultiReply {
let mut html_data: Vec<String> = vec![SCAN_TASKS_HEAD.to_string()];
// use web::block to offload blocking Diesel queries without blocking server thread
let scan_tasks_list = web::block(move || {
// note that obtaining a connection from the pool is also potentially blocking
let conn = &mut pool.get().unwrap();
match ScanTask::list(conn) {
Ok(data) => Ok(data),
Err(err) => Err(err),
}
})
.await
// map diesel query errors to a 500 error response
.map_err(|err| ErrorInternalServerError(err));
let scan_tasks_list = match ScanTask::list(&mut db).await {
Ok(data) => Ok(data),
Err(err) => Err(err),
};
if let Ok(scan_tasks) = scan_tasks_list.unwrap() {
if let Ok(scan_tasks) = scan_tasks_list {
for row in scan_tasks {
let cidr: String = row.cidr;
let started_at: Option<NaiveDateTime> = row.started_at;
@ -413,69 +478,87 @@ async fn handle_list_scan_tasks(pool: web::Data<DbPool>) -> HttpResponse {
html_data.push(SCAN_TASKS_FOOT.to_string());
html_contents(html_data.join("\n"))
MultiReply::Content(HtmlContents(html_data.join("\n")))
} else {
return server_error("Unable to list scan tasks".to_string());
return MultiReply::Error(ServerError("Unable to list scan tasks".to_string()));
}
}
fn get_connection(database_url: &str) -> DbPool {
let manager = ConnectionManager::<MysqlConnection>::new(database_url);
// Refer to the `r2d2` documentation for more methods to use
// when building a connection pool
Pool::builder()
.max_size(5)
.test_on_check_out(true)
.build(manager)
.expect("Could not build connection pool")
/// 200 response whose body is served as plain text.
#[derive(Responder)]
#[response(status = 200, content_type = "text")]
pub struct PlainText(String);

/// 200 response whose body is served as HTML.
#[derive(Responder)]
#[response(status = 200, content_type = "html")]
pub struct HtmlContents(String);

/// 500 Internal Server Error response whose body is served as HTML.
#[derive(Responder)]
#[response(status = 500, content_type = "html")]
pub struct ServerError(String);
/// GET `/`: serve the static report form (the `FORM` template constant).
#[get("/")]
async fn index() -> HtmlContents {
    HtmlContents(FORM.to_string())
}
fn plain_contents(data: String) -> HttpResponse {
HttpResponse::Ok()
.content_type(ContentType::plaintext())
.body(data)
/// GET `/ping`: liveness probe that always answers the literal string `pong`.
#[get("/ping")]
async fn pong() -> PlainText {
    PlainText("pong".into())
}
fn html_contents(data: String) -> HttpResponse {
HttpResponse::Ok()
.content_type(ContentType::html())
.body(data)
/// GET `/ws`: upgrade to a WebSocket and hand the duplex stream to the
/// worker server, wiring in the event-bus reader and writer.
#[get("/ws")]
pub async fn ws(
    ws: WebSocket,
    event_bus: &State<EventBusSubscriber>,
    event_bus_writer: &State<EventBusWriter>,
) -> rocket_ws::Channel<'static> {
    use rocket::futures::channel::mpsc as rocket_mpsc;

    // NOTE(review): the sender half of this channel is dropped immediately,
    // so `ws_receiver` can never yield a message — confirm whether this
    // per-client channel is still needed.
    let (_, ws_receiver) = rocket_mpsc::channel::<rocket_ws::Message>(1);
    let bus_rx = event_bus.subscribe();
    let bus_tx = event_bus_writer.write();
    // Return the channel directly instead of binding it to a temporary.
    ws.channel(|stream| Server::handle(stream, bus_rx, bus_tx, ws_receiver))
}
fn server_error(data: String) -> HttpResponse {
HttpResponse::InternalServerError()
.content_type(ContentType::html())
.body(data)
/// Application-level configuration shared with handlers via Rocket managed state.
struct AppConfigs {
    // Root directory the static collection/scanner files are served from
    // (set from the STATIC_DATA_DIR environment variable at startup).
    static_data_dir: String,
}
async fn index() -> HttpResponse {
html_contents(FORM.to_string())
/// Rocket ignite hook: log how many Stretchoid scanners are already stored.
///
/// Panics (aborting launch) when a database connection cannot be obtained —
/// intentional, since the server cannot operate without its database.
async fn report_counts<'a>(rocket: Rocket<rocket::Build>) -> Rocket<rocket::Build> {
    let conn = SnowDb::fetch(&rocket)
        .expect("Failed to get DB connection")
        .clone()
        .get()
        .await
        .unwrap_or_else(|e| {
            span_error!("failed to connect to MySQL database" => error!("{e}"));
            panic!("aborting launch");
        });
    // Count query doubles as a startup sanity check of the DB schema.
    match Scanner::list_names(Scanners::Stretchoid, &mut DbConnection(conn)).await {
        Ok(d) => info!("Found {} Stretchoid scanners", d.len()),
        Err(err) => error!("Unable to fetch Stretchoid scanners: {err}"),
    }
    rocket
}
async fn pong() -> HttpResponse {
plain_contents("pong".to_string())
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
let _log2 = log2::stdout()
.module(false)
.level(match env::var("RUST_LOG") {
Ok(level) => level,
Err(_) => "debug".to_string(),
})
.start();
let server_address: String = if let Ok(env) = env::var("SERVER_ADDRESS") {
env
#[rocket::main]
async fn main() -> Result<(), rocket::Error> {
let server_address: SocketAddr = if let Ok(env) = env::var("SERVER_ADDRESS") {
env.parse()
.expect("The ENV SERVER_ADDRESS should be a valid socket address (address:port)")
} else {
"127.0.0.1:8000".to_string()
"127.0.0.1:8000"
.parse()
.expect("The default address should be valid")
};
let worker_server_address: String = if let Ok(env) = env::var("WORKER_SERVER_ADDRESS") {
env
} else {
"127.0.0.1:8800".to_string()
let static_data_dir: String = match env::var("STATIC_DATA_DIR") {
Ok(val) => val,
Err(_) => "../data/".to_string(),
};
let db_url: String = if let Ok(env) = env::var("DB_URL") {
@ -485,72 +568,99 @@ async fn main() -> std::io::Result<()> {
"mysql://localhost".to_string()
};
let pool = get_connection(db_url.as_str());
// note that obtaining a connection from the pool is also potentially blocking
let conn = &mut pool.get().unwrap();
let names = Scanner::list_names(Scanners::Stretchoid, conn);
match names {
Ok(names) => info!("Found {} Stretchoid scanners", names.len()),
Err(err) => error!("Unable to get names: {}", err),
let db: Map<_, Value> = map! {
"url" => db_url.into(),
"pool_size" => 10.into(),
"timeout" => 5.into(),
};
let server = HttpServer::new(move || {
let static_data_dir: String = match env::var("STATIC_DATA_DIR") {
Ok(val) => val,
Err(_) => "../data/".to_string(),
};
let config_figment = rocket::Config::figment()
.merge(("address", server_address.ip().to_string()))
.merge(("port", server_address.port()))
.merge(("databases", map!["snow_scanner_db" => db]));
App::new()
.app_data(web::Data::new(pool.clone()))
.app_data(actix_web::web::Data::new(static_data_dir))
.route("/", web::get().to(index))
.route("/ping", web::get().to(pong))
.route("/report", web::post().to(handle_report))
.route("/scan", web::post().to(handle_scan))
.route("/scan/tasks", web::get().to(handle_list_scan_tasks))
.route(
"/scanners/{scanner_name}",
web::get().to(handle_list_scanners),
)
.route(
"/collections/{vendor_name}/{file_name}",
web::get().to(handle_get_collection),
)
})
.bind(&server_address);
match server {
Ok(server) => {
match ws2::listen(worker_server_address.as_str()) {
Ok(mut ws_server) => {
std::thread::spawn(move || {
let pool = get_connection(db_url.as_str());
// note that obtaining a connection from the pool is also potentially blocking
let conn = &mut pool.get().unwrap();
let mut ws_server_handles = Server {
clients: HashMap::new(),
new_scanners: HashMap::new(),
};
info!("Worker server is listening on: {worker_server_address}");
loop {
match ws_server.process(&mut ws_server_handles, 0.5) {
Ok(_) => {}
Err(err) => error!("Processing error: {err}"),
}
ws_server_handles.cleanup(&ws_server);
ws_server_handles.commit(conn);
}
});
let mut event_bus = event_bus::EventBus::new();
let event_subscriber = event_bus.subscriber();
let event_writer = event_bus.writer();
let _ = rocket::custom(config_figment)
.attach(SnowDb::init())
.attach(AdHoc::on_ignite("Report counts", report_counts))
.attach(AdHoc::on_shutdown("Close Websockets", |r| {
Box::pin(async move {
if let Some(writer) = r.state::<EventBusWriter>() {
Server::shutdown_to_all(writer);
}
Err(err) => error!("Unable to listen on {worker_server_address}: {err}"),
};
})
}))
.attach(AdHoc::on_liftoff(
"Run websocket client manager",
move |r| {
Box::pin(async move {
let conn = SnowDb::fetch(r)
.expect("Failed to get DB connection")
.clone()
.get()
.await
.unwrap_or_else(|e| {
span_error!("failed to connect to MySQL database" => error!("{e}"));
panic!("aborting launch");
});
rocket::tokio::spawn(async move {
event_bus.run(DbConnection(conn)).await;
});
})
},
))
.manage(AppConfigs { static_data_dir })
.manage(event_subscriber)
.manage(event_writer)
.mount(
"/",
routes![
index,
pong,
handle_report,
handle_scan,
handle_list_scan_tasks,
handle_list_scanners,
handle_get_collection,
ws,
],
)
.launch()
.await;
Ok(())
}
info!("Now listening on {}", server_address);
server.run().await
}
Err(err) => {
error!("Could not bind the server to {}", server_address);
Err(err)
}
#[cfg(test)]
mod test {
    use super::*;
    use hickory_resolver::{
        config::{NameServerConfigGroup, ResolverConfig, ResolverOpts},
        Name, Resolver,
    };
    use std::time::Duration;

    /// Resolves the PTR record of 8.8.8.8 against Google DNS and expects
    /// `dns.google.`.
    /// NOTE(review): network-dependent test — it will fail offline or if
    /// Google's PTR record ever changes.
    #[test]
    fn test_get_ptr() {
        let server = NameServerConfigGroup::google();
        let config = ResolverConfig::from_parts(None, vec![], server);
        let mut options = ResolverOpts::default();
        options.timeout = Duration::from_secs(5);
        options.attempts = 1; // One try
        let resolver = Resolver::new(config, options).unwrap();
        let query_address = "8.8.8.8".parse().expect("To parse");
        assert_eq!(
            get_ptr(query_address, resolver).unwrap(),
            ResolvedResult {
                query: Name::from_str_relaxed("8.8.8.8.in-addr.arpa.").unwrap(),
                result: Some(Name::from_str_relaxed("dns.google.").unwrap()),
                error: None,
            }
        );
    }
}

View File

@ -1,11 +1,9 @@
use std::net::IpAddr;
use crate::Scanners;
use crate::{DbConn, Scanners};
use chrono::{NaiveDateTime, Utc};
use diesel::dsl::insert_into;
use diesel::prelude::*;
use diesel::result::Error as DieselError;
use hickory_resolver::Name;
use rocket_db_pools::diesel::{dsl::insert_into, prelude::*, result::Error as DieselError};
use crate::schema::scan_tasks::dsl::scan_tasks;
use crate::schema::scanners::dsl::scanners;
@ -25,17 +23,17 @@ pub struct Scanner {
}
impl Scanner {
pub fn find_or_new(
pub async fn find_or_new(
query_address: IpAddr,
scanner_name: Scanners,
ptr: Option<Name>,
conn: &mut MysqlConnection,
) -> Result<Scanner, ()> {
conn: &mut DbConn,
) -> Result<Scanner, DieselError> {
let ip_type = if query_address.is_ipv6() { 6 } else { 4 };
let scanner_row_result = Scanner::find(query_address.to_string(), ip_type, conn);
let scanner_row_result = Scanner::find(query_address.to_string(), ip_type, conn).await;
let scanner_row = match scanner_row_result {
Ok(scanner_row) => scanner_row,
Err(_) => return Err(()),
Err(err) => return Err(err),
};
let scanner = if let Some(mut scanner) = scanner_row {
@ -58,31 +56,31 @@ impl Scanner {
last_checked_at: None,
}
};
match scanner.save(conn) {
match scanner.save(conn).await {
Ok(scanner) => Ok(scanner),
Err(_) => Err(()),
Err(err) => Err(err),
}
}
pub fn find(
pub async fn find(
ip_address: String,
ip_type: u8,
conn: &mut MysqlConnection,
conn: &mut DbConn,
) -> Result<Option<Scanner>, DieselError> {
use crate::schema::scanners;
scanners
.select(Scanner::as_select())
.filter(scanners::ip.eq(ip_address))
.filter(scanners::ip_type.eq(ip_type))
.order((scanners::ip_type.desc(), scanners::created_at.desc()))
.first(conn)
.await
.optional()
}
pub fn list_names(
pub async fn list_names(
scanner_name: Scanners,
conn: &mut MysqlConnection,
conn: &mut DbConn,
) -> Result<Vec<String>, DieselError> {
use crate::schema::scanners;
use crate::schema::scanners::ip;
@ -92,16 +90,20 @@ impl Scanner {
.filter(scanners::scanner_name.eq(scanner_name.to_string()))
.order((scanners::ip_type.desc(), scanners::created_at.desc()))
.load::<String>(conn)
.await
}
pub fn save(self: Scanner, conn: &mut MysqlConnection) -> Result<Scanner, DieselError> {
let new_scanner = NewScanner::from_scanner(&self);
match insert_into(scanners)
pub async fn save(self: Scanner, conn: &mut DbConn) -> Result<Scanner, DieselError> {
use crate::schema::scanners;
let new_scanner: NewScanner = NewScanner::from_scanner(&self).await;
match insert_into(scanners::table)
.values(&new_scanner)
.on_conflict(diesel::dsl::DuplicatedKeys)
.do_update()
.set(&new_scanner)
.execute(conn)
.await
{
Ok(_) => Ok(self),
Err(err) => Err(err),
@ -124,7 +126,7 @@ pub struct NewScanner {
}
impl NewScanner {
pub fn from_scanner<'x>(scanner: &Scanner) -> NewScanner {
pub async fn from_scanner<'x>(scanner: &Scanner) -> NewScanner {
NewScanner {
ip: scanner.ip.to_string(),
ip_type: scanner.ip_type,
@ -165,7 +167,22 @@ pub struct ScanTaskitem {
}
impl ScanTask {
pub fn list(conn: &mut MysqlConnection) -> Result<Vec<ScanTaskitem>, DieselError> {
/// Fetch the scan tasks that have not been started yet, oldest first.
///
/// Returns the rows as-is; any Diesel error is propagated to the caller.
pub async fn list_not_started(mut conn: DbConn) -> Result<Vec<ScanTaskitem>, DieselError> {
    use crate::schema::scan_tasks;

    // The query result is already the desired Result — the previous
    // `match res { Ok(r) => Ok(r), Err(e) => Err(e) }` was an identity.
    scan_tasks
        .select(ScanTaskitem::as_select())
        .filter(scan_tasks::started_at.is_null())
        .order((scan_tasks::created_at.asc(),))
        .load::<ScanTaskitem>(&mut conn)
        .await
}
pub async fn list(conn: &mut DbConn) -> Result<Vec<ScanTaskitem>, DieselError> {
use crate::schema::scan_tasks;
let res = scan_tasks
@ -174,21 +191,26 @@ impl ScanTask {
scan_tasks::created_at.desc(),
scan_tasks::task_group_id.asc(),
))
.load::<ScanTaskitem>(conn);
.load::<ScanTaskitem>(conn)
.await;
match res {
Ok(rows) => Ok(rows),
Err(err) => Err(err),
}
}
pub fn save(self: &ScanTask, conn: &mut MysqlConnection) -> Result<(), DieselError> {
let new_scan_task = NewScanTask::from_scan_task(self);
match insert_into(scan_tasks)
pub async fn save(self: &ScanTask, conn: &mut DbConn) -> Result<(), DieselError> {
use crate::schema::scan_tasks;
let new_scan_task: NewScanTask = NewScanTask::from_scan_task(self).await;
match insert_into(scan_tasks::table)
.values(&new_scan_task)
.on_conflict(diesel::dsl::DuplicatedKeys)
.do_update()
.set(&new_scan_task)
.execute(conn)
.await
{
Ok(_) => Ok(()),
Err(err) => Err(err),
@ -211,7 +233,7 @@ pub struct NewScanTask {
}
impl NewScanTask {
pub fn from_scan_task<'x>(scan_task: &ScanTask) -> NewScanTask {
pub async fn from_scan_task<'x>(scan_task: &ScanTask) -> NewScanTask {
NewScanTask {
task_group_id: scan_task.task_group_id.to_string(),
cidr: scan_task.cidr.to_owned(),

View File

@ -1,74 +1,227 @@
use cidr::IpCidr;
use diesel::MysqlConnection;
use hickory_resolver::Name;
use log2::*;
use std::{collections::HashMap, net::IpAddr, str::FromStr};
use ws2::{Pod, WebSocket};
use rocket::futures::{stream::Next, SinkExt, StreamExt};
use rocket_ws::{frame::CloseFrame, Message};
use std::pin::Pin;
use crate::{
worker::{
detection::detect_scanner_from_name,
modules::{Network, WorkerMessages},
},
DbPool, Scanner,
};
use crate::event_bus::{EventBusEvent, EventBusWriter, EventBusWriterEvent};
use rocket::futures::channel::mpsc as rocket_mpsc;
use snow_scanner_worker::modules::WorkerMessages;
pub struct Server {
pub clients: HashMap<u32, Worker>,
pub new_scanners: HashMap<String, IpAddr>,
}
pub struct Server {}
/// Boxed, `Send` future returned by `Server::handle` for a WebSocket channel.
type HandleBox = Pin<
    Box<dyn std::future::Future<Output = Result<(), rocket_ws::result::Error>> + std::marker::Send>,
>;
impl Server {
pub fn cleanup(&self, _: &ws2::Server) -> &Server {
// TODO: implement check not logged in
&self
/// Wire a freshly-accepted WebSocket stream into the worker loop.
///
/// Spawns `Worker::work` as a background task, then parks the returned
/// future on the process-wide Ctrl-C signal.
/// NOTE(review): every connection handler awaits `ctrl_c()` and `unwrap()`s
/// it — confirm this is the intended connection-lifetime management rather
/// than a per-connection shutdown mechanism.
pub fn handle(
    stream: rocket_ws::stream::DuplexStream,
    bus_rx: rocket::tokio::sync::broadcast::Receiver<EventBusEvent>,
    bus_tx: rocket_mpsc::Sender<EventBusWriterEvent>,
    ws_receiver: rocket_mpsc::Receiver<rocket_ws::Message>,
) -> HandleBox {
    use rocket::tokio;

    Box::pin(async move {
        let work_fn = Worker::work(stream, bus_rx, bus_tx, ws_receiver);
        tokio::spawn(work_fn);
        tokio::signal::ctrl_c().await.unwrap();
        Ok(())
    })
}
pub fn commit(&mut self, conn: &mut MysqlConnection) -> &Server {
for (name, query_address) in self.new_scanners.clone() {
let scanner_name = Name::from_str(name.as_str()).unwrap();
/// Construct the (currently stateless) WebSocket server handle.
pub fn new() -> Server {
    Server {}
}
match detect_scanner_from_name(&scanner_name) {
Ok(Some(scanner_type)) => {
match Scanner::find_or_new(
query_address,
scanner_type,
Some(scanner_name),
conn,
) {
Ok(scanner) => {
// Got saved
self.new_scanners.remove(&name);
info!(
"Saved {scanner_type}: {name} for {query_address}: {:?}",
scanner.ip_ptr
);
}
Err(err) => {
error!("Unable to find or new {:?}", err);
}
};
}
Ok(None) => {}
Err(_) => {}
}
/*pub fn add_worker(tx: rocket_mpsc::Sender<Message>, workers: &Mutex<WorkersList>) -> () {
let workers_lock = workers.try_lock();
if let Ok(mut workers) = workers_lock {
workers.push(tx);
info!("Clients: {}", workers.len());
std::mem::drop(workers);
} else {
error!("Unable to lock workers");
}
self
}*/
/// Broadcast a `Close` frame (code `Away`) through the event bus, telling
/// every connected worker that the server is stopping.
pub fn shutdown_to_all(server: &EventBusWriter) -> () {
    let goodbye = Message::Close(Some(CloseFrame {
        code: rocket_ws::frame::CloseCode::Away,
        reason: "Server stop".into(),
    }));
    match server
        .write()
        .try_send(EventBusWriterEvent::BroadcastMessage(goodbye))
    {
        Ok(_) => {
            info!("Worker did receive stop signal.");
        }
        Err(err) => {
            error!("Send error: {err}");
        }
    }
}
/*pub fn send_to_all(workers: &Mutex<WorkersList>, message: &str) -> () {
let workers_lock = workers.try_lock();
if let Ok(ref workers) = workers_lock {
workers.iter().for_each(|tx| {
let res = tx.clone().try_send(Message::Text(message.to_string()));
match res {
Ok(_) => {
info!("Message sent to worker !");
}
Err(err) => {
error!("Send error: {err}");
}
};
});
info!("Currently {} workers online.", workers.len());
std::mem::drop(workers_lock);
} else {
error!("Unable to lock workers");
}
}*/
}
#[derive(Debug, Clone)]
pub struct Worker {
pub authenticated: bool,
pub login: Option<String>,
pub struct Worker<'a> {
authenticated: bool,
login: Option<String>,
stream: &'a mut rocket_ws::stream::DuplexStream,
bus_tx: &'a mut rocket_mpsc::Sender<EventBusWriterEvent>,
}
impl Worker {
pub fn initial() -> Worker {
impl<'a> Worker<'a> {
/// Create a fresh, not-yet-authenticated worker bound to its WebSocket
/// stream and to the event-bus sender.
pub fn initial(
    stream: &'a mut rocket_ws::stream::DuplexStream,
    bus_tx: &'a mut rocket_mpsc::Sender<EventBusWriterEvent>,
) -> Worker<'a> {
    info!("New worker");
    Worker {
        authenticated: false, // flips to true after an AuthenticateRequest
        login: None,
        stream,
        bus_tx,
    }
}
/// Per-connection event loop, driving four sources concurrently via
/// `select!`: a 60-second keep-alive tick, the broadcast event bus, the
/// per-client mpsc channel, and the WebSocket stream itself. Exits when a
/// tick or send fails, or when polling reports the connection closed.
pub async fn work(
    mut stream: rocket_ws::stream::DuplexStream,
    mut bus_rx: rocket::tokio::sync::broadcast::Receiver<EventBusEvent>,
    mut bus_tx: rocket_mpsc::Sender<EventBusWriterEvent>,
    mut ws_receiver: rocket_mpsc::Receiver<rocket_ws::Message>,
) {
    use crate::rocket::futures::StreamExt;
    use rocket::tokio;

    let mut worker = Worker::initial(&mut stream, &mut bus_tx);
    let mut interval = rocket::tokio::time::interval(std::time::Duration::from_secs(60));
    loop {
        tokio::select! {
            _ = interval.tick() => {
                // Send message every X seconds
                if let Ok(true) = worker.tick().await {
                    // Ping failed: drop this connection
                    break;
                }
            }
            result = bus_rx.recv() => {
                let message = match result {
                    Ok(message) => message,
                    Err(err) => {
                        // Lagged/closed bus: log and keep serving the socket
                        error!("Bus error: {err}");
                        continue;
                    }
                };
                if let Err(err) = worker.send(message).await {
                    error!("Error sending event to Event bus WebSocket: {}", err);
                    break;
                }
            }
            Some(message) = ws_receiver.next() => {
                // NOTE(review): the sender half is dropped at creation, so
                // this arm appears unreachable — confirm.
                info!("Received message from other client: {:?}", message);
                let _ = worker.send(message).await;
            },
            Ok(false) = worker.poll() => {
                // Continue the loop
            }
            else => {
                break;
            }
        }
    }
}
/// Forward one WebSocket message to this worker's client.
pub async fn send(&mut self, msg: Message) -> Result<(), rocket_ws::result::Error> {
    self.stream.send(msg).await
}
/// Future resolving to the next inbound WebSocket frame, if any.
pub fn next(&mut self) -> Next<'_, rocket_ws::stream::DuplexStream> {
    self.stream.next()
}
/// Keep-alive: send a Ping frame to the client.
///
/// Returns `Ok(true)` when the send failed and the processing loop should
/// terminate, `Ok(false)` otherwise. Never returns `Err`.
pub async fn tick(&mut self) -> Result<bool, ()> {
    if let Err(err) = self.send(rocket_ws::Message::Ping(vec![])).await {
        error!("Processing error: {err}");
        return Ok(true); // Break processing loop
    }
    Ok(false)
}
/// Await and dispatch one inbound WebSocket frame.
///
/// Returns `Ok(true)` when the processing loop should stop (client sent a
/// Close frame or the stream errored), `Ok(false)` to keep polling.
pub async fn poll(&mut self) -> Result<bool, ()> {
    let message = self.next();
    match message.await {
        Some(Ok(message)) => {
            match message {
                // Text frames carry the WorkerMessages protocol
                rocket_ws::Message::Text(_) => match self.on_message(&message).await {
                    Ok(_) => {}
                    Err(err) => error!("Processing error: {err}"),
                },
                rocket_ws::Message::Binary(data) => {
                    // Handle Binary message
                    info!("Received Binary message: {:?}", data);
                }
                rocket_ws::Message::Close(close_frame) => {
                    // Handle Close message
                    info!("Received Close message: {:?}", close_frame);
                    let close_frame = rocket_ws::frame::CloseFrame {
                        code: rocket_ws::frame::CloseCode::Normal,
                        reason: "Client disconected".to_string().into(),
                    };
                    let _ = self.stream.close(Some(close_frame)).await;
                    return Ok(true); // Break processing loop
                }
                rocket_ws::Message::Ping(ping_data) => {
                    // Reply to keep-alive probes with a matching Pong
                    match self.send(rocket_ws::Message::Pong(ping_data)).await {
                        Ok(_) => {}
                        Err(err) => error!("Processing error: {err}"),
                    }
                }
                rocket_ws::Message::Pong(pong_data) => {
                    // Handle Pong message
                    info!("Received Pong message: {:?}", pong_data);
                }
                _ => {
                    info!("Received other message: {:?}", message);
                }
            };
            Ok(false)
        }
        Some(Err(_)) => {
            info!("Connection closed");
            let close_frame = rocket_ws::frame::CloseFrame {
                code: rocket_ws::frame::CloseCode::Normal,
                reason: "Client disconected".to_string().into(),
            };
            let _ = self.stream.close(Some(close_frame)).await;
            // The connection is closed by the client
            Ok(true) // Break processing loop
        }
        None => Ok(false),
    }
}
@ -90,62 +243,46 @@ impl Worker {
self.authenticated = true;
self
}
}
impl ws2::Handler for Server {
fn on_open(&mut self, ws: &WebSocket) -> Pod {
info!("New client: {ws}");
let worker = Worker::initial();
// Add the client
self.clients.insert(ws.id(), worker);
Ok(())
}
fn on_close(&mut self, ws: &WebSocket) -> Pod {
info!("Client /quit: {ws}");
// Drop the client
self.clients.remove(&ws.id());
Ok(())
}
fn on_message(&mut self, ws: &WebSocket, msg: String) -> Pod {
let client = self.clients.get_mut(&ws.id());
if client.is_none() {
// Impossible, close in case
return ws.close();
}
let worker: &mut Worker = client.unwrap();
info!("on message: {msg}, {ws}");
pub async fn on_message(&mut self, msg: &Message) -> Result<(), String> {
info!("on message: {msg}");
let mut worker_reply: Option<WorkerMessages> = None;
let worker_request: WorkerMessages = msg.clone().into();
let worker_request: WorkerMessages = match msg.clone().try_into() {
Ok(data) => data,
Err(err) => return Err(err),
};
let result = match worker_request {
WorkerMessages::AuthenticateRequest { login } => {
if !worker.is_authenticated() {
worker.authenticate(login);
if !self.is_authenticated() {
self.authenticate(login);
return Ok(());
} else {
error!("Already authenticated: {ws}");
error!("Already authenticated");
return Ok(());
}
}
WorkerMessages::ScannerFoundResponse { name, address } => {
info!("Detected {name} for {address}");
self.new_scanners.insert(name, address);
let _ = self
.bus_tx
.try_send(EventBusWriterEvent::ScannerFoundResponse { name, address });
Ok(())
}
WorkerMessages::GetWorkRequest {} => {
worker_reply = Some(WorkerMessages::DoWorkRequest {
neworks: vec![Network(IpCidr::from_str("52.189.78.0/24")?)],
});
worker_reply = Some(WorkerMessages::DoWorkRequest { neworks: vec![] });
Ok(())
}
WorkerMessages::DoWorkRequest { .. } | WorkerMessages::Invalid { .. } => {
error!("Unable to understand: {msg}, {ws}");
error!("Unable to understand: {msg}");
// Unable to understand, close the connection
return ws.close();
let close_frame = rocket_ws::frame::CloseFrame {
code: rocket_ws::frame::CloseCode::Unsupported,
reason: "Invalid data received".to_string().into(),
};
let _ = self.stream.close(Some(close_frame)).await;
Err("Unable to understand: {msg}}")
} /*msg => {
error!("No implemented: {:#?}", msg);
Ok(())
@ -155,14 +292,14 @@ impl ws2::Handler for Server {
// it has a request to send
if let Some(worker_reply) = worker_reply {
let msg_string: String = worker_reply.to_string();
match ws.send(msg_string) {
match self.send(rocket_ws::Message::Text(msg_string)).await {
Ok(_) => match worker_reply {
WorkerMessages::DoWorkRequest { .. } => {}
msg => error!("No implemented: {:#?}", msg),
},
Err(err) => error!("Error sending reply to {ws}: {err}"),
Err(err) => error!("Error sending reply: {err}"),
}
}
result
Ok(result?)
}
}

View File

@ -0,0 +1,34 @@
[package]
name = "snow-scanner-worker"
version = "0.1.0"
authors = ["William Desportes <williamdes@wdes.fr>"]
edition = "2021"
rust-version = "1.81.0" # MSRV
description = "The CLI to run a snow-scanner worker"

# Binary entry point of the worker CLI.
[[bin]]
name = "snow-scanner-worker"
path = "worker.rs"

# Library target so the server crate can reuse the worker's modules.
[lib]
name = "snow_scanner_worker"
path = "mod.rs"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
tungstenite = { version = "0.24.0", default-features = true, features = ["native-tls"] }
rocket.workspace = true
rocket_ws.workspace = true
log2 = "0.1.11"
diesel.workspace = true
dns-ptr-resolver.workspace = true
hickory-resolver.workspace = true
chrono.workspace = true
uuid.workspace = true
cidr.workspace = true
serde.workspace = true
serde_json.workspace = true
weighted-rs.workspace = true
rayon = "1.10.0"
rand = "0.8.5"

View File

@ -2,30 +2,23 @@ use std::net::IpAddr;
use std::str::FromStr;
use std::time::Duration;
use diesel::deserialize::FromSqlRow;
use crate::scanners::Scanners;
use dns_ptr_resolver::ResolvedResult;
use hickory_resolver::config::{NameServerConfigGroup, ResolverConfig, ResolverOpts};
use hickory_resolver::{Name, Resolver};
#[derive(Debug, Clone, Copy, FromSqlRow)]
pub enum Scanners {
Stretchoid,
Binaryedge,
Censys,
InternetMeasurement,
use crate::ip_addr::is_global_hardcoded;
/// Build a plain-DNS (no TLS) name-server group for the given resolver IPs
/// on the standard DNS port 53.
///
/// The parameter is now `&[IpAddr]` instead of `&Vec<IpAddr>`; existing
/// callers passing `&vec` still compile via deref coercion.
pub fn get_dns_server_config(server_ips: &[IpAddr]) -> NameServerConfigGroup {
    // Third argument enables trusting negative (NXDOMAIN) responses —
    // presumably intended; confirm against hickory-resolver docs.
    NameServerConfigGroup::from_ips_clear(server_ips, 53, true)
}
pub fn get_dns_client() -> Resolver {
let server_ip = "1.1.1.1";
let server = NameServerConfigGroup::from_ips_clear(
&[IpAddr::from_str(server_ip).unwrap()],
53, // Port 53
true,
);
let config = ResolverConfig::from_parts(None, vec![], server);
pub fn get_dns_client(server: &NameServerConfigGroup) -> Resolver {
let config = ResolverConfig::from_parts(None, vec![], server.clone());
let mut options = ResolverOpts::default();
options.timeout = Duration::from_secs(5);
options.attempts = 1; // One try
@ -33,6 +26,14 @@ pub fn get_dns_client() -> Resolver {
Resolver::new(config, options).unwrap()
}
/// Whether `ip` is a publicly routable address worth recording.
///
/// Rejects loopback, multicast and unspecified (`0.0.0.0` / `::`) addresses,
/// then defers to `is_global_hardcoded` for the full set of IANA
/// special-purpose ranges.
pub fn validate_ip(ip: IpAddr) -> bool {
    // Single boolean expression replaces the if/early-return + trailing
    // `return` statement; the logic is unchanged.
    !(ip.is_loopback() || ip.is_multicast() || ip.is_unspecified()) && is_global_hardcoded(ip)
}
pub fn detect_scanner(ptr_result: &ResolvedResult) -> Result<Option<Scanners>, ()> {
match &ptr_result.result {
Some(name) => detect_scanner_from_name(&name),
@ -56,6 +57,44 @@ pub fn detect_scanner_from_name(name: &Name) -> Result<Option<Scanners>, ()> {
{
Ok(Some(Scanners::Stretchoid))
}
ref name
if name
.trim_to(2)
.eq_case(&Name::from_str("shadowserver.org.").expect("Should parse")) =>
{
Ok(Some(Scanners::Shadowserver))
}
&_ => Ok(None),
}
}
#[cfg(test)]
mod test {
    use super::*;

    /// A PTR name under `shadowserver.org.` must be detected as Shadowserver.
    #[test]
    fn test_detect_scanner_from_name() {
        let ptr = Name::from_str("scan-47e.shadowserver.org.").unwrap();
        assert_eq!(
            detect_scanner_from_name(&ptr).unwrap(),
            Some(Scanners::Shadowserver)
        );
    }

    /// Detection also works from a full `ResolvedResult` whose query is the
    /// CNAME-style reverse-zone name.
    #[test]
    fn test_detect_scanner() {
        let cname_ptr = Name::from_str("111.0-24.197.62.64.in-addr.arpa.").unwrap();
        let ptr = Name::from_str("scan-47e.shadowserver.org.").unwrap();
        assert_eq!(
            detect_scanner(&ResolvedResult {
                query: cname_ptr,
                result: Some(ptr),
                error: None
            })
            .unwrap(),
            Some(Scanners::Shadowserver)
        );
    }
}

View File

@ -0,0 +1,126 @@
//
// Port of the official Rust implementation
// Source: https://github.com/dani-garcia/vaultwarden/blob/1.32.1/src/util.rs
//
/// Returns `true` when `ip` is globally routable, i.e. it matches none of
/// the IANA special-purpose ranges excluded below.
///
/// TODO: This is extracted from IpAddr::is_global, which is unstable:
/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global
/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged
#[allow(clippy::nonminimal_bool)]
#[cfg(any(not(feature = "unstable"), test))]
pub fn is_global_hardcoded(ip: std::net::IpAddr) -> bool {
    match ip {
        std::net::IpAddr::V4(ip) => {
            !(ip.octets()[0] == 0 // "This network"
            || ip.is_private()
            || (ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000)) //ip.is_shared()
            || ip.is_loopback()
            || ip.is_link_local()
            // addresses reserved for future protocols (`192.0.0.0/24`)
            ||(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0)
            || ip.is_documentation()
            || (ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18) // ip.is_benchmarking()
            || (ip.octets()[0] & 240 == 240 && !ip.is_broadcast()) //ip.is_reserved()
            || ip.is_broadcast())
        }
        std::net::IpAddr::V6(ip) => {
            !(ip.is_unspecified()
            || ip.is_loopback()
            // IPv4-mapped Address (`::ffff:0:0/96`)
            || matches!(ip.segments(), [0, 0, 0, 0, 0, 0xffff, _, _])
            // IPv4-IPv6 Translat. (`64:ff9b:1::/48`)
            || matches!(ip.segments(), [0x64, 0xff9b, 1, _, _, _, _, _])
            // Discard-Only Address Block (`100::/64`)
            || matches!(ip.segments(), [0x100, 0, 0, 0, _, _, _, _])
            // IETF Protocol Assignments (`2001::/23`)
            || (matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if b < 0x200)
                && !(
                    // Port Control Protocol Anycast (`2001:1::1`)
                    u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0001
                    // Traversal Using Relays around NAT Anycast (`2001:1::2`)
                    || u128::from_be_bytes(ip.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0002
                    // AMT (`2001:3::/32`)
                    || matches!(ip.segments(), [0x2001, 3, _, _, _, _, _, _])
                    // AS112-v6 (`2001:4:112::/48`)
                    || matches!(ip.segments(), [0x2001, 4, 0x112, _, _, _, _, _])
                    // ORCHIDv2 (`2001:20::/28`)
                    || matches!(ip.segments(), [0x2001, b, _, _, _, _, _, _] if (0x20..=0x2F).contains(&b))
                ))
            || ((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8)) // ip.is_documentation()
            || ((ip.segments()[0] & 0xfe00) == 0xfc00) //ip.is_unique_local()
            || ((ip.segments()[0] & 0xffc0) == 0xfe80)) //ip.is_unicast_link_local()
        }
    }
}
// On stable builds, use the hardcoded port above under the `is_global` name.
#[cfg(not(feature = "unstable"))]
pub use is_global_hardcoded as is_global;
// On nightly builds (`unstable` feature), delegate to the real (unstable)
// std implementation instead.
#[cfg(feature = "unstable")]
#[inline(always)]
pub fn is_global(ip: std::net::IpAddr) -> bool {
    ip.is_global()
}
/// These are some tests to check that the implementations match
/// The IPv4 can be all checked in 30 seconds or so and they are correct as of nightly 2023-07-17
/// The IPV6 can't be checked in a reasonable time, so we check over a hundred billion random ones, so far correct
/// Note that the is_global implementation is subject to change as new IP RFCs are created
///
/// To run while showing progress output:
/// cargo +nightly test --release --features sqlite,unstable -- --nocapture --ignored
#[cfg(test)]
#[cfg(feature = "unstable")]
mod tests {
    use super::*;
    use std::net::IpAddr;

    /// Exhaustively compares the hardcoded port against std's `is_global`
    /// for the entire IPv4 space. The ranges must be inclusive: with
    /// `0..u8::MAX` the octet value 255 was never tested, so addresses
    /// such as the broadcast address 255.255.255.255 were silently skipped.
    #[test]
    #[ignore]
    fn test_ipv4_global() {
        for a in 0..=u8::MAX {
            println!("Iter: {a}/255");
            for b in 0..=u8::MAX {
                for c in 0..=u8::MAX {
                    for d in 0..=u8::MAX {
                        let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d));
                        assert_eq!(ip.is_global(), is_global_hardcoded(ip), "IP mismatch: {ip}")
                    }
                }
            }
        }
    }

    /// Spot-checks the IPv6 space: 16 threads x 20 iterations x 500M random
    /// addresses (160 billion total) compared against std's `is_global`.
    #[test]
    #[ignore]
    fn test_ipv6_global() {
        use rand::Rng;
        std::thread::scope(|s| {
            for t in 0..16 {
                // The scope joins all spawned threads, so the handle is not needed.
                s.spawn(move || {
                    let mut v = [0u8; 16];
                    let mut rng = rand::thread_rng();
                    for i in 0..20 {
                        // The iteration count is 20, not 50 as the old message claimed.
                        println!("Thread {t} Iter: {i}/20");
                        for _ in 0..500_000_000 {
                            rng.fill(&mut v);
                            let ip = IpAddr::V6(std::net::Ipv6Addr::from(v));
                            assert_eq!(
                                ip.is_global(),
                                is_global_hardcoded(ip),
                                "IP mismatch: {ip}"
                            );
                        }
                    }
                });
            }
        });
    }
}

View File

@ -1,2 +1,5 @@
pub mod detection;
pub mod ip_addr;
pub mod modules;
pub mod scanners;
pub mod utils;

View File

@ -1,6 +1,7 @@
use std::{net::IpAddr, str::FromStr};
use cidr::IpCidr;
use rocket_ws::Message as RocketMessage;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[derive(Debug, Clone, PartialEq)]
@ -66,6 +67,26 @@ impl Into<WorkerMessages> for String {
}
}
/// Wraps a serialized worker message in a websocket text frame.
///
/// Implemented as `From` (rather than a hand-written `Into`) so the blanket
/// impl provides `Into<RocketMessage>` for free and existing `.into()`
/// call sites keep working.
impl From<WorkerMessages> for RocketMessage {
    fn from(msg: WorkerMessages) -> RocketMessage {
        RocketMessage::Text(msg.to_string())
    }
}
impl TryInto<WorkerMessages> for RocketMessage {
type Error = String;
fn try_into(self) -> Result<WorkerMessages, Self::Error> {
match self {
RocketMessage::Text(data) => {
let data: WorkerMessages = data.into();
Ok(data)
}
_ => Err("Only text is supported".to_string()),
}
}
}
#[cfg(test)]
mod tests {
use cidr::IpCidr;

View File

@ -0,0 +1,133 @@
use diesel::deserialize;
use diesel::deserialize::FromSqlRow;
use diesel::mysql::Mysql;
use diesel::mysql::MysqlValue;
use diesel::serialize;
use diesel::serialize::IsNull;
use diesel::sql_types::Text;
use rocket::request::FromParam;
use serde::{Deserialize, Deserializer};
use std::fmt;
use std::io::Write;
/// The scanner services this project recognizes.
/// Stored in MySQL as their textual name (see the ToSql/FromSql impls).
#[derive(Debug, Clone, Copy, FromSqlRow, PartialEq)]
pub enum Scanners {
    Stretchoid,
    Binaryedge,
    Shadowserver,
    // Served from static lists (see `IsStatic`).
    Censys,
    InternetMeasurement,
}
/// Distinguishes scanners served from static, checked-in lists from those
/// discovered dynamically.
pub trait IsStatic {
    /// Returns `true` when the scanner's IP list is static.
    // `&self` instead of the non-idiomatic `self: &Self` (same meaning).
    fn is_static(&self) -> bool;
}
impl IsStatic for Scanners {
    /// Censys and internet-measurement.com are served from static lists;
    /// all other scanners are resolved dynamically.
    fn is_static(&self) -> bool {
        matches!(self, Scanners::Censys | Scanners::InternetMeasurement)
    }
}
/// Lets `Scanners` be used as a typed dynamic segment in Rocket routes.
///
/// Accepts both the bare scanner name ("stretchoid") and the published
/// file name ("stretchoid.txt"). Note that Censys and
/// internet-measurement.com are only reachable via their `.txt` names.
impl FromParam<'_> for Scanners {
    type Error = String;
    fn from_param(param: &'_ str) -> Result<Self, Self::Error> {
        match param {
            "stretchoid" => Ok(Scanners::Stretchoid),
            "binaryedge" => Ok(Scanners::Binaryedge),
            "shadowserver" => Ok(Scanners::Shadowserver),
            "stretchoid.txt" => Ok(Scanners::Stretchoid),
            "binaryedge.txt" => Ok(Scanners::Binaryedge),
            "shadowserver.txt" => Ok(Scanners::Shadowserver),
            "censys.txt" => Ok(Scanners::Censys),
            "internet-measurement.com.txt" => Ok(Scanners::InternetMeasurement),
            v => Err(format!("Unknown value: {v}")),
        }
    }
}
impl<'de> Deserialize<'de> for Scanners {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let s = <Vec<String>>::deserialize(deserializer)?;
let k: &str = s[0].as_str();
match k {
"stretchoid" => Ok(Scanners::Stretchoid),
"binaryedge" => Ok(Scanners::Binaryedge),
"shadowserver" => Ok(Scanners::Shadowserver),
"stretchoid.txt" => Ok(Scanners::Stretchoid),
"binaryedge.txt" => Ok(Scanners::Binaryedge),
"shadowserver.txt" => Ok(Scanners::Shadowserver),
"censys.txt" => Ok(Scanners::Censys),
"internet-measurement.com.txt" => Ok(Scanners::InternetMeasurement),
v => Err(serde::de::Error::custom(format!(
"Unknown value: {}",
v.to_string()
))),
}
}
}
impl fmt::Display for Scanners {
    /// Canonical lowercase name of the scanner; this is also the value
    /// written to the database by the `ToSql` implementation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = match self {
            Self::Stretchoid => "stretchoid",
            Self::Binaryedge => "binaryedge",
            Self::Censys => "censys",
            Self::InternetMeasurement => "internet-measurement.com",
            Self::Shadowserver => "shadowserver",
        };
        f.write_str(name)
    }
}
impl serialize::ToSql<Text, Mysql> for Scanners {
    /// Serializes the scanner as its canonical textual name (same strings
    /// as the `Display` implementation).
    fn to_sql(&self, out: &mut serialize::Output<Mysql>) -> serialize::Result {
        let name: &[u8] = match *self {
            Self::Stretchoid => b"stretchoid",
            Self::Binaryedge => b"binaryedge",
            Self::Censys => b"censys",
            Self::InternetMeasurement => b"internet-measurement.com",
            Self::Shadowserver => b"shadowserver",
        };
        out.write_all(name)?;
        Ok(IsNull::No)
    }
}
impl deserialize::FromSql<Text, Mysql> for Scanners {
    /// Reads the textual name written by `ToSql` back into a `Scanners`,
    /// delegating the name parsing to the `TryFrom<&str>`/`TryInto` impl.
    fn from_sql(bytes: MysqlValue) -> deserialize::Result<Self> {
        let value = <String as deserialize::FromSql<Text, Mysql>>::from_sql(bytes)?;
        // Propagate the parse error instead of matching on the Result by hand.
        let scanner: Result<Scanners, String> = value.as_str().try_into();
        scanner.map_err(Into::into)
    }
}
/// Parses a canonical scanner name (as produced by `Display`/`ToSql`).
///
/// Implemented as `TryFrom` so the blanket impl provides `TryInto` for
/// free at existing call sites (e.g. `FromSql`).
impl TryFrom<&str> for Scanners {
    type Error = String;
    fn try_from(value: &str) -> Result<Self, Self::Error> {
        match value {
            "stretchoid" => Ok(Scanners::Stretchoid),
            "binaryedge" => Ok(Scanners::Binaryedge),
            // Bug fix: "censys" was missing, so a Censys row written by
            // `ToSql` (which stores "censys") could not be read back by
            // `FromSql` — the round-trip always failed.
            "censys" => Ok(Scanners::Censys),
            "internet-measurement.com" => Ok(Scanners::InternetMeasurement),
            "shadowserver" => Ok(Scanners::Shadowserver),
            value => Err(format!("Invalid value: {value}")),
        }
    }
}

View File

@ -0,0 +1,33 @@
use rand::seq::SliceRandom;
use rand::thread_rng;
use std::net::IpAddr;
use weighted_rs::{RoundrobinWeight, Weight};
/// Builds a round-robin rotation of DNS resolver pools.
///
/// Each rotation entry is a random pair drawn from a fixed list of public
/// resolvers; one entry is created per known server, so the rotation is as
/// long as the server list itself.
/// Server list source: https://gist.github.com/mutin-sa/5dcbd35ee436eb629db7872581093bc5
pub fn get_dns_rr() -> RoundrobinWeight<Vec<IpAddr>> {
    use std::str::FromStr;
    let dns_servers: Vec<IpAddr> = [
        "1.1.1.1",      // Cloudflare
        "1.0.0.1",      // Cloudflare
        "8.8.8.8",      // Google
        "8.8.4.4",      // Google
        "9.9.9.9",      // Quad9
        "9.9.9.10",     // Quad9
        "2.56.220.2",   // G-Core DNS
        "95.85.95.85",  // G-Core DNS
        "193.110.81.0", // dns0.eu AS50902
        "185.253.5.0",  // dns0.eu AS50902
        "74.82.42.42",  // Hurricane Electric [AS6939]
    ]
    .iter()
    .map(|addr| IpAddr::from_str(addr).unwrap())
    .collect();
    let mut rr: RoundrobinWeight<Vec<IpAddr>> = RoundrobinWeight::new();
    // For each server in the list, register a shuffled pair of servers
    // as one rotation entry, all with equal weight.
    for _ in &dns_servers {
        let mut pool = dns_servers.clone();
        pool.shuffle(&mut thread_rng());
        pool.truncate(2);
        rr.add(pool, 1);
    }
    rr
}

View File

@ -1,36 +1,93 @@
use std::{env, net::IpAddr};
use chrono::{Duration, NaiveDateTime, Utc};
use cidr::IpCidr;
use detection::detect_scanner;
use dns_ptr_resolver::{get_ptr, ResolvedResult};
use log2::*;
use ws2::{Client, Pod, WebSocket};
use scanners::Scanners;
use tungstenite::stream::MaybeTlsStream;
use tungstenite::{connect, Error, Message, WebSocket};
use weighted_rs::Weight;
pub mod detection;
pub mod ip_addr;
pub mod modules;
pub mod scanners;
pub mod utils;
use crate::detection::get_dns_client;
use crate::detection::{get_dns_client, get_dns_server_config};
use crate::modules::WorkerMessages;
use crate::utils::get_dns_rr;
/// An IP address queued for PTR resolution.
#[derive(Debug, Clone)]
pub struct IpToResolve {
    pub address: IpAddr,
}
#[derive(Debug, Clone)]
/// State of a worker connected to the server over a websocket.
#[derive(Debug)]
pub struct Worker {
    // Whether this worker has authenticated with the server (see `tick`,
    // which sends an AuthenticateRequest while this is false).
    pub authenticated: bool,
    // Pending addresses to resolve — NOTE(review): not visibly consumed
    // in this chunk; confirm against the rest of the file.
    pub tasks: Vec<IpToResolve>,
    // When we last asked the server for work; None = never asked.
    pub last_request_for_work: Option<NaiveDateTime>,
    // Underlying websocket connection to the server.
    ws: WebSocket<MaybeTlsStream<std::net::TcpStream>>,
}
impl Worker {
pub fn initial() -> Worker {
info!("New worker");
/// Wraps a connected websocket into a fresh, unauthenticated `Worker`.
///
/// A 1-second read timeout is set on the underlying TCP stream so that
/// `wait_for_messages` can poll without blocking forever. Implemented as
/// `From` so existing `socket.into()` call sites keep working via the
/// blanket `Into` impl.
impl From<WebSocket<MaybeTlsStream<std::net::TcpStream>>> for Worker {
    fn from(ws: WebSocket<MaybeTlsStream<std::net::TcpStream>>) -> Worker {
        let wait_time = std::time::Duration::from_secs(1);
        match ws.get_ref() {
            tungstenite::stream::MaybeTlsStream::Plain(stream) => stream
                .set_read_timeout(Some(wait_time))
                // The old message said "set_nonblocking", which is not the call made here.
                .expect("set_read_timeout to work"),
            tungstenite::stream::MaybeTlsStream::NativeTls(stream) => stream
                .get_ref()
                .set_read_timeout(Some(wait_time))
                .expect("set_read_timeout to work"),
            // Other TLS backends are not compiled into this binary.
            _ => unimplemented!(),
        };
        Worker {
            authenticated: false,
            tasks: vec![],
            last_request_for_work: None,
            ws,
        }
    }
}
impl Worker {
/// Pumps one message from the websocket: answers pings, dispatches text
/// payloads to `receive_request`, and ignores the other frame kinds.
///
/// Returns `Ok(true)` when the processing loop should stop (close frame
/// received, or the socket is no longer connected), `Ok(false)` to keep
/// polling, and `Err` for any other websocket error.
pub fn wait_for_messages(&mut self) -> Result<bool, Error> {
    // Let the worker send its pending requests (authentication, work
    // requests) before reading.
    self.tick();
    match self.ws.read() {
        Ok(server_request) => {
            match server_request {
                Message::Text(msg_string) => {
                    self.receive_request(msg_string.into());
                }
                Message::Ping(data) => {
                    // Keep the connection alive; a send failure here is non-fatal.
                    let _ = self.ws.write(Message::Pong(data));
                }
                Message::Pong(_) => {}
                Message::Frame(_) => {}
                Message::Binary(_) => {}
                Message::Close(_) => {
                    return Ok(true); // Break the processing loop
                }
            };
            Ok(false)
        }
        Err(err) => {
            match err {
                // Silently drop the error: Processing error: IO error: Resource temporarily unavailable (os error 11)
                // That occurs when no messages are to be read
                // (expected: the stream has a 1s read timeout).
                Error::Io(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => Ok(false),
                Error::Io(ref e) if e.kind() == std::io::ErrorKind::NotConnected => Ok(true), // Break the processing loop
                _ => Err(err),
            }
        }
    }
}
@ -49,11 +106,11 @@ impl Worker {
self
}
pub fn tick(&mut self, ws_client: &Client) -> &Worker {
pub fn tick(&mut self) -> () {
let mut request: Option<WorkerMessages> = None;
if !self.is_authenticated() {
request = Some(WorkerMessages::AuthenticateRequest {
login: "williamdes".to_string(),
login: env::var("WORKER_NAME").expect("The ENV WORKER_NAME should be set"),
});
} else {
if self.last_request_for_work.is_none()
@ -67,14 +124,13 @@ impl Worker {
// it has a request to send
if let Some(request) = request {
self.send_request(ws_client, request);
self.send_request(request);
}
self
}
pub fn send_request(&mut self, ws_client: &Client, request: WorkerMessages) -> &Worker {
pub fn send_request(&mut self, request: WorkerMessages) -> &Worker {
let msg_string: String = request.to_string();
match ws_client.send(msg_string) {
match self.ws.send(Message::Text(msg_string)) {
Ok(_) => {
match request {
WorkerMessages::AuthenticateRequest { login } => {
@ -93,44 +149,57 @@ impl Worker {
self
}
pub fn receive_request(&mut self, ws: &WebSocket, server_request: WorkerMessages) -> &Worker {
/// Resolves the PTR record of every address in `cidr` and reports any
/// detected scanner back to the server.
fn work_on_cidr(&mut self, cidr: IpCidr) {
    info!("Picking up: {cidr}");
    info!("Range, from {} to {}", cidr.first(), cidr.last());
    let addresses = cidr.iter().addresses();
    // Total number of addresses, used for progress reporting only.
    let count = addresses.count();
    let mut current = 0;
    // Rotate across resolver pools to spread the query load.
    let mut rr_dns_servers = get_dns_rr();
    for addr in addresses {
        let client = get_dns_client(&get_dns_server_config(&rr_dns_servers.next().unwrap()));
        match get_ptr(addr, client) {
            Ok(result) => match detect_scanner(&result) {
                Ok(Some(scanner_name)) => {
                    self.report_detection(scanner_name, addr, result);
                }
                Ok(None) => {}
                Err(err) => error!("Error detecting for {addr}: {:?}", err),
            },
            Err(_) => {
                // PTR lookups fail for most addresses; stay quiet.
                //debug!("Error processing {addr}: {err}")
            }
        };
        current += 1;
        if current % 10 == 0 {
            // Bug fix: was "Progress: {count}/{current}", printing
            // total/progress backwards (e.g. "256/10" after 10 addresses).
            info!("Progress: {current}/{count}");
        }
    }
}
fn report_detection(&mut self, scanner_name: Scanners, addr: IpAddr, result: ResolvedResult) {
info!("Detected {:?} for {addr}", scanner_name);
let request = WorkerMessages::ScannerFoundResponse {
name: result.result.unwrap().to_string(),
address: addr,
};
let msg_string: String = request.to_string();
match self.ws.send(Message::Text(msg_string)) {
Ok(_) => {}
Err(err) => error!("Unable to send scanner result: {err}"),
}
}
pub fn receive_request(&mut self, server_request: WorkerMessages) -> &Worker {
match server_request {
WorkerMessages::DoWorkRequest { neworks } => {
info!("Should work on: {:?}", neworks);
info!("Work request received for neworks: {:?}", neworks);
for cidr in neworks {
let cidr = cidr.0;
info!("Picking up: {cidr}");
info!("Range, from {} to {}", cidr.first(), cidr.last());
let addresses = cidr.iter().addresses();
let count = addresses.count();
let mut current = 0;
for addr in addresses {
let client = get_dns_client();
match get_ptr(addr, client) {
Ok(result) => match detect_scanner(&result) {
Ok(Some(scanner_name)) => {
info!("Detected {:?} for {addr}", scanner_name);
let request = WorkerMessages::ScannerFoundResponse {
name: result.result.unwrap().to_string(),
address: addr,
};
let msg_string: String = request.to_string();
match ws.send(msg_string) {
Ok(_) => {}
Err(err) => error!("Unable to send scanner result: {err}"),
}
}
Ok(None) => {}
Err(err) => error!("Error detecting for {addr}: {:?}", err),
},
Err(err) => {
//debug!("Error processing {addr}: {err}")
}
};
current += 1;
}
self.work_on_cidr(cidr);
}
}
WorkerMessages::AuthenticateRequest { .. }
@ -144,23 +213,67 @@ impl Worker {
}
}
impl ws2::Handler for Worker {
fn on_open(&mut self, ws: &WebSocket) -> Pod {
info!("Connected to: {ws}, starting to work");
Ok(())
/*fn resolve_file(addresses: InetAddressIterator<IpAddr>, dns_servers: Vec<&str>) {
let mut ips = vec![];
for address in addresses {
match IpAddr::from_str(address) {
Ok(addr) => ips.push(IpToResolve {
address: addr,
server: rr.next().unwrap(),
}),
Err(err) => {
eprintln!(
"Something went wrong while parsing the IP ({}): {}",
address, err
);
process::exit(1);
}
}
}
fn on_close(&mut self, ws: &WebSocket) -> Pod {
info!("End of the work day: {ws}");
Ok(())
match rayon::ThreadPoolBuilder::new()
.num_threads(30)
.build_global()
{
Ok(r) => r,
Err(err) => {
eprintln!(
"Something went wrong while building the thread pool: {}",
err
);
process::exit(1);
}
}
fn on_message(&mut self, ws: &WebSocket, msg: String) -> Pod {
let server_request: WorkerMessages = msg.clone().into();
self.receive_request(ws, server_request);
Ok(())
}
}
ips.into_par_iter()
.enumerate()
.for_each(|(_i, to_resolve)| {
let server = NameServerConfigGroup::from_ips_clear(
&[to_resolve.server.ip()],
to_resolve.server.port(),
true,
);
let ptr_result = get_ptr(to_resolve.address, resolver);
match ptr_result {
Ok(ptr) => match ptr.result {
Some(res) => println!("{} # {}", to_resolve.address, res),
None => println!("{}", to_resolve.address),
},
Err(err) => {
let two_hundred_millis = Duration::from_millis(400);
thread::sleep(two_hundred_millis);
eprintln!(
"[{}] Error for {} -> {}",
to_resolve.server, to_resolve.address, err.message
)
}
}
});
}*/
fn main() -> () {
let _log2 = log2::stdout()
@ -171,23 +284,39 @@ fn main() -> () {
})
.start();
info!("Running the worker");
let url = "ws://127.0.0.1:8800";
let mut worker = Worker::initial();
match ws2::connect(url) {
Ok(mut ws_client) => {
let connected = ws_client.is_open();
let url = match env::var("WORKER_URL") {
Ok(worker_url) => worker_url,
Err(_) => "ws://127.0.0.1:8800".to_string(),
};
match connect(&url) {
Ok((socket, response)) => {
let connected = response.status() == 101;
if connected {
info!("Connected to: {url}");
} else {
info!("Connecting to: {url}");
info!("Connecting replied {}: {url}", response.status());
}
let mut worker: Worker = socket.into();
loop {
match ws_client.process(&mut worker, 0.5) {
Ok(_) => {
worker.tick(&ws_client);
match worker.wait_for_messages() {
Ok(true) => {
error!("Stopping processing");
break;
}
Err(err) => error!("Processing error: {err}"),
Ok(false) => {
// Keep processing
}
Err(tungstenite::Error::ConnectionClosed) => {
error!("Stopping processing: connection closed");
break;
}
Err(tungstenite::Error::AlreadyClosed) => {
error!("Stopping processing: connection already closed");
break;
}
Err(err) => error!("Processing error: {err} -> {:?}", err),
}
}
}