first working version, not debugged yet
.gitignore (vendored, 5 changes)
@@ -1 +1,4 @@
-/target
+target
+.vscode
+log
+Cargo.lock
Cargo.lock (generated, deleted, 7 lines)
@@ -1,7 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4

[[package]]
name = "rsnmpagent"
version = "0.1.0"
Cargo.toml (18 changes)
@@ -1,6 +1,22 @@
 [package]
 name = "rsnmpagent"
-version = "0.1.0"
+version = "0.3.0"
 edition = "2024"

+[profile.release]
+#strip = "debuginfo"
+strip = true
+
+
 [dependencies]
+log = "0.4"
+flexi_logger = { version = "0.31", features = ["syslog_writer"] }
+figment = { version = "0.10", features = ["env", "yaml"] }
+serde = { version = "1.0", features = ["derive"] }
+clap = { version = "4.5", features = ["derive" ] }
+regex = "1.12"
+chrono = "0.4"
+hex = "0.4"
+sha3 = "0.10"
+diff = "0.1"
+glob = "0.3"
config.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
---
loglevel: "info"
logdir: "/var/log"
base_oid: ".1.3.6.1.4.1.8072.9999.9999"
debug_log_marker: "/dev/shm/debug.marker"
intervals:
  filesum: 60
  processes: 60
  meminfo: 60
  bonding:
  multipath:
rust-toolchain.toml (new file, 2 lines)
@@ -0,0 +1,2 @@
[toolchain]
components = [ "rust-analyzer", "clippy", "rustfmt" ]
rustfmt.toml (new file, 6 lines)
@@ -0,0 +1,6 @@
style_edition = "2024"
max_width = 120
newline_style = "Unix"

# This parameter is currently only available in the unstable version.
#blank_lines_upper_bound = 4
src/bonding.rs (new file, 108 lines)
@@ -0,0 +1,108 @@
|
||||
use glob::glob;
|
||||
use log::{error, info, trace};
|
||||
use std::ffi::OsStr;
|
||||
use std::fs::{self};
|
||||
use std::io::{self};
|
||||
use std::path::Path;
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub(crate) struct BondingInfo {
|
||||
pub(crate) bond: String,
|
||||
pub(crate) master_state: String,
|
||||
pub(crate) slave: String,
|
||||
pub(crate) slave_state: String,
|
||||
pub(crate) mode: String,
|
||||
}
|
||||
|
||||
pub(crate) fn bonding_status(path: &str, re: Option<®ex::Regex>) -> io::Result<Vec<BondingInfo>> {
|
||||
let mut bond_list = Vec::new();
|
||||
let pglob = format!("{}/bond*", path);
|
||||
match glob(&pglob) {
|
||||
Ok(iter) => {
|
||||
for entry in iter {
|
||||
match entry {
|
||||
Ok(f) => {
|
||||
if f.is_dir() {
|
||||
match bond_status(&f) {
|
||||
Ok(r) => {
|
||||
for b in r {
|
||||
trace!("sucessfull read bonding device {:?}", b);
|
||||
match re {
|
||||
Some(re) => {
|
||||
if re.is_match(&b.bond) {
|
||||
info!("skipping bond {:?} as the filter has matched bond", b);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
None => {
|
||||
trace!("No filter regex, continue");
|
||||
}
|
||||
}
|
||||
bond_list.push(b);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Error reading bonding device {}", e);
|
||||
} // match e.kind() {
|
||||
// io::ErrorKind::Other => {
|
||||
// debug!("Skipped device {:?}", e);
|
||||
// }
|
||||
// _ => {
|
||||
// error!("Error reading bonding device {}", e);
|
||||
//}
|
||||
//},
|
||||
}
|
||||
} else {
|
||||
trace!("{:?} is not a dir", f);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Error reading directory Entry: {:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Invalid glob pattern: {}", e);
|
||||
return Err(std::io::Error::other(format!("Invalid glob pattern {}", e)));
|
||||
// return Err(io::Error::new(
|
||||
// io::ErrorKind::Other,
|
||||
// format!("Invalid glob pattern {}", e),
|
||||
// ));
|
||||
}
|
||||
};
|
||||
Ok(bond_list)
|
||||
}
|
||||
|
||||
fn bond_status(d: &Path) -> io::Result<Vec<BondingInfo>> {
|
||||
let mut bl = Vec::new();
|
||||
let bond_info = BondingInfo {
|
||||
bond: d
|
||||
.file_name()
|
||||
.unwrap_or(OsStr::new("unknown"))
|
||||
.to_string_lossy()
|
||||
.to_string(),
|
||||
master_state: fs::read_to_string(d.join("bonding/mii_status"))?.trim().to_string(),
|
||||
slave: "".to_string(),
|
||||
slave_state: "".to_string(),
|
||||
mode: "".to_string(),
|
||||
};
|
||||
let slaves = fs::read_to_string(d.join("bonding/slaves"))?.trim().to_string();
|
||||
for slave in slaves.split(' ') {
|
||||
let slave_state = fs::read_to_string(d.join(format!("lower_{}/bonding_slave/mii_status", slave)))?
|
||||
.trim()
|
||||
.to_string();
|
||||
let slave_speed = fs::read_to_string(d.join(format!("lower_{}/speed", slave)))?
|
||||
.trim()
|
||||
.to_string();
|
||||
let slave_mtu = fs::read_to_string(d.join(format!("lower_{}/mtu", slave)))?
|
||||
.trim()
|
||||
.to_string();
|
||||
let mut mpi = bond_info.clone();
|
||||
mpi.slave = slave.to_string();
|
||||
mpi.slave_state = slave_state;
|
||||
mpi.mode = format!("Speed: {}, MTU: {}", slave_speed, slave_mtu);
|
||||
bl.push(mpi);
|
||||
}
|
||||
Ok(bl)
|
||||
}
|
||||
src/config.rs (new file, 270 lines)
@@ -0,0 +1,270 @@
|
||||
use clap::Parser;
|
||||
use figment::{
|
||||
Figment,
|
||||
providers::{Format, Serialized, Yaml},
|
||||
};
|
||||
use log::warn;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::error::Error;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[allow(unused)]
|
||||
use flexi_logger::{
|
||||
FileSpec, LogSpecification, Logger, LoggerHandle, colored_with_thread, with_thread,
|
||||
writers::{SyslogConnection, SyslogLineHeader, SyslogWriter},
|
||||
};
|
||||
|
||||
#[derive(Deserialize, Serialize, Debug)]
|
||||
pub(crate) struct DataFunctionsInterval {
|
||||
pub(crate) log_debug_watcher: Option<u64>,
|
||||
multipath: Option<u64>,
|
||||
meminfo: Option<u64>,
|
||||
processes: Option<u64>,
|
||||
bonding: Option<u64>,
|
||||
filesum: Option<u64>,
|
||||
}
|
||||
|
||||
impl Default for DataFunctionsInterval {
|
||||
fn default() -> Self {
|
||||
DataFunctionsInterval {
|
||||
log_debug_watcher: Some(5),
|
||||
multipath: Some(60),
|
||||
meminfo: Some(30),
|
||||
processes: Some(30),
|
||||
bonding: Some(30),
|
||||
filesum: Some(60),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DataFunctionsInterval {
|
||||
pub fn meminfo(&self) -> Option<u64> {
|
||||
self.meminfo
|
||||
}
|
||||
pub fn processes(&self) -> Option<u64> {
|
||||
self.processes
|
||||
}
|
||||
pub fn filesum(&self) -> Option<u64> {
|
||||
self.filesum
|
||||
}
|
||||
pub fn bonding(&self) -> Option<u64> {
|
||||
self.bonding
|
||||
}
|
||||
pub fn multipath(&self) -> Option<u64> {
|
||||
self.multipath
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Debug, Clone)]
|
||||
pub(crate) struct DataFunctionsFilesum {
|
||||
pub(crate) passwd: Option<String>,
|
||||
pub(crate) shadow: Option<String>,
|
||||
pub(crate) group: Option<String>,
|
||||
pub(crate) authorized_keys: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Debug)]
|
||||
pub struct DataFunctionsExtra {
|
||||
multipath: Option<String>,
|
||||
bonding: Option<String>,
|
||||
filesum: DataFunctionsFilesum,
|
||||
}
|
||||
|
||||
impl Default for DataFunctionsExtra {
|
||||
fn default() -> Self {
|
||||
DataFunctionsExtra {
|
||||
multipath: None,
|
||||
bonding: None,
|
||||
filesum: DataFunctionsFilesum {
|
||||
passwd: None,
|
||||
shadow: None,
|
||||
group: None,
|
||||
authorized_keys: None,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DataFunctionsExtra {
|
||||
pub(crate) fn filesum(&self) -> DataFunctionsFilesum {
|
||||
self.filesum.clone()
|
||||
}
|
||||
pub(crate) fn multipath(&self) -> Option<String> {
|
||||
self.multipath.clone()
|
||||
}
|
||||
pub(crate) fn bonding(&self) -> Option<String> {
|
||||
self.bonding.clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Debug, Clone, clap::ValueEnum)]
|
||||
enum LogConfig {
|
||||
None,
|
||||
StdErr,
|
||||
Syslog,
|
||||
Logfile,
|
||||
LogfileAndSyslog,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Debug)]
|
||||
pub struct AppConfig {
|
||||
logoutput: LogConfig,
|
||||
logdir: String,
|
||||
logfile_basename: String,
|
||||
loglevel: String,
|
||||
pub(crate) base_oid: String,
|
||||
pub(crate) debug_log_marker: PathBuf,
|
||||
pub(crate) intervals: DataFunctionsInterval,
|
||||
pub(crate) extra_config: DataFunctionsExtra,
|
||||
}
|
||||
|
||||
impl Default for AppConfig {
|
||||
fn default() -> Self {
|
||||
AppConfig {
|
||||
logoutput: LogConfig::Logfile,
|
||||
logdir: "log".to_string(),
|
||||
logfile_basename: "rsnmpagent".to_string(),
|
||||
loglevel: "info".to_string(),
|
||||
base_oid: ".1.3.6.1.4.1.8072.9999.9999".to_string(),
|
||||
debug_log_marker: "debug.marker".to_string().into(),
|
||||
intervals: DataFunctionsInterval::default(),
|
||||
extra_config: DataFunctionsExtra::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl AppConfig {
|
||||
pub fn base_oid(&self) -> &str {
|
||||
&self.base_oid
|
||||
}
|
||||
}
|
||||
|
||||
// Define the struct to hold the command-line arguments.
|
||||
#[derive(Parser, Deserialize, Serialize, Debug)]
|
||||
#[command(author, version, about = "rsnmpd, snmpd passpersist extension", long_about = None)]
|
||||
struct Cli {
|
||||
/// Optional path to a configuration file.
|
||||
#[arg(short, long, value_name = "FILENAME", default_value = "config.yml")]
|
||||
configfile: PathBuf,
|
||||
|
||||
#[arg(short, long, value_name = "LOGOUTPUT", value_enum, default_value_t = LogConfig::Logfile)]
|
||||
logoutput: LogConfig,
|
||||
|
||||
#[arg(short, long)]
|
||||
show_parsed_config: bool,
|
||||
}
|
||||
|
||||
pub fn build_config() -> Result<AppConfig, Box<dyn Error>> {
|
||||
let cli = Cli::parse();
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
if cli.show_parsed_config {
|
||||
eprintln!("Parsed config line options: {:#?}", cli);
|
||||
}
|
||||
|
||||
let config: AppConfig = Figment::new()
|
||||
.merge(Serialized::defaults(AppConfig::default()))
|
||||
.merge(Yaml::file(&cli.configfile))
|
||||
.merge(Serialized::defaults(&cli))
|
||||
.extract()?;
|
||||
|
||||
if cli.show_parsed_config {
|
||||
eprintln!("Loaded configuration: {:#?}", config);
|
||||
}
|
||||
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
pub fn start_logging(config: &AppConfig) -> Result<LoggerHandle, Box<dyn Error>> {
|
||||
let r = match config.logoutput {
|
||||
LogConfig::None => {
|
||||
let handle = Logger::with(LogSpecification::off()).do_not_log().start()?;
|
||||
warn!("Starting None logging config");
|
||||
handle
|
||||
}
|
||||
LogConfig::StdErr => {
|
||||
let handle = Logger::try_with_str(&config.loglevel)?
|
||||
.format(colored_with_thread)
|
||||
.start()?;
|
||||
warn!("Starting StdErr logging config with level: {}", config.loglevel);
|
||||
handle
|
||||
}
|
||||
LogConfig::Syslog => {
|
||||
#[cfg(target_os = "windows")]
|
||||
{
|
||||
Err("Syslog in windows not implemented")?
|
||||
}
|
||||
#[cfg(target_os = "linux")]
|
||||
{
|
||||
let syslog_writer = SyslogWriter::builder(
|
||||
SyslogConnection::syslog_call(),
|
||||
SyslogLineHeader::Rfc3164,
|
||||
flexi_logger::writers::SyslogFacility::SystemDaemons,
|
||||
)
|
||||
.build()?;
|
||||
let handle = Logger::try_with_str(&config.loglevel)?
|
||||
.log_to_writer(syslog_writer)
|
||||
.format(with_thread)
|
||||
.start()?;
|
||||
warn!("Starting Syslog logging config with level: {}", config.loglevel);
|
||||
handle
|
||||
}
|
||||
}
|
||||
LogConfig::Logfile => {
|
||||
let handle = Logger::try_with_str(&config.loglevel)?
|
||||
.log_to_file(
|
||||
FileSpec::default()
|
||||
.directory(&config.logdir)
|
||||
.basename(&config.logfile_basename)
|
||||
.suppress_timestamp(),
|
||||
)
|
||||
.append()
|
||||
.format(with_thread)
|
||||
.start()?;
|
||||
warn!(
|
||||
"Starting Logfile logging to {} with level: {}",
|
||||
config.logdir, config.loglevel
|
||||
);
|
||||
handle
|
||||
}
|
||||
LogConfig::LogfileAndSyslog => {
|
||||
#[cfg(target_os = "windows")]
|
||||
{
|
||||
eprintln!(
|
||||
"Starting LogfileAndSyslog logging to {} with level: {}",
|
||||
config.logdir, config.loglevel
|
||||
);
|
||||
Err("Syslog in windows not implemented")?
|
||||
}
|
||||
#[cfg(target_os = "linux")]
|
||||
{
|
||||
let handle = Logger::try_with_str(&config.loglevel)
|
||||
.expect("Log start failed")
|
||||
.log_to_file(
|
||||
FileSpec::default()
|
||||
.directory(&config.logdir)
|
||||
.basename(&config.logfile_basename)
|
||||
.suppress_timestamp(),
|
||||
)
|
||||
.append()
|
||||
.format(with_thread)
|
||||
.add_writer(
|
||||
"syslog",
|
||||
SyslogWriter::builder(
|
||||
SyslogConnection::syslog_call(),
|
||||
SyslogLineHeader::Rfc3164,
|
||||
flexi_logger::writers::SyslogFacility::SystemDaemons,
|
||||
)
|
||||
.build()?,
|
||||
)
|
||||
.start()?;
|
||||
warn!(
|
||||
"Starting LogfileAndSyslog logging to {} with level: {}",
|
||||
config.logdir, config.loglevel
|
||||
);
|
||||
handle
|
||||
}
|
||||
}
|
||||
};
|
||||
Ok(r)
|
||||
}
|
||||
src/filesum.rs (new file, 70 lines)
@@ -0,0 +1,70 @@
|
||||
use diff::Result;
|
||||
use hex;
|
||||
use log::{debug, trace};
|
||||
use sha3::{Digest, Sha3_256};
|
||||
use std::fs;
|
||||
use std::io::{self, Error};
|
||||
use std::path::Path;
|
||||
|
||||
pub(crate) fn filesum_filtered(
|
||||
path: &Path,
|
||||
oldfile: &mut String,
|
||||
diff_string: &mut String,
|
||||
re: Option<®ex::Regex>,
|
||||
) -> io::Result<(bool, String)> {
|
||||
// Open file for hashing
|
||||
match re {
|
||||
Some(v) => debug!("try to open file {:?} for hashing using filter regex {}", path, v),
|
||||
None => debug!("try to open file {:?} for hashing", path),
|
||||
}
|
||||
let mut hasher = Sha3_256::new();
|
||||
let mut filedata = String::with_capacity(2048);
|
||||
let mut changed = false;
|
||||
|
||||
// we only read small files, so it's fast to read the whole file into memory, and we can also do a diff
|
||||
if let Ok(file_contents) = fs::read_to_string(path) {
|
||||
if let Some(re) = re {
|
||||
debug!("Filter lines with regex {:?}", re);
|
||||
for line in file_contents.lines() {
|
||||
if re.is_match(line) {
|
||||
trace!("line {} skipped by filter regex", line);
|
||||
continue;
|
||||
}
|
||||
// Update the hasher with the bytes
|
||||
filedata.push_str(line);
|
||||
}
|
||||
} else {
|
||||
// we do not have a filter regex, so we can simply pass the whole file to the hasher
|
||||
debug!("Hash file without filter regex");
|
||||
filedata = file_contents;
|
||||
}
|
||||
hasher.update(format!("{}\n", filedata).as_bytes());
|
||||
if !oldfile.is_empty() && *oldfile != filedata {
|
||||
diff_string.clear();
|
||||
for diff in diff::lines(oldfile, &filedata) {
|
||||
match diff {
|
||||
Result::Left(l) => {
|
||||
trace!("Diff - {}", l); // Removed line
|
||||
diff_string.push_str(&format!("-{}\n", l));
|
||||
}
|
||||
Result::Both(l, _) => {
|
||||
trace!("Diff {}", l); // Unchanged line
|
||||
}
|
||||
Result::Right(r) => {
|
||||
trace!("Diff + {}", r); // Added line
|
||||
diff_string.push_str(&format!("+{}\n", r));
|
||||
}
|
||||
}
|
||||
}
|
||||
debug!("Diff for {:?} is now {}", path, diff_string);
|
||||
changed = true;
|
||||
*oldfile = filedata;
|
||||
} else if oldfile.is_empty() {
|
||||
*oldfile = filedata;
|
||||
}
|
||||
// Finalize the hash and convert it to a hexadecimal string
|
||||
let hash = hex::encode(hasher.finalize());
|
||||
return Ok((changed, hash));
|
||||
}
|
||||
Err(Error::other(format!("Unable to read file {:?}", path)))
|
||||
}
|
||||
src/helper.rs (new file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
use log::{debug, error, info};
|
||||
use regex::Regex;
|
||||
|
||||
pub(crate) fn compile_re(regex: Option<String>, name: &str) -> Option<Regex> {
|
||||
if let Some(r) = regex {
|
||||
let re = Regex::new(&r);
|
||||
match re {
|
||||
Ok(r) => {
|
||||
debug!("Sucessfull compiled {} filter regex: {:?}", name, r);
|
||||
Some(r)
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Error compiling {} filter regex: {:?}", name, e);
|
||||
None
|
||||
}
|
||||
}
|
||||
} else {
|
||||
info!("No filter regex for {} supplied", name);
|
||||
None
|
||||
}
|
||||
}
|
||||
src/lib.rs (new file, 713 lines)
@@ -0,0 +1,713 @@
|
||||
use chrono::Utc;
|
||||
use flexi_logger::LoggerHandle;
|
||||
use log::{debug, error, info, trace, warn};
|
||||
use regex::RegexBuilder;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::str::FromStr;
|
||||
use std::sync::{Arc, Condvar, Mutex};
|
||||
use std::thread::{Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
|
||||
pub mod bonding;
|
||||
pub mod config;
|
||||
pub mod filesum;
|
||||
pub mod helper;
|
||||
pub mod multipath;
|
||||
pub mod processes;
|
||||
pub mod snmp;
|
||||
use crate::bonding::bonding_status;
|
||||
use crate::config::DataFunctionsFilesum;
|
||||
use crate::filesum::filesum_filtered;
|
||||
use crate::helper::compile_re;
|
||||
use crate::multipath::multipath_status;
|
||||
use crate::processes::Ptypes;
|
||||
use crate::snmp::{Oid, OidData, SnmpData};
|
||||
|
||||
fn t_multipath(
|
||||
t_quit: Arc<(Mutex<bool>, Condvar)>,
|
||||
t_check_interval: u64,
|
||||
snmp_data: Arc<Mutex<OidData>>,
|
||||
options: Option<String>,
|
||||
) {
|
||||
debug!("Startup of t_multipath function");
|
||||
#[cfg(target_os = "linux")]
|
||||
{
|
||||
let oidt = Oid::from_str("2.1.0").unwrap();
|
||||
let oidc = Oid::from_str("2.2.0").unwrap();
|
||||
let oidtab_dm = Oid::from_str("2.3.1.1").unwrap();
|
||||
let oidtab_uuid = Oid::from_str("2.3.1.2").unwrap();
|
||||
let oidtab_desc = Oid::from_str("2.3.1.3").unwrap();
|
||||
let oidtab_slt = Oid::from_str("2.3.1.4").unwrap();
|
||||
let oidtab_sle = Oid::from_str("2.3.1.5").unwrap();
|
||||
let oidtab_end = Oid::from_str("2.3.9999").unwrap();
|
||||
let filter_re = compile_re(options, "multipath_filter");
|
||||
let base_path = String::from("/sys/devices/virtual/block");
|
||||
let (lock, cvar) = &*t_quit;
|
||||
let mut quit = false;
|
||||
while !quit {
|
||||
debug!("Starting multipath_status function");
|
||||
let mut mplist = Vec::new();
|
||||
match multipath_status(&base_path, filter_re.as_ref()) {
|
||||
Ok(m) => {
|
||||
trace!("multipath_status returned Ok with {:?}", &m);
|
||||
mplist = m;
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Error at reading sys multipath status files: {}", e);
|
||||
}
|
||||
}
|
||||
let now = Utc::now().timestamp().try_into().unwrap_or(0);
|
||||
{
|
||||
debug!("try to lock mutex snmp_data to update multipath {:?}", now);
|
||||
let mut guard = snmp_data.lock().unwrap();
|
||||
let snmp_data = &mut guard.data;
|
||||
snmp_data.insert(oidt.clone(), SnmpData::Gauge(now));
|
||||
snmp_data.insert(oidc.clone(), SnmpData::Gauge(mplist.len().try_into().unwrap_or(0)));
|
||||
|
||||
// since we have not implemented the Copy trait, we must use the old way ...
|
||||
// 1. Collect the keys to be removed.
|
||||
let keys_to_remove: Vec<Oid> = snmp_data
|
||||
.range(&oidtab_dm..&oidtab_end)
|
||||
.map(|(k, _)| k.clone())
|
||||
.collect();
|
||||
// 2. Iterate over the collected keys and remove them.
|
||||
for key in keys_to_remove {
|
||||
snmp_data.remove(&key);
|
||||
}
|
||||
if !mplist.is_empty() {
|
||||
let mut n = 1;
|
||||
for e in mplist {
|
||||
snmp_data.insert(oidtab_dm.add_suffix_int(n), SnmpData::String(e.mp));
|
||||
snmp_data.insert(oidtab_uuid.add_suffix_int(n), SnmpData::String(e.uuid));
|
||||
snmp_data.insert(
|
||||
oidtab_desc.add_suffix_int(n),
|
||||
SnmpData::String(format!("{},{}", e.vendor, e.model)),
|
||||
);
|
||||
snmp_data.insert(oidtab_slt.add_suffix_int(n), SnmpData::Gauge(e.slave_count));
|
||||
snmp_data.insert(oidtab_sle.add_suffix_int(n), SnmpData::Gauge(e.slave_failed));
|
||||
n += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
let guard = lock.lock().unwrap();
|
||||
let qres = cvar.wait_timeout(guard, Duration::from_secs(t_check_interval)).unwrap();
|
||||
if !qres.1.timed_out() {
|
||||
quit = *qres.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
debug!("Exit of t_multipath function");
|
||||
}
|
||||
|
||||
fn t_bonding(
|
||||
t_quit: Arc<(Mutex<bool>, Condvar)>,
|
||||
t_check_interval: u64,
|
||||
snmp_data: Arc<Mutex<OidData>>,
|
||||
options: Option<String>,
|
||||
) {
|
||||
debug!("Startup of t_bonding function");
|
||||
#[cfg(target_os = "linux")]
|
||||
{
|
||||
let oidt = Oid::from_str("5.1.0").unwrap();
|
||||
let oidc = Oid::from_str("5.2.0").unwrap();
|
||||
let oidtab_bond = Oid::from_str("5.3.1.1").unwrap();
|
||||
let oidtab_bond_state = Oid::from_str("5.3.1.2").unwrap();
|
||||
let oidtab_slave = Oid::from_str("5.3.1.3").unwrap();
|
||||
let oidtab_slave_state = Oid::from_str("5.3.1.4").unwrap();
|
||||
let oidtab_slave_mode = Oid::from_str("5.3.1.5").unwrap();
|
||||
let oidtab_end = Oid::from_str("5.3.9999").unwrap();
|
||||
let filter_re = compile_re(options, "bonding_filter");
|
||||
let base_path = String::from("/sys/devices/virtual/net");
|
||||
let (lock, cvar) = &*t_quit;
|
||||
let mut quit = false;
|
||||
while !quit {
|
||||
debug!("Starting bonding_status function");
|
||||
let mut bl = Vec::new();
|
||||
match bonding_status(&base_path, filter_re.as_ref()) {
|
||||
Ok(b) => {
|
||||
trace!("bonding_status returned Ok with {:?}", &b);
|
||||
bl = b;
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Error at reading sys bonding status files: {}", e);
|
||||
}
|
||||
}
|
||||
let now = Utc::now().timestamp().try_into().unwrap_or(0);
|
||||
{
|
||||
debug!("try to lock mutex snmp_data to update bonding with {:?}", now);
|
||||
let mut guard = snmp_data.lock().unwrap();
|
||||
let snmp_data = &mut guard.data;
|
||||
snmp_data.insert(oidt.clone(), SnmpData::Gauge(now));
|
||||
snmp_data.insert(oidc.clone(), SnmpData::Gauge(bl.len().try_into().unwrap_or(0)));
|
||||
|
||||
// since we have not implemented the Copy trait, we must use the old way ...
|
||||
// 1. Collect the keys to be removed.
|
||||
let keys_to_remove: Vec<Oid> = snmp_data
|
||||
.range(&oidtab_bond..&oidtab_end)
|
||||
.map(|(k, _)| k.clone())
|
||||
.collect();
|
||||
// 2. Iterate over the collected keys and remove them.
|
||||
for key in keys_to_remove {
|
||||
snmp_data.remove(&key);
|
||||
}
|
||||
if !bl.is_empty() {
|
||||
let mut n = 1;
|
||||
for e in bl {
|
||||
snmp_data.insert(oidtab_bond.add_suffix_int(n), SnmpData::String(e.bond));
|
||||
snmp_data.insert(oidtab_bond_state.add_suffix_int(n), SnmpData::String(e.master_state));
|
||||
snmp_data.insert(oidtab_slave.add_suffix_int(n), SnmpData::String(e.slave));
|
||||
snmp_data.insert(oidtab_slave_state.add_suffix_int(n), SnmpData::String(e.slave_state));
|
||||
snmp_data.insert(oidtab_slave_mode.add_suffix_int(n), SnmpData::String(e.mode));
|
||||
n += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
let guard = lock.lock().unwrap();
|
||||
let qres = cvar.wait_timeout(guard, Duration::from_secs(t_check_interval)).unwrap();
|
||||
if !qres.1.timed_out() {
|
||||
quit = *qres.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
debug!("Exit of t_bonding function");
|
||||
}
|
||||
|
||||
fn t_filesum(
|
||||
t_quit: Arc<(Mutex<bool>, Condvar)>,
|
||||
t_check_interval: u64,
|
||||
snmp_data: Arc<Mutex<OidData>>,
|
||||
options: DataFunctionsFilesum,
|
||||
) {
|
||||
debug!("Startup of t_filesum function");
|
||||
#[cfg(target_os = "linux")]
|
||||
{
|
||||
// allocate some strings for holding file contents between check runs
|
||||
let mut oldpasswd = String::with_capacity(2048);
|
||||
let mut oldshadow = String::with_capacity(2048);
|
||||
let mut oldgroup = String::with_capacity(2048);
|
||||
let mut oldauthkey = String::with_capacity(2048);
|
||||
let mut hash_passwd = String::with_capacity(128);
|
||||
let mut diff_passwd = String::with_capacity(2048);
|
||||
let mut hash_shadow = String::with_capacity(128);
|
||||
let mut diff_shadow = String::with_capacity(2048);
|
||||
let mut hash_group = String::with_capacity(128);
|
||||
let mut diff_group = String::with_capacity(2048);
|
||||
let mut hash_authkey = String::with_capacity(128);
|
||||
let mut diff_authkey = String::with_capacity(2048);
|
||||
|
||||
// allocate Option<Regex> for our regex ...
|
||||
let re_passwd = compile_re(options.passwd, "passwd");
|
||||
let re_shadow = compile_re(options.shadow, "shadow");
|
||||
let re_group = compile_re(options.group, "group");
|
||||
let re_authkey = compile_re(options.authorized_keys, "authorized_keys");
|
||||
|
||||
// prepare variables which we use in the whole function
|
||||
let oid_filesum_time = Oid::from_str("6.1.0").unwrap();
|
||||
let oid_passwd_hash = Oid::from_str("6.3.1.2.1").unwrap();
|
||||
let oid_passwd_diff = Oid::from_str("6.3.1.3.1").unwrap();
|
||||
let oid_shadow_hash = Oid::from_str("6.3.1.2.2").unwrap();
|
||||
let oid_shadow_diff = Oid::from_str("6.3.1.3.2").unwrap();
|
||||
let oid_group_hash = Oid::from_str("6.3.1.2.3").unwrap();
|
||||
let oid_group_diff = Oid::from_str("6.3.1.3.3").unwrap();
|
||||
let oid_authkey_hash = Oid::from_str("6.3.1.2.4").unwrap();
|
||||
let oid_authkey_diff = Oid::from_str("6.3.1.3.4").unwrap();
|
||||
let oid_xxx_diff = Oid::from_str("6.3.1.3.999").unwrap();
|
||||
let fn_passwd_str = String::from("/etc/passwd");
|
||||
let fn_shadow_str = String::from("/etc/shadow");
|
||||
let fn_group_str = String::from("/etc/group");
|
||||
let fn_authkey_str = String::from("/root/.ssh/authorized_keys");
|
||||
let fn_passwd = PathBuf::from(&fn_passwd_str);
|
||||
let fn_shadow = PathBuf::from(&fn_shadow_str);
|
||||
let fn_group = PathBuf::from(&fn_group_str);
|
||||
let fn_authkey = PathBuf::from(&fn_authkey_str);
|
||||
{
|
||||
let oid_filesum_cnt = Oid::from_str("6.2.0").unwrap();
|
||||
let oid_passwd_filename = Oid::from_str("6.3.1.1.1").unwrap();
|
||||
let oid_shadow_filename = Oid::from_str("6.3.1.1.2").unwrap();
|
||||
let oid_group_filename = Oid::from_str("6.3.1.1.3").unwrap();
|
||||
let oid_authkey_filename = Oid::from_str("6.3.1.1.4").unwrap();
|
||||
debug!("try to lock mutex snmp_data to update filesum header data");
|
||||
let mut guard = snmp_data.lock().unwrap();
|
||||
let snmp_data = &mut guard.data;
|
||||
snmp_data.insert(oid_filesum_cnt, SnmpData::Gauge(4));
|
||||
snmp_data.insert(oid_passwd_filename, SnmpData::String(fn_passwd_str.clone()));
|
||||
snmp_data.insert(oid_shadow_filename, SnmpData::String(fn_shadow_str.clone()));
|
||||
snmp_data.insert(oid_group_filename, SnmpData::String(fn_group_str.clone()));
|
||||
snmp_data.insert(oid_authkey_filename, SnmpData::String(fn_authkey_str.clone()));
|
||||
}
|
||||
let (lock, cvar) = &*t_quit;
|
||||
let mut quit = false;
|
||||
let mut is_changed = true;
|
||||
while !quit {
|
||||
match filesum_filtered(&fn_passwd, &mut oldpasswd, &mut diff_passwd, re_passwd.as_ref()) {
|
||||
Ok((changed, hash)) => {
|
||||
if changed {
|
||||
warn!("Hash of {} is now {}", fn_passwd_str, hash);
|
||||
hash_passwd = hash;
|
||||
is_changed = true;
|
||||
} else {
|
||||
debug!("Hash of {} is still now {}", fn_passwd_str, hash);
|
||||
if hash_passwd.is_empty() {
|
||||
hash_passwd = hash;
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Filesum error {}", e);
|
||||
}
|
||||
}
|
||||
match filesum_filtered(&fn_shadow, &mut oldshadow, &mut diff_shadow, re_shadow.as_ref()) {
|
||||
Ok((changed, hash)) => {
|
||||
if changed {
|
||||
warn!("Hash of {} is now {}", fn_shadow_str, hash);
|
||||
hash_shadow = hash;
|
||||
is_changed = true;
|
||||
} else {
|
||||
debug!("Hash of {} is still now {}", fn_shadow_str, hash);
|
||||
if hash_shadow.is_empty() {
|
||||
hash_shadow = hash;
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Filesum error {}", e);
|
||||
}
|
||||
}
|
||||
match filesum_filtered(&fn_group, &mut oldgroup, &mut diff_group, re_group.as_ref()) {
|
||||
Ok((changed, hash)) => {
|
||||
if changed {
|
||||
warn!("Hash of {} is now {}", fn_group_str, hash);
|
||||
hash_group = hash;
|
||||
is_changed = true;
|
||||
} else {
|
||||
debug!("Hash of {} is still now {}", fn_group_str, hash);
|
||||
if hash_group.is_empty() {
|
||||
hash_group = hash;
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Filesum error {}", e);
|
||||
}
|
||||
}
|
||||
match filesum_filtered(&fn_authkey, &mut oldauthkey, &mut diff_authkey, re_authkey.as_ref()) {
|
||||
Ok((changed, hash)) => {
|
||||
if changed {
|
||||
warn!("Hash of {} is now {}", fn_authkey_str, hash);
|
||||
hash_authkey = hash;
|
||||
is_changed = true;
|
||||
} else {
|
||||
debug!("Hash of {} is still now {}", fn_authkey_str, hash);
|
||||
if hash_authkey.is_empty() {
|
||||
hash_authkey = hash;
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Filesum error {}", e);
|
||||
}
|
||||
}
|
||||
let now = Utc::now().timestamp().try_into().unwrap_or(0);
|
||||
{
|
||||
debug!(
|
||||
"try to lock mutex snmp_data to update filesum data, timestamp {:?}",
|
||||
now
|
||||
);
|
||||
let mut guard = snmp_data.lock().unwrap();
|
||||
let snmp_data = &mut guard.data;
|
||||
snmp_data.insert(oid_filesum_time.clone(), SnmpData::Gauge(now));
|
||||
if is_changed {
|
||||
snmp_data.insert(oid_passwd_hash.clone(), SnmpData::String(hash_passwd.clone()));
|
||||
snmp_data.insert(oid_shadow_hash.clone(), SnmpData::String(hash_shadow.clone()));
|
||||
snmp_data.insert(oid_group_hash.clone(), SnmpData::String(hash_group.clone()));
|
||||
snmp_data.insert(oid_authkey_hash.clone(), SnmpData::String(hash_authkey.clone()));
|
||||
|
||||
// since we have not implemented the Copy trait, we must use the old way ...
|
||||
|
||||
// 1. Collect the keys to be removed.
|
||||
let keys_to_remove: Vec<Oid> = snmp_data
|
||||
.range(&oid_passwd_diff..&oid_xxx_diff)
|
||||
.map(|(k, _)| k.clone())
|
||||
.collect();
|
||||
|
||||
// 2. Iterate over the collected keys and remove them.
|
||||
for key in keys_to_remove {
|
||||
snmp_data.remove(&key);
|
||||
}
|
||||
|
||||
if !diff_passwd.is_empty() {
|
||||
let mut n = 1;
|
||||
for l in diff_passwd.lines() {
|
||||
snmp_data.insert(oid_passwd_diff.add_suffix_int(n), SnmpData::String(l.to_string()));
|
||||
n += 1;
|
||||
}
|
||||
}
|
||||
if !diff_shadow.is_empty() {
|
||||
let mut n = 1;
|
||||
for l in diff_shadow.lines() {
|
||||
snmp_data.insert(oid_shadow_diff.add_suffix_int(n), SnmpData::String(l.to_string()));
|
||||
n += 1;
|
||||
}
|
||||
}
|
||||
if !diff_group.is_empty() {
|
||||
let mut n = 1;
|
||||
for l in diff_group.lines() {
|
||||
snmp_data.insert(oid_group_diff.add_suffix_int(n), SnmpData::String(l.to_string()));
|
||||
n += 1;
|
||||
}
|
||||
}
|
||||
if !diff_authkey.is_empty() {
|
||||
let mut n = 1;
|
||||
for l in diff_authkey.lines() {
|
||||
snmp_data.insert(oid_authkey_diff.add_suffix_int(n), SnmpData::String(l.to_string()));
|
||||
n += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
is_changed = false;
|
||||
}
|
||||
{
|
||||
let guard = lock.lock().unwrap();
|
||||
let qres = cvar.wait_timeout(guard, Duration::from_secs(t_check_interval)).unwrap();
|
||||
if !qres.1.timed_out() {
|
||||
quit = *qres.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
debug!("Exit of t_filesum function");
|
||||
}
|
||||
|
||||
fn t_processes(t_quit: Arc<(Mutex<bool>, Condvar)>, t_check_interval: u64, snmp_data: Arc<Mutex<OidData>>) {
|
||||
debug!("Startup of t_processes function");
|
||||
#[cfg(target_os = "linux")]
|
||||
{
|
||||
let proc_path = Path::new("/proc");
|
||||
let mut proc_data = Ptypes::default();
|
||||
let oidt = Oid::from_str("4.1.0").unwrap();
|
||||
let oidz = Oid::from_str("4.2.0").unwrap();
|
||||
let oidr = Oid::from_str("4.3.0").unwrap();
|
||||
let oids = Oid::from_str("4.4.0").unwrap();
|
||||
let oidi = Oid::from_str("4.5.0").unwrap();
|
||||
let oidw = Oid::from_str("4.6.0").unwrap();
|
||||
let (lock, cvar) = &*t_quit;
|
||||
let mut quit = false;
|
||||
while !quit {
|
||||
debug!("Starting count_processes function for /proc");
|
||||
let res = processes::count_processes(proc_path);
|
||||
match res {
|
||||
Ok(v) => {
|
||||
trace!("count_processes finished, result: {:?}", v);
|
||||
proc_data = v;
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Error at reading proc files: {}", e);
|
||||
}
|
||||
}
|
||||
let now = Utc::now().timestamp().try_into().unwrap_or(0);
|
||||
{
|
||||
debug!(
|
||||
"try to lock mutex snmp_data to update processdata with {:#?} => {:?}",
|
||||
now, proc_data
|
||||
);
|
||||
let mut guard = snmp_data.lock().unwrap();
|
||||
let snmp_data = &mut guard.data;
|
||||
snmp_data.insert(oidt.clone(), SnmpData::Gauge(now));
|
||||
snmp_data.insert(oidz.clone(), SnmpData::Gauge(proc_data.zombie));
|
||||
snmp_data.insert(oidr.clone(), SnmpData::Gauge(proc_data.running));
|
||||
snmp_data.insert(oids.clone(), SnmpData::Gauge(proc_data.sleeping));
|
||||
snmp_data.insert(oidi.clone(), SnmpData::Gauge(proc_data.idle));
|
||||
snmp_data.insert(oidw.clone(), SnmpData::Gauge(proc_data.waiting));
|
||||
}
|
||||
{
|
||||
let guard = lock.lock().unwrap();
|
||||
let qres = cvar.wait_timeout(guard, Duration::from_secs(t_check_interval)).unwrap();
|
||||
if !qres.1.timed_out() {
|
||||
quit = *qres.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
debug!("Exit of t_processes function");
|
||||
}
|
||||
|
||||
fn t_meminfo(t_quit: Arc<(Mutex<bool>, Condvar)>, t_check_interval: u64, snmp_data: Arc<Mutex<OidData>>) {
|
||||
debug!("Startup of t_meminfo function");
|
||||
#[cfg(target_os = "linux")]
|
||||
{
|
||||
let re = RegexBuilder::new(r"^MemAvailable:\s+(\d+) kB$")
|
||||
.multi_line(true)
|
||||
.unicode(false)
|
||||
.build()
|
||||
.expect("Unable to compile Regex");
|
||||
let re_old = RegexBuilder::new(r"^MemFree:\s+(\d+) kB.*?Buffers:\s+(\d+) kB$")
|
||||
.multi_line(true)
|
||||
.dot_matches_new_line(true)
|
||||
.unicode(true) // Must be true, otherwise we get a runtime panic
|
||||
.build()
|
||||
.expect("Unable to compile Regex");
|
||||
let fpath = PathBuf::from("/proc/meminfo");
|
||||
let oidt = Oid::from_str("3.1.0").unwrap();
|
||||
let oidm = Oid::from_str("3.2.0").unwrap();
|
||||
let (lock, cvar) = &*t_quit;
|
||||
let mut quit = false;
|
||||
while !quit {
|
||||
let mut freekb: u64 = 0;
|
||||
if let Ok(meminfo_contents) = fs::read_to_string(&fpath) {
|
||||
trace!("Read /prod/meminfo, contents: {:#?}", meminfo_contents);
|
||||
if let Some(m) = re.captures(&meminfo_contents) {
|
||||
debug!("regex mached {:?}", m);
|
||||
if let Some(m) = m.get(1) {
|
||||
freekb += m.as_str().parse().unwrap_or(0);
|
||||
debug!("freekb via regex parsed as {:#?}", freekb);
|
||||
}
|
||||
} else if let Some(m) = re_old.captures(&meminfo_contents) {
|
||||
debug!("old regex mached {:#?}", m);
|
||||
if let Some(m) = m.get(1) {
|
||||
freekb += m.as_str().parse().unwrap_or(0);
|
||||
}
|
||||
if let Some(m) = m.get(2) {
|
||||
freekb += m.as_str().parse().unwrap_or(0);
|
||||
}
|
||||
debug!("freekb via old regex parsed as {:#?}", freekb);
|
||||
}
|
||||
let now = Utc::now().timestamp().try_into().unwrap_or(0);
|
||||
{
|
||||
debug!(
|
||||
"try to lock mutex snmp_data to update meminfo with {:#?} => {:#?}",
|
||||
now, freekb
|
||||
);
|
||||
let mut guard = snmp_data.lock().unwrap();
|
||||
let snmp_data = &mut guard.data;
|
||||
snmp_data.insert(oidt.clone(), SnmpData::Gauge(now));
|
||||
//snmp_data.insert(oidm.clone(), SnmpData::String(freekb.to_string()));
|
||||
snmp_data.insert(oidm.clone(), SnmpData::Counter64(freekb));
|
||||
}
|
||||
};
|
||||
// wait for the next check interval or a quit notification in this mutex-protected block
|
||||
{
|
||||
let guard = lock.lock().unwrap();
|
||||
let qres = cvar.wait_timeout(guard, Duration::from_secs(t_check_interval)).unwrap();
|
||||
if !qres.1.timed_out() {
|
||||
quit = *qres.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
debug!("Exit of t_meminfo function");
|
||||
}
|
||||
|
||||
fn log_debug_watcher(
|
||||
t_quit: Arc<(Mutex<bool>, Condvar)>,
|
||||
mut log: LoggerHandle,
|
||||
t_marker: PathBuf,
|
||||
t_check_interval: u64,
|
||||
) {
|
||||
debug!("Start of log_debug_watcher function");
|
||||
let mut last = false;
|
||||
let (lock, cvar) = &*t_quit;
|
||||
let mut quit = false;
|
||||
while !quit {
|
||||
trace!("quit not requested, going to check marker file now");
|
||||
match t_marker.try_exists() {
|
||||
Ok(v) => {
|
||||
if v != last {
|
||||
debug!("marker file {} is now readable: {:?}", t_marker.display(), v);
|
||||
let r = match v {
|
||||
true => log.parse_and_push_temp_spec("trace"),
|
||||
false => {
|
||||
log.pop_temp_spec();
|
||||
Ok(())
|
||||
}
|
||||
};
|
||||
match r {
|
||||
Ok(_) => info!(
|
||||
"Log config changed to {}",
|
||||
log.current_max_level()
|
||||
.expect("Retrive the current log level not possible")
|
||||
),
|
||||
Err(e) => error!("Unable to change log config {:#?}", e),
|
||||
};
|
||||
last = v;
|
||||
};
|
||||
}
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Unable to check debug marker file {}, error: {:#?}",
|
||||
t_marker.display(),
|
||||
e
|
||||
)
|
||||
}
|
||||
};
|
||||
{
|
||||
let guard = lock.lock().unwrap();
|
||||
let qres = cvar.wait_timeout(guard, Duration::from_secs(t_check_interval)).unwrap();
|
||||
if !qres.1.timed_out() {
|
||||
quit = *qres.0;
|
||||
}
|
||||
}
|
||||
//thread::sleep(Duration::from_secs(t_check_interval));
|
||||
}
|
||||
debug!("Exit of log_debug_watcher function");
|
||||
}
|
||||
|
||||
pub fn start_workers(
|
||||
config: &config::AppConfig,
|
||||
quit_pair: &Arc<(Mutex<bool>, Condvar)>,
|
||||
snmp_data: &Arc<Mutex<OidData>>,
|
||||
log: flexi_logger::LoggerHandle,
|
||||
) -> Vec<JoinHandle<()>> {
|
||||
// handles collect all our thread handles ...
|
||||
let mut handles: Vec<JoinHandle<_>> = Vec::new();
|
||||
|
||||
// start log_debug_watcher
|
||||
if config.intervals.log_debug_watcher.is_some() {
|
||||
let t_quit = Arc::clone(quit_pair);
|
||||
let t_marker = config.debug_log_marker.clone();
|
||||
let t_check_interval = config
|
||||
.intervals
|
||||
.log_debug_watcher
|
||||
.expect("Unable to get config.intervals.log_debug_watcher");
|
||||
match Builder::new()
|
||||
.name("log_debug_watcher".to_string())
|
||||
.spawn(move || log_debug_watcher(t_quit, log, t_marker, t_check_interval))
|
||||
{
|
||||
Ok(v) => {
|
||||
info!(
|
||||
"started log_debug_watcher thread, checking {} every {}s",
|
||||
config.debug_log_marker.display(),
|
||||
t_check_interval
|
||||
);
|
||||
handles.push(v);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Unable to start log_debug_watcher thread: {:#?}", e);
|
||||
eprintln!("Unable to start log_debug_watcher thread: {:#?}", e);
|
||||
}
|
||||
};
|
||||
} else {
|
||||
warn!("log_debug_watcher thread not started, as it was disabled via config")
|
||||
}
|
||||
|
||||
// start meminfo worker thread
|
||||
if let Some(t_check_interval) = config.intervals.meminfo() {
|
||||
let t_quit = Arc::clone(quit_pair);
|
||||
let t_snmp_data = Arc::clone(snmp_data);
|
||||
match Builder::new()
|
||||
.name("meminfo".to_string())
|
||||
.spawn(move || t_meminfo(t_quit, t_check_interval, t_snmp_data))
|
||||
{
|
||||
Ok(v) => {
|
||||
info!("started meminfo thread, checking meminfo every {}s", t_check_interval);
|
||||
handles.push(v);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Unable to start meminfo thread: {:#?}", e);
|
||||
eprintln!("Unable to start meminfo thread: {:#?}", e);
|
||||
}
|
||||
};
|
||||
} else {
|
||||
warn!("meminfo thread not started, as it was disabled via config")
|
||||
}
|
||||
|
||||
// start zombie/processes worker thread
|
||||
if let Some(t_check_interval) = config.intervals.processes() {
|
||||
let t_quit = Arc::clone(quit_pair);
|
||||
let t_snmp_data = Arc::clone(snmp_data);
|
||||
match Builder::new()
|
||||
.name("processes".to_string())
|
||||
.spawn(move || t_processes(t_quit, t_check_interval, t_snmp_data))
|
||||
{
|
||||
Ok(v) => {
|
||||
info!(
|
||||
"started processes thread, checking processes every {}s",
|
||||
t_check_interval
|
||||
);
|
||||
handles.push(v);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Unable to start processes thread: {:#?}", e);
|
||||
eprintln!("Unable to start processes thread: {:#?}", e);
|
||||
}
|
||||
};
|
||||
} else {
|
||||
warn!("processes thread not started, as it was disabled via config")
|
||||
}
|
||||
|
||||
// start filesum worker thread
|
||||
if let Some(t_check_interval) = config.intervals.filesum() {
|
||||
let t_quit = Arc::clone(quit_pair);
|
||||
let t_snmp_data = Arc::clone(snmp_data);
|
||||
let c = config.extra_config.filesum();
|
||||
match Builder::new()
|
||||
.name("filesum".to_string())
|
||||
.spawn(move || t_filesum(t_quit, t_check_interval, t_snmp_data, c))
|
||||
{
|
||||
Ok(v) => {
|
||||
info!("started filesum thread, checking filesum every {}s", t_check_interval);
|
||||
handles.push(v);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Unable to start filesum thread: {:#?}", e);
|
||||
eprintln!("Unable to start filesum thread: {:#?}", e);
|
||||
}
|
||||
};
|
||||
} else {
|
||||
warn!("filesum thread not started, as it was disabled via config")
|
||||
}
|
||||
|
||||
// start multipath worker thread
|
||||
if let Some(t_check_interval) = config.intervals.multipath() {
|
||||
let t_quit = Arc::clone(quit_pair);
|
||||
let t_snmp_data = Arc::clone(snmp_data);
|
||||
let c = config.extra_config.multipath();
|
||||
match Builder::new()
|
||||
.name("multipath".to_string())
|
||||
.spawn(move || t_multipath(t_quit, t_check_interval, t_snmp_data, c))
|
||||
{
|
||||
Ok(v) => {
|
||||
info!(
|
||||
"started multipath thread, checking multipath every {}s",
|
||||
t_check_interval
|
||||
);
|
||||
handles.push(v);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Unable to start multipath thread: {:#?}", e);
|
||||
eprintln!("Unable to start multipath thread: {:#?}", e);
|
||||
}
|
||||
};
|
||||
} else {
|
||||
warn!("multipath thread not started, as it was disabled via config")
|
||||
}
|
||||
|
||||
// start bonding worker thread
|
||||
if let Some(t_check_interval) = config.intervals.bonding() {
|
||||
let t_quit = Arc::clone(quit_pair);
|
||||
let t_snmp_data = Arc::clone(snmp_data);
|
||||
let c = config.extra_config.bonding();
|
||||
match Builder::new()
|
||||
.name("bonding".to_string())
|
||||
.spawn(move || t_bonding(t_quit, t_check_interval, t_snmp_data, c))
|
||||
{
|
||||
Ok(v) => {
|
||||
info!("started bonding thread, checking bonding every {}s", t_check_interval);
|
||||
handles.push(v);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Unable to start bonding thread: {:#?}", e);
|
||||
eprintln!("Unable to start bonding thread: {:#?}", e);
|
||||
}
|
||||
};
|
||||
} else {
|
||||
warn!("bonding thread not started, as it was disabled via config")
|
||||
}
|
||||
|
||||
// return handles so main can join at the end to all handles ...
|
||||
handles
|
||||
}
|
||||
src/main.rs (144 changes)
@@ -1,11 +1,141 @@
|
||||
use std::io;
|
||||
use std::process::ExitCode;
|
||||
use std::sync::{Arc, Condvar, Mutex};
|
||||
//use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use log::{debug, error, info, trace, warn};
|
||||
//use std::thread::{Builder, JoinHandle};
|
||||
|
||||
fn main() {
|
||||
println!("Hello, world!");
|
||||
use rsnmpagent::config;
|
||||
use rsnmpagent::snmp::{Oid, OidData, SnmpCommands};
|
||||
use rsnmpagent::start_workers;
|
||||
|
||||
let mut guess = String::new();
|
||||
const VERSION: &str = env!("CARGO_PKG_VERSION");
|
||||
|
||||
io::stdin()
|
||||
.read_line(&mut guess)
|
||||
.expect("Failed to read line");
|
||||
fn main() -> ExitCode {
|
||||
let quit_pair = Arc::new((Mutex::new(false), Condvar::new()));
|
||||
|
||||
let config: config::AppConfig = match config::build_config() {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
eprintln!("Unable to build / load configuration: {:#?}", e);
|
||||
return ExitCode::from(1);
|
||||
}
|
||||
};
|
||||
|
||||
let log = match config::start_logging(&config) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
eprintln!("Unable to build / start logging: {:#?}", e);
|
||||
return ExitCode::from(2);
|
||||
}
|
||||
};
|
||||
|
||||
warn!(
|
||||
"Starting rsnapshot version {} with base OID {}",
|
||||
VERSION,
|
||||
config.base_oid()
|
||||
);
|
||||
|
||||
let mybase: Oid = config.base_oid().parse().expect("Unable to Parse base OID");
|
||||
let snmp_data = OidData::new_oid(&mybase);
|
||||
let snmp_data = Arc::new(Mutex::new(snmp_data));
|
||||
// now start all data getting threads ...
|
||||
let handles = start_workers(&config, &quit_pair, &snmp_data, log);
|
||||
|
||||
loop {
|
||||
let cmd = SnmpCommands::parse_command();
|
||||
match cmd {
|
||||
Err(e) => {
|
||||
debug!("answer NONE to {}", e);
|
||||
println!("NONE")
|
||||
}
|
||||
Ok(v) => match v {
|
||||
SnmpCommands::Quit => {
|
||||
debug!("requested quit, so i signal all threads to shutdown now");
|
||||
let (lock, cvar) = &*quit_pair;
|
||||
let mut quit = lock.lock().unwrap();
|
||||
*quit = true;
|
||||
cvar.notify_all();
|
||||
//quit.store(true, Ordering::Relaxed);
|
||||
break;
|
||||
}
|
||||
SnmpCommands::Ping => {
|
||||
debug!("answering PING with PONG");
|
||||
println!("PONG");
|
||||
}
|
||||
SnmpCommands::Set(ref _g, ref _d) => {
|
||||
info!("SNMPSET requested => {}", v);
|
||||
error!("set command not implemented, answering with not-writable");
|
||||
println!("not-writable");
|
||||
}
|
||||
SnmpCommands::Get(o) => match o.strip_prefix(&mybase) {
|
||||
Ok(doid) => {
|
||||
debug!("try to lock mutex snmp_data to lookup oid {}", o);
|
||||
let guard = snmp_data.lock().unwrap();
|
||||
if let Some(d) = guard.data_lookup(&doid) {
|
||||
debug!("GET {} => {}", o, d);
|
||||
println!("{}\n{}", o, d);
|
||||
} else {
|
||||
warn!("GET requested OID {} not found, answering NONE", o);
|
||||
println!("NONE");
|
||||
}
|
||||
}
|
||||
Err(v) => {
|
||||
error!("Unable to strip oid prefix {} from {}: {:?}", mybase, o, v);
|
||||
println!("NONE");
|
||||
}
|
||||
},
|
||||
SnmpCommands::GetNext(mut o) => {
|
||||
if o == mybase {
|
||||
trace!("adding .0 as GetNext is called with my suffix {}", mybase);
|
||||
o = o.add_suffix_int(0);
|
||||
}
|
||||
match o.strip_prefix(&mybase) {
|
||||
Ok(doid) => {
|
||||
trace!("try to lock mutex snmp_data to lookup next oid {}", o);
|
||||
let guard = snmp_data.lock().unwrap();
|
||||
if let Some((dk, v)) = guard.data_lookup_next(&doid) {
|
||||
let k = dk.add_prefix(&mybase);
|
||||
debug!("GETNEXT {} => {} => {}", doid, k, v);
|
||||
println!("{}\n{}", k, v);
|
||||
} else {
|
||||
warn!("GETNEXT requested OID {} not found, answering NONE", o);
|
||||
println!("NONE");
|
||||
}
|
||||
}
|
||||
Err(v) => {
|
||||
error!("Unable to strip oid prefix {} from {}: {:?}", mybase, o, v);
|
||||
println!("NONE");
|
||||
}
|
||||
}
|
||||
} //_ => warn!("Read unknown command {}", v)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
for t in handles {
|
||||
let tname = t.thread().name().unwrap_or("unknown_name").to_string();
|
||||
info!("Waiting for {} thread to stop", tname);
|
||||
match t.join() {
|
||||
Ok(_) => {
|
||||
info!("Successfull joined thread {}", tname);
|
||||
}
|
||||
Err(e) => {
|
||||
if let Some(s) = e.downcast_ref::<&str>() {
|
||||
error!("Error joining thread {}, Panic reason: {}", tname, s);
|
||||
} else {
|
||||
error!(
|
||||
"Error joining thread {}, Panic reason: Unknown or non-string payload.",
|
||||
tname
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//println!("Main Thread Exit");
|
||||
warn!("Now exiting gracefully");
|
||||
ExitCode::SUCCESS
|
||||
}
|
||||
|
||||
// to debug this interface, start snmpd with
|
||||
// /usr/sbin/snmpd -f -Ducd-snmp/pass
|
||||
|
||||
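Side note, not part of the commit: a minimal sketch of how this pass_persist extension might be hooked into snmpd and exercised by hand. The snmpd.conf entry and the install path are assumptions, not taken from this repository; the request/response exchange mirrors the command handling in src/main.rs above, and the sample value is purely illustrative.

# assumed snmpd.conf entry (binary path is hypothetical)
pass_persist .1.3.6.1.4.1.8072.9999.9999 /usr/local/bin/rsnmpagent

# manual protocol check on the agent's stdin/stdout
# stdin:  PING                                 -> stdout: PONG
# stdin:  get
# stdin:  .1.3.6.1.4.1.8072.9999.9999.3.2.0
# stdout: .1.3.6.1.4.1.8072.9999.9999.3.2.0
# stdout: counter64
# stdout: 123456   (example value for the meminfo OID)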
src/multipath.rs (new file, 146 lines)
@@ -0,0 +1,146 @@
|
||||
use glob::glob;
|
||||
use log::{debug, error, info, trace};
|
||||
use std::ffi::OsStr;
|
||||
use std::fs::{self};
|
||||
use std::io::{self};
|
||||
use std::path::Path;
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub(crate) struct MultipathInfo {
|
||||
pub(crate) mp: String,
|
||||
pub(crate) uuid: String,
|
||||
pub(crate) vendor: String,
|
||||
pub(crate) model: String,
|
||||
pub(crate) slave_count: u32,
|
||||
pub(crate) slave_failed: u32,
|
||||
}
|
||||
|
||||
pub(crate) fn multipath_status(path: &str, re: Option<®ex::Regex>) -> io::Result<Vec<MultipathInfo>> {
|
||||
let mut mp_list = Vec::new();
|
||||
let pglob = format!("{}/dm-*", path);
|
||||
match glob(&pglob) {
|
||||
Ok(iter) => {
|
||||
for entry in iter {
|
||||
match entry {
|
||||
Ok(f) => {
|
||||
if f.is_dir() {
|
||||
match mp_status(&f) {
|
||||
Ok(r) => {
|
||||
trace!("sucessfull read multipath device {:?}", r);
|
||||
match re {
|
||||
Some(re) => {
|
||||
trace!("filtering mp {:?} with {:?}", r, re);
|
||||
if re.is_match(&r.mp) {
|
||||
info!("skipping mp {:?} as the filter has matched mp", r);
|
||||
continue;
|
||||
}
|
||||
if re.is_match(&r.uuid) {
|
||||
info!("skipping mp {:?} as the filter has matched uuid", r);
|
||||
continue;
|
||||
}
|
||||
if re.is_match(&r.vendor) {
|
||||
info!("skipping mp {:?} as the filter has matched vendor", r);
|
||||
continue;
|
||||
}
|
||||
if re.is_match(&r.model) {
|
||||
info!("skipping mp {:?} as the filter has matched model", r);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
None => {
|
||||
trace!("No filter regex, continue");
|
||||
}
|
||||
}
|
||||
mp_list.push(r);
|
||||
}
|
||||
Err(e) => match e.kind() {
|
||||
io::ErrorKind::Other => {
|
||||
debug!("Skipped device {:?}", e);
|
||||
}
|
||||
_ => {
|
||||
error!("Error reading multipath device {}", e);
|
||||
}
|
||||
},
|
||||
}
|
||||
} else {
|
||||
trace!("{:?} is not a dir", f);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Error reading directory Entry: {:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Invalid glob pattern: {}", e);
|
||||
return Err(std::io::Error::other(format!("Invalid glob pattern {}", e)));
|
||||
}
|
||||
};
|
||||
Ok(mp_list)
|
||||
}
|
||||
|
||||
fn mp_status(d: &Path) -> io::Result<MultipathInfo> {
|
||||
let mut mp_info = MultipathInfo {
|
||||
mp: d
|
||||
.file_name()
|
||||
.unwrap_or(OsStr::new("unknown"))
|
||||
.to_string_lossy()
|
||||
.to_string(),
|
||||
uuid: fs::read_to_string(d.join("dm/uuid"))?.trim().to_string(),
|
||||
vendor: "".to_string(),
|
||||
model: "".to_string(),
|
||||
slave_count: 0,
|
||||
slave_failed: 0,
|
||||
};
|
||||
if !mp_info.uuid.starts_with("mpath-") {
|
||||
return Err(std::io::Error::other(format!("Wrong device uuid {}", mp_info.uuid)));
|
||||
}
|
||||
let slglob = format!("{}/slaves/*/device", d.to_string_lossy());
|
||||
match glob(&slglob) {
|
||||
Ok(iter) => {
|
||||
for p in iter {
|
||||
match p {
|
||||
Ok(f) => {
|
||||
match fs::read_to_string(f.join("state")) {
|
||||
Ok(s) => {
|
||||
mp_info.slave_count += 1;
|
||||
if s.trim() != "running" {
|
||||
mp_info.slave_failed += 1;
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Unable to read state file, error: {:?}", e);
|
||||
}
|
||||
}
|
||||
if mp_info.vendor.is_empty() {
|
||||
match fs::read_to_string(f.join("vendor")) {
|
||||
Ok(s) => {
|
||||
mp_info.vendor = s.trim().to_string();
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Unable to read state file, error: {:?}", e);
|
||||
}
|
||||
}
|
||||
match fs::read_to_string(f.join("model")) {
|
||||
Ok(s) => {
|
||||
mp_info.model = s.trim().to_string();
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Unable to read state file, error: {:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
info!("Skipping globing error {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
info!("Skipping globing error {}", e);
|
||||
}
|
||||
}
|
||||
Ok(mp_info)
|
||||
}
|
||||
src/processes.rs (new file, 73 lines)
@@ -0,0 +1,73 @@
|
||||
use log::{debug, error, trace, warn};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fs;
|
||||
use std::io;
|
||||
use std::path::Path;
|
||||
|
||||
#[derive(Deserialize, Serialize, Debug, Default)]
|
||||
pub(crate) struct Ptypes {
|
||||
pub(crate) running: u32, // R => Running
|
||||
pub(crate) sleeping: u32, // S => Sleeping in an interruptible wait
|
||||
pub(crate) waiting: u32, // D => Waiting in uninterruptible disk sleep
|
||||
pub(crate) zombie: u32, // Z => Zombie
|
||||
pub(crate) stopped: u32, // T => Stopped (on a signal) or (before Linux 2.6.33) trace stopped
|
||||
pub(crate) stop: u32, // t => tracing stop (Linux 2.6.33 onward)
|
||||
//pub(crate) paging: u64, // W => Paging (only before Linux 2.6.0)
|
||||
pub(crate) dead: u32, // X => Dead (from Linux 2.6.0 onward) or x => Dead (Linux 2.6.33 to 3.13 only)
|
||||
pub(crate) wakekill: u32, // K => Wakekill (Linux 2.6.33 to 3.13 only)
|
||||
pub(crate) waking: u32, // W => Waking (Linux 2.6.33 to 3.13 only)
|
||||
pub(crate) idle: u32, // I => Idle (Linux 4.14 onward)
|
||||
}
|
||||
|
||||
pub(crate) fn count_processes(proc_path: &Path) -> io::Result<Ptypes> {
|
||||
let mut counts = Ptypes::default();
|
||||
|
||||
debug!("Staring count_processes, iterating over {:?}", proc_path);
|
||||
for entry_result in fs::read_dir(proc_path)? {
|
||||
let entry = entry_result?;
|
||||
let path = entry.path();
|
||||
|
||||
// Check if the directory name is a number (a PID)
|
||||
if path.is_dir()
|
||||
&& path
|
||||
.file_name()
|
||||
.and_then(|s| s.to_str())
|
||||
.unwrap_or("")
|
||||
.parse::<u32>()
|
||||
.is_ok()
|
||||
{
|
||||
let stat_path = path.join("stat");
|
||||
|
||||
trace!("now reading contents of {:?}", stat_path);
|
||||
if let Ok(stat_contents) = fs::read_to_string(&stat_path) {
|
||||
// We already read the whole file into memory, so we can search ")" from the end.
|
||||
if let Some(pos) = stat_contents.rfind(")") {
|
||||
match stat_contents.as_bytes().get(pos + 2) {
|
||||
Some(&byteval) => {
|
||||
match byteval {
|
||||
82 => counts.running += 1, // R
|
||||
83 => counts.sleeping += 1, // S
|
||||
68 => counts.waiting += 1, // D
|
||||
84 => counts.stopped += 1, // T
|
||||
116 => counts.stop += 1, // t
|
||||
//87 => counts.paging += 1, // W --> only before 2.6 !!
|
||||
88 => counts.dead += 1, // X
|
||||
120 => counts.dead += 1, // x
|
||||
75 => counts.wakekill += 1, // K
|
||||
87 => counts.waking += 1, // W
|
||||
73 => counts.idle += 1, // I
|
||||
_ => warn!("unknown proc status value {}", byteval),
|
||||
}
|
||||
}
|
||||
None => {
|
||||
error!("Problem parsing stat data {}", stat_contents);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
error!("read_to_string failed for {:?}", stat_path);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(counts)
|
||||
}
|
||||
src/snmp.rs (new file, 295 lines)
@@ -0,0 +1,295 @@
|
||||
use log::{debug, error};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::BTreeMap;
|
||||
use std::error::Error;
|
||||
use std::fmt;
|
||||
use std::io;
|
||||
use std::num::ParseIntError;
|
||||
use std::str::FromStr;
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum OidError {
|
||||
PrefixMissmatch,
|
||||
PrefixTooLong,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
|
||||
pub struct Oid(Vec<u32>);
|
||||
|
||||
impl FromStr for Oid {
|
||||
type Err = ParseIntError;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
let r = s
|
||||
.split('.')
|
||||
.filter(|&s| !s.is_empty())
|
||||
.map(|s| s.parse::<u32>())
|
||||
.collect();
|
||||
match r {
|
||||
Ok(v) => Ok(Oid(v)),
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Oid {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
".{}",
|
||||
self.0.iter().map(|&n| n.to_string()).collect::<Vec<String>>().join(".")
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Oid {
|
||||
pub fn add_prefix(&self, o: &Oid) -> Oid {
|
||||
let mut x = o.clone();
|
||||
x.0.extend(&self.0);
|
||||
x
|
||||
}
|
||||
|
||||
pub fn add_suffix(&self, o: &Oid) -> Oid {
|
||||
let mut x = self.clone();
|
||||
x.0.extend(o.clone().0);
|
||||
x
|
||||
}
|
||||
|
||||
pub fn add_suffix_int(&self, o: u32) -> Oid {
|
||||
let mut x = self.clone();
|
||||
x.0.push(o);
|
||||
x
|
||||
}
|
||||
|
||||
pub fn strip_prefix(&self, o: &Oid) -> Result<Oid, OidError> {
|
||||
let pl = o.0.len();
|
||||
if self.0.len() > pl {
|
||||
let mut x = self.0.clone();
|
||||
let y = x.split_off(pl);
|
||||
if x == o.0 {
|
||||
return Ok(Oid(y));
|
||||
} else {
|
||||
return Err(OidError::PrefixMissmatch);
|
||||
}
|
||||
}
|
||||
Err(OidError::PrefixTooLong)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Debug)]
|
||||
pub enum SnmpData {
|
||||
String(String),
|
||||
Gauge(u32),
|
||||
Integer(u32),
|
||||
Counter32(u32),
|
||||
Counter64(u64),
|
||||
// More data types ToDo ...
|
||||
//INTEGER
|
||||
//OCTET
|
||||
//IPADDRESS
|
||||
//TIMETICKS
|
||||
//OBJECTID
|
||||
// coding examples see https://github.com/nagius/snmp_passpersist/blob/master/snmp_passpersist.py
|
||||
}
|
||||
|
||||
impl fmt::Display for SnmpData {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
Self::String(s) => write!(f, "string\n{}", s),
|
||||
Self::Gauge(i) => write!(f, "gauge\n{}", i),
|
||||
Self::Integer(i) => write!(f, "integer\n{}", i),
|
||||
Self::Counter32(i) => write!(f, "counter32\n{}", i),
|
||||
Self::Counter64(i) => write!(f, "counter64\n{}", i),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Debug)]
|
||||
pub struct OidData {
|
||||
pub(crate) base: Oid,
|
||||
pub(crate) data: BTreeMap<Oid, SnmpData>,
|
||||
}
|
||||
|
||||
impl OidData {
|
||||
pub fn new(base_oid: &str) -> Self {
|
||||
Self {
|
||||
base: base_oid.parse().expect("Unable to parse Oid"),
|
||||
data: BTreeMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_oid(base_oid: &Oid) -> Self {
|
||||
Self {
|
||||
base: base_oid.clone(),
|
||||
data: BTreeMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn data_lookup(&self, key: &Oid) -> Option<&SnmpData> {
|
||||
self.data.get(key)
|
||||
}
|
||||
|
||||
pub fn data_lookup_next(&self, key: &Oid) -> Option<(&Oid, &SnmpData)> {
|
||||
for (k, v) in self.data.iter() {
|
||||
if k > key {
|
||||
return Some((k, v));
|
||||
};
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Debug)]
|
||||
pub enum SnmpCommands {
|
||||
Ping,
|
||||
Get(Oid),
|
||||
GetNext(Oid),
|
||||
Set(Oid, String),
|
||||
Quit,
|
||||
}
|
||||
|
||||
impl fmt::Display for SnmpCommands {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
Self::Get(s) => write!(f, "GET => {}", s),
|
||||
Self::GetNext(s) => write!(f, "GETNEXT => {}", s),
|
||||
Self::Ping => write!(f, "PING"),
|
||||
Self::Set(o, s) => write!(f, "SET => {} => {}", o, s),
|
||||
Self::Quit => write!(f, "QUIT"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SnmpCommands {
|
||||
pub fn parse_command() -> Result<SnmpCommands, Box<dyn Error>> {
|
||||
let mut cmd = String::new();
|
||||
io::stdin().read_line(&mut cmd).expect("Failed to read line");
|
||||
cmd = cmd.to_lowercase().trim().to_string();
|
||||
let pc: SnmpCommands = match &cmd as &str {
|
||||
"quit" => SnmpCommands::Quit,
|
||||
"ping" => SnmpCommands::Ping,
|
||||
"set" => {
|
||||
let mut setoid = String::new();
|
||||
let mut setdata = String::new();
|
||||
// we need to read 2 additional lines of input after the SET command: first the OID, then the data
|
||||
io::stdin().read_line(&mut setoid).expect("Failed to read line");
|
||||
io::stdin().read_line(&mut setdata).expect("Failed to read line");
|
||||
let oid: Oid = match setoid.trim().to_string().parse() {
|
||||
Ok(v) => v,
|
||||
Err(_e) => {
|
||||
let mystr = format!("unable to parse {} into Oid", setoid.trim());
|
||||
error!("{}", mystr);
|
||||
return Err(mystr.into());
|
||||
}
|
||||
};
|
||||
SnmpCommands::Set(oid, format!("DATA: {}", setdata.trim()))
|
||||
}
|
||||
"get" => {
|
||||
let mut getoid = String::new();
|
||||
// we need to read 1 additional line of input after the GET command: the OID
|
||||
io::stdin().read_line(&mut getoid).expect("Failed to read line");
|
||||
let oid: Oid = match getoid.trim().to_string().parse() {
|
||||
Ok(v) => v,
|
||||
Err(_e) => {
|
||||
let mystr = format!("unable to parse {} into Oid", getoid.trim());
|
||||
error!("{}", mystr);
|
||||
return Err(mystr.into());
|
||||
}
|
||||
};
|
||||
SnmpCommands::Get(oid)
|
||||
}
|
||||
"getnext" => {
|
||||
let mut getoid = String::new();
|
||||
// we need to read 1 additional line of input after the GETNEXT command: the OID
|
||||
io::stdin().read_line(&mut getoid).expect("Failed to read line");
|
||||
let oid: Oid = match getoid.trim().to_string().parse() {
|
||||
Ok(v) => v,
|
||||
Err(_e) => {
|
||||
let mystr = format!("unable to parse {} into Oid", getoid.trim());
|
||||
error!("{}", mystr);
|
||||
return Err(mystr.into());
|
||||
}
|
||||
};
|
||||
SnmpCommands::GetNext(oid)
|
||||
}
|
||||
_ => {
|
||||
let mystr = format!("unable to parse {} into SnmpCommand", cmd);
|
||||
error!("{}", mystr);
|
||||
return Err(mystr.into());
|
||||
}
|
||||
};
|
||||
debug!("parsed snmp command: {}", pc);
|
||||
Ok(pc)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn oid_sort1() {
|
||||
let oid1: Oid = "1.2.3".to_string().parse().unwrap();
|
||||
let oid2: Oid = "1.2.3.1".to_string().parse().unwrap();
|
||||
assert!(oid1 < oid2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn oid_sort2() {
|
||||
let oid1: Oid = "1.2.3.1".to_string().parse().unwrap();
|
||||
let oid2: Oid = "1.2.4".to_string().parse().unwrap();
|
||||
assert!(oid1 < oid2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn oid_sort3() {
|
||||
let oid1: Oid = "1.2.3".to_string().parse().unwrap();
|
||||
let oid2: Oid = "1.2.4".to_string().parse().unwrap();
|
||||
assert!(oid1 < oid2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn oid_sort4() {
|
||||
let oid1: Oid = "1.2.3".to_string().parse().unwrap();
|
||||
let oid2: Oid = "1.3.1".to_string().parse().unwrap();
|
||||
assert!(oid1 < oid2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn oid_add_prefix1() {
|
||||
let oid1: Oid = "1.2.3".to_string().parse().unwrap();
|
||||
let oid2: Oid = "4.5".to_string().parse().unwrap();
|
||||
let oid3 = oid2.add_prefix(&oid1);
|
||||
assert_eq!(oid3, Oid(vec![1, 2, 3, 4, 5]));
|
||||
}
|
||||
#[test]
|
||||
fn oid_strip_prefix1() {
|
||||
let oid1: Oid = "1.2.3.4.5".to_string().parse().unwrap();
|
||||
let oid2: Oid = "1.2".to_string().parse().unwrap();
|
||||
let oid3 = oid1.strip_prefix(&oid2).unwrap();
|
||||
assert_eq!(oid3, Oid(vec![3, 4, 5]));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn oid_strip_prefix_missmatch() {
|
||||
let oid1: Oid = "1.2.3.4.5".to_string().parse().unwrap();
|
||||
let oid2: Oid = "1.3".to_string().parse().unwrap();
|
||||
let e1 = oid1.strip_prefix(&oid2).unwrap_err();
|
||||
assert_eq!(e1, OidError::PrefixMissmatch)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn oid_strip_prefix_too_long() {
|
||||
let oid1: Oid = "1.2.3".to_string().parse().unwrap();
|
||||
let oid2: Oid = "1.2.3".to_string().parse().unwrap();
|
||||
let e1 = oid1.strip_prefix(&oid2).unwrap_err();
|
||||
assert_eq!(e1, OidError::PrefixTooLong)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn oid_display_trait() {
|
||||
let oid1: Oid = "1.2.3".to_string().parse().unwrap();
|
||||
let s = format!("{}", oid1);
|
||||
assert_eq!(s, ".1.2.3".to_string());
|
||||
}
|
||||
}
|
||||