158 Commits

Author SHA1 Message Date
f5ff0c1afd Merge pull request 'v0.8.0' (#8) from develop into main
Reviewed-on: #8
2026-03-09 21:35:22 +05:00
63bc845b8b Update CHANGELOG.md to set release date for version 0.8.0 2026-03-09 21:26:14 +05:00
a44f9b4e75 Update CHANGELOG.md to fix release date for version 0.7.0 2026-03-09 21:24:25 +05:00
4647d1303e Update CHANGELOG.md to document new kor-elf-shield block delete command for IP unblocking 2026-03-09 21:23:28 +05:00
221fdb8d3b Add command for removing IP addresses from the block list
- Introduced `block delete` command to remove IPs from the block list.
- Added `UnblockIP` method to support IP removal in the firewall.
- Updated internationalization files for delete command descriptions.
- Enhanced repository with `DeleteByIP` for targeted IP removal.
2026-03-09 21:21:28 +05:00
a7e4c7d750 Update CHANGELOG.md to document new kor-elf-shield block add command for IP blocking 2026-03-09 17:47:53 +05:00
75c8eba0cd Add a command to add IP address blocking 2026-03-09 17:45:14 +05:00
bf8711aadd Add support for structured socket commands with JSON and argument parsing
- Introduced `SendCommand` for sending commands with arguments over sockets.
- Updated socket communication to encode/decode commands as JSON.
- Refactored daemon handlers to process commands with arguments.
- Added `Message` struct and `parseCommand` function for improved command handling.
2026-03-09 16:16:50 +05:00
1dbb4d0bff Refactor daemon commands to use newSocket helper for socket initialization. 2026-03-09 12:52:51 +05:00
993f48f541 Rename CmdBlockClear to cmdBlockClear for consistency with Go naming conventions. 2026-03-09 11:08:55 +05:00
286f32b618 Rename kor-elf-shield ban clear command to kor-elf-shield block clear and update CHANGELOG.md. 2026-03-09 11:05:34 +05:00
42e4a8cf40 Rename ban_clear command and related references to block_clear for improved clarity and consistency. 2026-03-09 11:02:49 +05:00
be3861ee6e Update CHANGELOG.md with 0.8.0 release details and added configuration parameters for brute force protection 2026-03-09 11:02:12 +05:00
d0a358a445 Expand analyzer configuration with block type and port settings
- Added support for specifying `block_type` and `ports` in brute force protection groups.
- Enhanced rate limit configuration to override `block_type` and define specific ports for blocking.
- Updated documentation in `analyzer.toml` with examples for new settings.
2026-03-09 10:47:14 +05:00
39cfb8a7b6 Add support for IP and port-based brute force protection
- Introduced `Block` interface to handle IP and port blocking configurations.
- Added `BlockIPWithPorts` functionality for enhanced blocking with ports.
- Enhanced brute force protection to support IP and port-based rules.
- Updated `Blocking` entity and repository for port-specific blocking.
- Added internationalization for port-based brute force notifications.
- Refactored the analyzer to accommodate new block configurations.
2026-03-05 01:10:02 +05:00
65eaa37637 Refactor firewall type handling and port configuration
- Introduced `types` package with structured definitions (`Protocol`, `Action`, `Direction`, `PolicyDrop`, etc.).
- Replaced primitive types for port-related logic with `L4Port` interface for improved encapsulation.
- Updated firewall methods to use `L4Port`, enhancing readability and reducing direct type handling.
- Adjusted validation and configuration logic to leverage new `types`.
2026-03-02 22:20:06 +05:00
c4852c3540 Merge pull request 'v0.7.0' (#7) from develop into main
Reviewed-on: #7
2026-02-28 21:45:11 +05:00
b884494250 Update CHANGELOG.md with 0.7.0 release date 2026-02-28 21:36:43 +05:00
598d83d6da Update CHANGELOG.md to include ban clear command information 2026-02-28 21:06:03 +05:00
f737edc3ce Add ban_clear command to unban all blocked IPs via the daemon
- Introduced `ban_clear` CLI command for clearing all banned IPs.
- Implemented `UnblockAllIPs` in the firewall for IP unblocking.
- Added internationalization messages for `ban_clear` actions.
2026-02-28 21:02:43 +05:00
dc85bc759a Add DeleteIP method to remove IPs from firewall lists
- Implemented `DeleteIP` to remove IPv4 and IPv6 addresses.
2026-02-28 21:02:12 +05:00
93b2927da7 Add DeleteElement method to manage firewall list elements
- Implemented `DeleteElement` to remove elements from the list.
- Updated `AddElement` to improve command construction with family inclusion.
2026-02-28 21:01:53 +05:00
afb0773dfd Update CHANGELOG.md with password brute force protection details and link to full settings configuration 2026-02-28 20:23:45 +05:00
187c447301 Reorganize "Plans" section in README files for clarity 2026-02-28 20:19:57 +05:00
3ec6b4c72d Expand brute force protection configuration with groups and rules
- Added support for defining groups with rate limits (`bruteForceProtection.groups`).
- Introduced rule configuration for log monitoring and grouping (`bruteForceProtection.rules`).
- Enhanced `analyzer.toml` with detailed examples and documentation for new settings.
2026-02-28 20:14:44 +05:00
b63e3adbd3 Add support for brute force protection rules and groups
- Introduced `BruteForceProtectionGroup` for managing rate limits and validation.
- Added `BruteForceProtectionRule` for defining protection rules with patterns.
- Updated `BruteForceProtection` to include groups and rules.
- Enhanced `ToSources` to process rules and associate them with defined groups.
2026-02-28 20:14:11 +05:00
aa519c8b44 Remove debug print statement from IP blocking logic 2026-02-28 18:47:16 +05:00
8329da32e3 Add reloadBlockList method to handle block list reloading in the firewall
- Implements `reloadBlockList` to reload IP block lists via `NftReload`.
- Links block lists to chains using `AddRuleToChain`.
2026-02-28 17:50:17 +05:00
833bc394b3 Pass logger to blocking.New in server initialization for improved logging and debugging. 2026-02-28 17:50:01 +05:00
e422bc4206 Add blocking package for IP blocking management
- Introduced `blocking` implementation with methods for managing blocked IPs.
- Added `NftReload` for reloading block lists into the firewall.
- Created `BlockIP` to block specific IPs with expiration and logging support.
- Implemented `ClearDBData` for clearing database blocking entries.
2026-02-28 17:49:39 +05:00
47aa0a9d6c Update BlockIP to return banning status (bool, error) for improved tracking and logging 2026-02-28 17:49:13 +05:00
58dbee450a Refactor BruteForceProtection to improve error handling and notification clarity
- Updated `BlockIPFunc` to return `(bool, error)` for better banning status tracking.
- Enhanced notification messages to include errors and blocked time (`blockSec`).
- Simplified `sendNotifyError` logic by embedding error context in `bruteForceProtectionNotify`.
2026-02-28 17:48:51 +05:00
68034fd6f9 Add before-local-input and after-local-input chains with block list API
- Introduced `NewBeforeLocalInput` and `NewAfterLocalInput` chain methods.
- Added `NewBlockListIP` for creating IP block lists linked to chains.
2026-02-28 17:48:12 +05:00
7b77b8730e Add ListIP interface and implementation for IP block list management in the firewall
- Implemented `NewListIP` for creating IPv4 and IPv6 block lists with timeout support.
- Added methods `AddIP` to add IPs to the lists and `AddRuleToChain` to link block lists to firewall chains.
2026-02-28 17:47:31 +05:00
187e874c29 Add List interface and implementation for managing block lists in the firewall 2026-02-28 17:47:05 +05:00
ee5a6a2d3d Add Reason field to Blocking entity 2026-02-28 17:46:35 +05:00
38283247e9 Simplify List method in BlockingRepository by removing unnecessary bucket creation logic 2026-02-28 17:46:24 +05:00
79c7ef1f91 Add before-local-input and after-local-input chains to enhance firewall rule management
- Implemented `BeforeLocalInput` and `AfterLocalInput` chain structures with rule management methods.
- Integrated chains into `reloadInput` logic for improved rule execution ordering.
2026-02-28 17:43:32 +05:00
e29d0de632 Add blockSec localization and integrate blocked time into notifications
- Added `blockSec` message to English, Kazakh, and Russian locale files.
- Extended `BruteForceProtection` logic to include blocked time (`blockSec`) in notifications.
2026-02-28 12:45:51 +05:00
be082a1841 Fix IP address handling in BruteForceProtectionGroup to prioritize IPv4 over IPv6 2026-02-28 12:13:03 +05:00
4b364cbdf0 Extend daemon stop logic to clear firewall data during testing interval expiration 2026-02-28 11:45:51 +05:00
dfa23bc7a6 Add ClearDBData method to firewall and integrate block list reload logic 2026-02-28 11:45:32 +05:00
3a34569e78 Add Clear method to BlockingRepository and implement bucket reset logic
- Introduced `Clear` to reset the database bucket for the `BlockingRepository`.
- Handled `ErrBucketNotFound` error to allow safe bucket recreation during cleanup.
2026-02-28 11:44:58 +05:00
b1f5ce4e9b Add ClearDBData method to Group interface and implement it in group 2026-02-28 11:44:30 +05:00
f2d851baa7 Add ClearDBData method to Group interface and implement it in group 2026-02-28 11:44:27 +05:00
2a617b5c17 Invoke ClearDBData in daemon stop logic to clean up analyzer data 2026-02-28 11:37:55 +05:00
a648647e4a Add ClearDBData functionality for analysis and repository components
- Introduced `ClearDBData` methods in `Analysis`, `Alert`, and `BruteForceProtection` components.
- Implemented `Clear` operations for `AlertGroupRepository` and `BruteForceProtectionGroupRepository` to reset database buckets.
- Updated `Analyzer` to invoke `ClearDBData` for cleanup logic.
2026-02-28 11:37:25 +05:00
6b482a350b Simplify error handling logic in DBQueueClear during testing interval expiration. 2026-02-28 11:05:56 +05:00
097cf362e3 Add brute force protection core logic and SSH-specific rules
- Integrated brute force protection mechanisms into the analyzer.
- Added `BruteForceProtection` and `BruteForceProtectionGroup` structures with rate-limiting and group-based blocking logic.
- Implemented IP blocking via the firewall service.
- Introduced SSH brute force detection rules and notifications for detected attempts.
- Updated analyzer and firewall services to handle brute force protection rules.
- Localized new brute force protection alert messages.
2026-02-26 00:01:06 +05:00
bf7d463930 Expand analyzer.toml with brute force protection settings
- Added configuration options for password brute-force protection, including rate limits, blocking duration, and SSH-specific settings.
- Included detailed parameter descriptions and default values to enhance setup clarity.
2026-02-25 23:58:12 +05:00
b49889ef58 Add brute force protection to analyzer settings
- Introduced `BruteForceProtection` structure with validation and default settings.
- Integrated brute force protection logic into `Setting` methods for initialization, validation, and source generation.
- Added group-based brute force rate-limiting functionality with `_default` group included.
2026-02-25 23:57:33 +05:00
fd899087d4 Introduce Blocking and BruteForceProtectionGroup entities and repositories
- Added `Blocking` and `BruteForceProtectionGroup` entities with associated logic.
- Implemented `BlockingRepository` for IP blocking management with add, list, and delete-expired functionalities.
- Introduced `BruteForceProtectionGroupRepository` for managing brute force protection groups.
- Updated `Repositories` to include new repositories with `app.db` and `security.db`.
2026-02-25 23:54:38 +05:00
8f254d11c1 Fix incorrect condition for empty rate limits in AlertGroup 2026-02-24 23:05:40 +05:00
2e08bf6b6a Improve error message for empty rate limits in alert_group configuration 2026-02-24 23:01:39 +05:00
036f037a30 Extract LazyRegexp into a reusable package and update references 2026-02-23 22:45:29 +05:00
c7f25b4ba8 Rename addRule to addAlertRule for improved clarity and consistency 2026-02-23 22:42:38 +05:00
623d626878 Add event tracking customization and new parameters to analyzer settings
- Enabled customization of log event tracking with examples.
- Added new `logAlert.groups` and `logAlert.rules` configurations in `analyzer.toml`.
2026-02-16 23:13:08 +05:00
e1bace602c Expand analyzer configuration with group-based alerting options
- Added detailed examples for group configuration and rate-limiting in `analyzer.toml`.
- Introduced optional `group` field in alert rule definitions for group associations.
2026-02-16 22:47:33 +05:00
e85fd785cd Integrate advanced alert grouping functionality
- Introduced `AlertGroup` structure for advanced rate-limiting and reset logic.
- Added support for nested rate-limit configuration with `RateLimit` structure.
- Implemented `alert_group.Group` service to facilitate alert group analysis and persistence.
- Integrated alert group logic into the analyzer configuration and runtime processing pipeline.
- Updated `LogAlertRule` to support group associations and validations.
- Enhanced repository structure with `AlertGroupRepository` for persistent alert group management.
2026-02-16 22:26:33 +05:00
c6841d14f3 Introduce AlertGroup entity and repository
- Added `AlertGroup` structure with reset functionality.
- Implemented `AlertGroupRepository` to manage alert group persistence using BoltDB.
- Integrated `AlertGroupRepository` into the `Repositories` interface and factory setup.
2026-02-16 22:24:51 +05:00
57b80da767 Rename notificationsQueue constant to notificationsQueueBucket for clarity 2026-02-14 23:51:12 +05:00
696961f7c0 Update third-party license file with entries for github.com/nxadm/tail and gopkg.in/tomb.v1 2026-02-14 02:11:47 +05:00
af082f36da Fix log formatting issue in file_monitoring.Logger.Fatalf 2026-02-14 02:06:13 +05:00
a889e5c81a Bump dependencies to latest versions for improved stability and performance 2026-02-14 02:05:10 +05:00
99e155fe10 Introduce file-based log monitoring support in analyzer
- Added `FileMonitoring` implementation for tailing log files.
- Integrated file monitoring service into `Analyzer`.
- Introduced `file_monitoring.Logger` for consistent log handling.
- Updated `Analyzer` to handle both `systemd` and file-based logs.
2026-02-14 01:56:11 +05:00
2fffe45a89 Add dependencies for file-based log monitoring (github.com/nxadm/tail, gopkg.in/tomb.v1) 2026-02-14 01:54:23 +05:00
ff0317ed0b Handle context cancellation in systemd log analyzer to prevent log channel blocking 2026-02-14 01:43:10 +05:00
0b627a283d Refactor and replace AlertRuleIndex with RulesIndex for improved rule handling
- Replaced `AlertRuleIndex` with the more robust `RulesIndex` structure.
- Introduced `RulesBucket` for efficient rule grouping and management.
- Added support for `file` source type in `RulesIndex`.
- Updated `Analyzer` and associated services to utilize `RulesIndex` for rule processing.
2026-02-11 23:48:56 +05:00
2b8a3e0d98 Add file source support in analyzer configuration
- Extended `Source` structure with a `Path` field for file sources.
- Added support for `file` source type in `ToSource` method.
- Implemented logic to handle `file` paths and integrate with `SourceFile`.
2026-02-11 23:47:30 +05:00
c09bf01de1 Add SourceFile support for file-based log monitoring
- Introduced `SourceFile` structure for file sources in analyzer configuration.
- Added `NewSourceFile` constructor with path validation logic.
- Updated `Source` to support both `SourceJournal` and `SourceFile` types.
2026-02-11 23:46:42 +05:00
627b70e024 Ensure unique journalctl match entries in analyzer configuration 2026-02-11 21:36:15 +05:00
660e1fcebd Add detailed log monitoring settings for LogAlert in analyzer.toml configuration file
- Introduced a dedicated section for configuring `LogAlert` rules and patterns.
- Added examples and documentation to guide users on setting up log analysis rules and notifications.
2026-02-10 22:54:10 +05:00
c9093f8244 Add LogAlert support for log analysis with configurable rules and patterns
- Introduced `LogAlert` feature to `Setting` for flexible log monitoring.
- Implemented `LogAlertRule`, `LogAlertPattern`, `PatternValue`, and `Source` structures for robust rule and pattern configurations.
- Enhanced validation and transformation logic for sources, patterns, and rules.
- Integrated `LogAlert` into log source generation and default settings.
2026-02-10 22:53:43 +05:00
8985ff884d Handle missing regex match values in log analysis by defaulting to "unknown" in supported locales 2026-02-10 22:52:22 +05:00
c7dadb3684 Refactor login analyzers to improve error handling and validation logic for journal sources 2026-02-09 23:38:45 +05:00
d5e92b70ef Ensure alert rules are only added when AlertRule is not nil 2026-02-09 22:46:02 +05:00
3bbedc5088 Merge pull request 'v0.6.0' (#6) from develop into main
Reviewed-on: #6
2026-02-08 15:06:31 +05:00
960494eec0 Add journalctl as a prerequisite in README files 2026-02-08 15:05:07 +05:00
98a62b4551 Update CHANGELOG.md with 0.6.0 release date 2026-02-08 14:57:18 +05:00
0fa8d88479 Update third-party license file to add go.etcd.io/bbolt and fix minor formatting inconsistencies 2026-02-08 14:55:27 +05:00
9eef81d1a5 Clarify test period description to include data clearing steps at end 2026-02-08 14:50:17 +05:00
6821924c8e Added clearing of queues from the database at the end of the test period 2026-02-08 14:48:05 +05:00
f0958a340f Refactor log analysis to support dynamic alert rules through a centralized rule index, replacing hardcoded login-specific logic. 2026-02-08 14:40:36 +05:00
d9a40c620c Update CHANGELOG.md with notification queue clear command details 2026-01-28 22:11:21 +05:00
fd764fb5c5 Add support for clearing the notification queue via new daemon command and DB layer 2026-01-28 22:09:29 +05:00
d6af8a7ea5 Update CHANGELOG.md with notification queue count command details 2026-01-28 21:44:51 +05:00
f0d5b597cb Add support for retrieving notification queue size via new daemon command and DB layer 2026-01-28 21:40:04 +05:00
81a28bf485 Update CHANGELOG.md with 0.6.0 changes: add notification retry support and new configuration options 2026-01-28 21:23:41 +05:00
0fb8c0b42d Add notifications retry mechanism with configurable interval and queue handling 2026-01-28 21:22:45 +05:00
6b79928b3a Add DB layer for managing notifications queue 2026-01-28 21:20:19 +05:00
9a0cf7bd8a Merge pull request 'v0.5.0' (#5) from develop into main
Reviewed-on: #5
2026-01-17 20:25:14 +05:00
b938b73cfd Update CHANGELOG.md with 0.5.0 release date 2026-01-17 20:15:43 +05:00
ce031be060 Update CHANGELOG.md with sudo login tracking and notification details 2026-01-15 00:31:44 +05:00
5e50bc179f Add sudo command login tracking and notification support 2026-01-15 00:28:11 +05:00
279f58b644 Update CHANGELOG.md with su login tracking and notification details 2026-01-14 23:27:52 +05:00
26365a519b Add su command login tracking and notification support 2026-01-14 23:25:16 +05:00
d1f307d2ad Update CHANGELOG.md with 0.5.0 changes: add local login tracking and notifications 2026-01-14 21:51:55 +05:00
ccf228242d Add TTY login tracking with notification support 2026-01-14 21:51:20 +05:00
5e12b1f6ab Refactor: Rename SSH alert keys for clarity and update relevant usages 2026-01-13 22:09:42 +05:00
67abcc0ef2 Refactor: Rename processLogin to process in SSH analyzer for consistency 2026-01-13 00:27:11 +05:00
5ad40cdf9b Refactor: Rename process to processLogin in SSH analyzer for clarity 2026-01-13 00:24:07 +05:00
374abcea80 Refactor: Consolidate sshProcessReturn into generic processReturn for improved reusability 2026-01-13 00:18:55 +05:00
4748630b04 Merge pull request 'v0.4.0' (#4) from develop into main
Reviewed-on: #4
2026-01-11 17:01:42 +05:00
a75df70922 Update CHANGELOG.md with release date for version 0.4.0 2026-01-11 16:50:56 +05:00
a84f1ccde6 Update CHANGELOG.md to document IP blocking fix during Docker container redirection 2026-01-11 16:49:58 +05:00
0d13f851dd Fixed a bug where IP blocking for containers did not work when Docker was enabled 2026-01-11 16:44:33 +05:00
b04016c596 Update CHANGELOG.md to include rule_strategy parameter addition and its configuration details 2026-01-11 15:58:25 +05:00
8147e715f2 Update default rule_strategy to incremental and handle new strategy in Docker settings 2026-01-11 15:52:17 +05:00
f57172a2ea Add IncrementalStrategy for rule management and extend chain functionality to support rule listing and removal 2026-01-11 15:51:54 +05:00
6c5a476d6e Refactor bridge name generation and extend IPInfo with NetworkID for improved modularity and network tracking 2026-01-11 14:21:43 +05:00
264f8ac60b Add NetworkID field to IPInfo and Docker network settings for enhanced network tracking 2026-01-11 14:21:20 +05:00
b2a9f83a44 Add FetchContainer method to Docker client for improved modularity and encapsulation 2026-01-11 13:29:30 +05:00
6ac0a86d9d Add FetchBridge method to Docker client for improved encapsulation and modularity 2026-01-11 13:26:34 +05:00
a6133c308e Refactor Docker client methods: bridge and container helper methods for improved encapsulation and naming consistency 2026-01-11 12:57:48 +05:00
82b501d0ec Refactor rule generation: add GenerateBridge and GenerateContainer methods for improved modularity and clarity 2026-01-11 12:54:38 +05:00
ce6cbbe17e Add optional comment parameter to JumpTo for enhanced rule traceability 2026-01-11 00:47:38 +05:00
2de8aa29c4 Update rebuildStrategy to handle container-specific events and adjust GenerateAll calls with an extra parameter 2026-01-11 00:46:19 +05:00
3afd4aa5f3 Add optional comment support in rule generation for improved traceability 2026-01-11 00:45:47 +05:00
42160ff5ab Enhance Docker event monitoring: add support for network events, JSON unmarshaling for events, and include detailed event attributes 2026-01-11 00:44:58 +05:00
8798811806 Refactor Docker bridge handling: consolidate bridge name and subnet methods into BridgeInfo for simplified logic and enhanced structure 2026-01-11 00:23:49 +05:00
a10d56df79 Add --no-trunc to Docker ps command for full container IDs in monitoring 2026-01-10 23:49:12 +05:00
876592c38d Refactor RebuildStrategy: rename to rebuildStrategy for improved naming consistency and update method receivers accordingly 2026-01-10 21:40:33 +05:00
e55660b098 Introduce rule management strategies and refactor Docker monitoring logic 2026-01-09 23:45:57 +05:00
c6c3f991cc Update CHANGELOG.md with details on options.docker_support removal and related Docker configuration changes 2026-01-07 20:29:20 +05:00
bc177f83b8 Add support for Docker configuration and refactor related settings 2026-01-07 20:28:54 +05:00
48be913c57 Refactor analyzer: replace slice initialization with var keyword for clarity 2026-01-07 20:27:55 +05:00
0a30733d27 When the program stops, there are cases when empty messages "Received log entry" appear in the logs 2026-01-07 20:27:28 +05:00
4a5492b1c5 Add check to skip empty Docker event messages in monitoring loop 2026-01-05 22:48:20 +05:00
a3df113b07 Update CHANGELOG.md with details on fixes related to binaryLocations.docker setting and Docker crash issue 2026-01-05 22:46:07 +05:00
e034debeaa Refactor Docker event monitoring: simplify Events method and introduce EventsClose for graceful shutdowns 2026-01-05 22:41:30 +05:00
9134ab8ec0 Refactor systemd log watcher to build args within the watch method 2026-01-05 22:33:52 +05:00
ba23474eab Refactor analyzer to use a shared log channel 2026-01-05 22:27:45 +05:00
bbaf0304c3 Merge pull request 'v0.3.0' (#3) from develop into main
Reviewed-on: #3
2026-01-04 17:09:39 +05:00
1f8be77ab3 Clarify Docker support status in English README 2026-01-04 16:39:21 +05:00
d2795639da Update Russian README: reorder sections and clarify Docker support status 2026-01-04 16:39:08 +05:00
8638c49886 Add "Requirements" section to English README 2026-01-04 16:37:16 +05:00
66e6bad111 Add system requirements section to README 2026-01-04 16:37:06 +05:00
1a6d6b813b Update CHANGELOG.md with release date for version 0.3.0 2026-01-04 16:36:36 +05:00
9b8d07ccb3 Fix typo in CHANGELOG.md: correct WantedBy target from sysinit.target to multi-user.target 2026-01-04 16:20:05 +05:00
4b8622a870 Update CHANGELOG.md with partial Docker support details for version 0.3.0 2026-01-04 16:19:30 +05:00
b9719f7eaf Add Docker event monitoring and chain clearing functionality
- Introduced `Events` method in Docker client to stream and handle Docker events.
- Added `Clear` method to nftables chain interface for clearing rules.
- Enhanced daemon lifecycle to include Docker event monitoring when Docker support is enabled.
- Updated nftables rule management with event-driven chain clearing and reloading.
2026-01-04 16:06:01 +05:00
c424621615 Add Docker support with nftables integration
- Introduced Docker monitoring to manage nftables rules.
- Added `docker_support` option to firewall configuration.
- Integrated Docker bridge, container handling, and related network rules.
- Updated default configurations for Docker path and settings.
- Enhanced `daemon` lifecycle for Docker integration.
2026-01-04 13:59:26 +05:00
865f12d966 Update dependencies: bump go-nftables-client to v0.1.1 and make go-mail a direct dependency 2026-01-01 22:06:50 +05:00
b3a94855b8 Refactor localOutput receiver names for consistency in AddRule and AddRuleOut methods 2026-01-01 20:28:54 +05:00
4d001a026c Refactor localInput receiver names for consistency in AddRule and AddRuleIn methods 2026-01-01 20:28:37 +05:00
6e4bd17bfe Update CHANGELOG.md to include new configuration files notifications.toml and analyzer.toml 2025-12-31 23:14:09 +05:00
0bcdb7bcc7 Update LICENSE-3RD-PARTY.txt to include go-mail dependency and its MIT license details 2025-12-31 23:05:56 +05:00
5f2d5a1a9e Simplify EmptyAnalysis.Process by ignoring unused parameter 2025-12-31 23:01:20 +05:00
542f7415b7 Update CHANGELOG.md with email notification and SSH login notification details for version 0.3.0 2025-12-31 22:58:25 +05:00
8615c79f12 Refactor log analyzer to support SSH login detection
- Moved `Entry` type to `analysis` package for better organization.
- Introduced `SSH` analysis service to detect and notify about SSH logins.
- Added notification and logging for detected SSH login events.
2025-12-31 22:52:12 +05:00
b5686a2ee6 Add systemd log integration for analyzer service
- Implemented `systemd` log monitoring using `journalctl`.
- Added `BinPath` configuration for specifying binary paths.
- Introduced `ssh` unit monitoring for authorization tracking.
- Updated analyzer lifecycle to integrate log processing.
- Enhanced validation for `journalctl` path in settings.
- Updated default configurations with `journalctl` path.
2025-12-30 20:57:35 +05:00
e78685c130 Add support for analyzer service and configuration
- Introduced `analyzer` service for log parsing and authorization tracking.
- Added dedicated analyzer configuration via `analyzer.toml`.
- Integrated analyzer setup and lifecycle management into daemon runtime.
- Enhanced `setting` package to include analyzer settings parsing and validation.
- Updated daemon options to support analyzer configuration.
- Extended default configuration files for analyzer settings.
2025-12-30 15:03:41 +05:00
74dce294bf Add support for email notifications
- Introduced email notifications enabling configuration via `notifications.toml`.
- Created notification handling within `internal/daemon/notifications`.
- Added async email queue with error handling and customizable TLS configurations.
- Integrated notifications setup and validation into the daemon runtime.
2025-12-16 19:30:18 +05:00
6929ac9bf5 Update systemd service file for kor-elf-shield to improve reliability
- Added `Restart=on-failure` with a 10-second delay.
- Changed `WantedBy` target to `multi-user.target`.
- Defined service type as `simple`.
2025-12-08 23:19:38 +05:00
69157c90cb Merge pull request 'v0.2.0' (#2) from develop into main
Reviewed-on: #2
2025-11-29 16:12:03 +05:00
7054efd359 Update CHANGELOG.md with release date for version 0.2.0 2025-11-29 15:41:12 +05:00
57948fb639 Add support for chain priority configuration in nftables
- Introduced `input_priority`, `output_priority`, and `forward_priority` options in `firewall.toml`.
- Updated `chains` and chain creation functions to include priority handling.
- Added validation for priority values to ensure they remain within the acceptable range (-50 to 50).
- Adjusted `reloadInput`, `reloadOutput`, and `reloadForward` to respect priority settings.
2025-11-29 15:38:58 +05:00
6e7b6093f1 Add support for clear_mode option to toggle nftables clearing behavior
- Introduced `clear_mode` parameter in `firewall.toml` with options for clearing all nftables rules (`global`) or table-specific rules (`own`).
- Updated `chains` and `firewall` logic to respect `clear_mode` configuration.
- Enhanced `options` parsing and validation for `clear_mode`.
- Updated `CHANGELOG.md` to reflect the addition of `clear_mode`.
2025-11-25 20:58:12 +05:00
128 changed files with 8752 additions and 242 deletions

View File

@@ -1,3 +1,148 @@
## 0.8.0 (9.3.2026)
* Теперь можно тонко настроить блокировку портов для IP адреса, который пытается подобрать пароль.
* В файл настроек analyzer.toml в [[bruteForceProtection.groups]] добавлен новый параметр "block_type".
* В файл настроек analyzer.toml в [[bruteForceProtection.groups]] добавлен новый параметр "ports".
* В файл настроек analyzer.toml в [[bruteForceProtection.groups.rate_limits]] добавлен новый параметр "block_type".
* В файл настроек analyzer.toml в [[bruteForceProtection.groups.rate_limits]] добавлен новый параметр "ports".
* Смотрите полный список по ссылке: https://git.kor-elf.net/kor-elf-shield/kor-elf-shield/src/commit/d0a358a445b1dec850d8b84c06e86bd6872796cf/assets/configs/analyzer.toml
* Команда `kor-elf-shield ban clear` была переименована в `kor-elf-shield block clear`.
* Добавлена команда `kor-elf-shield block add`. Через эту команду можно заблокировать IP адрес. Смотрите подробно в `kor-elf-shield block add --help`.
* Добавлена команда `kor-elf-shield block delete`. Через эту команду можно удалить заблокированный IP адрес. Смотрите подробно в `kor-elf-shield block delete --help`.
***
#### English
* You can now fine-tune port blocking for the IP address attempting to brute-force a password.
* A new "block_type" parameter has been added to the analyzer.toml settings file in [[bruteForceProtection.groups]].
* A new "ports" parameter has been added to the analyzer.toml settings file in [[bruteForceProtection.groups]].
* A new "block_type" parameter has been added to the analyzer.toml settings file in [[bruteForceProtection.groups.rate_limits]].
* A new "ports" parameter has been added to the analyzer.toml settings file in [[bruteForceProtection.groups.rate_limits]].
* See the full list at: https://git.kor-elf.net/kor-elf-shield/kor-elf-shield/src/commit/d0a358a445b1dec850d8b84c06e86bd6872796cf/assets/configs/analyzer.toml
* The `kor-elf-shield ban clear` command has been renamed to `kor-elf-shield block clear`.
* The `kor-elf-shield block add` command has been added. This command can be used to block an IP address. See `kor-elf-shield block add --help` for details.
* The `kor-elf-shield block delete` command has been added. This command can be used to delete a blocked IP address. See `kor-elf-shield block delete --help` for details.
***
## 0.7.0 (28.2.2026)
***
#### Русский
* Добавлена возможность настройки отслеживания событий в журналах.
* Добавлены настройки для защиты от перебора паролей.
* В файл настроек analyzer.toml добавлены новые параметры. Смотрите полный список по ссылке: https://git.kor-elf.net/kor-elf-shield/kor-elf-shield/src/commit/187c447301b9c0bfa41ec2b2c9435ab0ce44bed6/assets/configs/analyzer.toml
* Добавлена команда `kor-elf-shield ban clear`, которая разблокирует все IP адреса, которые были забанены.
***
#### English
* Added the ability to customize event tracking in logs.
* Added settings to protect against password guessing.
* New parameters have been added to the analyzer.toml settings file. See the full list at: https://git.kor-elf.net/kor-elf-shield/kor-elf-shield/src/commit/187c447301b9c0bfa41ec2b2c9435ab0ce44bed6/assets/configs/analyzer.toml
* Added the `kor-elf-shield ban clear` command, which unbans all banned IP addresses.
***
## 0.6.0 (8.2.2026)
***
#### Русский
* Добавлена возможность повторной отправки уведомления, если в прошлый раз произошла ошибка.
* Добавлена команда `kor-elf-shield notifications queue count`, которая возвращает количество уведомлений в очереди в базе данных.
* Добавлена команда `kor-elf-shield notifications queue clear`, которая удаляет все уведомления из очереди в базе данных.
* В файл настроек kor-elf-shield.toml добавлены новые параметры:
* data_dir = Каталог для постоянных данных приложения (state): локальная база данных, кэш/индексы, файлы состояния и другие служебные файлы. Должен быть доступен на запись пользователю, от имени которого запущен демон. Если каталог не существует — будет создан. По умолчанию: "/var/lib/kor-elf-shield/"
* В файл настроек notifications.toml добавлены новые параметры:
* enable_retries = Включает повторные попытки отправить уведомление, если сразу не получилось. По умолчанию: true
* retry_interval = Интервал времени в секундах между попытками. По умолчанию: 600
***
#### English
* Added the ability to retry sending a notification if an error occurred the previous time.
* Added the `kor-elf-shield notifications queue count` command, which returns the number of notifications in the queue in the database.
* Added the `kor-elf-shield notifications queue clear` command, which removes all notifications from the queue in the database.
* New parameters have been added to the kor-elf-shield.toml settings file:
* data_dir = Directory for persistent application data (state): local database, cache/indexes, state files, and other internal data. Must be writable by the daemon user. If the directory does not exist, it will be created. Default: "/var/lib/kor-elf-shield/"
* New parameters have been added to the notifications.toml settings file:
* enable_retries = Enables repeated attempts to send a notification if the first attempt fails. Default: true
* retry_interval = The time interval in seconds between attempts. Default: 600
***
## 0.5.0 (17.1.2026)
***
#### Русский
* В настройках analyzer.toml добавил параметры local_enable и local_notify.
* local_enable = Включает отслеживание локальных авторизаций (TTY, физический доступ). По умолчанию включён.
* local_notify = Включает уведомления о локальных авторизациях. По умолчанию включён.
* В настройках analyzer.toml добавил параметры su_enable и su_notify.
* su_enable = Включает отслеживание авторизаций через su. По умолчанию включён.
* su_notify = Включает уведомления об авторизациях через su. По умолчанию включён.
* В настройках analyzer.toml добавил параметры sudo_enable и sudo_notify.
* sudo_enable = Включает отслеживание авторизаций через sudo. По умолчанию выключен.
* sudo_notify = Включает уведомления об авторизациях через sudo. По умолчанию включён.
***
#### English
* Added local_enable and local_notify parameters to analyzer.toml settings.
* local_enable = Enables tracking of local logins (TTY, physical access). Enabled by default.
* local_notify = Enables notifications about local logins. Enabled by default.
* Added su_enable and su_notify parameters to analyzer.toml settings.
* su_enable = Enables tracking of logins via su. Enabled by default.
* su_notify = Enables notifications about logins via su. Enabled by default.
* Added sudo_enable and sudo_notify parameters to analyzer.toml settings.
* sudo_enable = Enables tracking of logins via sudo. Off by default.
* sudo_notify = Enables notifications about logins via sudo. Enabled by default.
***
## 0.4.0 (11.1.2026)
***
#### Русский
* Удалён параметр options.docker_support из файла firewall.toml. Настройки от Docker перенесены в файл docker.toml.
* В настройках docker.toml добавил возможность переключать режим работы с Docker через параметр rule_strategy.
* incremental = добавляются или удаляются только правила конкретного контейнера (сейчас по умолчанию)
* rebuild = при любом изменении все цепочки Docker пересоздаются целиком (старый режим)
* Исправлена ошибка:
* Настройка binaryLocations.docker не работала.
* Программа аварийно завершалась после остановки Docker'а.
* Указанные в настройках IP-адреса не блокировались во время перенаправления в контейнер Docker.
***
#### English
* Removed the options.docker_support parameter from firewall.toml. Docker settings have been moved to the docker.toml file.
* Added the ability to switch Docker operation mode via the rule_strategy parameter to the docker.toml settings.
* incremental = only rules for a specific container are added or removed (currently the default)
* rebuild = any change rebuilds all Docker chains (old mode)
* Fixed error:
* The binaryLocations.docker setting did not work.
* The program crashed after Docker was stopped.
* The IP addresses specified in the settings were not blocked during redirection to the Docker container.
***
## 0.3.0 (4.1.2026)
***
#### Русский
* Добавлена частичная поддержка Docker.
* Добавлен параметр options.docker_support в firewall.toml. Это включает поддержку Docker.
* При каждом запуске контейнера будут полностью пересчитываться правила у chain, которые относятся к Docker. (в будущем планирую это переработать)
* Добавлены настройки для уведомлений по электронной почте.
* Добавлен файл настроек notifications.toml.
* Реализовано уведомление о входах по SSH.
* Добавлен файл настроек analyzer.toml.
* Служба systemd
* Изменено WantedBy с sysinit.target на multi-user.target
* Убрано ExecStop. По факту это не работало. Чтобы остановить сервис с очисткой правил nftables выполните команду: kor-elf-shield stop
* Добавлено Restart=on-failure. Нужно для того, чтобы программа перезапустилась после критической ошибки.
***
#### English
* Added partial Docker support.
* Added the options.docker_support parameter to firewall.toml. This enables Docker support.
* Each container launch will completely recalculate the Docker-specific rules in chain. (I plan to rework this in the future)
* Added settings for email notifications.
* Added notifications.toml settings file.
* Implemented notification of SSH logins.
* Added analyzer.toml settings file.
* Systemd service
* Changed WantedBy from sysinit.target to multi-user.target
* Removed ExecStop. It didn't actually work. To stop the service and clear the nftables rules, run the command: kor-elf-shield stop
* Added Restart=on-failure. This is necessary to ensure the program restarts after a critical error.
## 0.2.0 (29.11.2025)
***
#### Русский
* Добавлен параметр clear_mode в firewall.toml. Он позволяет переключать режим очистки: все правила в nftables или только таблица, относящаяся к программе.
* Добавлен параметр input_priority в firewall.toml. Можно указать приоритет от -50 до 50 для chain input.
* Добавлен параметр output_priority в firewall.toml. Можно указать приоритет от -50 до 50 для chain output.
* Добавлен параметр forward_priority в firewall.toml. Можно указать приоритет от -50 до 50 для chain forward.
***
#### English
* Added the clear_mode parameter to firewall.toml. It allows you to toggle clearing of all rules in nftables or only the program-specific table.
* Added the input_priority parameter to firewall.toml. You can specify a priority from -50 to 50 for chain input.
* Added the output_priority parameter to firewall.toml. You can specify a priority from -50 to 50 for chain output.
* Added the forward_priority parameter to firewall.toml. You can specify a priority from -50 to 50 for chain forward.
***
## 0.1.0 (8.11.2025)
***
#### Русский

View File

@@ -92,6 +92,32 @@ THE SOFTWARE.
--------------------------------------------------------------------------------
github.com/nxadm/tail
# The MIT License (MIT)
# © Copyright 2015 Hewlett Packard Enterprise Development LP
Copyright (c) 2014 ActiveState
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
github.com/pelletier/go-toml/v2
The bulk of github.com/pelletier/go-toml is distributed under the MIT license
@@ -684,6 +710,57 @@ SOFTWARE.
--------------------------------------------------------------------------------
github.com/wneessen/go-mail
MIT License
Copyright (c) 2022-2025 The go-mail Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
go.etcd.io/bbolt
The MIT License (MIT)
Copyright (c) 2013 Ben Johnson
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
go.uber.org/multierr
Copyright (c) 2017-2021 Uber Technologies, Inc.
@@ -747,13 +824,13 @@ starting in 2011 when the project was ported over:
- internal/libyaml/yaml.go
- internal/libyaml/yamlprivate.go
Copyright 2006-2011 - Kirill Simonov
Copyright 2006-2010 Kirill Simonov
https://opensource.org/license/mit
All the remaining project files are covered by the Apache license:
Copyright 2011-2019 - Canonical Ltd
Copyright 2025 - The go-yaml Project Contributors
Copyright 2011-2019 Canonical Ltd
Copyright 2025 The go-yaml Project Contributors
http://www.apache.org/licenses/LICENSE-2.0
--------------------------------------------------------------------------------
@@ -822,6 +899,40 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------
gopkg.in/tomb.v1
tomb - support for clean goroutine termination in Go.
Copyright (c) 2010-2011 - Gustavo Niemeyer <gustavo@niemeyer.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------
github.com/golang/go
Copyright 2009 The Go Authors.
@@ -853,4 +964,3 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------

View File

@@ -6,6 +6,16 @@
***
<p style="color: red; font-weight: bold; font-size: 20px;">Требования:</p>
* Запуск от имени root
* Linux 5.2+
* nftables
* Systemd
* journalctl
***
### Сделано:
* Реализована возможность настраивать nftables:
* По умолчанию разрешить или блокировать входящий трафик.
@@ -14,12 +24,12 @@
* Настройка портов.
* Настройка белых и чёрных списков IP адресов.
* Настройка логирования.
### В планах:
* Подружить с docker.
* Подружить с docker (частично).
* Внедрить настройку уведомлений (пока только e-mail).
* Отправлять уведомления при авторизации ssh.
* Защита от перебора паролей (brute-force).
### В планах:
* Уведомлять, если появится новый пользователь в системе.
* Уведомлять, если изменились системные файлы.
***

View File

@@ -6,6 +6,16 @@
***
<p style="color: red; font-weight: bold; font-size: 20px;">Requirements:</p>
* Run as root
* Linux 5.2+
* nftables
* Systemd
* journalctl
***
### Done:
* The ability to configure nftables has been implemented:
* Allow or block incoming traffic by default.
@@ -14,12 +24,12 @@
* Port configuration.
* Setting up white and black lists of IP addresses.
* Setting up logging.
### The plans include:
* Make friends with docker.
* Make friends with docker (partially).
* Implement notification settings (for now only by e-mail).
* Send notifications during ssh authorization.
* Password brute-force protection.
### The plans include:
* Notify if a new user appears in the system.
* Notify if system files have changed.
***

View File

@@ -0,0 +1,428 @@
###############################################################################
# РАЗДЕЛ:Защита от перебора пароля
# ***
# SECTION:Protection against password brute-force attacks
###############################################################################
[bruteForceProtection]
###
# Включает группу отслеживания перебора пароля.
# Если отключено, отслеживание перебора пароля работать не будет.
# По умолчанию: true
# ***
# Enables the password attack monitoring group.
# If disabled, password attack monitoring will not work.
# Default: true
###
enabled = true
###
# Включает уведомления о блокировках.
# Если отключено, они будут отображаться в логах только на уровне = "info".
# По умолчанию: true
# ***
# Enables notifications about blocks.
# If disabled, they will only appear in the logs under level = "info".
# Default: true
###
notify = true
###
# Максимальное количество ошибок, после которого произойдёт блокировка.
# По умолчанию: 5
# ***
# The maximum number of errors after which a blocking will occur.
# Default: 5
###
rate_limit_count = 5
###
# На сколько времени в секундах блокировать IP адрес.
# Если указать 0, то будет навсегда заблокирован.
# По умолчанию: 3600
# ***
# How long in seconds to block an IP address.
# If you specify 0, it will be blocked forever.
# Default: 3600
###
blocking_time = 3600
###
# Установите временной интервал для отслеживания сбоев входа в систему в течение секунд.
# По умолчанию: 3600
# ***
# Set the time interval to monitor login failures in seconds.
# Default: 3600
###
rate_limit_period = 3600
###
# Указываем в секундах, через какое время сбрасывать данные IP в группе _default, если не было событий.
# Если указать 0, то не будет сбрасывать.
# По умолчанию: 86400
# ***
# Specify the number of seconds after which IP data in the _default group will be reset if there have been no events.
# If you specify 0, the reset will not occur.
# Default: 86400
###
rate_limit_reset_period = 86400
###
# Включает защиту от перебора пароля от ssh.
# По умолчанию: true
# ***
# Enables protection against brute-force attacks against ssh.
# Default: true
###
ssh_enable = true
###
# Включает уведомления о блокировках, когда срабатывает защита от перебора пароля.
# Если отключено, они будут отображаться в логах только на уровне = "info".
# По умолчанию: true
# ***
# Enables block notifications when password brute-force protection is triggered.
# If disabled, they will only appear in the logs under level = "info".
# Default: true
###
ssh_notify = true
###
# Можно указать свою группу, чтобы связать с другими правилами.
# По умолчанию: ""
# ***
# You can specify your own group to link it to other rules.
# Default: ""
###
ssh_group = ""
###
# Указываем настройки группы.
# Примеры:
# [[bruteForceProtection.groups]]
# name = "my_name_group" # Имя группы. Разрешены символы "a-z, A-Z, -, _". Первый символ обязательно буква (обязательное поле)
# message = "Любой текст группы" # Текст уведомления (обязательное поле)
# rate_limit_reset_period = 86400 # Указываем в секундах, через какое время сбрасывать данные в группе, если не было событий. Если указать 0, то не будет сбрасывать.
## block_type = "ip_port" # Указываем тип блокировки: ip, ip_port. Если ничего не укажите, будет указан тип ip.
## ports = ["22/tcp", "22/udp"] # Если тип блокировки стоит ip_port, то нужно указать порты, которые будут заблокированы после обнаружения попытки перебора пароля.
# [[bruteForceProtection.groups.rate_limits]]
## Через сколько будет срабатывать блокировка. В данном случае в течение часа, если было 5 обнаружений, то сработает блокировка.
## И заблокирует на 10 минут.
# count = 5
# period = 3600
# blocking_time = 600
## Внутри bruteForceProtection.groups.rate_limits можно переопределить настройки block_type и ports.
## block_type = "ip_port" # Указываем тип блокировки: ip, ip_port. Если ничего не укажите, будет указан тип ip.
## ports = ["22/tcp", "22/udp", "80/tcp", "443/tcp"] # Если тип блокировки стоит ip_port, то нужно указать порты, которые будут заблокированы после обнаружения попытки перебора пароля.
# [[bruteForceProtection.groups.rate_limits]]
## После срабатывания блокировки, переходим на второй уровень, теперь если в течение часа было 3 обнаружения, то сработает блокировка.
## И теперь заблокирует на час.
# count = 3
# period = 3600
# blocking_time = 3600
# [[bruteForceProtection.groups.rate_limits]]
## И таких уровней можно указывать сколько захотите.
# count = 2
# period = 600
# blocking_time = 3600
#
# ***
# Specify group settings.
# Examples:
# [[bruteForceProtection.groups]]
# name = "my_name_group" # Group name. Allowed characters are "a-z, A-Z, -, _". The first character must be a letter (required)
# message = "Any group text" # Notification text (required)
# rate_limit_reset_period = 86400 # Specify, in seconds, how long to reset group data if there have been no events. Specifying 0 means no reset.
## block_type = "ip_port" # Specify the blocking type: IP, IP_port. If you don't specify anything, the IP type will be used.
## ports = ["22/tcp", "22/udp"] # If the blocking type is ip_port, then you need to specify the ports that will be blocked after detecting a password brute-force attempt.
# [[bruteForceProtection.groups.rate_limits]]
## How long will it take for the block to be triggered? In this case, if there were 5 detections within an hour, the block will be triggered.
## And it will block for 10 minutes.
# count = 5
# period = 3600
# blocking_time = 600
## Inside bruteForceProtection.groups.rate_limits you can override the block_type and ports settings.
## block_type = "ip_port" # Specify the blocking type: IP, IP_port. If you don't specify anything, the IP type will be used.
## ports = ["22/tcp", "22/udp", "80/tcp", "443/tcp"] # If the blocking type is ip_port, then you need to specify the ports that will be blocked after detecting a password brute-force attempt.
# [[bruteForceProtection.groups.rate_limits]]
## After the blocking is triggered, we move to the second level. Now, if there are three detections within an hour, the blocking will be triggered.
## And now it will block for an hour.
# count = 3
# period = 3600
# blocking_time = 3600
# [[bruteForceProtection.groups.rate_limits]]
## You can specify as many of these levels as you like.
# count = 2
# period = 600
# blocking_time = 3600
###
###
# Указываем настройки логов, которые надо отслеживать для защиты от перебора пароля.
# Примеры:
# [[bruteForceProtection.rules]]
# enabled = true # Включает или выключает отслеживания (обязательное поле)
# notify = true # Включает или выключает уведомления (обязательное поле)
# name = "my_name_rule" # Имя уведомления. Разрешены символы "a-z, A-Z, -, _". Первый символ обязательно буква (обязательное поле)
# message = "Ваш любой текст для уведомления" # Текст уведомления (обязательное поле)
# group = "my_name_group" # Можно указать имя группы (не обязательное поле)
# [bruteForceProtection.rules.source]
# type = "journalctl" # journalctl или file (обязательное поле)
# field = "systemd_unit" # systemd_unit или syslog_identifier (обязательное поле если type = "journalctl")
# match = "ssh.service" # Значение (обязательное поле если type = "journalctl")
# если field = "systemd_unit", то match должен заканчиваться: .service, .socket, .target, .mount, .timer, .path, .scope, .slice, .device
# [[bruteForceProtection.rules.patterns]]
# regexp = '^Failed password for (\S+) from (\S+) port \S+'
# ip = 2 # Указываем номер value, который укажет IP (обязательное поле)
# [[bruteForceProtection.rules.patterns.values]]
# name = "Пользователь"
# value = 1
#
# ***
# Specify the log settings that need to be monitored to protect against password brute-force attacks.
# Examples:
# [[bruteForceProtection.rules]]
# enabled = true # Enables or disables tracking (required)
# notify = true # Enables or disables notifications (required)
# name = "my_name_rule" # Notification name. Allowed characters are "a-z, A-Z, -, _". The first character must be a letter (required field)
# message = "Your any text for notification" # Notification text (required field)
# group = "my_name_group" # You can specify the group name (optional field)
# [bruteForceProtection.rules.source]
# type = "journalctl" # journalctl or file (required)
# field = "systemd_unit" # systemd_unit or syslog_identifier (required if type = "journalctl")
# match = "ssh.service" # Value (required if type = "journalctl")
# If field = "systemd_unit", then match must end with: .service, .socket, .target, .mount, .timer, .path, .scope, .slice, .device
# [[bruteForceProtection.rules.patterns]]
# regexp = '^Accepted (\S+) for (\S+) from (\S+) port \S+'
# [[bruteForceProtection.rules.patterns]]
# regexp = '^Failed password for (\S+) from (\S+) port \S+'
# ip = 2 # We indicate the value number that will indicate the IP (required field)
# [[bruteForceProtection.rules.patterns.values]]
# name = "User"
# value = 1
###
###############################################################################
# РАЗДЕЛ:Отслеживать авторизаций
# ***
# SECTION:Track authorizations
###############################################################################
[login]
###
# Включает группу отслеживания авторизации.
# Если отключено, отслеживание авторизации работать не будет.
# По умолчанию: true
# ***
# Enables the authorization tracking group.
# If disabled, no authorization tracking will work.
# Default: true
###
enabled = true
###
# Включает уведомления об авторизации.
# Если отключено, они будут отображаться в логах только на уровне = "info".
# По умолчанию: true
# ***
# Enables authorization notifications.
# If disabled, they will only appear in the logs under level = "info".
# Default: true
###
notify = true
###
# Включает отслеживание авторизации по ssh.
# По умолчанию: true
# ***
# Enables tracking of SSH authorization.
# Default: true
###
ssh_enable = true
###
# Включает уведомления об авторизации по ssh.
# Если отключено, они будут отображаться в логах только на уровне = "info".
# По умолчанию: true
# ***
# Enables SSH authorization notifications.
# If disabled, they will only appear in the logs under level = "info".
# Default: true
###
ssh_notify = true
###
# Включает отслеживание локальных авторизаций (TTY, физический доступ).
# По умолчанию: true
# ***
# Enables tracking of local authorizations (TTY, physical access).
# Default: true
###
local_enable = true
###
# Включает уведомления о локальных авторизациях.
# По умолчанию: true
# ***
# Enables local authorization notifications.
# Default: true
###
local_notify = true
###
# Включает отслеживание, если кто-либо использует команду `su` для доступа к другой учетной записи.
# По умолчанию: true
# ***
# Enables tracking if someone uses the `su` command to access another account.
# Default: true
###
su_enable = true
###
# Включает уведомления, если кто-либо использует команду `su` для доступа к другой учетной записи.
# По умолчанию: true
# ***
# Enables notifications if someone uses the `su` command to access another account.
# Default: true
###
su_notify = true
###
# Включает отслеживание, если кто-либо использует команду `sudo` для доступа к другой учетной записи.
#
# ПРИМЕЧАНИЕ: Эта опция может стать обременительной, если команда sudo широко используется
# для получения root-доступа администраторами или панелями управления.
#
# По умолчанию: false
# ***
# Enables tracking if someone uses the `sudo` command to access another account.
#
# NOTE: This option could become onerous if sudo is used extensively for root
# access by administrators or control panels.
#
# Default: false
###
sudo_enable = false
###
# Включает уведомления, если кто-либо использует команду `sudo` для доступа к другой учетной записи.
# По умолчанию: true
# ***
# Enables notifications if someone uses the `sudo` command to access another account.
# Default: true
###
sudo_notify = true
###############################################################################
# РАЗДЕЛ:Настройки анализа логов для уведомления
# ***
# SECTION:Log analysis settings for notifications
###############################################################################
[logAlert]
###
# Включает группу отслеживания логов для оповещения.
# Если отключено, отслеживание логов для оповещения работать не будет.
# По умолчанию: true
# ***
# Enables the log monitoring group for alerts.
# If disabled, log monitoring for alerts will not work.
# Default: true
###
enabled = true
###
# Включает уведомления.
# Если отключено, они будут отображаться в логах только на уровне = "info".
# По умолчанию: true
# ***
# Enables notifications.
# If disabled, they will only appear in the logs under level = "info".
# Default: true
###
notify = true
###
# Указываем настройки группы.
# Примеры:
# [[logAlert.groups]]
# name = "my_name_group" # Имя группы. Разрешены символы "a-z, A-Z, -, _". Первый символ обязательно буква (обязательное поле)
# message = "Любой текст группы" # Текст уведомления (обязательное поле)
# rate_limit_reset_period = 86400 # Указываем в секундах, через какое время сбрасывать данные в группе, если не было событий. Если указать 0, то не будет сбрасывать.
# [[logAlert.groups.rate_limits]]
## Через сколько будет срабатывать оповещение. В данном случае в течение часа, если было 5 обнаружений, то сработает оповещение.
# count = 5
# period = 3600
# [[logAlert.groups.rate_limits]]
## После срабатывания оповещения, переходим на второй уровень, теперь если в течение часа было 3 обнаружения, то сработает оповещение.
# count = 3
# period = 3600
# [[logAlert.groups.rate_limits]]
## И таких уровней можно указывать сколько захотите.
# count = 2
# period = 600
#
# ***
# Specify group settings.
# Examples:
# [[logAlert.groups]]
# name = "my_name_group" # Group name. Allowed characters are "a-z, A-Z, -, _". The first character must be a letter (required)
# message = "Any group text" # Notification text (required)
# rate_limit_reset_period = 86400 # Specify, in seconds, how long to reset group data if there have been no events. Specifying 0 means no reset.
# [[logAlert.groups.rate_limits]]
## How long to wait before an alert is triggered. In this case, if there were 5 detections within an hour, the alert will be triggered.
# count = 5
# period = 3600
# [[logAlert.groups.rate_limits]]
## After the alert is triggered, we move to the second level. Now, if there are 3 detections within an hour, the alert will be triggered.
# count = 3
# period = 3600
# [[logAlert.groups.rate_limits]]
## You can specify as many of these levels as you like.
# count = 2
# period = 600
###
###
# Указываем настройки логов, которые надо отслеживать для оповещения.
# Примеры:
# [[logAlert.rules]]
# enabled = true # Включает или выключает отслеживания (обязательное поле)
# notify = true # Включает или выключает уведомления (обязательное поле)
# name = "my_name_rule" # Имя уведомления. Разрешены символы "a-z, A-Z, -, _". Первый символ обязательно буква (обязательное поле)
# message = "Ваш любой текст для уведомления" # Текст уведомления (обязательное поле)
# group = "my_name_group" # Можно указать имя группы (не обязательное поле)
# [logAlert.rules.source]
# type = "journalctl" # journalctl или file (обязательное поле)
# field = "systemd_unit" # systemd_unit или syslog_identifier (обязательное поле если type = "journalctl")
# match = "ssh.service" # Значение (обязательное поле если type = "journalctl")
# если field = "systemd_unit", то match должен заканчиваться: .service, .socket, .target, .mount, .timer, .path, .scope, .slice, .device
# [[logAlert.rules.patterns]]
# regexp = '^Accepted (\S+) for (\S+) from (\S+) port \S+'
# [[logAlert.rules.patterns.values]]
# name = "Пользователь"
# value = 2
# [[logAlert.rules.patterns.values]]
# name = "IP"
# value = 3
#
# ***
# Specify the log settings to monitor for notifications.
# Examples:
# [[logAlert.rules]]
# enabled = true # Enables or disables tracking (required)
# notify = true # Enables or disables notifications (required)
# name = "my_name_rule" # Notification name. Allowed characters are "a-z, A-Z, -, _". The first character must be a letter (required field)
# message = "Your any text for notification" # Notification text (required field)
# group = "my_name_group" # You can specify the group name (optional field)
# [logAlert.rules.source]
# type = "journalctl" # journalctl or file (required)
# field = "systemd_unit" # systemd_unit or syslog_identifier (required if type = "journalctl")
# match = "ssh.service" # Value (required if type = "journalctl")
# If field = "systemd_unit", then match must end with: .service, .socket, .target, .mount, .timer, .path, .scope, .slice, .device
# [[logAlert.rules.patterns]]
# regexp = '^Accepted (\S+) for (\S+) from (\S+) port \S+'
# [[logAlert.rules.patterns.values]]
# name = "User"
# value = 2
# [[logAlert.rules.patterns.values]]
# name = "IP"
# value = 3
###

View File

@@ -0,0 +1,23 @@
###
# Включает поддержку docker.
# По умолчанию: false
# ***
# Includes docker support.
# Default: false
###
enabled = false
###
# Стратегия управления правилами при запуске или остановке контейнеров в Docker:
# rebuild = при любом изменении все цепочки Docker пересоздаются целиком
# incremental = добавляются или удаляются только правила конкретного контейнера
#
# По умолчанию: "incremental"
# ***
# Strategy for managing rules when container start or stop events occur in docker:
# rebuild = any change causes all Docker chains to be rebuilt entirely
# incremental = only rules for a specific container are added or removed
#
# Default: "incremental"
###
rule_strategy = "incremental"

View File

@@ -299,6 +299,26 @@ icmp_strict = false
# SECTION:General Settings
###############################################################################
[options]
###
# Переключения режима очистки фаервола nftables. Если указать "own", то может получиться конфликт в правилах.
# Может спровоцировать проблему в безопасности. Указывайте "own" если вы уверены в своих действиях.
# Допустимые значения:
# global = очищает полностью все правила
# own = очищает только правила от таблицы, которые указаны в параметре table_name
#
# По умолчанию: global
# ***
# Switching the nftables firewall cleaning mode. If you specify "own", a conflict in the rules may occur.
# This may cause a security issue. Use "own" if you are confident in your actions.
# Valid values:
# global = clears all rules completely
# own = clears only the rules from the table that are specified in the table_name parameter
#
# Default: global
###
clear_mode = "global"
###
# Будет ли демон сохранять правила в системный файл nftables.
# Не забудьте проверить, что путь к nftables соответствует вашей ОС.
@@ -409,6 +429,21 @@ default_allow_forward = false
###
input_drop = "drop"
###
# Приоритет chain для input.
# От: -50
# По: 50
#
# По умолчанию: -10
# ***
# Chain priority for input.
# From: -50
# To: 50
#
# Default: -10
###
input_priority = -10
###
# Как заблокировать исходящий трафик. Блокировать молча или с обратной связью.
# Допустимые значения:
@@ -426,6 +461,21 @@ input_drop = "drop"
###
output_drop = "reject"
###
# Приоритет chain для output.
# От: -50
# По: 50
#
# По умолчанию: -10
# ***
# Chain priority for output.
# From: -50
# To: 50
#
# Default: -10
###
output_priority = -10
###
# Как заблокировать трафик forward. Блокировать молча или с обратной связью.
# Допустимые значения:
@@ -443,6 +493,21 @@ output_drop = "reject"
###
forward_drop = "drop"
###
# Приоритет chain для forward.
# От: -50
# По: 50
#
# По умолчанию: -10
# ***
# Chain priority for forward.
# From: -50
# To: 50
#
# Default: -10
###
forward_priority = -10
###############################################################################
# РАЗДЕЛ:Именование метаданных
# ***

View File

@@ -22,13 +22,13 @@
testing = true
###
# Тестовый период, по истечении которого брандмауэр удалит правила и демон завершит работу.
# Тестовый период, по истечении которого брандмауэр удалит правила, очистит другие данные и демон завершит работу.
# Период указывается в минутах.
# Мин: 1
# Макс: 30000
# По умолчанию: 5
# ***
# The test period after which the firewall will clear the rules and the daemon will shut down.
# A test period after which the firewall will remove rules, clear other data, and the daemon will exit.
# The period is specified in minutes.
# Min: 1
# Max: 30000
@@ -76,6 +76,18 @@ pid_file = "/var/run/kor-elf-shield/kor-elf-shield.pid"
###
socket_file = "/var/run/kor-elf-shield/kor-elf-shield.sock"
###
# Каталог для постоянных данных приложения (state): локальная база данных, кэш/индексы, файлы состояния
# и другие служебные файлы. Должен быть доступен на запись пользователю, от имени которого запущен демон.
# Если каталог не существует — будет создан.
# По умолчанию: "/var/lib/kor-elf-shield/"
# ***
# Directory for persistent application data (state): local database, cache/indexes, state files, and other
# internal data. Must be writable by the daemon user. If the directory does not exist, it will be created.
# Default: "/var/lib/kor-elf-shield/"
###
data_dir = "/var/lib/kor-elf-shield/"
###############################################################################
# РАЗДЕЛ:Log
# ***
@@ -192,6 +204,24 @@ log_error_paths = ["stderr"]
###
nftables = "/usr/sbin/nft"
###
# Укажите путь к journalctl. Возможно в вашей ОС путь может отличаться.
# По умолчанию: /bin/journalctl
# ***
# Specify the path to journalctl. The path may differ in your OS.
# Default: /bin/journalctl
###
journalctl = "/bin/journalctl"
###
# Укажите путь к docker. Возможно в вашей ОС путь может отличаться.
# По умолчанию: /usr/bin/docker
# ***
# Specify the path to docker. The path may differ in your OS.
# Default: /usr/bin/docker
###
docker = "/usr/bin/docker"
###############################################################################
# РАЗДЕЛ:Пути к другим настройкам
# ***
@@ -210,3 +240,36 @@ nftables = "/usr/sbin/nft"
# Default: /etc/kor-elf-shield/firewall.toml
###
firewall = "/etc/kor-elf-shield/firewall.toml"
###
# Укажите путь к настройкам уведомлений.
# Файл должен иметь расширение .toml.
# По умолчанию: /etc/kor-elf-shield/notifications.toml
# ***
# Specify the path to notification settings.
# The file must have the .toml extension.
# Default: /etc/kor-elf-shield/notifications.toml
###
notifications = "/etc/kor-elf-shield/notifications.toml"
###
# Укажите путь к настройкам парсинга логов.
# Файл должен иметь расширение .toml.
# По умолчанию: /etc/kor-elf-shield/analyzer.toml
# ***
# Specify the path to the log parsing settings.
# The file must have the .toml extension.
# Default: /etc/kor-elf-shield/analyzer.toml
###
analyzer = "/etc/kor-elf-shield/analyzer.toml"
###
# Укажите путь к настройкам docker.
# Файл должен иметь расширение .toml.
# По умолчанию: /etc/kor-elf-shield/docker.toml
# ***
# Specify the path to the docker settings.
# The file must have the .toml extension.
# Default: /etc/kor-elf-shield/docker.toml
###
docker = "/etc/kor-elf-shield/docker.toml"

View File

@@ -0,0 +1,199 @@
###############################################################################
# РАЗДЕЛ:Базовые настройки
# ***
# SECTION:Basic settings
###############################################################################
###
# Включает или выключает уведомления.
# !!! Не забудьте перед включением настроить email !!!
# false = Выключает.
# true = Включает.
#
# По умолчанию: false
# ***
# Turns notifications on or off.
# !!! Don't forget to set up your email before turning it on !!!
# false = Disables.
# true = Enables.
#
# Default: false
###
enabled = false
###
# Включает повторные попытки отправить уведомление, если сразу не получилось.
# false = Выключает.
# true = Включает.
#
# По умолчанию: true
# ***
# Enables repeated attempts to send a notification if the first attempt fails.
# false = Disables.
# true = Enables.
#
# Default: true
###
enable_retries = true
###
# Интервал времени в секундах между попытками.
#
# По умолчанию: 600
# ***
# The time interval in seconds between attempts.
#
# Default: 600
###
retry_interval = 600
###
# Название сервера в уведомлениях
# По умолчанию: server
# ***
# Server name in notifications
# Default: server
###
server_name = "server"
###############################################################################
# РАЗДЕЛ:email
# ***
# SECTION:email
###############################################################################
[email]
###
# Сервер, через который будет отправляться почта.
# Например: smtp.gmail.com
# По умолчанию:
# ***
# The server through which mail will be sent.
# For example: smtp.gmail.com
# Default:
###
host = ""
###
# Указать порт сервера, через который будет отправляться почта.
# Например: 587
# По умолчанию:
# ***
# Specify the server port through which mail will be sent.
# For example: 587
# Default:
###
port = ""
###
# Логин к серверу, через который будет отправляться почта.
# По умолчанию:
# ***
# Login to the server through which mail will be sent.
# Default:
###
username = ""
###
# Пароль к серверу, через который будет отправляться почта.
# По умолчанию:
# ***
# Password for the server through which mail will be sent.
# Default:
###
password = ""
###
# Тип авторизации.
# Варианты: "PLAIN", "LOGIN", "CRAM-MD5", "NONE"
# Обычно используется "PLAIN". Если у вас внутренний релей без пароля - используйте "NONE".
# По умолчанию: "PLAIN"
# ***
# Authorization type.
# Options: "PLAIN", "LOGIN", "CRAM-MD5", "NONE"
# Usually "PLAIN" is used. If you have an internal relay without a password - use "NONE".
# Default: "PLAIN"
###
auth_type = "PLAIN"
###
# Защищённое соединение.
# Варианты: "NONE", "STARTTLS", "IMPLICIT"
#
# "NONE" — без TLS
# "STARTTLS" — обычный SMTP на 587 (или 25) + upgrade через STARTTLS
# "IMPLICIT" — SMTPS (TLS сразу), обычно 465
#
# По умолчанию: "STARTTLS"
# ***
# Secure connection.
# Options: "NONE", "STARTTLS", "IMPLICIT"
#
# "NONE" — without TLS
# "STARTTLS" — regular SMTP on 587 (or 25) + upgrade via STARTTLS
# "IMPLICIT" — SMTPS (TLS immediately), typically 465
#
# Default: "STARTTLS"
###
tls_mode = "STARTTLS"
###
# Только если тип защищённого соединения в режиме starttls.
# Варианты: "MANDATORY", "OPPORTUNISTIC"
#
# "MANDATORY" — если STARTTLS недоступен/не удался будет вызвана ошибка
# "OPPORTUNISTIC" — попытаться STARTTLS, но если нельзя, то попытается отправить без TLS
#
# По умолчанию: "MANDATORY"
# ***
# Only if the secure connection type is in starttls mode.
# Options: "MANDATORY", "OPPORTUNISTIC"
#
# "MANDATORY" — if STARTTLS is unavailable/failed, an error will be raised
# "OPPORTUNISTIC" — try STARTTLS, but if that fails, it will try to send without TLS
#
# Default: "MANDATORY"
###
tls_policy = "MANDATORY"
###
# Проверять ли сертификат защищённого соединения.
#
# false = Выключает.
# true = Включает.
#
# По умолчанию: true
# ***
# Whether to check the secure connection certificate.
#
# false = Disables.
# true = Enables.
#
# Default: true
###
tls_verify = true
###
# Email, который будет указываться при отправке почты.
# Например: test@localhost
# По умолчанию:
# ***
# Email that will be specified when sending mail.
# For example: test@localhost
# Default:
###
from = ""
###
# Адрес электронной почты, на который будет отправлено письмо.
# Например: root@localhost
# По умолчанию:
# ***
# Email address to which the mail will be sent.
# For example: root@localhost
# Default:
###
to = ""

View File

@@ -3,8 +3,10 @@ Description=kor-elf-shield
After=network.target
[Service]
Type=simple
ExecStart=/usr/sbin/kor-elf-shield start
ExecStop=/usr/sbin/kor-elf-shield stop
Restart=on-failure
RestartSec=10s
[Install]
WantedBy=sysinit.target
WantedBy=multi-user.target

20
go.mod
View File

@@ -3,25 +3,29 @@ module git.kor-elf.net/kor-elf-shield/kor-elf-shield
go 1.25
require (
github.com/nicksnyder/go-i18n/v2 v2.6.0
git.kor-elf.net/kor-elf-shield/go-nftables-client v0.1.1
github.com/nicksnyder/go-i18n/v2 v2.6.1
github.com/nxadm/tail v1.4.11
github.com/spf13/viper v1.21.0
github.com/urfave/cli/v3 v3.4.1
go.uber.org/zap v1.27.0
golang.org/x/sys v0.36.0
golang.org/x/text v0.29.0
github.com/urfave/cli/v3 v3.6.2
github.com/wneessen/go-mail v0.7.2
go.etcd.io/bbolt v1.4.3
go.uber.org/zap v1.27.1
golang.org/x/sys v0.41.0
golang.org/x/text v0.34.0
)
require (
git.kor-elf.net/kor-elf-shield/go-nftables-client v0.1.0 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/go-viper/mapstructure/v2 v2.5.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/sagikazarmark/locafero v0.12.0 // indirect
github.com/spf13/afero v1.15.0 // indirect
github.com/spf13/cast v1.10.0 // indirect
github.com/spf13/pflag v1.0.10 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
go.uber.org/multierr v1.10.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
)

32
go.sum
View File

@@ -1,15 +1,19 @@
git.kor-elf.net/kor-elf-shield/go-nftables-client v0.1.0 h1:jglai6XEk1uSCxd1TEpx6IBqWhkc+KgonV6rUDTkyyU=
git.kor-elf.net/kor-elf-shield/go-nftables-client v0.1.0/go.mod h1:a7F+XdL1pK5P3ucQRR2EK/fABAP37LLBENiA4hX7L6A=
git.kor-elf.net/kor-elf-shield/go-nftables-client v0.1.1 h1:3oGtZ/r1YAdlvI16OkZSCaxcWztHe/33ITWfI2LaQm0=
git.kor-elf.net/kor-elf-shield/go-nftables-client v0.1.1/go.mod h1:a7F+XdL1pK5P3ucQRR2EK/fABAP37LLBENiA4hX7L6A=
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro=
github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -18,6 +22,10 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/nicksnyder/go-i18n/v2 v2.6.0 h1:C/m2NNWNiTB6SK4Ao8df5EWm3JETSTIGNXBpMJTxzxQ=
github.com/nicksnyder/go-i18n/v2 v2.6.0/go.mod h1:88sRqr0C6OPyJn0/KRNaEz1uWorjxIKP7rUUcvycecE=
github.com/nicksnyder/go-i18n/v2 v2.6.1 h1:JDEJraFsQE17Dut9HFDHzCoAWGEQJom5s0TRd17NIEQ=
github.com/nicksnyder/go-i18n/v2 v2.6.1/go.mod h1:Vee0/9RD3Quc/NmwEjzzD7VTZ+Ir7QbXocrkhOzmUKA=
github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -40,20 +48,40 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/urfave/cli/v3 v3.4.1 h1:1M9UOCy5bLmGnuu1yn3t3CB4rG79Rtoxuv1sPhnm6qM=
github.com/urfave/cli/v3 v3.4.1/go.mod h1:FJSKtM/9AiiTOJL4fJ6TbMUkxBXn7GO9guZqoZtpYpo=
github.com/urfave/cli/v3 v3.6.2 h1:lQuqiPrZ1cIz8hz+HcrG0TNZFxU70dPZ3Yl+pSrH9A8=
github.com/urfave/cli/v3 v3.6.2/go.mod h1:ysVLtOEmg2tOy6PknnYVhDoouyC/6N42TMeoMzskhso=
github.com/wneessen/go-mail v0.7.2 h1:xxPnhZ6IZLSgxShebmZ6DPKh1b6OJcoHfzy7UjOkzS8=
github.com/wneessen/go-mail v0.7.2/go.mod h1:+TkW6QP3EVkgTEqHtVmnAE/1MRhmzb8Y9/W3pweuS+k=
go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo=
go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -0,0 +1,141 @@
package daemon
import (
"context"
"errors"
"fmt"
"net"
"strconv"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/i18n"
"github.com/urfave/cli/v3"
)
// CmdBlock builds the "block" CLI command tree with three subcommands:
//   - add:    block an IP address (optional port, seconds, reason flags)
//   - delete: remove an IP address from the block list
//   - clear:  remove every entry from the block list
//
// All user-visible strings are resolved through the i18n catalog.
func CmdBlock() *cli.Command {
	return &cli.Command{
		Name:  "block",
		Usage: i18n.Lang.T("cmd.daemon.block.Usage"),
		Commands: []*cli.Command{
			{
				Name:        "add",
				Usage:       i18n.Lang.T("cmd.daemon.block.add.Usage"),
				Description: i18n.Lang.T("cmd.daemon.block.add.Description"),
				Action:      cmdBlockAdd,
				Flags: []cli.Flag{
					&cli.StringFlag{
						// Optional port the block should apply to.
						Name:  "port",
						Usage: i18n.Lang.T("cmd.daemon.block.add.FlagUsage.port"),
					},
					&cli.Uint32Flag{
						// Block duration in seconds.
						Name:  "seconds",
						Usage: i18n.Lang.T("cmd.daemon.block.add.FlagUsage.seconds"),
					},
					&cli.StringFlag{
						// Free-form reason recorded with the block entry.
						Name:  "reason",
						Usage: i18n.Lang.T("cmd.daemon.block.add.FlagUsage.reason"),
					},
				},
			},
			{
				Name:        "delete",
				Usage:       i18n.Lang.T("cmd.daemon.block.delete.Usage"),
				Description: i18n.Lang.T("cmd.daemon.block.delete.Description"),
				Action:      cmdBlockDelete,
			},
			{
				Name:        "clear",
				Usage:       i18n.Lang.T("cmd.daemon.block.clear.Usage"),
				Description: i18n.Lang.T("cmd.daemon.block.clear.Description"),
				Action:      cmdBlockClear,
			},
		},
	}
}
// cmdBlockAdd handles "block add <ip>": it validates the positional IP
// argument, connects to the daemon control socket and sends a
// "block_add_ip" command carrying the optional port/seconds/reason flags.
func cmdBlockAdd(_ context.Context, cmd *cli.Command) error {
	parsed := net.ParseIP(cmd.Args().Get(0))
	if parsed == nil {
		return errors.New("invalid ip address")
	}

	sock, err := newSocket()
	if err != nil {
		return errors.New(i18n.Lang.T("daemon is not running"))
	}
	defer sock.Close()

	args := map[string]string{
		"ip":      parsed.String(),
		"port":    cmd.String("port"),
		"seconds": strconv.Itoa(int(cmd.Uint32("seconds"))),
		"reason":  cmd.String("reason"),
	}
	response, err := sock.SendCommand("block_add_ip", args)
	if err != nil {
		return err
	}
	if response == "ok" {
		fmt.Println(i18n.Lang.T("block_add_ip_success"))
		return nil
	}
	return errors.New(i18n.Lang.T("cmd.error", map[string]any{
		"Error": response,
	}))
}
// cmdBlockDelete handles "block delete <ip>": it validates the positional
// IP argument and asks the daemon over the control socket to remove that
// address from the block list.
func cmdBlockDelete(_ context.Context, cmd *cli.Command) error {
	parsed := net.ParseIP(cmd.Args().Get(0))
	if parsed == nil {
		return errors.New("invalid ip address")
	}

	sock, err := newSocket()
	if err != nil {
		return errors.New(i18n.Lang.T("daemon is not running"))
	}
	defer sock.Close()

	response, err := sock.SendCommand("block_delete_ip", map[string]string{
		"ip": parsed.String(),
	})
	if err != nil {
		return err
	}
	if response == "ok" {
		fmt.Println(i18n.Lang.T("block_delete_ip_success"))
		return nil
	}
	return errors.New(i18n.Lang.T("cmd.error", map[string]any{
		"Error": response,
	}))
}
// cmdBlockClear handles "block clear": it asks the daemon over the control
// socket to drop every entry from the block list.
func cmdBlockClear(_ context.Context, _ *cli.Command) error {
	sock, err := newSocket()
	if err != nil {
		return errors.New(i18n.Lang.T("daemon is not running"))
	}
	defer sock.Close()

	response, err := sock.Send("block_clear")
	switch {
	case err != nil:
		return err
	case response != "ok":
		return errors.New(i18n.Lang.T("block_clear_error"))
	}

	fmt.Println(i18n.Lang.T("block_clear_success"))
	return nil
}

View File

@@ -0,0 +1,16 @@
package daemon
import (
"errors"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/i18n"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/setting"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/socket"
)
// newSocket opens a client connection to the daemon control socket
// configured in setting.Config.SocketFile. It returns an error when
// no socket path has been configured.
func newSocket() (socket.Client, error) {
	path := setting.Config.SocketFile
	if path == "" {
		return nil, errors.New(i18n.Lang.T("socket file is not specified"))
	}
	return socket.NewSocketClient(path)
}

View File

@@ -0,0 +1,81 @@
package daemon
import (
"context"
"errors"
"fmt"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/i18n"
"github.com/urfave/cli/v3"
)
// CmdNotifications builds the "notifications" CLI command tree:
//   - queue count: print the number of notifications waiting in the queue
//   - queue clear: drop all queued notifications
//
// All user-visible strings are resolved through the i18n catalog.
func CmdNotifications() *cli.Command {
	return &cli.Command{
		Name:  "notifications",
		Usage: i18n.Lang.T("cmd.daemon.notifications.Usage"),
		Commands: []*cli.Command{
			{
				Name:  "queue",
				Usage: i18n.Lang.T("cmd.daemon.notifications.queue.Usage"),
				Commands: []*cli.Command{
					{
						Name:        "count",
						Usage:       i18n.Lang.T("cmd.daemon.notifications.queue.count.Usage"),
						Description: i18n.Lang.T("cmd.daemon.notifications.queue.count.Description"),
						Action:      cmdNotificationsQueueCount,
					},
					{
						Name:        "clear",
						Usage:       i18n.Lang.T("cmd.daemon.notifications.queue.clear.Usage"),
						Description: i18n.Lang.T("cmd.daemon.notifications.queue.clear.Description"),
						Action:      cmdNotificationsQueueClear,
					},
				},
			},
		},
	}
}
// cmdNotificationsQueueCount handles "notifications queue count": it asks the
// daemon over the control socket for the number of queued notifications and
// prints the localized result.
func cmdNotificationsQueueCount(_ context.Context, _ *cli.Command) error {
	sock, err := newSocket()
	if err != nil {
		return errors.New(i18n.Lang.T("daemon is not running"))
	}
	defer func() {
		_ = sock.Close()
	}()

	result, err := sock.Send("notifications_queue_count")
	if err != nil {
		return err
	}

	// map[string]any instead of map[string]interface{} for consistency with
	// the other daemon commands (e.g. the block commands) and Go 1.18+ style.
	fmt.Println(i18n.Lang.T("cmd.daemon.notifications.queue.count.result", map[string]any{
		"Count": result,
	}))
	return nil
}
// cmdNotificationsQueueClear handles "notifications queue clear": it asks the
// daemon over the control socket to drop every queued notification.
func cmdNotificationsQueueClear(_ context.Context, _ *cli.Command) error {
	sock, err := newSocket()
	if err != nil {
		return errors.New(i18n.Lang.T("daemon is not running"))
	}
	defer sock.Close()

	response, err := sock.Send("notifications_queue_clear")
	if err != nil {
		return err
	}
	if response == "ok" {
		fmt.Println(i18n.Lang.T("notifications_queue_clear_success"))
		return nil
	}
	return errors.New(i18n.Lang.T("notifications_queue_clear_error"))
}

View File

@@ -6,8 +6,6 @@ import (
"fmt"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/i18n"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/setting"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/socket"
"github.com/urfave/cli/v3"
)
@@ -21,11 +19,7 @@ func CmdReopenLogger() *cli.Command {
}
func cmdReopenLogger(_ context.Context, _ *cli.Command) error {
if setting.Config.SocketFile == "" {
return errors.New(i18n.Lang.T("socket file is not specified"))
}
sock, err := socket.NewSocketClient(setting.Config.SocketFile)
sock, err := newSocket()
if err != nil {
return errors.New(i18n.Lang.T("daemon is not running"))
}

View File

@@ -2,8 +2,13 @@ package daemon
import (
"context"
"fmt"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/db"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/db/repository"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/docker_monitor"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/notifications"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/i18n"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/setting"
@@ -34,7 +39,12 @@ func runDaemon(ctx context.Context, _ *cli.Command) error {
_ = logger.Sync()
}()
config, err := setting.Config.ToDaemonOptions()
dockerService, dockerSupport, err := newDockerService(ctx, logger)
if err != nil {
logger.Error(fmt.Sprintf("Failed to create docker service: %s", err))
}
config, err := setting.Config.ToDaemonOptions(dockerSupport)
if err != nil {
logger.Fatal(err.Error())
@@ -43,7 +53,29 @@ func runDaemon(ctx context.Context, _ *cli.Command) error {
return err
}
d, err := daemon.NewDaemon(config, logger)
repositories, err := db.New(config.DataDir)
if err != nil {
logger.Fatal(err.Error())
// Fatal should call os.Exit(1), but there's a chance that might not happen,
// so we return err just in case.
return err
}
defer func() {
_ = repositories.Close()
}()
config.Repositories = repositories
notificationsService, err := newNotificationsService(repositories.NotificationsQueue(), logger)
if err != nil {
logger.Fatal(err.Error())
// Fatal should call os.Exit(1), but there's a chance that might not happen,
// so we return err just in case.
return err
}
d, err := daemon.NewDaemon(config, logger, notificationsService, dockerService)
if err != nil {
logger.Fatal(err.Error())
@@ -63,3 +95,31 @@ func runDaemon(ctx context.Context, _ *cli.Command) error {
return nil
}
// newNotificationsService loads the notification settings referenced by the
// main configuration and wires them into a notifications service backed by
// the persistent queue repository.
func newNotificationsService(queueRepository repository.NotificationsQueueRepository, logger log.Logger) (notifications.Notifications, error) {
	config, err := setting.Config.OtherSettingsPath.ToNotificationsConfig()
	if err != nil {
		return nil, err
	}
	return notifications.New(config, queueRepository, logger), nil
}
// newDockerService builds the docker monitoring service from the docker
// settings file. When docker support is disabled, or loading the settings
// or creating the monitor fails, a no-op implementation is returned so
// callers never receive a nil service.
func newDockerService(ctx context.Context, logger log.Logger) (dockerService docker_monitor.Docker, dockerSupport bool, err error) {
	config, dockerSupport, err := setting.Config.OtherSettingsPath.ToDockerConfig(setting.Config.BinaryLocations)
	if err != nil {
		return docker_monitor.NewDockerNotSupport(), false, err
	}
	if !dockerSupport {
		// Docker is switched off in the settings: return the stub service.
		dockerService = docker_monitor.NewDockerNotSupport()
		return dockerService, false, nil
	}
	dockerService, err = docker_monitor.New(&config, ctx, logger)
	if err != nil {
		return docker_monitor.NewDockerNotSupport(), false, err
	}
	return dockerService, dockerSupport, nil
}

View File

@@ -6,8 +6,6 @@ import (
"fmt"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/i18n"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/setting"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/socket"
"github.com/urfave/cli/v3"
)
@@ -21,11 +19,7 @@ func CmdStatus() *cli.Command {
}
func cmdStatus(_ context.Context, _ *cli.Command) error {
if setting.Config.SocketFile == "" {
return errors.New(i18n.Lang.T("socket file is not specified"))
}
sock, err := socket.NewSocketClient(setting.Config.SocketFile)
sock, err := newSocket()
if err != nil {
return errors.New(i18n.Lang.T("daemon is not running"))
}

View File

@@ -6,8 +6,6 @@ import (
"fmt"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/i18n"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/setting"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/socket"
"github.com/urfave/cli/v3"
)
@@ -21,11 +19,7 @@ func CmdStop() *cli.Command {
}
func stopDaemon(_ context.Context, _ *cli.Command) error {
if setting.Config.SocketFile == "" {
return errors.New(i18n.Lang.T("socket file is not specified"))
}
sock, err := socket.NewSocketClient(setting.Config.SocketFile)
sock, err := newSocket()
if err != nil {
return errors.New(i18n.Lang.T("daemon is not running"))
}

View File

@@ -38,6 +38,8 @@ func NewMainApp(appVer AppVersion, defaultConfigPath string) *cli.Command {
daemon.CmdStop(),
daemon.CmdStatus(),
daemon.CmdReopenLogger(),
daemon.CmdNotifications(),
daemon.CmdBlock(),
}
return app

View File

@@ -0,0 +1,136 @@
package analyzer
import (
"context"
"fmt"
config2 "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/config"
analyzerLog "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/log"
analysisServices "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/log/analysis"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/log/analysis/brute_force_protection_group"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/db"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/notifications"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
)
type Analyzer interface {
Run(ctx context.Context)
ClearDBData() error
Close() error
}
type analyzer struct {
config config2.Config
logger log.Logger
notify notifications.Notifications
systemd analyzerLog.Systemd
files analyzerLog.FileMonitoring
analysis analyzerLog.Analysis
logChan chan analysisServices.Entry
}
// New assembles the analyzer from the parsed configuration. It de-duplicates
// journalctl matches and file paths across all sources, indexes every
// source's rules, and constructs the systemd/file log readers plus the
// analysis pipeline that consumes their output.
func New(config config2.Config, blockService brute_force_protection_group.BlockService, repositories db.Repositories, logger log.Logger, notify notifications.Notifications) Analyzer {
	var journalMatches []string
	journalMatchesUniq := map[string]struct{}{}
	var files []string
	filesUniq := map[string]struct{}{}
	rulesIndex := analysisServices.NewRulesIndex()
	for _, source := range config.Sources {
		switch source.Type {
		case config2.SourceTypeJournal:
			// Collect each unique journalctl match expression only once.
			match := source.Journal.JournalctlMatch()
			if _, ok := journalMatchesUniq[match]; !ok {
				journalMatchesUniq[match] = struct{}{}
				journalMatches = append(journalMatches, match)
			}
		case config2.SourceTypeFile:
			// Collect each unique file path only once.
			file := source.File.Path
			if _, ok := filesUniq[file]; !ok {
				filesUniq[file] = struct{}{}
				files = append(files, file)
			}
		default:
			// Unknown source types are logged and skipped; their rules are
			// not added to the index.
			logger.Error(fmt.Sprintf("Unknown source type: %s", source.Type))
			continue
		}
		err := rulesIndex.Add(source)
		if err != nil {
			// A bad rule is logged but does not abort construction.
			logger.Error(fmt.Sprintf("Failed to add rule: %s", err))
		}
	}
	systemdService := analyzerLog.NewSystemd(config.BinPath.Journalctl, journalMatches, logger)
	filesService := analyzerLog.NewFileMonitoring(files, logger)
	analysisService := analyzerLog.NewAnalysis(rulesIndex, blockService, repositories, logger, notify)
	return &analyzer{
		config:   config,
		logger:   logger,
		notify:   notify,
		systemd:  systemdService,
		files:    filesService,
		analysis: analysisService,
		// Buffered so bursts of log lines do not immediately block producers.
		logChan: make(chan analysisServices.Entry, 1000),
	}
}
// Run starts the analyzer's background goroutines: one consumer that drains
// logChan (processLogs), plus the journalctl and file readers that produce
// entries into it. processLogs returns when ctx is cancelled; the readers
// also receive ctx and are expected to stop on cancellation — confirm in
// their implementations.
func (a *analyzer) Run(ctx context.Context) {
	go a.processLogs(ctx)
	go a.systemd.Run(ctx, a.logChan)
	go a.files.Run(ctx, a.logChan)
	a.logger.Debug("Analyzer is start")
}
// ClearDBData asks the analysis pipeline to wipe its persisted data.
// When the wipe fails, every detailed error is logged and the aggregate
// error is returned to the caller.
func (a *analyzer) ClearDBData() error {
	a.logger.Debug("Clear data")
	detailErrs, err := a.analysis.ClearDBData()
	if err == nil {
		return nil
	}
	for _, detail := range detailErrs {
		a.logger.Error(detail.Error())
	}
	return err
}
// processLogs drains logChan, feeding every entry to the brute-force
// protection and alert analyses. It returns when ctx is cancelled or
// when the channel is closed.
func (a *analyzer) processLogs(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case e, open := <-a.logChan:
			if !open {
				// Channel closed
				return
			}
			a.logger.Debug(fmt.Sprintf("Received log entry: %v", e))
			a.analysis.BruteForceProtection(&e)
			a.analysis.Alert(&e)
		}
	}
}
// Close shuts down the journalctl and file readers and then closes the
// entry channel. Reader errors are logged rather than returned; Close
// itself always returns nil.
//
// NOTE(review): logChan is closed here while the systemd/files Run
// goroutines may still be sending to it — if Close can run before those
// producers have fully stopped, a send on the closed channel would panic.
// Confirm the shutdown ordering guarantees in Systemd.Close/FileMonitoring.Close.
func (a *analyzer) Close() error {
	if err := a.systemd.Close(); err != nil {
		a.logger.Error(err.Error())
	}
	if err := a.files.Close(); err != nil {
		a.logger.Error(err.Error())
	}
	close(a.logChan)
	a.logger.Debug("Analyzer is stop")
	return nil
}

View File

@@ -0,0 +1,31 @@
package config
import "fmt"
// RateLimit pairs an event count with a period, both unsigned 32-bit
// values. (Units are presumably events per Period seconds — TODO confirm
// against the consumers of this config.)
type RateLimit struct {
	Count  uint32
	Period uint32
}

// AlertGroup names a group of alerts and carries its escalation ladder of
// rate limits plus the period after which the ladder resets.
type AlertGroup struct {
	Name                 string
	Message              string
	RateLimits           []RateLimit
	RateLimitResetPeriod uint32
}

// RateLimit returns the rate limit for the given escalation level.
// Levels beyond the configured ladder are clamped to the last entry;
// an empty ladder yields an error.
func (g *AlertGroup) RateLimit(level uint64) (RateLimit, error) {
	if len(g.RateLimits) == 0 {
		return RateLimit{}, fmt.Errorf("rate limits is empty")
	}
	last := uint64(len(g.RateLimits) - 1)
	if level > last {
		level = last
	}
	return g.RateLimits[level], nil
}

View File

@@ -0,0 +1,5 @@
package config

// BinPath holds absolute paths to the external binaries the analyzer
// shells out to.
type BinPath struct {
	// Journalctl is the path to the journalctl binary used to read journald.
	Journalctl string
}

View File

@@ -0,0 +1,45 @@
package config
import (
"fmt"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/config/brute_force_protection"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/i18n"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/pkg/regular_expression"
)
// NewBruteForceProtectionSSH builds the journald-based source that detects
// failed SSH password attempts (unit ssh.service) and attaches them to the
// given protection group. isNotify controls notifications on block.
func NewBruteForceProtectionSSH(isNotify bool, group *brute_force_protection.Group) ([]*Source, error) {
	journal, err := NewSourceJournal(JournalFieldSystemdUnit, "ssh.service")
	if err != nil {
		return nil, fmt.Errorf("failed to create journal source for SSH: %w", err)
	}
	rule := &brute_force_protection.Rule{
		Name:           "_ssh",
		Message:        i18n.Lang.T("alert.bruteForceProtection.ssh.message"),
		IsNotification: isNotify,
		Patterns: []brute_force_protection.RegexPattern{
			{
				// Capture group 1: user name, group 2: source IP.
				Regexp: regular_expression.NewLazyRegexp(`^Failed password for (\S+) from (\S+) port \S+`),
				Values: []brute_force_protection.PatternValue{
					{Name: i18n.Lang.T("user"), Value: 1},
				},
				IP: 2,
			},
		},
		Group: group,
	}
	return []*Source{{Type: SourceTypeJournal, Journal: journal, BruteForceProtectionRule: rule}}, nil
}

View File

@@ -0,0 +1,34 @@
package brute_force_protection
import "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/types"
// Block describes how a banned IP must be blocked: PortsBlocked reports
// whether blocking is restricted to specific L4 ports and, if so, which.
type Block interface {
	PortsBlocked() (bool, []types.L4Port)
}
// block is the concrete Block configuration shared by both constructors.
type block struct {
	shouldPortsBlocked bool
	ports              []types.L4Port
}

// NewBlockOnceIPConfig returns a configuration that blocks the IP address
// outright, with no per-port restriction.
func NewBlockOnceIPConfig() Block {
	return &block{shouldPortsBlocked: false, ports: nil}
}

// NewBlockIPAndPortsConfig returns a configuration that blocks the IP only
// on the given L4 ports.
func NewBlockIPAndPortsConfig(ports []types.L4Port) Block {
	return &block{shouldPortsBlocked: true, ports: ports}
}

// PortsBlocked reports whether port-restricted blocking applies; the port
// list is returned only in that case.
func (b *block) PortsBlocked() (bool, []types.L4Port) {
	if b.shouldPortsBlocked {
		return true, b.ports
	}
	return false, nil
}

View File

@@ -0,0 +1,29 @@
package brute_force_protection
import "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/pkg/regular_expression"
// Rule is one brute-force-protection rule: a named set of regex patterns
// that feed trigger counts into a shared Group.
type Rule struct {
	Name           string
	Message        string
	IsNotification bool
	Patterns       []RegexPattern
	Group          *Group
}

// RegexPattern matches one log line shape. Values lists capture groups to
// extract for notification text; IP is the capture-group number holding the
// offender's IP address.
type RegexPattern struct {
	Regexp *regular_expression.LazyRegexp
	Values []PatternValue
	IP     uint8
}

// RateLimit is one escalation step: after Count triggers within Period
// (seconds) the IP is blocked for BlockingTimeSeconds, using BlockConfig to
// decide between whole-IP and per-port blocking.
type RateLimit struct {
	Count               uint32
	Period              uint32
	BlockingTimeSeconds uint32
	BlockConfig         Block
}

// PatternValue names a regex capture group (Value is the group number).
type PatternValue struct {
	Name  string
	Value uint8
}

View File

@@ -0,0 +1,26 @@
package brute_force_protection
import "fmt"
// Group ties several brute-force rules to one escalation ladder of rate
// limits shared across those rules.
type Group struct {
	Name                 string
	Message              string
	RateLimits           []RateLimit
	RateLimitResetPeriod uint32
}

// RateLimit returns the limit for the given escalation level, clamping any
// level beyond the ladder to its last entry. It errors only when the
// ladder is empty.
func (g *Group) RateLimit(level uint64) (rateLimit RateLimit, err error) {
	last := len(g.RateLimits) - 1
	if last < 0 {
		return RateLimit{}, fmt.Errorf("rate limits is empty")
	}
	// pick = min(level, last); compare in uint64 so huge levels stay safe.
	pick := uint64(last)
	if level < pick {
		pick = level
	}
	return g.RateLimits[pick], nil
}

View File

@@ -0,0 +1,122 @@
package config
import (
"fmt"
"regexp"
"strings"
"unicode"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/config/brute_force_protection"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/pkg/regular_expression"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/setting/validate"
)
var (
	// reSystemdUnitValue validates a systemd unit name with an explicit
	// unit-type suffix (e.g. "ssh.service").
	reSystemdUnitValue = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9._@-]{0,255}\.(service|socket|target|mount|timer|path|scope|slice|device)$`)
	// reSyslogIDValue validates a syslog identifier (e.g. "sudo").
	reSyslogIDValue = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9._@-]{0,127}$`)
)

// SourceType selects how a Source's log stream is read.
type SourceType string

const (
	SourceTypeJournal SourceType = "journalctl"
	SourceTypeFile    SourceType = "file"
)

// JournalField is the journald field a journal source matches on.
type JournalField string

const (
	JournalFieldSystemdUnit      JournalField = "_SYSTEMD_UNIT"
	JournalFieldSyslogIdentifier JournalField = "SYSLOG_IDENTIFIER"
)

// Config is the analyzer configuration: binary paths plus all log sources.
type Config struct {
	BinPath BinPath
	Sources []*Source
}

// SourceJournal describes one journald FIELD=VALUE match.
type SourceJournal struct {
	Field JournalField
	Match string
}
// NewSourceJournal validates a journald match value for the given field and
// returns a SourceJournal. The value is trimmed, then rejected if empty,
// over 512 bytes, containing control characters, '=', '+', spaces or tabs,
// or not matching the per-field format.
func NewSourceJournal(field JournalField, match string) (*SourceJournal, error) {
	value := strings.TrimSpace(match)
	if value == "" {
		return nil, fmt.Errorf("journal match must not be empty")
	}
	if len(value) > 512 {
		return nil, fmt.Errorf("journal match is too long: %d", len(value))
	}
	hasControl := strings.IndexFunc(value, func(r rune) bool {
		return r == 0 || r == '\n' || r == '\r' || unicode.IsControl(r)
	}) >= 0
	if hasControl {
		return nil, fmt.Errorf("journal match contains control characters")
	}
	// to avoid breaking the FIELD=VALUE format and concatenation with '+'
	if strings.ContainsAny(value, "=+") {
		return nil, fmt.Errorf("journal match must not contain '=' or '+'")
	}
	if strings.ContainsAny(value, " \t") {
		return nil, fmt.Errorf("journal match must not contain spaces or tabs")
	}
	switch field {
	case JournalFieldSystemdUnit:
		if !reSystemdUnitValue.MatchString(value) {
			return nil, fmt.Errorf("invalid _SYSTEMD_UNIT value: %q", value)
		}
	case JournalFieldSyslogIdentifier:
		if !reSyslogIDValue.MatchString(value) {
			return nil, fmt.Errorf("invalid SYSLOG_IDENTIFIER value: %q", value)
		}
	default:
		return nil, fmt.Errorf("invalid journal field: %q", field)
	}
	return &SourceJournal{Field: field, Match: value}, nil
}
// SourceFile identifies a plain log file to be tailed.
type SourceFile struct {
	Path string
}

// NewSourceFile validates path and wraps it in a SourceFile.
func NewSourceFile(path string) (*SourceFile, error) {
	err := validate.PathFile(path, "logAlert.rules.source.path")
	if err != nil {
		return nil, err
	}
	return &SourceFile{Path: path}, nil
}

// JournalctlMatch renders the source as a journalctl FIELD=VALUE argument.
func (s *SourceJournal) JournalctlMatch() string {
	matchArg := string(s.Field) + "=" + s.Match
	return matchArg
}
// Source is one configured log source. Exactly one of Journal/File is set
// according to Type; at least one of the two rule fields should be set.
type Source struct {
	Type                     SourceType
	Journal                  *SourceJournal
	File                     *SourceFile
	AlertRule                *AlertRule
	BruteForceProtectionRule *brute_force_protection.Rule
}

// AlertRule is a named set of regex patterns that raise an alert, optionally
// rate-limited through a shared AlertGroup (nil means alert immediately).
type AlertRule struct {
	Name           string
	Message        string
	IsNotification bool
	Patterns       []AlertRegexPattern
	Group          *AlertGroup
}

// AlertRegexPattern matches one log line shape; Values lists capture groups
// to extract for the notification text.
type AlertRegexPattern struct {
	Regexp *regular_expression.LazyRegexp
	Values []PatternValue
}

// PatternValue names a regex capture group (Value is the group number).
type PatternValue struct {
	Name  string
	Value uint8
}

View File

@@ -0,0 +1,160 @@
package config
import (
"fmt"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/i18n"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/pkg/regular_expression"
)
// NewLoginSSH builds the journald source that alerts on every successful
// SSH login (unit ssh.service). isNotify controls notifications.
func NewLoginSSH(isNotify bool) ([]*Source, error) {
	journal, err := NewSourceJournal(JournalFieldSystemdUnit, "ssh.service")
	if err != nil {
		return nil, fmt.Errorf("failed to create journal source for SSH login: %w", err)
	}
	rule := &AlertRule{
		Name:           "_login-ssh",
		Message:        i18n.Lang.T("alert.login.ssh.message"),
		IsNotification: isNotify,
		Patterns: []AlertRegexPattern{
			{
				// Group 2: user, group 3: source IP.
				Regexp: regular_expression.NewLazyRegexp(`^Accepted (\S+) for (\S+) from (\S+) port \S+`),
				Values: []PatternValue{
					{Name: i18n.Lang.T("user"), Value: 2},
					{Name: "IP", Value: 3},
				},
			},
		},
		Group: nil,
	}
	return []*Source{{Type: SourceTypeJournal, Journal: journal, AlertRule: rule}}, nil
}
// NewLoginLocal builds the journald source that alerts on local console
// logins (syslog identifier "login"). isNotify controls notifications.
func NewLoginLocal(isNotify bool) ([]*Source, error) {
	journal, err := NewSourceJournal(JournalFieldSyslogIdentifier, "login")
	if err != nil {
		return nil, fmt.Errorf("failed to create journal source for local login: %w", err)
	}
	rule := &AlertRule{
		Name:           "_login-local",
		Message:        i18n.Lang.T("alert.login.local.message"),
		IsNotification: isNotify,
		Patterns: []AlertRegexPattern{
			{
				// Group 1: user whose session was opened.
				Regexp: regular_expression.NewLazyRegexp(`^pam_unix\(login:session\): session opened for user (\S+)\(\S+\) by \S+`),
				Values: []PatternValue{
					{Name: i18n.Lang.T("user"), Value: 1},
				},
			},
		},
		Group: nil,
	}
	return []*Source{{Type: SourceTypeJournal, Journal: journal, AlertRule: rule}}, nil
}
// NewLoginSu builds the journald source that alerts on su sessions (syslog
// identifier "su"). isNotify controls notifications.
func NewLoginSu(isNotify bool) ([]*Source, error) {
	journal, err := NewSourceJournal(JournalFieldSyslogIdentifier, "su")
	if err != nil {
		return nil, fmt.Errorf("failed to create journal source for su login: %w", err)
	}
	rule := &AlertRule{
		Name:           "_login-su",
		Message:        i18n.Lang.T("alert.login.su.message"),
		IsNotification: isNotify,
		Patterns: []AlertRegexPattern{
			{
				// Group 1: target user, group 2: invoking user.
				Regexp: regular_expression.NewLazyRegexp(`^pam_unix\(su:session\): session opened for user (\S+)\(\S+\) by (\S+)\(\S+\)`),
				Values: []PatternValue{
					{Name: i18n.Lang.T("user"), Value: 2},
					{Name: i18n.Lang.T("access to user has been gained"), Value: 1},
				},
			},
		},
		Group: nil,
	}
	return []*Source{{Type: SourceTypeJournal, Journal: journal, AlertRule: rule}}, nil
}
// NewLoginSudo builds the journald source that alerts on sudo sessions
// (syslog identifier "sudo"). isNotify controls notifications.
func NewLoginSudo(isNotify bool) ([]*Source, error) {
	journal, err := NewSourceJournal(JournalFieldSyslogIdentifier, "sudo")
	if err != nil {
		return nil, fmt.Errorf("failed to create journal source for sudo login: %w", err)
	}
	rule := &AlertRule{
		Name:           "_login-sudo",
		Message:        i18n.Lang.T("alert.login.sudo.message"),
		IsNotification: isNotify,
		Patterns: []AlertRegexPattern{
			{
				// Group 1: target user, group 2: invoking user.
				Regexp: regular_expression.NewLazyRegexp(`^pam_unix\(sudo:session\): session opened for user (\S+)\(\S+\) by (\S+)\(\S+\)`),
				Values: []PatternValue{
					{Name: i18n.Lang.T("user"), Value: 2},
					{Name: i18n.Lang.T("access to user has been gained"), Value: 1},
				},
			},
		},
		Group: nil,
	}
	return []*Source{{Type: SourceTypeJournal, Journal: journal, AlertRule: rule}}, nil
}

View File

@@ -0,0 +1,57 @@
package log
import (
"fmt"
analysisServices "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/log/analysis"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/log/analysis/alert_group"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/log/analysis/brute_force_protection_group"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/db"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/notifications"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
)
// Analysis is the facade over the alerting and brute-force-protection
// analysis services.
type Analysis interface {
	Alert(entry *analysisServices.Entry)
	BruteForceProtection(entry *analysisServices.Entry)
	ClearDBData() ([]error, error)
}

// analysis is the default Analysis implementation.
type analysis struct {
	alertService                analysisServices.Alert
	bruteForceProtectionService analysisServices.BruteForceProtection
}
// NewAnalysis wires the group services to their repositories and builds the
// two analysis services behind the Analysis facade.
func NewAnalysis(rulesIndex *analysisServices.RulesIndex, blockService brute_force_protection_group.BlockService, repositories db.Repositories, logger log.Logger, notify notifications.Notifications) Analysis {
	alertGroups := alert_group.NewGroup(repositories.AlertGroup(), logger)
	bfpGroups := brute_force_protection_group.NewGroup(repositories.BruteForceProtectionGroup(), logger)
	alertSvc := analysisServices.NewAlert(rulesIndex, alertGroups, logger, notify)
	bfpSvc := analysisServices.NewBruteForceProtection(rulesIndex, bfpGroups, blockService, logger, notify)
	return &analysis{
		alertService:                alertSvc,
		bruteForceProtectionService: bfpSvc,
	}
}
// Alert runs alert-rule analysis for one log entry.
func (a *analysis) Alert(entry *analysisServices.Entry) {
	a.alertService.Analyze(entry)
}

// BruteForceProtection runs brute-force-rule analysis for one log entry.
func (a *analysis) BruteForceProtection(entry *analysisServices.Entry) {
	a.bruteForceProtectionService.Analyze(entry)
}
// ClearDBData clears the persisted state of both analysis services. On
// failure it returns the individual errors collected from each service
// alongside a combined error, so callers can log every failure.
func (a *analysis) ClearDBData() ([]error, error) {
	var errClearDB []error
	if err := a.alertService.ClearDBData(); err != nil {
		errClearDB = append(errClearDB, err)
	}
	if err := a.bruteForceProtectionService.ClearDBData(); err != nil {
		errClearDB = append(errClearDB, err)
	}
	if len(errClearDB) > 0 {
		// Bug fix: previously returned nil for the slice, which made the
		// caller's per-error logging loop a silent no-op.
		return errClearDB, fmt.Errorf("failed to clear database data: %v", errClearDB)
	}
	return nil, nil
}

View File

@@ -0,0 +1,155 @@
package analysis
import (
"fmt"
"time"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/config"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/log/analysis/alert_group"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/notifications"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/i18n"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
)
// Alert analyzes log entries against alert rules and sends notifications.
type Alert interface {
	Analyze(entry *Entry)
	ClearDBData() error
}

// alert is the default Alert implementation.
type alert struct {
	rulesIndex        *RulesIndex
	alertGroupService alert_group.Group
	logger            log.Logger
	notify            notifications.Notifications
}

// alertAnalyzeRuleReturn is the outcome of matching one rule's patterns.
type alertAnalyzeRuleReturn struct {
	found  bool
	fields []*regexField
}

// alertNotify carries everything needed to compose one alert notification.
type alertNotify struct {
	rule     *config.AlertRule
	messages []string
	time     time.Time
	fields   []*regexField
}
// NewAlert builds the alert analysis service.
func NewAlert(rulesIndex *RulesIndex, alertGroupService alert_group.Group, logger log.Logger, notify notifications.Notifications) Alert {
	svc := &alert{
		rulesIndex:        rulesIndex,
		alertGroupService: alertGroupService,
		logger:            logger,
		notify:            notify,
	}
	return svc
}
// Analyze matches the entry against every alert rule registered for its
// source. A matching rule fires immediately when it has no group; grouped
// rules go through the group's rate limiting and only notify once the
// group reports Alerted, attaching the group's buffered log lines.
func (a *alert) Analyze(entry *Entry) {
	rules, err := a.rulesIndex.Alerts(entry)
	if err != nil {
		// Log and fall through: rules may still hold usable entries.
		a.logger.Error(fmt.Sprintf("Failed to get alert rules: %s", err))
	}
	for _, rule := range rules {
		result := a.analyzeRule(rule, entry.Message)
		if !result.found {
			continue
		}
		groupName := ""
		messages := []string{}
		if rule.Group != nil {
			alertGroup, err := a.alertGroupService.Analyze(rule.Group, entry.Time, entry.Message)
			if err != nil {
				a.logger.Error(fmt.Sprintf("Failed to analyze alert group: %s", err))
				continue
			}
			if !alertGroup.Alerted {
				// Group threshold not reached yet; nothing to send.
				continue
			}
			groupName = rule.Group.Name
			for _, lastLog := range alertGroup.LastLogs {
				messages = append(messages, lastLog)
			}
		} else {
			messages = append(messages, entry.Message)
		}
		a.logger.Info(fmt.Sprintf("Alert detected (%s) (group:%s): %s", rule.Name, groupName, entry.Message))
		a.sendNotify(&alertNotify{
			rule:     rule,
			messages: messages,
			time:     entry.Time,
			fields:   result.fields,
		})
	}
}
// ClearDBData clears the persisted alert-group state.
func (a *alert) ClearDBData() error {
	return a.alertGroupService.ClearDBData()
}
// analyzeRule tries each of the rule's patterns against message and, on the
// first match, extracts the configured capture-group values as fields
// (falling back to a localized "unknown" for groups that cannot be
// resolved). Returns found=false when no pattern matches.
func (a *alert) analyzeRule(rule *config.AlertRule, message string) alertAnalyzeRuleReturn {
	result := alertAnalyzeRuleReturn{
		found:  false,
		fields: []*regexField{},
	}
	for _, pattern := range rule.Patterns {
		re, err := pattern.Regexp.Get()
		if err != nil {
			a.logger.Error(fmt.Sprintf("Failed to compile regexp: %s", err))
			continue
		}
		idx := re.FindStringSubmatchIndex(message)
		if idx != nil {
			for _, value := range pattern.Values {
				start, end, err := getValueStartEndByRegexIndex(int(value.Value), idx)
				if err != nil {
					result.fields = append(result.fields, &regexField{name: value.Name, value: i18n.Lang.T("unknown")})
					continue
				}
				result.fields = append(result.fields, &regexField{name: value.Name, value: message[start:end]})
			}
			// NOTE(review): this mismatch check looks unreachable — the loop
			// above appends exactly one field per value (real or "unknown")
			// and the function returns on the first matched pattern, so
			// result.fields is empty when it runs. Presumably defensive;
			// confirm before removing.
			if len(pattern.Values) != len(result.fields) {
				continue
			}
			result.found = true
			return result
		}
	}
	return result
}
// sendNotify composes and asynchronously sends the alert notification.
// The body is: subject, optional group message, rule message, event time,
// the extracted fields, then the collected log lines. Sending is skipped
// when the rule's notifications are disabled.
func (a *alert) sendNotify(notify *alertNotify) {
	if !notify.rule.IsNotification {
		return
	}
	groupName := ""
	groupMessage := ""
	if notify.rule.Group != nil {
		groupName = notify.rule.Group.Name
		groupMessage = notify.rule.Group.Message + "\n\n"
	}
	subject := i18n.Lang.T("alert.subject", map[string]any{
		"Name":      notify.rule.Name,
		"GroupName": groupName,
	})
	text := subject + "\n\n" + groupMessage + notify.rule.Message + "\n\n"
	text += i18n.Lang.T("time", map[string]any{
		"Time": notify.time,
	}) + "\n"
	for _, field := range notify.fields {
		text += fmt.Sprintf("%s: %s\n", field.name, field.value)
	}
	text += "\n" + i18n.Lang.T("log") + "\n"
	for _, message := range notify.messages {
		text += message + "\n"
	}
	a.notify.SendAsync(notifications.Message{Subject: subject, Body: text})
}

View File

@@ -0,0 +1,108 @@
package alert_group
import (
"fmt"
"time"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/config"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/db/entity"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/db/repository"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/pkg/time_operation"
)
// Group applies an alert group's rate-limit ladder to incoming events.
type Group interface {
	Analyze(alertGroup *config.AlertGroup, eventTime time.Time, message string) (AnalysisResult, error)
	ClearDBData() error
}

// group is the default Group implementation, backed by a repository.
type group struct {
	alertGroupRepository repository.AlertGroupRepository
	logger               log.Logger
}

// AnalysisResult reports whether the group reached its threshold and, if
// so, the buffered log lines to attach to the notification.
type AnalysisResult struct {
	Alerted  bool
	LastLogs []string
}
// NewGroup builds the alert-group rate-limiting service.
func NewGroup(alertGroupRepository repository.AlertGroupRepository, logger log.Logger) Group {
	svc := &group{
		alertGroupRepository: alertGroupRepository,
		logger:               logger,
	}
	return svc
}
// Analyze runs one event through the group's persisted rate-limit state.
// The repository Update callback mutates the stored entity and also writes
// the outcome into the captured analysisResult variable, which is returned
// once the update commits.
func (g *group) Analyze(alertGroup *config.AlertGroup, eventTime time.Time, message string) (AnalysisResult, error) {
	analysisResult := AnalysisResult{
		Alerted: false,
	}
	g.logger.Debug(fmt.Sprintf("Analyzing alert group %s", alertGroup.Name))
	err := g.alertGroupRepository.Update(alertGroup.Name, func(entityAlertGroup *entity.AlertGroup) (*entity.AlertGroup, error) {
		// Pick the rate limit for the current escalation level.
		rateLimit, err := alertGroup.RateLimit(entityAlertGroup.CurrentLevelTriggerCount)
		if err != nil {
			return entityAlertGroup, err
		}
		if time_operation.IsRateLimited(entityAlertGroup.LastTriggeredAtUnix, eventTime, int64(rateLimit.Period)) {
			g.logger.Debug(fmt.Sprintf("Alert group %s is rate limited", alertGroup.Name))
			analysisResult, entityAlertGroup = g.analysisResult(rateLimit, eventTime, message, entityAlertGroup)
			return entityAlertGroup, nil
		}
		entityAlertGroup.TriggerCount = 0
		if time_operation.IsReset(entityAlertGroup.LastTriggeredAtUnix, eventTime, int64(alertGroup.RateLimitResetPeriod)) {
			g.logger.Debug(fmt.Sprintf("Alert group %s is reset", alertGroup.Name))
			entityAlertGroup.Reset()
			rateLimit, err = alertGroup.RateLimit(0)
			if err != nil {
				return entityAlertGroup, err
			}
		}
		// Fix: constant message — no fmt.Sprintf needed (staticcheck S1039).
		g.logger.Debug("Alert not rate limited")
		analysisResult, entityAlertGroup = g.analysisResult(rateLimit, eventTime, message, entityAlertGroup)
		return entityAlertGroup, nil
	})
	if err != nil {
		return AnalysisResult{
			Alerted: false,
		}, err
	}
	return analysisResult, nil
}
// ClearDBData removes all persisted alert-group state.
func (g *group) ClearDBData() error {
	return g.alertGroupRepository.Clear()
}
// analysisResult records one trigger on the entity and decides whether the
// group's current rate-limit count has been reached. When it has, the
// buffered logs are handed to the caller, the escalation level advances and
// the per-level counters are reset.
func (g *group) analysisResult(rateLimit config.RateLimit, eventTime time.Time, message string, entityAlertGroup *entity.AlertGroup) (AnalysisResult, *entity.AlertGroup) {
	analysisResult := AnalysisResult{
		Alerted: false,
	}
	entityAlertGroup.LastTriggeredAtUnix = eventTime.Unix()
	entityAlertGroup.TriggerCount++
	entityAlertGroup.LastLogs = append(entityAlertGroup.LastLogs, fmt.Sprintf("event time: %s, message: %s", eventTime.Format(time.RFC3339), message))
	g.logger.Debug(fmt.Sprintf("Alert triggered. Count: %d", entityAlertGroup.TriggerCount))
	if entityAlertGroup.TriggerCount >= uint64(rateLimit.Count) {
		// Fix: constant messages — drop the no-arg fmt.Sprintf calls
		// (flagged by go vet / staticcheck S1039).
		g.logger.Debug("Alert reached rate limit")
		analysisResult.LastLogs = entityAlertGroup.LastLogs
		analysisResult.Alerted = true
		entityAlertGroup.CurrentLevelTriggerCount++
		entityAlertGroup.TriggerCount = 0
		entityAlertGroup.LastLogs = []string{}
	} else {
		g.logger.Debug("Alert not reached rate limit")
	}
	return analysisResult, entityAlertGroup
}

View File

@@ -0,0 +1,40 @@
package analysis
import (
"errors"
"time"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/config"
)
// Entry is one normalized log record handed to the analysis services.
type Entry struct {
	Source           config.SourceType // which source kind produced this entry
	Message          string
	Time             time.Time
	Unit             string // for systemd source
	PID              string // for systemd source
	SyslogIdentifier string // for systemd source
	File             string // for file source
}

// regexField is one named value extracted from a regex capture group.
type regexField struct {
	name  string
	value string
}
// getValueStartEndByRegexIndex translates a capture-group number into the
// [start, end) byte offsets reported by regexp's FindStringSubmatchIndex:
// idx[2*n] and idx[2*n+1] are group n's bounds, -1 when the group did not
// participate in the match. It returns an error for a negative group id,
// an idx too short for the group, or a non-participating group.
func getValueStartEndByRegexIndex(valueId int, idx []int) (start int, end int, err error) {
	if valueId < 0 {
		// Guard: a negative id would otherwise panic below with an
		// out-of-range slice index.
		return 0, 0, errors.New("invalid index")
	}
	id := 2 * valueId
	if idx == nil || len(idx) <= id+1 {
		return 0, 0, errors.New("invalid index")
	}
	start, end = idx[id], idx[id+1]
	if start < 0 || end < 0 {
		// Group exists in the pattern but did not match this input.
		return 0, 0, errors.New("invalid index")
	}
	return start, end, nil
}

View File

@@ -0,0 +1,300 @@
package analysis
import (
"fmt"
"net"
"strings"
"time"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/config/brute_force_protection"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/log/analysis/brute_force_protection_group"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/blocking"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/types"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/notifications"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/i18n"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
)
// BruteForceProtection analyzes log entries against brute-force rules and
// blocks offending IPs via the firewall.
type BruteForceProtection interface {
	Analyze(entry *Entry)
	ClearDBData() error
}

// bruteForceProtection is the default implementation.
type bruteForceProtection struct {
	rulesIndex   *RulesIndex
	groupService brute_force_protection_group.Group
	blockService brute_force_protection_group.BlockService
	logger       log.Logger
	notify       notifications.Notifications
}

// bruteForceProtectionAnalyzeRuleReturn is the outcome of matching one
// rule's patterns, including the extracted offender IP.
type bruteForceProtectionAnalyzeRuleReturn struct {
	found  bool
	fields []*regexField
	ip     net.IP
}

// bruteForceProtectionNotify carries everything needed to compose one
// block/failure notification. err is nil on success paths.
type bruteForceProtectionNotify struct {
	rule     *brute_force_protection.Rule
	messages []string
	ip       net.IP
	ports    []types.L4Port
	time     time.Time
	fields   []*regexField
	blockSec uint32
	err      error
}
// NewBruteForceProtection builds the brute-force analysis service.
func NewBruteForceProtection(rulesIndex *RulesIndex, groupService brute_force_protection_group.Group, blockService brute_force_protection_group.BlockService, logger log.Logger, notify notifications.Notifications) BruteForceProtection {
	svc := &bruteForceProtection{
		rulesIndex:   rulesIndex,
		groupService: groupService,
		blockService: blockService,
		logger:       logger,
		notify:       notify,
	}
	return svc
}
// Analyze matches the entry against every brute-force rule registered for
// its source. A match feeds the rule's group; when the group decides to
// block, the IP is blocked either outright or on specific ports depending
// on the group's BlockConfig.
func (p *bruteForceProtection) Analyze(entry *Entry) {
	rules, err := p.rulesIndex.BruteForceProtections(entry)
	if err != nil {
		p.logger.Error(fmt.Sprintf("Failed to get brute force protection rules for entry: %v", err))
		return
	}
	for _, rule := range rules {
		if rule.Group == nil {
			// A rule without a group cannot accumulate triggers; skip it.
			p.logger.Error("Brute force protection rule without group")
			continue
		}
		result := p.analyzeRule(rule, entry.Message)
		if !result.found {
			continue
		}
		groupResult, err := p.groupService.Analyze(rule.Group, entry.Time, result.ip, entry.Message)
		if err != nil {
			p.logger.Error(fmt.Sprintf("Failed to analyze brute force protection group: %s", err))
			continue
		}
		if !groupResult.Block {
			continue
		}
		ipWithPorts, l4Ports := groupResult.BlockConfig.PortsBlocked()
		if !ipWithPorts {
			p.handleBlockIP(entry, rule, &result, &groupResult)
			continue
		}
		p.handleBlockIPWithPorts(entry, rule, &result, &groupResult, l4Ports)
	}
}
// ClearDBData clears the persisted brute-force-protection group state.
func (p *bruteForceProtection) ClearDBData() error {
	return p.groupService.ClearDBData()
}
// handleBlockIP blocks the offending IP outright (all ports) and sends a
// success or failure notification depending on the firewall result.
func (p *bruteForceProtection) handleBlockIP(
	entry *Entry,
	rule *brute_force_protection.Rule,
	result *bruteForceProtectionAnalyzeRuleReturn,
	groupResult *brute_force_protection_group.AnalysisResult,
) {
	blockIP := blocking.BlockIP{
		IP:          result.ip,
		TimeSeconds: groupResult.BlockSec,
		Reason:      rule.Message,
	}
	isBanned, err := p.blockService.BlockIP(blockIP)
	// Both notification paths carry the same payload.
	notifyData := &bruteForceProtectionNotify{
		rule:     rule,
		ip:       result.ip,
		messages: groupResult.LastLogs,
		time:     entry.Time,
		fields:   result.fields,
		blockSec: groupResult.BlockSec,
		err:      err,
	}
	if !isBanned {
		// Fix: format err with %v instead of calling err.Error() — the
		// previous code panicked if BlockIP returned (false, nil).
		p.logger.Info(fmt.Sprintf("IP %s are not blocked (%s) (group:%s): %s. Err: %v", result.ip, rule.Name, rule.Group.Name, entry.Message, err))
		p.sendNotifyError(notifyData)
		return
	}
	p.logger.Info(fmt.Sprintf("Block IP %s detected (%s) (group:%s): %s", result.ip, rule.Name, rule.Group.Name, entry.Message))
	p.sendNotifySuccess(notifyData)
}
// handleBlockIPWithPorts blocks the offending IP only on the given L4 ports
// and sends a success or failure notification depending on the result.
func (p *bruteForceProtection) handleBlockIPWithPorts(
	entry *Entry,
	rule *brute_force_protection.Rule,
	result *bruteForceProtectionAnalyzeRuleReturn,
	groupResult *brute_force_protection_group.AnalysisResult,
	l4Ports []types.L4Port,
) {
	blockIPWithPorts := blocking.BlockIPWithPorts{
		IP:          result.ip,
		TimeSeconds: groupResult.BlockSec,
		Reason:      rule.Message,
		Ports:       l4Ports,
	}
	isBanned, err := p.blockService.BlockIPWithPorts(blockIPWithPorts)
	// Both notification paths carry the same payload.
	notifyData := &bruteForceProtectionNotify{
		rule:     rule,
		ip:       result.ip,
		ports:    l4Ports,
		messages: groupResult.LastLogs,
		time:     entry.Time,
		fields:   result.fields,
		blockSec: groupResult.BlockSec,
		err:      err,
	}
	if !isBanned {
		// Fix: format err with %v instead of calling err.Error() — the
		// previous code panicked if BlockIPWithPorts returned (false, nil).
		p.logger.Info(fmt.Sprintf("IP %s are not blocked (%s) (group:%s): %s. Err: %v", result.ip, rule.Name, rule.Group.Name, entry.Message, err))
		p.sendNotifyError(notifyData)
		return
	}
	p.logger.Info(fmt.Sprintf("Block IP %s detected (%s) (group:%s): %s", result.ip, rule.Name, rule.Group.Name, entry.Message))
	p.sendNotifySuccess(notifyData)
}
// analyzeRule tries each of the rule's patterns against message. On the
// first match it extracts and parses the offender IP from the pattern's IP
// capture group (bailing out of the whole rule if it cannot), then collects
// the configured capture-group values as fields. Returns found=false when
// no pattern matches or the IP cannot be extracted.
func (p *bruteForceProtection) analyzeRule(rule *brute_force_protection.Rule, message string) bruteForceProtectionAnalyzeRuleReturn {
	result := bruteForceProtectionAnalyzeRuleReturn{
		found:  false,
		fields: []*regexField{},
		ip:     nil,
	}
	for _, pattern := range rule.Patterns {
		re, err := pattern.Regexp.Get()
		if err != nil {
			p.logger.Error(fmt.Sprintf("Failed to compile regexp: %s", err))
			continue
		}
		idx := re.FindStringSubmatchIndex(message)
		if idx != nil {
			start, end, err := getValueStartEndByRegexIndex(int(pattern.IP), idx)
			if err != nil {
				// Without an IP there is nothing to block; abort the rule.
				p.logger.Error(fmt.Sprintf("Failed to get ip value: %s", err))
				return result
			}
			ipText := message[start:end]
			result.ip = net.ParseIP(ipText)
			if result.ip == nil {
				p.logger.Error(fmt.Sprintf("Failed to parse ip: %s", ipText))
				return bruteForceProtectionAnalyzeRuleReturn{
					found: false,
				}
			}
			for _, value := range pattern.Values {
				start, end, err := getValueStartEndByRegexIndex(int(value.Value), idx)
				if err != nil {
					result.fields = append(result.fields, &regexField{name: value.Name, value: i18n.Lang.T("unknown")})
					continue
				}
				result.fields = append(result.fields, &regexField{name: value.Name, value: message[start:end]})
			}
			// NOTE(review): appears unreachable — exactly one field is
			// appended per value above and the function returns on the
			// first matched pattern. Presumably defensive; confirm.
			if len(pattern.Values) != len(result.fields) {
				continue
			}
			result.found = true
			return result
		}
	}
	return result
}
// sendNotifySuccess sends the "IP blocked" notification for the rule.
func (p *bruteForceProtection) sendNotifySuccess(notify *bruteForceProtectionNotify) {
	if !notify.rule.IsNotification {
		return
	}
	subject := i18n.Lang.T("alert.bruteForceProtection.subject", map[string]any{
		"Name":      notify.rule.Name,
		"GroupName": notify.rule.Group.Name,
		"IP":        notify.ip,
	})
	p.sendNotify(subject, notify)
}

// sendNotifyError sends the "blocking the IP failed" notification.
func (p *bruteForceProtection) sendNotifyError(notify *bruteForceProtectionNotify) {
	if !notify.rule.IsNotification {
		return
	}
	subject := i18n.Lang.T("alert.bruteForceProtection.subject-error", map[string]any{
		"Name":      notify.rule.Name,
		"GroupName": notify.rule.Group.Name,
		"IP":        notify.ip,
	})
	p.sendNotify(subject, notify)
}
// sendNotify composes and asynchronously sends a block notification. The
// body is: subject, group message, rule message, the error (if any), IP,
// blocked ports (if any), block duration, event time, extracted fields and
// finally the collected log lines.
func (p *bruteForceProtection) sendNotify(subject string, notify *bruteForceProtectionNotify) {
	// Re-checked here as well since callers already guard on it; keeps the
	// method safe if called directly.
	if !notify.rule.IsNotification {
		return
	}
	groupMessage := notify.rule.Group.Message + "\n\n"
	text := subject + "\n\n" + groupMessage + notify.rule.Message + "\n\n"
	if notify.err != nil {
		text += i18n.Lang.T("alert.bruteForceProtection.error", map[string]any{
			"Error": notify.err.Error(),
		}) + "\n"
	}
	text += "IP: " + notify.ip.String() + "\n"
	if len(notify.ports) > 0 {
		var ports []string
		for _, port := range notify.ports {
			ports = append(ports, port.ToString())
		}
		text += i18n.Lang.T("ports", map[string]any{
			"Ports": strings.Join(ports, ", "),
		}) + "\n"
	}
	text += i18n.Lang.T("blockSec", map[string]any{
		"BlockSec": notify.blockSec,
	}) + "\n"
	text += i18n.Lang.T("time", map[string]any{
		"Time": notify.time,
	}) + "\n"
	for _, field := range notify.fields {
		text += fmt.Sprintf("%s: %s\n", field.name, field.value)
	}
	text += "\n" + i18n.Lang.T("log") + "\n"
	for _, message := range notify.messages {
		text += message + "\n"
	}
	p.notify.SendAsync(notifications.Message{Subject: subject, Body: text})
}

View File

@@ -0,0 +1,31 @@
package brute_force_protection_group
import "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/blocking"
// BlockService abstracts the firewall's IP-blocking operations. Both calls
// report whether the ban was applied.
type BlockService interface {
	BlockIP(blockIP blocking.BlockIP) (bool, error)
	BlockIPWithPorts(blockIP blocking.BlockIPWithPorts) (bool, error)
}

// BlockIPFunc blocks an IP outright.
type BlockIPFunc func(blockIP blocking.BlockIP) (bool, error)

// BlockIPWithPortsFunc blocks an IP on specific L4 ports.
type BlockIPWithPortsFunc func(blockIP blocking.BlockIPWithPorts) (bool, error)

// blockService adapts two callbacks into a BlockService.
type blockService struct {
	blockIPFunc          BlockIPFunc
	blockIPWithPortsFunc BlockIPWithPortsFunc
}
// NewBlockService wires the two firewall callbacks into a BlockService.
func NewBlockService(blockIPFunc BlockIPFunc, blockIPWithPortsFunc BlockIPWithPortsFunc) BlockService {
	svc := &blockService{
		blockIPFunc:          blockIPFunc,
		blockIPWithPortsFunc: blockIPWithPortsFunc,
	}
	return svc
}

// BlockIP delegates to the configured whole-IP blocking callback.
func (b *blockService) BlockIP(blockIP blocking.BlockIP) (bool, error) {
	banned, err := b.blockIPFunc(blockIP)
	return banned, err
}

// BlockIPWithPorts delegates to the configured per-port blocking callback.
func (b *blockService) BlockIPWithPorts(blockIP blocking.BlockIPWithPorts) (bool, error) {
	banned, err := b.blockIPWithPortsFunc(blockIP)
	return banned, err
}

View File

@@ -0,0 +1,113 @@
package brute_force_protection_group
import (
"fmt"
"net"
"time"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/config/brute_force_protection"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/db/entity"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/db/repository"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/pkg/time_operation"
)
// Group applies a brute-force group's rate-limit ladder to events on a
// per-IP basis.
type Group interface {
	Analyze(group *brute_force_protection.Group, eventTime time.Time, ip net.IP, message string) (AnalysisResult, error)
	ClearDBData() error
}

// group is the default Group implementation, backed by a repository keyed
// by (group name, IP).
type group struct {
	groupRepository repository.BruteForceProtectionGroupRepository
	logger          log.Logger
}

// AnalysisResult reports whether the IP must be blocked and with which
// duration/config, plus the buffered log lines for the notification.
type AnalysisResult struct {
	Block       bool
	BlockSec    uint32
	BlockConfig brute_force_protection.Block
	LastLogs    []string
}
// NewGroup builds the brute-force-protection group service.
func NewGroup(groupRepository repository.BruteForceProtectionGroupRepository, logger log.Logger) Group {
	svc := &group{
		groupRepository: groupRepository,
		logger:          logger,
	}
	return svc
}
// Analyze runs one event through the per-IP rate-limit state of the group.
// The repository Update callback mutates the stored entity and also writes
// the block decision into the captured analysisResult variable, which is
// returned once the update commits.
func (g *group) Analyze(group *brute_force_protection.Group, eventTime time.Time, ip net.IP, message string) (AnalysisResult, error) {
	analysisResult := AnalysisResult{
		Block: false,
	}
	g.logger.Debug(fmt.Sprintf("Analyzing brute force protection group %s IP %s", group.Name, ip.String()))
	err := g.groupRepository.Update(group.Name, ip, func(entityGroup *entity.BruteForceProtectionGroup) (*entity.BruteForceProtectionGroup, error) {
		// Pick the rate limit for the current escalation level.
		rateLimit, err := group.RateLimit(entityGroup.CurrentLevelTriggerCount)
		if err != nil {
			return entityGroup, err
		}
		if time_operation.IsRateLimited(entityGroup.LastTriggeredAtUnix, eventTime, int64(rateLimit.Period)) {
			g.logger.Debug(fmt.Sprintf("Brute force protection group %s is rate limited", group.Name))
			analysisResult, entityGroup = g.analysisResult(rateLimit, eventTime, message, entityGroup)
			return entityGroup, nil
		}
		entityGroup.TriggerCount = 0
		if time_operation.IsReset(entityGroup.LastTriggeredAtUnix, eventTime, int64(group.RateLimitResetPeriod)) {
			g.logger.Debug(fmt.Sprintf("Brute force protection group %s is reset", group.Name))
			entityGroup.Reset()
			rateLimit, err = group.RateLimit(0)
			if err != nil {
				return entityGroup, err
			}
		}
		// Fix: constant message — no fmt.Sprintf needed (staticcheck S1039).
		g.logger.Debug("Brute force protection not rate limited")
		analysisResult, entityGroup = g.analysisResult(rateLimit, eventTime, message, entityGroup)
		return entityGroup, nil
	})
	if err != nil {
		return AnalysisResult{
			Block: false,
		}, err
	}
	return analysisResult, nil
}
// ClearDBData removes all persisted brute-force-protection group state.
func (g *group) ClearDBData() error {
	return g.groupRepository.Clear()
}
// analysisResult records one trigger on the entity and decides whether the
// current rate-limit count has been reached. When it has, the block
// decision (duration + config) and buffered logs are handed to the caller,
// the escalation level advances and the per-level counters are reset.
func (g *group) analysisResult(rateLimit brute_force_protection.RateLimit, eventTime time.Time, message string, entityGroup *entity.BruteForceProtectionGroup) (AnalysisResult, *entity.BruteForceProtectionGroup) {
	analysisResult := AnalysisResult{
		Block: false,
	}
	entityGroup.LastTriggeredAtUnix = eventTime.Unix()
	entityGroup.TriggerCount++
	entityGroup.LastLogs = append(entityGroup.LastLogs, fmt.Sprintf("event time: %s, message: %s", eventTime.Format(time.RFC3339), message))
	g.logger.Debug(fmt.Sprintf("Brute force protection triggered. Count: %d", entityGroup.TriggerCount))
	if entityGroup.TriggerCount >= uint64(rateLimit.Count) {
		// Fix: constant messages — drop the no-arg fmt.Sprintf calls
		// (flagged by go vet / staticcheck S1039).
		g.logger.Debug("Brute force protection reached rate limit")
		analysisResult.LastLogs = entityGroup.LastLogs
		analysisResult.Block = true
		analysisResult.BlockSec = rateLimit.BlockingTimeSeconds
		analysisResult.BlockConfig = rateLimit.BlockConfig
		entityGroup.CurrentLevelTriggerCount++
		entityGroup.TriggerCount = 0
		entityGroup.LastLogs = []string{}
	} else {
		g.logger.Debug("Brute force protection not reached rate limit")
	}
	return analysisResult, entityGroup
}

View File

@@ -0,0 +1,41 @@
package analysis
import (
config2 "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/config"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/config/brute_force_protection"
)
// RulesBucket groups the alert and brute force protection rules that share
// a single rules-index key.
type RulesBucket interface {
	// Alerts returns the collected alert rules.
	Alerts() []*config2.AlertRule
	// BruteForceProtectionRules returns the collected brute force protection rules.
	BruteForceProtectionRules() []*brute_force_protection.Rule
	addAlertRule(rule *config2.AlertRule)
	addBruteForceProtectionRule(rule *brute_force_protection.Rule)
}

// rulesBucket is the slice-backed default RulesBucket implementation.
type rulesBucket struct {
	alerts                    []*config2.AlertRule
	bruteForceProtectionRules []*brute_force_protection.Rule
}

func (rb *rulesBucket) Alerts() []*config2.AlertRule {
	return rb.alerts
}

func (rb *rulesBucket) BruteForceProtectionRules() []*brute_force_protection.Rule {
	return rb.bruteForceProtectionRules
}

func (rb *rulesBucket) addAlertRule(rule *config2.AlertRule) {
	rb.alerts = append(rb.alerts, rule)
}

func (rb *rulesBucket) addBruteForceProtectionRule(rule *brute_force_protection.Rule) {
	rb.bruteForceProtectionRules = append(rb.bruteForceProtectionRules, rule)
}

// newRulesBucket returns an empty bucket.
func newRulesBucket() RulesBucket {
	// Consistency fix: initialize both slices (previously only alerts was
	// initialized; append works on nil, but mixing styles is confusing).
	return &rulesBucket{
		alerts:                    make([]*config2.AlertRule, 0),
		bruteForceProtectionRules: make([]*brute_force_protection.Rule, 0),
	}
}

View File

@@ -0,0 +1,118 @@
package analysis
import (
"errors"
"fmt"
config2 "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/config"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/config/brute_force_protection"
)
// RulesIndex maps a (source type, match value) pair to the bucket of rules
// that should be evaluated for log entries from that source.
type RulesIndex struct {
	byKey map[indexKey]RulesBucket
}

// indexKey identifies a bucket: the source type (journal/file) plus a
// source-specific value (journalctl match string or file path).
type indexKey struct {
	source config2.SourceType
	val    string
}
// Add indexes the rules attached to source under its index key. A source
// must carry at least one rule, otherwise an error is returned.
func (idx *RulesIndex) Add(source *config2.Source) error {
	if source.AlertRule == nil && source.BruteForceProtectionRule == nil {
		return fmt.Errorf("no alert rule or brute force protection rule")
	}
	key, err := generateIndexKeyBySource(source)
	if err != nil {
		return err
	}
	bucket, ok := idx.byKey[key]
	if !ok {
		bucket = newRulesBucket()
		idx.byKey[key] = bucket
	}
	if source.AlertRule != nil {
		bucket.addAlertRule(source.AlertRule)
	}
	if source.BruteForceProtectionRule != nil {
		bucket.addBruteForceProtectionRule(source.BruteForceProtectionRule)
	}
	return nil
}
// Alerts collects every alert rule whose index key matches the entry.
func (idx *RulesIndex) Alerts(entry *Entry) ([]*config2.AlertRule, error) {
	rules := make([]*config2.AlertRule, 0)
	keys, err := generateIndexKeysByEntry(entry)
	if err != nil {
		return rules, err
	}
	for _, key := range keys {
		if bucket, ok := idx.byKey[key]; ok {
			rules = append(rules, bucket.Alerts()...)
		}
	}
	return rules, nil
}
// BruteForceProtections collects every brute force protection rule whose
// index key matches the entry.
func (idx *RulesIndex) BruteForceProtections(entry *Entry) ([]*brute_force_protection.Rule, error) {
	rules := make([]*brute_force_protection.Rule, 0)
	keys, err := generateIndexKeysByEntry(entry)
	if err != nil {
		return rules, err
	}
	for _, key := range keys {
		if bucket, ok := idx.byKey[key]; ok {
			rules = append(rules, bucket.BruteForceProtectionRules()...)
		}
	}
	return rules, nil
}
// NewRulesIndex returns an empty, ready-to-use rules index.
func NewRulesIndex() *RulesIndex {
	index := RulesIndex{byKey: map[indexKey]RulesBucket{}}
	return &index
}
// generateIndexKeyBySource builds the index key under which rules from the
// given source are stored.
func generateIndexKeyBySource(source *config2.Source) (indexKey, error) {
	switch source.Type {
	case config2.SourceTypeJournal:
		// Validate before building the match string.
		if source.Journal.Field == "" || source.Journal.Match == "" {
			return indexKey{}, errors.New("journalctl match is empty")
		}
		return indexKey{source: source.Type, val: source.Journal.JournalctlMatch()}, nil
	case config2.SourceTypeFile:
		return indexKey{source: source.Type, val: source.File.Path}, nil
	}
	// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) — flagged by go vet.
	return indexKey{}, fmt.Errorf("unknown source type: %s", source.Type)
}
// generateIndexKeysByEntry returns every index key a log entry can match:
// for journal entries both the systemd-unit and syslog-identifier keys,
// for file entries the file path key.
func generateIndexKeysByEntry(entry *Entry) ([]indexKey, error) {
	var keys []indexKey
	switch entry.Source {
	case config2.SourceTypeJournal:
		keys = append(keys, indexKey{source: entry.Source, val: string(config2.JournalFieldSystemdUnit) + "=" + entry.Unit})
		keys = append(keys, indexKey{source: entry.Source, val: string(config2.JournalFieldSyslogIdentifier) + "=" + entry.SyslogIdentifier})
		return keys, nil
	case config2.SourceTypeFile:
		keys = append(keys, indexKey{source: entry.Source, val: entry.File})
		return keys, nil
	}
	// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) — flagged by go vet.
	return []indexKey{}, fmt.Errorf("unknown source type: %s", entry.Source)
}

View File

@@ -0,0 +1,111 @@
package log
import (
"context"
"fmt"
"io"
"sync"
"time"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/config"
analysisServices "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/log/analysis"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/log/file_monitoring"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
"github.com/nxadm/tail"
)
// FileMonitoring tails a set of log files and streams new lines into an
// analysis channel.
type FileMonitoring interface {
	Run(ctx context.Context, logChan chan<- analysisServices.Entry)
	Close() error
}

// fileMonitoring tails each configured path with github.com/nxadm/tail.
type fileMonitoring struct {
	paths  []string
	logger log.Logger
	// tailers collects active tail handles so Close can stop them.
	tailers []*tail.Tail
	// mu guards tailers, which is appended to from per-path goroutines.
	mu sync.Mutex
}

// NewFileMonitoring builds a FileMonitoring over the given file paths.
func NewFileMonitoring(paths []string, logger log.Logger) FileMonitoring {
	return &fileMonitoring{
		paths:   paths,
		logger:  logger,
		tailers: []*tail.Tail{},
	}
}
// Run starts one monitoring goroutine per configured path. It returns
// immediately; monitoring continues until ctx is cancelled or Close stops
// the tailers.
func (fm *fileMonitoring) Run(ctx context.Context, logChan chan<- analysisServices.Entry) {
	if len(fm.paths) == 0 {
		fm.logger.Debug("No paths specified for file monitoring")
		return
	}
	fm.logger.Debug("Starting file monitoring")
	tailLogger := file_monitoring.NewLogger(fm.logger)
	for _, path := range fm.paths {
		// Pass the path explicitly so each goroutine gets its own copy.
		go func(p string) {
			fm.monitorFile(p, ctx, logChan, tailLogger)
		}(path)
	}
}
// Close stops every active tailer.
//
// Fix: it now holds fm.mu while reading fm.tailers — monitorFile goroutines
// append to that slice under the same mutex, so the unguarded read was a
// data race.
func (fm *fileMonitoring) Close() error {
	fm.mu.Lock()
	defer fm.mu.Unlock()
	for _, t := range fm.tailers {
		_ = t.Stop() // best effort: keep stopping the remaining tailers
		fm.logger.Debug(fmt.Sprintf("Stopped monitoring file %s", t.Filename))
	}
	return nil
}
// monitorFile tails a single file and forwards each new line to logChan as
// an analysis entry, until the file stream ends or ctx is cancelled.
func (fm *fileMonitoring) monitorFile(path string, ctx context.Context, logChan chan<- analysisServices.Entry, tailLogger file_monitoring.Logger) {
	fm.logger.Debug(fmt.Sprintf("Monitoring file %s", path))
	t, err := tail.TailFile(path, tail.Config{
		Follow: true,
		ReOpen: true,
		Poll:   true,
		// Start at the end of the file: only new lines are analyzed.
		Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekEnd},
		Logger:   tailLogger,
	})
	if err != nil {
		fm.logger.Error(fmt.Sprintf("Failed to tail file %s: %s", path, err))
		return
	}
	// Fix: register the tailer only after a successful TailFile. Previously
	// the (possibly nil) handle was appended before the error check, and
	// Close would then dereference a nil tailer.
	fm.mu.Lock()
	fm.tailers = append(fm.tailers, t)
	fm.mu.Unlock()
	for {
		select {
		case <-ctx.Done():
			return
		case line, ok := <-t.Lines:
			if !ok {
				// Lines channel closed: the tailer was stopped.
				return
			}
			if line == nil {
				continue
			}
			entry := analysisServices.Entry{
				Source:  config.SourceTypeFile,
				File:    path,
				Message: line.Text,
				Time:    time.Now(),
			}
			// Sending can block; stay responsive to cancellation.
			select {
			case <-ctx.Done():
				return
			case logChan <- entry:
			}
		}
	}
}

View File

@@ -0,0 +1,63 @@
package file_monitoring
import (
"fmt"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
)
// Logger adapts the daemon's leveled logger to the standard-log-style
// interface expected by the tail library.
type Logger interface {
	Fatal(v ...interface{})
	Fatalf(format string, v ...interface{})
	Fatalln(v ...interface{})
	Panic(v ...interface{})
	Panicf(format string, v ...interface{})
	Panicln(v ...interface{})
	Print(v ...interface{})
	Printf(format string, v ...interface{})
	Println(v ...interface{})
}

// logger prefixes every message with "File Monitoring: " and forwards it to
// the daemon logger. Fatal*/Panic* map to Error so the daemon never exits
// or panics on tail-library log calls.
type logger struct {
	logger log.Logger
}

// NewLogger returns a tail-compatible Logger backed by the daemon logger.
func NewLogger(log log.Logger) Logger {
	return &logger{logger: log}
}
// Fatal logs at error level; the daemon must not exit on tail fatals.
// Fix: the variadic v was previously expanded into fmt.Sprintf("…%v", v...),
// which produces %!v(MISSING)/%!(EXTRA …) whenever len(v) != 1; fmt.Sprint
// handles any argument count.
func (l *logger) Fatal(v ...interface{}) {
	l.logger.Error("File Monitoring: " + fmt.Sprint(v...))
}

func (l *logger) Fatalf(format string, v ...interface{}) {
	l.logger.Error(fmt.Sprintf("File Monitoring: "+format, v...))
}

func (l *logger) Fatalln(v ...interface{}) {
	l.logger.Error("File Monitoring: " + fmt.Sprint(v...))
}

// Panic* map to error level; the adapter never actually panics.
func (l *logger) Panic(v ...interface{}) {
	l.logger.Error("File Monitoring: " + fmt.Sprint(v...))
}

// Panicf now logs at error level, consistent with Panic and Panicln
// (it previously used Warn).
func (l *logger) Panicf(format string, v ...interface{}) {
	l.logger.Error(fmt.Sprintf("File Monitoring: "+format, v...))
}

func (l *logger) Panicln(v ...interface{}) {
	l.logger.Error("File Monitoring: " + fmt.Sprint(v...))
}

// Print* map to warn level: tail uses them for operational notices.
func (l *logger) Print(v ...interface{}) {
	l.logger.Warn("File Monitoring: " + fmt.Sprint(v...))
}

func (l *logger) Printf(format string, v ...interface{}) {
	l.logger.Warn(fmt.Sprintf("File Monitoring: "+format, v...))
}

func (l *logger) Println(v ...interface{}) {
	l.logger.Warn("File Monitoring: " + fmt.Sprint(v...))
}

View File

@@ -0,0 +1,159 @@
package log
import (
"context"
"encoding/json"
"fmt"
"io"
"os/exec"
"strconv"
"sync"
"time"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/config"
analysisServices "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/log/analysis"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
)
// Systemd follows the systemd journal via a journalctl subprocess and
// streams matching entries into an analysis channel.
type Systemd interface {
	Run(ctx context.Context, logChan chan<- analysisServices.Entry)
	Close() error
}

// systemd runs the journalctl binary at path with the configured match
// expressions.
type systemd struct {
	path    string
	matches []string
	logger  log.Logger
	// cmd is the currently running journalctl process, replaced on every
	// watch() restart; mu guards it against concurrent access from Close.
	cmd *exec.Cmd
	mu  sync.Mutex
}

// journalRawEntry mirrors the JSON object emitted by `journalctl -o json`.
type journalRawEntry struct {
	Message           string `json:"MESSAGE"`
	Unit              string `json:"_SYSTEMD_UNIT"`
	PID               string `json:"_PID"`
	SyslogIdentifier  string `json:"SYSLOG_IDENTIFIER"`
	SourceTimestamp   string `json:"_SOURCE_REALTIME_TIMESTAMP"`
	RealtimeTimestamp string `json:"__REALTIME_TIMESTAMP"`
}

// NewSystemd builds a Systemd log source; path is the journalctl binary.
func NewSystemd(path string, matches []string, logger log.Logger) Systemd {
	return &systemd{
		path:    path,
		matches: matches,
		logger:  logger,
	}
}
// Run keeps a journalctl follower alive until the context is cancelled.
// After the follower exits (error or EOF) it waits five seconds before
// restarting, to avoid spinning on persistent failures.
func (s *systemd) Run(ctx context.Context, logChan chan<- analysisServices.Entry) {
	if len(s.matches) == 0 {
		s.logger.Debug("No matches specified for journalctl")
		return
	}
	s.logger.Debug("Journalctl started")
	for {
		select {
		case <-ctx.Done():
			return
		default:
		}
		if err := s.watch(ctx, logChan); err != nil {
			s.logger.Error(fmt.Sprintf("Journalctl exited with error: %v", err))
		}
		// Pause before restarting to avoid CPU load during persistent errors
		select {
		case <-ctx.Done():
			return
		case <-time.After(5 * time.Second):
			s.logger.Warn("Journalctl connection lost. Restarting in 5s...")
		}
	}
}
// watch launches a single journalctl follower (`-f -n 0 -o json`) and
// streams decoded entries into logChan until the process exits or ctx is
// cancelled. Configured matches are joined with "+" (journalctl's OR
// operator). Timestamps prefer _SOURCE_REALTIME_TIMESTAMP, then
// __REALTIME_TIMESTAMP, then time.Now() when neither parses.
func (s *systemd) watch(ctx context.Context, logChan chan<- analysisServices.Entry) error {
	args := []string{"-f", "-n", "0", "-o", "json"}
	for index, match := range s.matches {
		if index > 0 {
			args = append(args, "+")
		}
		args = append(args, match)
	}
	cmd := exec.CommandContext(ctx, s.path, args...)
	s.mu.Lock()
	s.cmd = cmd
	s.mu.Unlock()
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return fmt.Errorf("stdout pipe error: %w", err)
	}
	if err := cmd.Start(); err != nil {
		return fmt.Errorf("start error: %w", err)
	}
	decoder := json.NewDecoder(stdout)
decodeLoop:
	for {
		var raw journalRawEntry
		if err := decoder.Decode(&raw); err != nil {
			if err == io.EOF {
				break // The process terminated normally or was killed.
			}
			return fmt.Errorf("decode error: %w", err)
		}
		tsStr := raw.SourceTimestamp
		if tsStr == "" {
			tsStr = raw.RealtimeTimestamp
		}
		var entryTime time.Time
		if usec, err := strconv.ParseInt(tsStr, 10, 64); err == nil {
			entryTime = time.Unix(0, usec*int64(time.Microsecond))
		} else {
			entryTime = time.Now()
		}
		entry := analysisServices.Entry{
			Source:           config.SourceTypeJournal,
			Message:          raw.Message,
			Unit:             raw.Unit,
			PID:              raw.PID,
			SyslogIdentifier: raw.SyslogIdentifier,
			Time:             entryTime,
		}
		select {
		case <-ctx.Done():
			// Fix: a bare `break` here only exited the select, so the loop
			// kept decoding after cancellation. Break the loop instead and
			// fall through to cmd.Wait() so the killed process is reaped.
			break decodeLoop
		case logChan <- entry:
		}
	}
	return cmd.Wait()
}
// Close terminates the running journalctl process, if any.
// NOTE(review): the s.matches nil-check mirrors Run's early exit — with no
// matches configured, no process was ever started.
func (s *systemd) Close() error {
	if s.matches == nil {
		return nil
	}
	// mu guards cmd, which watch() replaces on every restart.
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.cmd != nil && s.cmd.Process != nil {
		s.logger.Debug("Stopping journalctl")
		// Force journalctl to quit on shutdown
		return s.cmd.Process.Kill()
	}
	s.logger.Debug("Journalctl stopped")
	return nil
}

View File

@@ -3,12 +3,23 @@ package daemon
import (
"context"
"errors"
"fmt"
"net"
"strconv"
"strings"
"time"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/docker_monitor"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/blocking"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/types"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/notifications"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/pidfile"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/socket"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/pkg/ip"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/setting/validate"
)
type Daemon interface {
@@ -17,10 +28,13 @@ type Daemon interface {
}
type daemon struct {
pidFile pidfile.PidFile
socket socket.Socket
logger log.Logger
firewall firewall.API
pidFile pidfile.PidFile
socket socket.Socket
logger log.Logger
firewall firewall.API
notifications notifications.Notifications
analyzer analyzer.Analyzer
docker docker_monitor.Docker
stopCh chan struct{}
}
@@ -52,6 +66,23 @@ func (d *daemon) Run(ctx context.Context, isTesting bool, testingInterval uint16
_ = d.socket.Close()
}()
d.notifications.Run()
defer func() {
_ = d.notifications.Close()
}()
d.analyzer.Run(ctx)
defer func() {
_ = d.analyzer.Close()
}()
if d.firewall.DockerSupport() {
go d.docker.Run()
defer func() {
_ = d.docker.Close()
}()
}
go d.socket.Run(ctx, d.socketCommand)
d.runWorker(ctx, isTesting, testingInterval)
@@ -81,6 +112,19 @@ func (d *daemon) runWorker(ctx context.Context, isTesting bool, testingInterval
return
case <-stopTestingCh:
d.logger.Info("Testing interval expired, stopping service")
if err := d.notifications.DBQueueClear(); err != nil {
d.logger.Error(fmt.Sprintf("failed to clear notifications queue: %v", err))
}
if err := d.analyzer.ClearDBData(); err != nil {
d.logger.Error(fmt.Sprintf("failed to clear analyzer data: %v", err))
}
if err := d.firewall.ClearDBData(); err != nil {
d.logger.Error(fmt.Sprintf("failed to clear firewall data: %v", err))
}
d.Stop()
return
case <-d.stopCh:
@@ -90,7 +134,7 @@ func (d *daemon) runWorker(ctx context.Context, isTesting bool, testingInterval
}
}
func (d *daemon) socketCommand(command string, socket socket.Connect) error {
func (d *daemon) socketCommand(command string, args map[string]string, socket socket.Connect) error {
switch command {
case "stop":
d.stopCh <- struct{}{}
@@ -103,8 +147,149 @@ func (d *daemon) socketCommand(command string, socket socket.Connect) error {
return err
}
return socket.Write("ok")
case "notifications_queue_count":
count := d.notifications.DBQueueSize()
return socket.Write(strconv.Itoa(count))
case "notifications_queue_clear":
if err := d.notifications.DBQueueClear(); err != nil {
_ = socket.Write("notifications queue clear failed: " + err.Error())
return err
}
return socket.Write("ok")
case "block_add_ip":
if args["ip"] == "" {
return socket.Write("ip argument is required")
}
ipAddr := net.ParseIP(args["ip"])
if ipAddr == nil {
_ = socket.Write("invalid ip address")
return errors.New("invalid ip address")
}
port := args["port"]
if port != "" {
if err := d.cmdBlockAddIPWithPort(ipAddr, port, args); err != nil {
_ = socket.Write("block add failed: " + err.Error())
return err
}
} else {
if err := d.cmdBlockAddIP(ipAddr, args); err != nil {
_ = socket.Write("block add failed: " + err.Error())
return err
}
}
return socket.Write("ok")
case "block_delete_ip":
if args["ip"] == "" {
return socket.Write("ip argument is required")
}
ipAddr := net.ParseIP(args["ip"])
if ipAddr == nil {
_ = socket.Write("invalid ip address")
return errors.New("invalid ip address")
}
if err := d.firewall.UnblockIP(ipAddr); err != nil {
_ = socket.Write("block delete failed: " + err.Error())
return err
}
return socket.Write("ok")
case "block_clear":
if err := d.firewall.UnblockAllIPs(); err != nil {
_ = socket.Write("block clear failed: " + err.Error())
return err
}
return socket.Write("ok")
default:
_ = socket.Write("unknown command")
return errors.New("unknown command")
}
}
// cmdBlockAddIP blocks the given IP on all ports. Optional args:
// "seconds" (block duration, 0/absent = permanent) and "reason".
// It returns an error when the firewall rejects or skips the block.
func (d *daemon) cmdBlockAddIP(ip net.IP, args map[string]string) error {
	blockIP := blocking.BlockIP{
		IP: ip,
	}
	if args["seconds"] != "" {
		// Fix: ParseUint with a 32-bit limit rejects negative values and
		// values that would silently overflow the uint32 field (the previous
		// Atoi + cast did not).
		seconds, err := strconv.ParseUint(args["seconds"], 10, 32)
		if err != nil {
			return err
		}
		blockIP.TimeSeconds = uint32(seconds)
	}
	if args["reason"] != "" {
		blockIP.Reason = args["reason"]
	}
	isBlock, err := d.firewall.BlockIP(blockIP)
	if err != nil {
		return err
	}
	if !isBlock {
		return errors.New("the IP address is not blocked")
	}
	return nil
}
// cmdBlockAddIPWithPort blocks the given IP on a single port specified as
// "port" or "port/protocol". Optional args: "seconds" (0/absent = permanent)
// and "reason".
func (d *daemon) cmdBlockAddIPWithPort(ip net.IP, port string, args map[string]string) error {
	l4Port, err := newL4PortFromString(port)
	if err != nil {
		return err
	}
	blockIP := blocking.BlockIPWithPorts{
		IP:    ip,
		Ports: []types.L4Port{l4Port},
	}
	if args["seconds"] != "" {
		// Fix: ParseUint with a 32-bit limit rejects negative and
		// overflowing values (the previous Atoi + uint32 cast did not).
		seconds, err := strconv.ParseUint(args["seconds"], 10, 32)
		if err != nil {
			return err
		}
		blockIP.TimeSeconds = uint32(seconds)
	}
	if args["reason"] != "" {
		blockIP.Reason = args["reason"]
	}
	isBlock, err := d.firewall.BlockIPWithPorts(blockIP)
	if err != nil {
		return err
	}
	if !isBlock {
		return errors.New("the IP address is not blocked")
	}
	return nil
}
// newL4PortFromString parses "80" or "80/tcp" into an L4 port. The protocol
// defaults to TCP when omitted.
func newL4PortFromString(s string) (types.L4Port, error) {
	if s == "" {
		return nil, errors.New("port is empty")
	}
	data := strings.Split(s, "/")
	if len(data) > 2 {
		// Fix: reject malformed input such as "80/tcp/extra" instead of
		// silently ignoring the trailing segments.
		return nil, fmt.Errorf("invalid port format: %s", s)
	}
	protocol := types.ProtocolTCP
	port, err := strconv.Atoi(data[0])
	if err != nil {
		return nil, err
	}
	if err := validate.Port(port, "port"); err != nil {
		return nil, err
	}
	if len(data) == 2 {
		protocol, err = ip.ToProtocol(data[1])
		if err != nil {
			return nil, err
		}
	}
	return types.NewL4Port(uint16(port), protocol)
}

87
internal/daemon/db/db.go Normal file
View File

@@ -0,0 +1,87 @@
package db
import (
"errors"
"time"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/db/repository"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/pkg/filesystem"
"go.etcd.io/bbolt"
)
// Database file names created inside the daemon's data directory.
const (
	appDB      = "app.db"
	securityDB = "security.db"
)

// Repositories bundles access to every bbolt-backed repository used by the
// daemon. Close must be called to release the underlying database files.
type Repositories interface {
	NotificationsQueue() repository.NotificationsQueueRepository
	AlertGroup() repository.AlertGroupRepository
	BruteForceProtectionGroup() repository.BruteForceProtectionGroupRepository
	Blocking() repository.BlockingRepository
	Close() error
}

// repositories is the default Repositories implementation; db holds the
// open bbolt handles so Close can release them.
type repositories struct {
	notificationsQueue        repository.NotificationsQueueRepository
	alertGroup                repository.AlertGroupRepository
	bruteForceProtectionGroup repository.BruteForceProtectionGroupRepository
	blocking                  repository.BlockingRepository
	db                        []*bbolt.DB
}
// New opens (or creates) the app and security databases inside dataDir and
// wires all repositories on top of them. dataDir must be non-empty; a
// trailing slash is appended when missing.
func New(dataDir string) (Repositories, error) {
	if dataDir == "" {
		return &repositories{}, errors.New("data directory is empty")
	}
	if dataDir[len(dataDir)-1:] != "/" {
		dataDir += "/"
	}
	if err := filesystem.EnsureDir(dataDir); err != nil {
		return &repositories{}, err
	}
	appDB, err := bbolt.Open(dataDir+appDB, 0600, &bbolt.Options{Timeout: 3 * time.Second})
	if err != nil {
		return &repositories{}, err
	}
	securityDB, err := bbolt.Open(dataDir+securityDB, 0600, &bbolt.Options{Timeout: 3 * time.Second})
	if err != nil {
		// Fix: this error was previously ignored, returning repositories
		// backed by a nil *bbolt.DB. Also close appDB so it does not leak.
		_ = appDB.Close()
		return &repositories{}, err
	}
	return &repositories{
		notificationsQueue:        repository.NewNotificationsQueueRepository(appDB),
		alertGroup:                repository.NewAlertGroupRepository(appDB),
		bruteForceProtectionGroup: repository.NewBruteForceProtectionGroupRepository(securityDB),
		blocking:                  repository.NewBlockingRepository(securityDB),
		db:                        []*bbolt.DB{appDB, securityDB},
	}, nil
}
// NotificationsQueue returns the notifications queue repository.
func (r *repositories) NotificationsQueue() repository.NotificationsQueueRepository {
	return r.notificationsQueue
}

// AlertGroup returns the alert group repository.
func (r *repositories) AlertGroup() repository.AlertGroupRepository {
	return r.alertGroup
}

// BruteForceProtectionGroup returns the brute force protection group repository.
func (r *repositories) BruteForceProtectionGroup() repository.BruteForceProtectionGroupRepository {
	return r.bruteForceProtectionGroup
}

// Blocking returns the firewall blocking repository.
func (r *repositories) Blocking() repository.BlockingRepository {
	return r.blocking
}
// Close closes every opened database handle.
// Fix: close errors were previously discarded; they are now joined and
// returned (nil when all handles closed cleanly).
func (r *repositories) Close() error {
	var errs []error
	for _, db := range r.db {
		if err := db.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	return errors.Join(errs...)
}

View File

@@ -0,0 +1,15 @@
package entity
// AlertGroup is the persisted per-group alert counter state
// (same shape as BruteForceProtectionGroup).
type AlertGroup struct {
	// LastTriggeredAtUnix is the unix time of the most recent trigger.
	LastTriggeredAtUnix int64 `json:"LastTriggeredAtUnix"`
	// TriggerCount counts triggers since the last reset.
	TriggerCount uint64 `json:"TriggerCount"`
	// CurrentLevelTriggerCount — presumably counts how many times the
	// limit was reached (escalation level); confirm against the analyzer.
	CurrentLevelTriggerCount uint64 `json:"CurrentLevelTriggerCount"`
	// LastLogs holds recently matched log lines.
	LastLogs []string `json:"LastLogs"`
}

// Reset returns the group to its zero state with a non-nil LastLogs slice.
func (g *AlertGroup) Reset() {
	g.LastTriggeredAtUnix = 0
	g.TriggerCount = 0
	g.CurrentLevelTriggerCount = 0
	g.LastLogs = []string{}
}

View File

@@ -0,0 +1,55 @@
package entity
import (
"errors"
"fmt"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/types"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/pkg/ip"
)
// Blocking is the persisted form of a firewall block: the address, the
// optional set of blocked ports, an absolute expiry (unix seconds, 0 means
// permanent) and a human-readable reason.
type Blocking struct {
	IP           string `json:"IP"`
	Ports        []BlockingPort
	ExpireAtUnix int64  `json:"ExpireAtUnix"`
	Reason       string `json:"Reason"`
}

// IsPorts reports whether the block is limited to specific ports.
func (b *Blocking) IsPorts() bool {
	return len(b.Ports) > 0
}

// ToL4Ports converts the stored ports into firewall L4 port values. It
// fails when no ports are stored or any single port cannot be converted.
func (b *Blocking) ToL4Ports() ([]types.L4Port, error) {
	if !b.IsPorts() {
		return nil, fmt.Errorf("ports is empty")
	}
	result := make([]types.L4Port, 0, len(b.Ports))
	for i := range b.Ports {
		converted, err := b.Ports[i].ToL4Port()
		if err != nil {
			return nil, err
		}
		result = append(result, converted)
	}
	return result, nil
}

// BlockingPort is the persisted form of a single port/protocol pair.
type BlockingPort struct {
	Number   uint16 `json:"Port"`
	Protocol string `json:"Protocol"`
}

// ToL4Port converts the pair into a firewall L4 port value; the protocol
// string must be non-empty and known to ip.ToProtocol.
func (p *BlockingPort) ToL4Port() (types.L4Port, error) {
	if p.Protocol == "" {
		return nil, errors.New("protocol is empty")
	}
	protocol, err := ip.ToProtocol(p.Protocol)
	if err != nil {
		return nil, err
	}
	return types.NewL4Port(p.Number, protocol)
}

View File

@@ -0,0 +1,15 @@
package entity
// BruteForceProtectionGroup is the persisted per-group (and per-IP, see the
// repository key) brute force counter state.
type BruteForceProtectionGroup struct {
	// LastTriggeredAtUnix is the unix time of the most recent trigger.
	LastTriggeredAtUnix int64 `json:"LastTriggeredAtUnix"`
	// TriggerCount counts triggers toward the current rate limit.
	TriggerCount uint64 `json:"TriggerCount"`
	// CurrentLevelTriggerCount counts how many times a rate limit was
	// reached (used for escalation).
	CurrentLevelTriggerCount uint64 `json:"CurrentLevelTriggerCount"`
	// LastLogs holds the recent matching log lines reported on block.
	LastLogs []string `json:"LastLogs"`
}

// Reset returns the group to its zero state with a non-nil LastLogs slice.
func (g *BruteForceProtectionGroup) Reset() {
	g.LastTriggeredAtUnix = 0
	g.TriggerCount = 0
	g.CurrentLevelTriggerCount = 0
	g.LastLogs = []string{}
}

View File

@@ -0,0 +1,6 @@
package entity
// NotificationsQueue is one queued notification: a subject line and a body.
type NotificationsQueue struct {
	Subject string `json:"Subject"`
	Body    string `json:"Body"`
}

View File

@@ -0,0 +1,72 @@
package repository
import (
"encoding/json"
"errors"
"fmt"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/db/entity"
"go.etcd.io/bbolt"
bboltErrors "go.etcd.io/bbolt/errors"
)
// AlertGroupRepository persists per-group alert counter state.
type AlertGroupRepository interface {
	// Update atomically reads, transforms (via f) and writes the group
	// stored under name.
	Update(name string, f func(*entity.AlertGroup) (*entity.AlertGroup, error)) error
	// Clear drops all stored alert groups.
	Clear() error
}

// alertGroupRepository is the bbolt-backed implementation.
type alertGroupRepository struct {
	db     *bbolt.DB
	bucket string
}

// NewAlertGroupRepository returns a repository stored in appDB under the
// alert group bucket.
func NewAlertGroupRepository(appDB *bbolt.DB) AlertGroupRepository {
	return &alertGroupRepository{
		db:     appDB,
		bucket: alertGroupBucket,
	}
}
// Update loads the alert group stored under name (or a zeroed group when it
// does not exist yet), passes it to f, and persists whatever f returns —
// all inside a single read-write transaction.
func (r *alertGroupRepository) Update(name string, f func(*entity.AlertGroup) (*entity.AlertGroup, error)) error {
	// Start from a reset (non-nil-slices) group; overwritten below when a
	// stored record exists.
	entityAlertGroup := &entity.AlertGroup{}
	entityAlertGroup.Reset()
	return r.db.Update(func(tx *bbolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte(r.bucket))
		if err != nil {
			return err
		}
		key := []byte(name)
		group := b.Get(key)
		if group != nil {
			err = json.Unmarshal(group, entityAlertGroup)
			if err != nil {
				return fmt.Errorf("failed to unmarshal alert group: %w", err)
			}
		}
		// An error from f rolls the whole transaction back.
		entityAlertGroup, err = f(entityAlertGroup)
		if err != nil {
			return err
		}
		data, err := json.Marshal(entityAlertGroup)
		if err != nil {
			return err
		}
		return b.Put(key, data)
	})
}
// Clear removes all stored alert groups by dropping and recreating the
// bucket. A missing bucket is treated as already clear.
func (r *alertGroupRepository) Clear() error {
	return r.db.Update(func(tx *bbolt.Tx) error {
		err := tx.DeleteBucket([]byte(r.bucket))
		if errors.Is(err, bboltErrors.ErrBucketNotFound) {
			// If the bucket may not exist, ignore ErrBucketNotFound
			return nil
		}
		if err != nil {
			// Fix: other DeleteBucket errors were previously ignored.
			return err
		}
		_, err = tx.CreateBucketIfNotExists([]byte(r.bucket))
		return err
	})
}

View File

@@ -0,0 +1,173 @@
package repository
import (
"encoding/json"
"errors"
"net"
"time"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/db/entity"
"go.etcd.io/bbolt"
bboltErrors "go.etcd.io/bbolt/errors"
)
// BlockingRepository persists firewall block entries.
type BlockingRepository interface {
	// Add stores one block entry.
	Add(blockedIP entity.Blocking) error
	// List iterates every entry, invoking callback for each.
	List(callback func(entity.Blocking) error) error
	// DeleteByIP removes all entries for ip, calling callback per removal.
	DeleteByIP(ip net.IP, callback func(entity.Blocking) error) error
	// DeleteExpired removes up to limit expired entries and returns how
	// many were deleted.
	DeleteExpired(limit int) (int, error)
	// Clear drops every entry.
	Clear() error
}

// blocking is the bbolt-backed BlockingRepository implementation.
type blocking struct {
	db     *bbolt.DB
	bucket string
}

// NewBlockingRepository returns a BlockingRepository stored in appDB under
// the blocking bucket.
func NewBlockingRepository(appDB *bbolt.DB) BlockingRepository {
	return &blocking{
		db:     appDB,
		bucket: blockingBucket,
	}
}
// Add persists one block entry, keyed so that cursor iteration follows
// expiry time (see nextKeyByExpire).
func (r *blocking) Add(blockedIP entity.Blocking) error {
	return r.db.Update(func(tx *bbolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists([]byte(r.bucket))
		if err != nil {
			return err
		}
		payload, err := json.Marshal(blockedIP)
		if err != nil {
			return err
		}
		key, err := nextKeyByExpire(bucket, uint64(blockedIP.ExpireAtUnix))
		if err != nil {
			return err
		}
		return bucket.Put(key, payload)
	})
}
// List walks every stored block entry and hands it to callback; a callback
// error aborts the walk. A missing bucket means an empty list.
func (r *blocking) List(callback func(entity.Blocking) error) error {
	return r.db.View(func(tx *bbolt.Tx) error {
		bucket := tx.Bucket([]byte(r.bucket))
		if bucket == nil {
			return nil
		}
		return bucket.ForEach(func(_, v []byte) error {
			var blockedIP entity.Blocking
			if err := json.Unmarshal(v, &blockedIP); err != nil {
				return err
			}
			return callback(blockedIP)
		})
	})
}
// DeleteByIP removes every stored block entry whose address equals ip,
// invoking callback with each matching entry before deleting it. A callback
// error aborts and rolls back the transaction.
func (r *blocking) DeleteByIP(ip net.IP, callback func(entity.Blocking) error) error {
	return r.db.Update(func(tx *bbolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists([]byte(r.bucket))
		if err != nil {
			return err
		}
		c := bucket.Cursor()
		for k, v := c.First(); k != nil; {
			blockedIP := entity.Blocking{}
			err := json.Unmarshal(v, &blockedIP)
			if err != nil {
				return err
			}
			// ParseIP normalizes the stored text form; Equal matches an
			// IPv4 address against both its 4- and 16-byte representations.
			parsedBlockedIP := net.ParseIP(blockedIP.IP)
			if parsedBlockedIP == nil || !parsedBlockedIP.Equal(ip) {
				k, v = c.Next()
				continue
			}
			if err := callback(blockedIP); err != nil {
				return err
			}
			// Advance the cursor before deleting the current key so the
			// deletion does not disturb the iteration position.
			nextK, nextV := c.Next()
			if err := bucket.Delete(k); err != nil {
				return err
			}
			k = nextK
			v = nextV
		}
		return nil
	})
}
// DeleteExpired removes up to limit entries whose expiry time has passed and
// returns the number deleted. A limit <= 0 is a no-op.
//
// Keys are ordered by expiry (see nextKeyByExpire), so iteration can stop at
// the first entry that is not yet expired. Permanent entries (ExpireAtUnix
// <= 0) are skipped.
func (r *blocking) DeleteExpired(limit int) (int, error) {
	if limit <= 0 {
		return 0, nil
	}
	var deleted int
	err := r.db.Update(func(tx *bbolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists([]byte(r.bucket))
		if err != nil {
			return err
		}
		now := time.Now().Unix()
		c := bucket.Cursor()
		deleted = 0
		for k, v := c.First(); k != nil && deleted < limit; {
			blockedIP := entity.Blocking{}
			if err := json.Unmarshal(v, &blockedIP); err != nil {
				return err
			}
			if blockedIP.ExpireAtUnix <= 0 {
				// Permanent block: never expires.
				k, v = c.Next()
				continue
			}
			if blockedIP.ExpireAtUnix > now {
				// Not expired yet
				break
			}
			// Advance the cursor before deleting the current key so the
			// deletion does not disturb the iteration position.
			nextK, nextV := c.Next()
			if err := bucket.Delete(k); err != nil {
				return err
			}
			deleted++
			k = nextK
			v = nextV
		}
		return nil
	})
	return deleted, err
}
// Clear removes all stored block entries by dropping and recreating the
// bucket. A missing bucket is treated as already clear.
func (r *blocking) Clear() error {
	return r.db.Update(func(tx *bbolt.Tx) error {
		err := tx.DeleteBucket([]byte(r.bucket))
		if errors.Is(err, bboltErrors.ErrBucketNotFound) {
			// If the bucket may not exist, ignore ErrBucketNotFound
			return nil
		}
		if err != nil {
			// Fix: other DeleteBucket errors were previously ignored.
			return err
		}
		_, err = tx.CreateBucketIfNotExists([]byte(r.bucket))
		return err
	})
}

View File

@@ -0,0 +1,103 @@
package repository
import (
"encoding/json"
"errors"
"fmt"
"net"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/db/entity"
"go.etcd.io/bbolt"
bboltErrors "go.etcd.io/bbolt/errors"
)
// BruteForceProtectionGroupRepository persists per-group, per-IP brute force
// counter state.
type BruteForceProtectionGroupRepository interface {
	// Update atomically reads, transforms (via f) and writes the group
	// stored under the (name, ip) key.
	Update(name string, ip net.IP, f func(*entity.BruteForceProtectionGroup) (*entity.BruteForceProtectionGroup, error)) error
	// Clear drops all stored groups.
	Clear() error
}

// bruteForceProtectionGroupRepository is the bbolt-backed implementation.
type bruteForceProtectionGroupRepository struct {
	db     *bbolt.DB
	bucket string
}

// NewBruteForceProtectionGroupRepository returns a repository stored in
// appDB under the brute force protection group bucket.
func NewBruteForceProtectionGroupRepository(appDB *bbolt.DB) BruteForceProtectionGroupRepository {
	return &bruteForceProtectionGroupRepository{
		db:     appDB,
		bucket: bruteForceProtectionGroupBucket,
	}
}
// Update loads the group stored under the (name, ip) key — or a zeroed
// group when none exists — passes it to f, and persists whatever f returns,
// all inside a single read-write transaction.
func (r *bruteForceProtectionGroupRepository) Update(name string, ip net.IP, f func(*entity.BruteForceProtectionGroup) (*entity.BruteForceProtectionGroup, error)) error {
	// Start from a reset (non-nil-slices) group; overwritten below when a
	// stored record exists.
	entityGroup := &entity.BruteForceProtectionGroup{}
	entityGroup.Reset()
	return r.db.Update(func(tx *bbolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte(r.bucket))
		if err != nil {
			return err
		}
		key, err := keyGroupIP(name, ip)
		if err != nil {
			return err
		}
		group := b.Get(key)
		if group != nil {
			err = json.Unmarshal(group, entityGroup)
			if err != nil {
				return fmt.Errorf("failed to unmarshal brute force protection group: %w", err)
			}
		}
		// An error from f rolls the whole transaction back.
		entityGroup, err = f(entityGroup)
		if err != nil {
			return err
		}
		data, err := json.Marshal(entityGroup)
		if err != nil {
			return err
		}
		return b.Put(key, data)
	})
}
// Clear removes all stored groups by dropping and recreating the bucket.
// A missing bucket is treated as already clear.
func (r *bruteForceProtectionGroupRepository) Clear() error {
	return r.db.Update(func(tx *bbolt.Tx) error {
		err := tx.DeleteBucket([]byte(r.bucket))
		if errors.Is(err, bboltErrors.ErrBucketNotFound) {
			// If the bucket may not exist, ignore ErrBucketNotFound
			return nil
		}
		if err != nil {
			// Fix: other DeleteBucket errors were previously ignored.
			return err
		}
		_, err = tx.CreateBucketIfNotExists([]byte(r.bucket))
		return err
	})
}
func keyGroupIP(groupID string, ip net.IP) ([]byte, error) {
if ip == nil {
return nil, fmt.Errorf("ip cannot be nil")
}
if len(groupID) == 0 {
return nil, fmt.Errorf("group id cannot be empty")
}
if ip.To4() == nil && ip.To16() == nil {
return nil, fmt.Errorf("ip is neither IPv4 nor IPv6")
}
var ipAddr net.IP
if ip.To4() != nil {
ipAddr = ip.To4()
} else {
ipAddr = ip.To16()
}
k := make([]byte, 0, len(groupID)+1+len(ipAddr))
k = append(k, groupID...)
k = append(k, 0x00)
k = append(k, ipAddr...)
return k, nil
}

View File

@@ -0,0 +1,121 @@
package repository
import (
"encoding/json"
"errors"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/db/entity"
"go.etcd.io/bbolt"
bboltErrors "go.etcd.io/bbolt/errors"
)
// NotificationsQueueRepository persists the outgoing notifications queue.
type NotificationsQueueRepository interface {
	// Add enqueues one notification.
	Add(q entity.NotificationsQueue) error
	// Get returns up to limit queued notifications keyed by storage id.
	Get(limit int) (map[string]entity.NotificationsQueue, error)
	// Delete removes the notification stored under id.
	Delete(id string) error
	// Count - return size of notifications queue in db
	Count() (int, error)
	// Clear drops every queued notification.
	Clear() error
}

// notificationsQueueRepository is the bbolt-backed implementation.
type notificationsQueueRepository struct {
	db     *bbolt.DB
	bucket string
}

// NewNotificationsQueueRepository returns a repository stored in appDB
// under the notifications queue bucket.
func NewNotificationsQueueRepository(appDB *bbolt.DB) NotificationsQueueRepository {
	return &notificationsQueueRepository{
		db:     appDB,
		bucket: notificationsQueueBucket,
	}
}
// Add appends one notification to the persistent queue under a fresh
// monotonically increasing key (see nextID).
func (r *notificationsQueueRepository) Add(q entity.NotificationsQueue) error {
	return r.db.Update(func(tx *bbolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists([]byte(r.bucket))
		if err != nil {
			return err
		}
		payload, err := json.Marshal(q)
		if err != nil {
			return err
		}
		key, err := nextID(bucket)
		if err != nil {
			return err
		}
		return bucket.Put(key, payload)
	})
}
// Get returns up to limit queued notifications keyed by their storage id
// (the stringified 8-byte sequence key). A limit <= 0 yields an empty map.
// Cursor order is key order, so the oldest entries come first.
func (r *notificationsQueueRepository) Get(limit int) (map[string]entity.NotificationsQueue, error) {
	notifications := make(map[string]entity.NotificationsQueue)
	if limit <= 0 {
		return notifications, nil
	}
	err := r.db.View(func(tx *bbolt.Tx) error {
		bucket := tx.Bucket([]byte(r.bucket))
		if bucket == nil {
			// No bucket yet means an empty queue, not an error.
			return nil
		}
		c := bucket.Cursor()
		for k, v := c.First(); k != nil && len(notifications) < limit; k, v = c.Next() {
			var q entity.NotificationsQueue
			if err := json.Unmarshal(v, &q); err != nil {
				return err
			}
			notifications[string(k)] = q
		}
		return nil
	})
	return notifications, err
}
// Delete removes the notification stored under id; a missing bucket or a
// missing key is treated as success.
func (r *notificationsQueueRepository) Delete(id string) error {
	return r.db.Update(func(tx *bbolt.Tx) error {
		if bucket := tx.Bucket([]byte(r.bucket)); bucket != nil {
			return bucket.Delete([]byte(id))
		}
		return nil
	})
}
// Count returns the number of queued notifications (0 when the bucket does
// not exist yet).
func (r *notificationsQueueRepository) Count() (int, error) {
	var count int
	err := r.db.View(func(tx *bbolt.Tx) error {
		if bucket := tx.Bucket([]byte(r.bucket)); bucket != nil {
			count = bucket.Stats().KeyN
		}
		return nil
	})
	return count, err
}
// Clear removes every queued notification by dropping and recreating the
// bucket. A missing bucket is treated as already clear.
func (r *notificationsQueueRepository) Clear() error {
	return r.db.Update(func(tx *bbolt.Tx) error {
		err := tx.DeleteBucket([]byte(r.bucket))
		if errors.Is(err, bboltErrors.ErrBucketNotFound) {
			// If the bucket may not exist, ignore ErrBucketNotFound
			return nil
		}
		if err != nil {
			// Fix: other DeleteBucket errors were previously ignored.
			return err
		}
		_, err = tx.CreateBucketIfNotExists([]byte(r.bucket))
		return err
	})
}

View File

@@ -0,0 +1,47 @@
package repository
import (
"encoding/binary"
"math"
"go.etcd.io/bbolt"
)
// Bucket names used by the repositories in this package.
const (
	notificationsQueueBucket        = "notifications_queue"
	alertGroupBucket                = "alert_group"
	bruteForceProtectionGroupBucket = "brute_force_protection_group"
	blockingBucket                  = "blocking"
)
// nextID returns an 8-byte big-endian key derived from the bucket's
// sequence counter, so keys sort in insertion order.
func nextID(b *bbolt.Bucket) ([]byte, error) {
	seq, err := b.NextSequence()
	if err != nil {
		return nil, err
	}
	var key [8]byte
	binary.BigEndian.PutUint64(key[:], seq)
	return key[:], nil
}
// nextKeyByExpire builds a 16-byte key: 8 bytes of expiry timestamp followed
// by 8 bytes of the bucket sequence. Big-endian encoding makes byte order
// equal numeric order, so cursor iteration visits entries by expiry time.
// An expiry of 0 means "forever" and is mapped to MaxUint64 so permanent
// entries sort after all finite ones.
func nextKeyByExpire(b *bbolt.Bucket, expireUnixAt uint64) ([]byte, error) {
	seq, err := b.NextSequence()
	if err != nil {
		return nil, err
	}
	if expireUnixAt == 0 {
		expireUnixAt = math.MaxUint64
	}
	var key [16]byte
	binary.BigEndian.PutUint64(key[0:8], expireUnixAt)
	binary.BigEndian.PutUint64(key[8:16], seq)
	return key[:], nil
}

View File

@@ -0,0 +1,87 @@
package chain
import nftChain "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/chain"
// Chains wires the shield's own nftables chains into the standard hook
// chains (forward/prerouting/output/postrouting) by emitting jump rules.
type Chains interface {
	ForwardFilterJump(addRule func(expr ...string) error) error
	PreroutingFilterJump(addRule func(expr ...string) error) error
	PreroutingNatJump(addRule func(expr ...string) error) error
	OutputNatJump(addRule func(expr ...string) error) error
	PostroutingNatJump(addRule func(expr ...string) error) error
	List() *chains
}

// chains holds one Data entry per managed chain.
type chains struct {
	ForwardFilter      Data
	ForwardBridge      Data
	ForwardCT          Data
	PreroutingFilter   Data
	DockerFilter       Data
	DockerFilterFirst  Data
	DockerFilterSecond Data
	DockerNat          Data
	PostroutingNat     Data
}

// Data couples a chain handle with the chain name used in jump rules.
type Data struct {
	chain nftChain.Chain
	name  string
}
// ForwardFilterJump installs an unconditional jump into the forward filter chain.
func (d *chains) ForwardFilterJump(add func(expr ...string) error) error {
	return d.ForwardFilter.Jump(add, "")
}

// PreroutingFilterJump installs an unconditional jump into the prerouting filter chain.
func (d *chains) PreroutingFilterJump(add func(expr ...string) error) error {
	return d.PreroutingFilter.Jump(add, "")
}

// PreroutingNatJump jumps into the docker NAT chain for locally addressed traffic.
func (d *chains) PreroutingNatJump(add func(expr ...string) error) error {
	return d.DockerNat.Jump(add, "fib daddr type local counter")
}

// OutputNatJump jumps into the docker NAT chain for locally generated traffic,
// excluding loopback destinations; the IPv4 rule is installed first, then IPv6.
func (d *chains) OutputNatJump(add func(expr ...string) error) error {
	err := d.DockerNat.Jump(add, "ip daddr != 127.0.0.0/8 fib daddr type local counter")
	if err != nil {
		return err
	}
	return d.DockerNat.Jump(add, "ip6 daddr != ::1 fib daddr type local counter")
}

// PostroutingNatJump installs an unconditional jump into the postrouting NAT chain.
func (d *chains) PostroutingNatJump(add func(expr ...string) error) error {
	return d.PostroutingNat.Jump(add, "")
}

// List exposes the concrete chain set.
func (d *chains) List() *chains {
	return d
}
// Jump asks addRule to install "<rule> jump <name>" targeting this chain.
// NOTE(review): when rule is "" the first slice element is an empty string —
// presumably harmless if addRule joins and trims args; confirm against addRule.
func (d *Data) Jump(addRule func(expr ...string) error, rule string) error {
	args := []string{rule, "jump", d.name}
	return addRule(args...)
}

// JumpTo appends a rule to the given chain that jumps into this chain,
// with an optional match expression and trailing comment.
func (d *Data) JumpTo(data *Data, rule string, comment string) error {
	args := []string{rule, "jump", d.name, comment}
	return data.AddRule(args...)
}

// AddRule appends a rule to the underlying nft chain.
func (d *Data) AddRule(rule ...string) error {
	return d.chain.AddRule(rule...)
}

// RemoveRuleByHandle deletes one rule by its nft handle.
func (d *Data) RemoveRuleByHandle(handle uint64) error {
	return d.chain.RemoveRuleByHandle(handle)
}

// ListRules returns the chain's current rules.
func (d *Data) ListRules() ([]nftChain.Rule, error) {
	return d.chain.ListRules()
}

// Clear removes every rule from the chain.
func (d *Data) Clear() error {
	return d.chain.Clear()
}

View File

@@ -0,0 +1,32 @@
package chain
// emptyChains is a no-op Chains implementation: every jump installer succeeds
// without doing anything and List returns an empty chain set.
type emptyChains struct {
}

// NewEmptyChains returns a Chains that performs no firewall operations.
func NewEmptyChains() Chains {
	return &emptyChains{}
}

func (c *emptyChains) ForwardFilterJump(_ func(expr ...string) error) error {
	return nil
}
func (c *emptyChains) PreroutingFilterJump(_ func(expr ...string) error) error {
	return nil
}
func (c *emptyChains) PreroutingNatJump(_ func(expr ...string) error) error {
	return nil
}
func (c *emptyChains) OutputNatJump(_ func(expr ...string) error) error {
	return nil
}
func (c *emptyChains) PostroutingNatJump(_ func(expr ...string) error) error {
	return nil
}

// List returns a zero-valued chain set.
func (c *emptyChains) List() *chains {
	return &chains{}
}

View File

@@ -0,0 +1,77 @@
package chain
import nftChain "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/chain"
// NewChains creates every docker-managed nft chain via newNoneChain and wires
// each into its field on the chains struct. Creation order matches the table
// below; the first failure aborts construction.
func NewChains(newNoneChain func(chain string) (nftChain.Chain, error)) (Chains, error) {
	c := &chains{}
	targets := []struct {
		name string
		dst  *Data
	}{
		{"docker_nat", &c.DockerNat},
		{"docker_postrouting_nat", &c.PostroutingNat},
		{"docker_prerouting_filter", &c.PreroutingFilter},
		{"docker_filter", &c.DockerFilter},
		{"docker_filter_first", &c.DockerFilterFirst},
		{"docker_filter_second", &c.DockerFilterSecond},
		{"docker_forward_filter", &c.ForwardFilter},
		{"docker_forward_bridge", &c.ForwardBridge},
		{"docker_forward_ct", &c.ForwardCT},
	}
	for _, t := range targets {
		data, err := newChainData(t.name, newNoneChain)
		if err != nil {
			return nil, err
		}
		*t.dst = data
	}
	return c, nil
}
// newChainData creates the named nft chain and wraps it in a Data value.
// On error the returned Data carries the name but no chain handle.
func newChainData(chainName string, newNoneChain func(chain string) (nftChain.Chain, error)) (Data, error) {
	d := Data{name: chainName}
	created, err := newNoneChain(chainName)
	if err != nil {
		return d, err
	}
	d.chain = created
	return d, nil
}

View File

@@ -0,0 +1,42 @@
package client
import (
"encoding/json"
"fmt"
"strings"
)
// bridges returns the IDs of all docker networks using the bridge driver,
// one per line of `docker network ls -q --filter Driver=bridge`.
func (d *docker) bridges() ([]string, error) {
	args := []string{"network", "ls", "-q", "--filter", "Driver=bridge"}
	result, err := d.command(args...)
	if err != nil {
		// %w (instead of %s + err.Error()) keeps the cause inspectable with
		// errors.Is/As; the rendered message is unchanged.
		return nil, fmt.Errorf("failed to get docker bridge names: %w", err)
	}
	output := strings.TrimSpace(string(result))
	if output == "" {
		return []string{}, nil
	}
	lines := strings.Split(output, "\n")
	for i := range lines {
		lines[i] = strings.TrimSpace(lines[i])
	}
	return lines, nil
}
// bridgeInfo runs `docker network inspect` for the given network and returns
// the first (and only) inspect record.
func (d *docker) bridgeInfo(bridgeID string) (DockerBridgeInspect, error) {
	args := []string{"network", "inspect", bridgeID}
	result, err := d.command(args...)
	if err != nil {
		return DockerBridgeInspect{}, fmt.Errorf("failed to get bridge name: %w", err)
	}
	var info []DockerBridgeInspect
	if err := json.Unmarshal(result, &info); err != nil {
		return DockerBridgeInspect{}, err
	}
	// Bug fix: info[0] panicked when docker returned an empty array
	// (e.g. the network disappeared between list and inspect). Mirrors the
	// length check already done in containerNetworks.
	if len(info) == 0 {
		return DockerBridgeInspect{}, fmt.Errorf("bridge %s not found", bridgeID)
	}
	return info[0], nil
}

View File

@@ -0,0 +1,84 @@
package client
import (
"encoding/json"
"fmt"
"strings"
)
// containers lists the full (untruncated) IDs of running containers attached
// to the given docker network.
func (d *docker) containers(bridgeID string) ([]string, error) {
	args := []string{"ps", "-q", "--no-trunc", "--filter", fmt.Sprintf("network=%s", bridgeID)}
	result, err := d.command(args...)
	if err != nil {
		// %w preserves the error chain; rendered message is unchanged.
		return nil, fmt.Errorf("failed to get docker containers: %w", err)
	}
	output := strings.TrimSpace(string(result))
	if output == "" {
		return []string{}, nil
	}
	lines := strings.Split(output, "\n")
	for i := range lines {
		lines[i] = strings.TrimSpace(lines[i])
	}
	return lines, nil
}
// containerNetworks runs `docker inspect` on a container and returns the
// first inspect record, or an error if the container does not exist.
func (d *docker) containerNetworks(containerID string) (DockerContainerInspect, error) {
	raw, err := d.command("inspect", containerID)
	if err != nil {
		return DockerContainerInspect{}, err
	}
	var parsed []DockerContainerInspect
	if err := json.Unmarshal(raw, &parsed); err != nil {
		return DockerContainerInspect{}, err
	}
	if len(parsed) == 0 {
		return DockerContainerInspect{}, fmt.Errorf("container %s not found", containerID)
	}
	return parsed[0], nil
}
// parsePorts flattens the inspect output's port map into ContainerPort values.
// Map keys look like "8080/tcp"; a missing protocol suffix defaults to tcp.
// Host bindings whose IP fails to parse are logged and skipped.
func (d *docker) parsePorts(info DockerContainerInspect) []ContainerPort {
	var ports []ContainerPort
	for portKey, hostConfigs := range info.NetworkSettings.Ports {
		pieces := strings.Split(portKey, "/")
		entry := ContainerPort{
			Port:     pieces[0],
			Protocol: "tcp", // default when the key has no "/proto" suffix
		}
		if len(pieces) > 1 {
			entry.Protocol = pieces[1]
		}
		for _, hc := range hostConfigs {
			version, err := ipVersion(hc.HostIp)
			if err != nil {
				d.logger.Error(err.Error())
				continue
			}
			entry.HostPort = append(entry.HostPort, HostPort{
				Port: hc.HostPort,
				IP:   IPInfo{Address: hc.HostIp, Version: version},
			})
		}
		ports = append(ports, entry)
	}
	return ports
}

View File

@@ -0,0 +1,262 @@
package client
import (
"bufio"
"context"
"encoding/json"
"fmt"
"os/exec"
"sync"
"time"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
)
// Docker is a thin client around the docker CLI: it fetches network/container
// topology and streams docker events.
type Docker interface {
	FetchBridges() (Bridges, error)
	FetchBridge(bridgeID string) (Bridge, error)
	FetchContainers(bridgeID string) (Containers, error)
	FetchContainer(containerID string) (Container, error)
	// Events starts the background event monitor and returns its channel.
	Events() <-chan Event
	// EventsClose kills the running `docker events` process, if any.
	EventsClose() error
}

// docker shells out to the binary at path; cmd holds the currently running
// `docker events` process so EventsClose can kill it (guarded by mu).
// NOTE(review): storing ctx in the struct is unconventional in Go, but the
// Docker interface takes no ctx parameters, so it is carried here.
type docker struct {
	path   string
	ctx    context.Context
	logger log.Logger
	cmd    *exec.Cmd
	mu     sync.Mutex
}

// NewDocker builds a docker CLI client; path is the docker binary location.
func NewDocker(path string, ctx context.Context, logger log.Logger) Docker {
	return &docker{
		path:   path,
		ctx:    ctx,
		logger: logger,
	}
}
// FetchBridges lists all bridge networks and fetches each one's details.
// A failure on an individual bridge is logged and that bridge is skipped.
func (d *docker) FetchBridges() (Bridges, error) {
	ids, err := d.bridges()
	if err != nil {
		return nil, err
	}
	result := Bridges{}
	for _, id := range ids {
		bridge, fetchErr := d.FetchBridge(id)
		if fetchErr != nil {
			d.logger.Error(fetchErr.Error())
			continue
		}
		result = append(result, bridge)
	}
	return result, nil
}
// FetchBridge inspects one docker network and assembles its Bridge view:
// interface name (explicit option or the derived "br-<id12>" default),
// configured subnets, and attached containers.
func (d *docker) FetchBridge(bridgeID string) (Bridge, error) {
	info, err := d.bridgeInfo(bridgeID)
	if err != nil {
		return Bridge{}, err
	}
	// A container-listing failure is logged but does not fail the bridge fetch.
	containers, err := d.FetchContainers(bridgeID)
	if err != nil {
		d.logger.Error(err.Error())
	}
	name := info.Options.Name
	if name == "" {
		name = bridgeNameFromID(bridgeID)
	}
	var subnets []string
	for _, cfg := range info.IPAM.Config {
		subnets = append(subnets, cfg.Subnet)
	}
	return Bridge{
		ID:         info.ID,
		Name:       name,
		Subnets:    subnets,
		Containers: containers,
	}, nil
}
// FetchContainers fetches details for every container attached to the given
// network; individual container failures are logged and skipped.
func (d *docker) FetchContainers(bridgeID string) (Containers, error) {
	ids, err := d.containers(bridgeID)
	if err != nil {
		return nil, err
	}
	result := Containers{}
	for _, id := range ids {
		container, fetchErr := d.FetchContainer(id)
		if fetchErr != nil {
			d.logger.Error(fetchErr.Error())
			continue
		}
		result = append(result, container)
	}
	return result, nil
}
// FetchContainer inspects one container and collects its network view:
// published ports plus each network's IP address (skipping addresses that
// fail to parse, which are logged).
func (d *docker) FetchContainer(containerID string) (Container, error) {
	info, err := d.containerNetworks(containerID)
	if err != nil {
		return Container{}, err
	}
	nets := ContainerNetworks{
		IPAddresses: []IPInfo{},
		Ports:       d.parsePorts(info),
	}
	for _, netData := range info.NetworkSettings.Networks {
		if netData.IPAddress == "" {
			continue
		}
		version, verErr := ipVersion(netData.IPAddress)
		if verErr != nil {
			d.logger.Error(verErr.Error())
			continue
		}
		nets.IPAddresses = append(nets.IPAddresses, IPInfo{
			Address:   netData.IPAddress,
			Version:   version,
			NetworkID: netData.NetworkID,
		})
	}
	return Container{ID: containerID, Networks: nets}, nil
}
// command runs the docker binary with the given arguments and returns its
// combined stdout+stderr; on failure the output text becomes the error message.
func (d *docker) command(args ...string) ([]byte, error) {
	cmd := exec.CommandContext(d.ctx, d.path, args...)
	result, err := cmd.CombinedOutput()
	if err != nil {
		// Bug fix: fmt.Errorf(string(result)) used docker's output as a format
		// string — any '%' in it would garble the message (go vet printf check).
		// "%s" produces the same text safely.
		return nil, fmt.Errorf("%s", result)
	}
	return result, nil
}
// Events starts the docker event monitor in a background goroutine and returns
// the channel it publishes on. When watch exits (error or stream end) the
// goroutine waits 15s and restarts it; when d.ctx is done it returns, closing
// the channel.
func (d *docker) Events() <-chan Event {
	eventsChan := make(chan Event)
	d.logger.Debug("Starting docker monitor")
	go func() {
		defer close(eventsChan)
		for {
			select {
			case <-d.ctx.Done():
				return
			default:
				if err := d.watch(eventsChan); err != nil {
					d.logger.Error(fmt.Sprintf("Docker monitor exited with error: %v", err))
				}
				// Pause before restarting to avoid CPU load during persistent errors
				select {
				case <-d.ctx.Done():
					return
				case <-time.After(15 * time.Second):
					// NOTE(review): this warning fires after the pause has already
					// elapsed, although its text suggests the wait is upcoming.
					d.logger.Warn("Docker connection lost. Restarting in 15s...")
					continue
				}
			}
		}
	}()
	return eventsChan
}
// watch runs `docker events` filtered to container start/die and network
// create/destroy, parses each JSON line, and forwards valid events to
// eventsChan. It blocks until the stream ends or an error occurs; returning an
// error makes the caller (Events) restart it after a backoff.
func (d *docker) watch(eventsChan chan Event) error {
	args := []string{
		"events",
		"--filter", "type=container",
		"--filter", "event=start",
		"--filter", "event=die",
		"--filter", "type=network",
		"--filter", "event=create",
		"--filter", "event=destroy",
		"--format",
		"{{json .}}",
	}
	cmd := exec.CommandContext(d.ctx, d.path, args...)
	// Publish the running process so EventsClose can kill it.
	d.mu.Lock()
	d.cmd = cmd
	d.mu.Unlock()
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		return err
	}
	scanner := bufio.NewScanner(stdout)
	for scanner.Scan() {
		// NOTE(review): Scan() returning true implies Err() is nil, so this
		// check is likely dead code; kept as-is.
		if err := scanner.Err(); err != nil {
			return err
		}
		// An empty line is treated as a broken stream: returning an error here
		// triggers the restart loop in Events.
		if scanner.Text() == "" {
			return fmt.Errorf("empty line")
		}
		var dockerEvent DockerEvent
		if err := json.Unmarshal([]byte(scanner.Text()), &dockerEvent); err != nil {
			return fmt.Errorf("failed to unmarshal docker event: %v", err)
		}
		// Skip partially filled events rather than forwarding them.
		if dockerEvent.Type == "" || dockerEvent.Action == "" || dockerEvent.Actor.ID == "" {
			continue
		}
		eventsChan <- Event{
			Type:    dockerEvent.Type,
			Action:  dockerEvent.Action,
			ID:      dockerEvent.Actor.ID,
			Message: scanner.Text(),
		}
	}
	return scanner.Err()
}
// EventsClose force-kills the running `docker events` process, if one exists,
// so the monitor goroutine can shut down.
func (d *docker) EventsClose() error {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.cmd == nil || d.cmd.Process == nil {
		d.logger.Debug("Docker monitor stopped")
		return nil
	}
	d.logger.Debug("Stopping docker monitor")
	// Force docker monitor to quit on shutdown
	return d.cmd.Process.Kill()
}
// bridgeNameFromID derives the default bridge interface name: "br-" plus the
// first 12 characters of the network ID.
func bridgeNameFromID(bridgeID string) string {
	const shortIDLen = 12
	id := bridgeID
	if len(id) > shortIDLen {
		id = id[:shortIDLen]
	}
	return "br-" + id
}

View File

@@ -0,0 +1,104 @@
package client
import (
"errors"
"net"
)
// Event is the normalized docker event forwarded to consumers.
type Event struct {
	Type    string
	Action  string
	ID      string // Full 64-char ID (Actor.ID)
	Message string // debug: the raw JSON line the event was parsed from
}

// DockerEvent mirrors the JSON emitted by `docker events --format '{{json .}}'`.
type DockerEvent struct {
	Type   string `json:"Type"`   // container, network
	Action string `json:"Action"` // start, die, create, destroy
	Actor  struct {
		ID string `json:"ID"`
	} `json:"Actor"`
}

// Bridges is a list of docker bridge networks.
type Bridges []Bridge

// Bridge is the assembled view of one docker bridge network.
type Bridge struct {
	ID         string
	Name       string
	Subnets    []string
	Containers Containers
}

// DockerBridgeInspect mirrors the fields read from `docker network inspect`.
type DockerBridgeInspect struct {
	ID      string `json:"Id"`
	Options struct {
		Name string `json:"com.docker.network.bridge.name"`
	} `json:"Options"`
	IPAM struct {
		Config []struct {
			Subnet string `json:"Subnet"`
		} `json:"Config"`
	} `json:"IPAM"`
}

// Containers is a list of containers attached to a bridge.
type Containers []Container

// Container is the network view of one container.
type Container struct {
	ID       string
	Networks ContainerNetworks
}

// ContainerNetworks holds a container's addresses and published ports.
type ContainerNetworks struct {
	IPAddresses []IPInfo
	Ports       []ContainerPort
}

// IPInfo is one IP address with its version and owning network.
type IPInfo struct {
	Address   string
	Version   int // 4 or 6, as returned by ipVersion
	NetworkID string
}

// NftPrefix returns the nft address-family prefix for this address:
// "ip6" for version 6, otherwise "ip".
func (i IPInfo) NftPrefix() string {
	if i.Version == 6 {
		return "ip6"
	}
	return "ip"
}

// ContainerPort is one exposed container port and its host bindings.
type ContainerPort struct {
	Port     string
	Protocol string
	HostPort []HostPort
}

// HostPort is one host-side binding of a container port.
type HostPort struct {
	Port string
	IP   IPInfo
}

// DockerContainerInspect mirrors the fields read from `docker inspect`.
type DockerContainerInspect struct {
	NetworkSettings struct {
		Ports map[string][]struct {
			HostIp   string `json:"HostIp"`
			HostPort string `json:"HostPort"`
		} `json:"Ports"`
		Networks map[string]struct {
			IPAddress string `json:"IPAddress"`
			NetworkID string `json:"NetworkID"`
		} `json:"Networks"`
	} `json:"NetworkSettings"`
}
// ipVersion reports whether the given address is IPv4 (4) or IPv6 (6),
// or returns an error when the string is not a valid IP address.
func ipVersion(ip string) (int, error) {
	parsed := net.ParseIP(ip)
	switch {
	case parsed == nil || (parsed.To4() == nil && parsed.To16() == nil):
		return 0, errors.New("invalid ip address")
	case parsed.To4() != nil:
		return 4, nil
	default:
		return 6, nil
	}
}

View File

@@ -0,0 +1,13 @@
package docker_monitor
// Config holds the docker monitor settings: the docker binary path and which
// rule-regeneration strategy to use.
type Config struct {
	Path         string
	RuleStrategy RuleStrategy
}

// RuleStrategy selects how nft rules are updated on docker events.
type RuleStrategy int8

const (
	// RuleStrategyRebuild clears and regenerates all rules on each event.
	RuleStrategyRebuild RuleStrategy = iota + 1
	// RuleStrategyIncremental adds/removes only the rules for the affected object.
	RuleStrategyIncremental
)

View File

@@ -0,0 +1,74 @@
package docker_monitor
import (
"context"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/docker_monitor/chain"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/docker_monitor/client"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/docker_monitor/rule_strategy"
nftChain "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/chain"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
)
// Docker is the docker firewall monitor: it maintains the docker nft chains
// and reacts to docker events via the configured rule strategy.
type Docker interface {
	// NftReload rebuilds the docker chains using the given chain factory.
	NftReload(newNoneChain func(chain string) (nftChain.Chain, error)) error
	// NftChains returns the currently active chain set.
	NftChains() chain.Chains
	// Run blocks, consuming docker events until the context is done.
	Run()
	// Close stops the underlying docker event stream.
	Close() error
}

// docker wires the docker CLI client to a rule strategy.
type docker struct {
	dockerClient client.Docker
	ruleStrategy rule_strategy.Strategy
	logger       log.Logger
	ctx          context.Context
}

// New builds the monitor from config; it fails if the configured rule
// strategy is unknown.
func New(config *Config, ctx context.Context, logger log.Logger) (Docker, error) {
	dockerClient := client.NewDocker(config.Path, ctx, logger)
	ruleStrategy, err := newRuleStrategy(config, dockerClient, logger)
	if err != nil {
		return nil, err
	}
	return &docker{
		dockerClient: dockerClient,
		logger:       logger,
		ctx:          ctx,
		ruleStrategy: ruleStrategy,
	}, nil
}
// NftReload delegates chain (re)construction to the active rule strategy.
func (d *docker) NftReload(factory func(chain string) (nftChain.Chain, error)) error {
	return d.ruleStrategy.Reload(factory)
}

// NftChains exposes the strategy's current chain set.
func (d *docker) NftChains() chain.Chains {
	return d.ruleStrategy.Chains()
}
// Run consumes docker events and forwards them to the rule strategy until the
// context is cancelled. Events with an empty Message are skipped.
func (d *docker) Run() {
	events := d.dockerClient.Events()
	for {
		select {
		case <-d.ctx.Done():
			return
		case event := <-events:
			// An empty Message also covers the zero Event received after the
			// events channel is closed.
			if event.Message == "" {
				continue
			}
			d.logger.Debug("Docker event received: " + event.Message)
			d.ruleStrategy.Event(&event)
		}
	}
}

// Close stops the docker event stream, unblocking Run's event source.
func (d *docker) Close() error {
	return d.dockerClient.EventsClose()
}
// chainCommand appends a rule to the given chain, logging (not returning)
// any failure.
// NOTE(review): not referenced anywhere in this view — possibly dead code;
// verify against the rest of the package before removing.
func (d *docker) chainCommand(chainData chain.Data, rule string) {
	if err := chainData.AddRule(rule); err != nil {
		d.logger.Error(err.Error())
	}
}

View File

@@ -0,0 +1,32 @@
package docker_monitor
import (
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/docker_monitor/chain"
nftChain "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/chain"
)
// DockerNotSupport is the no-op Docker monitor used when docker integration
// is disabled or unavailable: all operations succeed and the chain set is empty.
type DockerNotSupport struct {
	chains chain.Chains
}

// NewDockerNotSupport returns a Docker monitor that does nothing.
func NewDockerNotSupport() Docker {
	return &DockerNotSupport{
		chains: chain.NewEmptyChains(),
	}
}

func (d *DockerNotSupport) NftReload(_ func(chain string) (nftChain.Chain, error)) error {
	return nil
}

func (d *DockerNotSupport) NftChains() chain.Chains {
	return d.chains
}

// Run returns immediately: there is no event stream to consume.
func (d *DockerNotSupport) Run() {
}

func (d *DockerNotSupport) Close() error {
	return nil
}

View File

@@ -0,0 +1,22 @@
package docker_monitor
import (
"fmt"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/docker_monitor/client"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/docker_monitor/rule_strategy"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
)
// newRuleStrategy builds the strategy selected in config, or errors on an
// unknown RuleStrategy value.
func newRuleStrategy(config *Config, dockerClient client.Docker, logger log.Logger) (rule_strategy.Strategy, error) {
	gen := rule_strategy.NewGenerator(dockerClient, logger)
	switch config.RuleStrategy {
	case RuleStrategyRebuild:
		return rule_strategy.NewRebuildStrategy(gen), nil
	case RuleStrategyIncremental:
		return rule_strategy.NewIncrementalStrategy(gen, dockerClient, logger), nil
	default:
		return nil, fmt.Errorf("invalid option rule_strategy")
	}
}

View File

@@ -0,0 +1,174 @@
package rule_strategy
import (
"fmt"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/docker_monitor/chain"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/docker_monitor/client"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
)
// Generator produces the nft rules for docker bridges and containers.
// isComment controls whether rules carry a "bridge_id:"/"container_id:"
// comment tag, which the incremental strategy needs for targeted deletion.
type Generator interface {
	GenerateAll(chains chain.Chains, isComment bool)
	GenerateBridge(bridge client.Bridge, chain chain.Chains, isComment bool)
	GenerateContainer(container client.Container, bridgeName string, chain chain.Chains, isComment bool)
	// ClearChains flushes every docker-managed chain.
	ClearChains(chains chain.Chains)
	// AddRule appends a rule to a chain, logging (not returning) failures.
	AddRule(chainData chain.Data, rule string)
}

// generator implements Generator by scanning the docker client's topology.
type generator struct {
	dockerClient client.Docker
	logger       log.Logger
}

// NewGenerator builds a Generator backed by the given docker client.
func NewGenerator(dockerClient client.Docker, logger log.Logger) Generator {
	return &generator{
		dockerClient: dockerClient,
		logger:       logger,
	}
}
// GenerateAll installs the static inter-chain jump rules and then generates
// bridge and container rules for the docker client's whole topology.
// All failures are logged and generation continues best-effort.
func (g *generator) GenerateAll(chains chain.Chains, isComment bool) {
	listChains := chains.List()
	// Static wiring: the CT/bridge chains feed into the forward filter, and
	// the first/second docker filter sub-chains feed into the docker filter.
	if err := listChains.ForwardCT.JumpTo(&listChains.ForwardFilter, "", ""); err != nil {
		g.logger.Error(err.Error())
	}
	if err := listChains.ForwardBridge.JumpTo(&listChains.ForwardFilter, "", ""); err != nil {
		g.logger.Error(err.Error())
	}
	if err := listChains.DockerFilterFirst.JumpTo(&listChains.DockerFilter, "", ""); err != nil {
		g.logger.Error(err.Error())
	}
	if err := listChains.DockerFilterSecond.JumpTo(&listChains.DockerFilter, "", ""); err != nil {
		g.logger.Error(err.Error())
	}
	bridges, err := g.dockerClient.FetchBridges()
	if err != nil {
		g.logger.Error(err.Error())
		return
	}
	for _, bridge := range bridges {
		g.GenerateBridge(bridge, chains, isComment)
		if bridge.Containers == nil {
			continue
		}
		for _, container := range bridge.Containers {
			g.GenerateContainer(container, bridge.Name, chains, isComment)
		}
	}
}
// GenerateBridge installs the per-bridge rules: isolation drop, intra-bridge
// accept, the per-bridge jump into ForwardBridge, the established/related
// accept, and masquerading for each subnet. When isComment is true every rule
// is tagged "bridge_id:<id>" for later targeted removal.
func (g *generator) GenerateBridge(bridge client.Bridge, chain chain.Chains, isComment bool) {
	listChains := chain.List()
	var rule string
	comment := ""
	if isComment {
		comment = fmt.Sprintf("comment \"bridge_id:%s\"", bridge.ID)
	}
	// Drop traffic entering the bridge from any other interface.
	rule = fmt.Sprintf("iifname != \"%s\" oifname \"%s\" counter drop %s", bridge.Name, bridge.Name, comment)
	g.AddRule(listChains.DockerFilterSecond, rule)
	// Accept traffic originating on the bridge.
	rule = fmt.Sprintf("iifname \"%s\" counter accept %s", bridge.Name, comment)
	g.AddRule(listChains.ForwardFilter, rule)
	// Route bridge-bound traffic through the docker filter chain.
	rule = fmt.Sprintf("oifname \"%s\" counter", bridge.Name)
	if err := listChains.DockerFilter.JumpTo(&listChains.ForwardBridge, rule, comment); err != nil {
		g.logger.Error(err.Error())
	}
	// Allow return traffic of established connections.
	rule = fmt.Sprintf("oifname \"%s\" ct state related,established counter accept %s", bridge.Name, comment)
	g.AddRule(listChains.ForwardCT, rule)
	// Masquerade each subnet leaving the bridge.
	for _, subnet := range bridge.Subnets {
		rule = fmt.Sprintf("ip saddr %s oifname != \"%s\" counter masquerade %s", subnet, bridge.Name, comment)
		g.AddRule(listChains.PostroutingNat, rule)
	}
}
// GenerateContainer installs the per-container rules: a drop of direct access
// to the container IP from outside its bridge, plus DNAT/accept rules for
// every published port. When isComment is true, rules are tagged
// "container_id:<id>" for targeted removal.
func (g *generator) GenerateContainer(container client.Container, bridgeName string, chain chain.Chains, isComment bool) {
	listChains := chain.List()
	var rule string
	comment := ""
	if isComment {
		comment = fmt.Sprintf("comment \"container_id:%s\"", container.ID)
	}
	for _, ipInfo := range container.Networks.IPAddresses {
		// Block direct access to the container's bridge IP from other interfaces.
		rule = fmt.Sprintf("%s daddr %s iifname != \"%s\" counter drop %s", ipInfo.NftPrefix(), ipInfo.Address, bridgeName, comment)
		g.AddRule(listChains.PreroutingFilter, rule)
		for _, port := range container.Networks.Ports {
			// Only one DNAT rule is needed for a wildcard (0.0.0.0/::) binding,
			// even if both address families are listed.
			isZeroAddress := false
			for _, hostInfo := range port.HostPort {
				// NOTE(review): the first two clauses are redundant — the
				// parenthesized loopback check already excludes 0.0.0.0/"::".
				// Also only the exact literals 127.0.0.1/::1 are treated as
				// loopback bindings; other 127.0.0.0/8 addresses are not.
				if hostInfo.IP.Address != "0.0.0.0" && hostInfo.IP.Address != "::" && (hostInfo.IP.Address == "127.0.0.1" || hostInfo.IP.Address == "::1") {
					// Loopback-only binding: drop external packets to that port.
					rule = fmt.Sprintf("%s daddr %s iifname != \"lo\" %s dport %s counter drop %s", hostInfo.IP.NftPrefix(), hostInfo.IP.Address, port.Protocol, hostInfo.Port, comment)
					g.AddRule(listChains.PreroutingFilter, rule)
				}
				if hostInfo.IP.Address == "0.0.0.0" || hostInfo.IP.Address == "::" {
					if isZeroAddress {
						continue
					}
					isZeroAddress = true
					// Wildcard binding: DNAT any non-bridge traffic to the container.
					rule = fmt.Sprintf("iifname != \"%s\" %s dport %s counter dnat %s to %s:%s %s", bridgeName, port.Protocol, hostInfo.Port, ipInfo.NftPrefix(), ipInfo.Address, port.Port, comment)
					g.AddRule(listChains.DockerNat, rule)
					rule = fmt.Sprintf("%s daddr %s iifname != \"%s\" oifname \"%s\" %s dport %s counter accept %s", ipInfo.NftPrefix(), ipInfo.Address, bridgeName, bridgeName, port.Protocol, port.Port, comment)
					g.AddRule(listChains.DockerFilterFirst, rule)
					continue
				}
				// Specific host address binding: accept forwarded traffic and
				// DNAT packets addressed to that host IP/port.
				rule = fmt.Sprintf("%s daddr %s iifname != \"%s\" oifname \"%s\" %s dport %s counter accept %s", ipInfo.NftPrefix(), ipInfo.Address, bridgeName, bridgeName, port.Protocol, port.Port, comment)
				g.AddRule(listChains.DockerFilterFirst, rule)
				rule = fmt.Sprintf("%s daddr %s iifname != \"%s\" %s dport %s counter dnat to %s:%s %s", hostInfo.IP.NftPrefix(), hostInfo.IP.Address, bridgeName, port.Protocol, hostInfo.Port, ipInfo.Address, port.Port, comment)
				g.AddRule(listChains.DockerNat, rule)
			}
		}
	}
}
// ClearChains flushes every docker-managed chain; individual failures are
// logged and the remaining chains are still cleared.
func (g *generator) ClearChains(chains chain.Chains) {
	list := chains.List()
	all := []chain.Data{
		list.DockerNat,
		list.PostroutingNat,
		list.PreroutingFilter,
		list.DockerFilter,
		list.DockerFilterFirst,
		list.DockerFilterSecond,
		list.ForwardFilter,
		list.ForwardBridge,
		list.ForwardCT,
	}
	for i := range all {
		if err := all[i].Clear(); err != nil {
			g.logger.Error(err.Error())
		}
	}
}
// AddRule appends one rule to the chain; a failure is logged, not returned.
func (g *generator) AddRule(chainData chain.Data, rule string) {
	err := chainData.AddRule(rule)
	if err != nil {
		g.logger.Error(err.Error())
	}
}

View File

@@ -0,0 +1,13 @@
package rule_strategy
import (
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/docker_monitor/chain"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/docker_monitor/client"
nftChain "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/chain"
)
// Strategy decides how docker nft rules are maintained: rebuilt wholesale or
// updated incrementally per event.
type Strategy interface {
	// Reload recreates the chains with the given factory and regenerates rules.
	Reload(newNoneChain func(chain string) (nftChain.Chain, error)) error
	// Chains returns the chain set built by the last Reload.
	Chains() chain.Chains
	// Event reacts to one docker event.
	Event(event *client.Event)
}

View File

@@ -0,0 +1,181 @@
package rule_strategy
import (
"fmt"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/docker_monitor/chain"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/docker_monitor/client"
nftChain "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/chain"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
)
// incrementalStrategy updates only the rules affected by each docker event,
// relying on per-rule comment tags to find and remove obsolete rules.
type incrementalStrategy struct {
	dockerClient client.Docker
	chains       chain.Chains // set by Reload
	generator    Generator
	logger       log.Logger
}

// NewIncrementalStrategy builds the incremental rule strategy; chains are
// populated on the first Reload.
func NewIncrementalStrategy(generator Generator, dockerClient client.Docker, logger log.Logger) Strategy {
	return &incrementalStrategy{
		dockerClient: dockerClient,
		generator:    generator,
		logger:       logger,
	}
}
// Reload recreates the chain set and regenerates all rules. Incremental mode
// passes isComment=true so every rule is tagged and can be removed later.
func (i *incrementalStrategy) Reload(newNoneChain func(chain string) (nftChain.Chain, error)) error {
	created, err := chain.NewChains(newNoneChain)
	if err != nil {
		return err
	}
	i.chains = created
	i.generator.GenerateAll(i.chains, true)
	return nil
}

// Chains returns the chain set built by the last Reload.
func (i *incrementalStrategy) Chains() chain.Chains {
	return i.chains
}
// Event dispatches a docker event to the matching incremental handler.
// Nil events, events without an ID, and unknown type/action pairs are ignored.
func (i *incrementalStrategy) Event(event *client.Event) {
	if event == nil || event.ID == "" {
		return
	}
	switch event.Type {
	case "container":
		switch event.Action {
		case "start":
			if err := i.eventContainerStart(event.ID); err != nil {
				i.logger.Error(fmt.Sprintf("failed to handle container start event: %s", err))
			}
		case "die":
			i.eventContainerStop(event.ID)
		}
	case "network":
		if event.Action == "create" {
			if err := i.eventNetworkCreate(event.ID); err != nil {
				i.logger.Error(fmt.Sprintf("failed to handle network create event: %s", err))
			}
		}
		if event.Action == "destroy" {
			i.eventNetworkDestroy(event.ID)
		}
	}
}
// eventContainerStart generates rules for a freshly started container: for
// each of its networks the owning bridge is fetched and container rules are
// generated against that bridge's name.
// NOTE(review): GenerateContainer iterates all of the container's IPs itself,
// so a container attached to multiple networks appears to get rules generated
// once per network — confirm this duplication is intended.
func (i *incrementalStrategy) eventContainerStart(containerId string) error {
	container, err := i.dockerClient.FetchContainer(containerId)
	if err != nil {
		return err
	}
	for _, ipInfo := range container.Networks.IPAddresses {
		bridge, err := i.dockerClient.FetchBridge(ipInfo.NetworkID)
		if err != nil {
			i.logger.Error(fmt.Sprintf("failed to fetch bridge for container %s: %s", containerId, err))
			continue
		}
		i.generator.GenerateContainer(container, bridge.Name, i.chains, true)
	}
	return nil
}
// eventContainerStop removes the container's tagged rules from every chain
// that GenerateContainer writes to; failures are logged per chain.
func (i *incrementalStrategy) eventContainerStop(containerId string) {
	listChains := i.chains.List()
	targets := []*chain.Data{
		&listChains.PreroutingFilter,
		&listChains.DockerNat,
		&listChains.DockerFilterFirst,
	}
	for _, target := range targets {
		if err := i.nftRuleDeleteContainer(containerId, target); err != nil {
			i.logger.Error(fmt.Sprintf("failed to delete container %s rules: %s", containerId, err))
		}
	}
}
// nftRuleDeleteContainer removes every rule in the chain tagged with
// "container_id:<id>". Listing failures are returned; per-rule delete
// failures are logged and the remaining rules are still attempted.
func (i *incrementalStrategy) nftRuleDeleteContainer(containerId string, chain *chain.Data) error {
	rules, err := chain.ListRules()
	if err != nil {
		return err
	}
	want := "container_id:" + containerId
	for _, rule := range rules {
		if rule.Comment != want {
			continue
		}
		if err := chain.RemoveRuleByHandle(rule.Handle); err != nil {
			i.logger.Error(fmt.Sprintf("failed to delete container %s rule: %s", containerId, err))
		}
	}
	return nil
}
// eventNetworkCreate generates tagged rules for a newly created bridge network.
func (i *incrementalStrategy) eventNetworkCreate(bridgeId string) error {
	created, err := i.dockerClient.FetchBridge(bridgeId)
	if err != nil {
		return err
	}
	i.generator.GenerateBridge(created, i.chains, true)
	return nil
}
// eventNetworkDestroy removes the bridge's tagged rules from every chain that
// GenerateBridge writes to; failures are logged per chain.
func (i *incrementalStrategy) eventNetworkDestroy(bridgeId string) {
	listChains := i.chains.List()
	targets := []*chain.Data{
		&listChains.DockerFilterSecond,
		&listChains.ForwardFilter,
		&listChains.ForwardBridge,
		&listChains.ForwardCT,
		&listChains.PostroutingNat,
	}
	for _, target := range targets {
		if err := i.nftRuleDeleteBridge(bridgeId, target); err != nil {
			i.logger.Error(fmt.Sprintf("failed to delete bridge %s rules: %s", bridgeId, err))
		}
	}
}
// nftRuleDeleteBridge removes every rule in the chain tagged with
// "bridge_id:<id>". Listing failures are returned; per-rule delete failures
// are logged and the remaining rules are still attempted.
func (i *incrementalStrategy) nftRuleDeleteBridge(bridgeId string, chain *chain.Data) error {
	rules, err := chain.ListRules()
	if err != nil {
		return err
	}
	want := "bridge_id:" + bridgeId
	for _, rule := range rules {
		if rule.Comment != want {
			continue
		}
		if err := chain.RemoveRuleByHandle(rule.Handle); err != nil {
			i.logger.Error(fmt.Sprintf("failed to delete bridge %s rule: %s", bridgeId, err))
		}
	}
	return nil
}

View File

@@ -0,0 +1,43 @@
package rule_strategy
import (
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/docker_monitor/chain"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/docker_monitor/client"
nftChain "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/chain"
)
// rebuildStrategy regenerates the full rule set on every relevant docker
// event instead of editing individual rules; no comment tags are needed.
type rebuildStrategy struct {
	chains    chain.Chains // set by Reload
	generator Generator
}

// NewRebuildStrategy builds the rebuild rule strategy; chains are populated
// on the first Reload.
func NewRebuildStrategy(generator Generator) Strategy {
	return &rebuildStrategy{
		generator: generator,
	}
}
// Reload recreates the chain set and regenerates all rules. Rebuild mode
// passes isComment=false: rules are never removed individually, so no tags.
func (r *rebuildStrategy) Reload(newNoneChain func(chain string) (nftChain.Chain, error)) error {
	created, err := chain.NewChains(newNoneChain)
	if err != nil {
		return err
	}
	r.chains = created
	r.generator.GenerateAll(r.chains, false)
	return nil
}

// Chains returns the chain set built by the last Reload.
func (r *rebuildStrategy) Chains() chain.Chains {
	return r.chains
}
// Event rebuilds every docker rule from a fresh topology scan. Only container
// events trigger it; nil and network events are ignored.
func (r *rebuildStrategy) Event(event *client.Event) {
	if event == nil {
		return
	}
	if event.Type != "container" {
		return
	}
	r.generator.ClearChains(r.chains)
	r.generator.GenerateAll(r.chains, false)
}

View File

@@ -0,0 +1,257 @@
package blocking
import (
"fmt"
"net"
"strings"
"sync"
"time"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/db/entity"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/db/repository"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/chain/block"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/types"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
)
// API manages IP blocking: it keeps the nft block sets and the database
// record of blocked addresses in sync.
type API interface {
	// NftReload swaps in fresh nft block lists and repopulates them from the DB.
	NftReload(blockListIP block.ListIP, blockListIPWithPort block.ListIPWithPort) error
	// BlockIP blocks an address entirely; the bool reports whether the nft
	// entry was added (the DB write may still have failed).
	BlockIP(block BlockIP) (bool, error)
	// BlockIPWithPorts blocks an address on specific L4 ports only.
	BlockIPWithPorts(block BlockIPWithPorts) (bool, error)
	UnblockAllIPs() error
	UnblockIP(ip net.IP) error
	ClearDBData() error
}

// blocking implements API on top of a bbolt-backed repository and nft sets.
type blocking struct {
	blockingRepository  repository.BlockingRepository
	blockListIP         block.ListIP
	blockListIPWithPort block.ListIPWithPort
	logger              log.Logger
	mu                  sync.Mutex // guards the block-list fields during NftReload
}

// BlockIP describes a full-address block; TimeSeconds == 0 means forever.
type BlockIP struct {
	IP          net.IP
	TimeSeconds uint32
	Reason      string
}

// BlockIPWithPorts describes a per-port block; TimeSeconds == 0 means forever.
type BlockIPWithPorts struct {
	IP          net.IP
	TimeSeconds uint32
	Reason      string
	Ports       []types.L4Port
}

// New builds the blocking service; the nft block lists are supplied later
// via NftReload.
func New(blockingRepository repository.BlockingRepository, logger log.Logger) API {
	return &blocking{
		blockingRepository: blockingRepository,
		logger:             logger,
		mu:                 sync.Mutex{},
	}
}
// NftReload installs the new nft block lists and replays every non-expired
// database entry into them, converting absolute expiry times back into
// remaining-seconds timeouts. Expired entries trigger an asynchronous
// best-effort cleanup of up to 100 rows.
// NOTE(review): the mutex guards only the field assignment; the list reads
// below run unlocked — confirm concurrent NftReload/Block calls are impossible
// or acceptable here.
func (b *blocking) NftReload(blockListIP block.ListIP, blockListIPWithPort block.ListIPWithPort) error {
	b.mu.Lock()
	b.blockListIP = blockListIP
	b.blockListIPWithPort = blockListIPWithPort
	b.mu.Unlock()
	isExpiredEntries := false
	nowUnix := time.Now().Unix()
	err := b.blockingRepository.List(func(e entity.Blocking) error {
		ip := net.ParseIP(e.IP)
		if ip == nil {
			// Corrupt row: log and keep replaying the rest.
			b.logger.Error(fmt.Sprintf("Failed to parse IP address: %s", e.IP))
			return nil
		}
		// blockSeconds == 0 means a permanent nft entry.
		blockSeconds := uint32(0)
		if e.ExpireAtUnix > 0 {
			if e.ExpireAtUnix < nowUnix {
				// Already expired: skip, and remember to clean up afterwards.
				isExpiredEntries = true
				return nil
			}
			blockSeconds = uint32(e.ExpireAtUnix - nowUnix)
		}
		if e.IsPorts() {
			l4Ports, err := e.ToL4Ports()
			if err != nil {
				b.logger.Error(fmt.Sprintf("Failed to parse ports: %s", err))
				return nil
			}
			if err := b.blockListIPWithPort.AddIP(ip, l4Ports, blockSeconds); err != nil {
				b.logger.Error(fmt.Sprintf("Failed to add IP %s to block list: %s", ip.String(), err))
			}
			return nil
		}
		if err := b.blockListIP.AddIP(ip, blockSeconds); err != nil {
			b.logger.Error(fmt.Sprintf("Failed to add IP %s to block list: %s", ip.String(), err))
			return nil
		}
		return nil
	})
	if isExpiredEntries {
		// Fire-and-forget cleanup so reload latency is not affected.
		go func() {
			deleteCount, err := b.blockingRepository.DeleteExpired(100)
			if err != nil {
				b.logger.Error(fmt.Sprintf("Failed to delete expired entries from database: %s", err))
			}
			b.logger.Debug(fmt.Sprintf("Deleted %d expired entries from database", deleteCount))
		}()
	}
	return err
}
// BlockIP adds the address to the nft block set and records it in the
// database. Loopback addresses are refused. The returned bool reports whether
// the nft entry was installed; a true with a non-nil error means the firewall
// block succeeded but the DB write failed.
func (b *blocking) BlockIP(block BlockIP) (bool, error) {
	if block.IP.IsLoopback() {
		return false, fmt.Errorf("loopback IP address %s cannot be blocked", block.IP.String())
	}
	if err := b.blockListIP.AddIP(block.IP, block.TimeSeconds); err != nil {
		return false, err
	}
	// ExpireAtUnix stays 0 ("forever") when no timeout was requested.
	var expireAtUnix int64
	if block.TimeSeconds > 0 {
		expireAtUnix = time.Now().Add(time.Duration(block.TimeSeconds) * time.Second).Unix()
	}
	entry := entity.Blocking{
		IP:           block.IP.String(),
		ExpireAtUnix: expireAtUnix,
		Reason:       block.Reason,
	}
	if err := b.blockingRepository.Add(entry); err != nil {
		return true, fmt.Errorf("the IP is blocked, but not recorded in the database. Failed to add IP %s to database: %w", block.IP.String(), err)
	}
	return true, nil
}
// BlockIPWithPorts adds the given IP address to the per-port firewall
// block list and records the block (including its ports) in the
// repository. Loopback addresses are refused. The returned bool reports
// whether the IP was added to the firewall, which can be true even when
// the database write afterwards failed.
func (b *blocking) BlockIPWithPorts(block BlockIPWithPorts) (bool, error) {
	if block.IP.IsLoopback() {
		return false, fmt.Errorf("loopback IP address %s cannot be blocked", block.IP.String())
	}
	if err := b.blockListIPWithPort.AddIP(block.IP, block.Ports, block.TimeSeconds); err != nil {
		return false, err
	}
	// Pre-size: exactly one record entry per blocked port.
	l4Ports := make([]entity.BlockingPort, 0, len(block.Ports))
	for _, port := range block.Ports {
		l4Ports = append(l4Ports, entity.BlockingPort{
			Number:   port.Number(),
			Protocol: port.ProtocolString(),
		})
	}
	// A zero expiration timestamp marks a permanent block.
	expireAtUnix := int64(0)
	if block.TimeSeconds > 0 {
		expire := time.Now().Add(time.Duration(int64(block.TimeSeconds)) * time.Second)
		expireAtUnix = expire.Unix()
	}
	data := entity.Blocking{
		IP:           block.IP.String(),
		ExpireAtUnix: expireAtUnix,
		Reason:       block.Reason,
		Ports:        l4Ports,
	}
	if err := b.blockingRepository.Add(data); err != nil {
		return true, fmt.Errorf("the IP is blocked, but not recorded in the database. Failed to add IP %s to database: %w", block.IP.String(), err)
	}
	return true, nil
}
// UnblockIP removes the IP from the database and, for every matching
// record, from the corresponding firewall block list. Elements already
// absent from the plain-IP nftables set are tolerated.
func (b *blocking) UnblockIP(ip net.IP) error {
	return b.blockingRepository.DeleteByIP(ip, func(e entity.Blocking) error {
		if e.IsPorts() {
			l4Ports, err := e.ToL4Ports()
			if err != nil {
				return err
			}
			return b.removeIPWithPorts(ip, l4Ports)
		}
		err := b.blockListIP.DeleteIP(ip)
		if err == nil || strings.Contains(err.Error(), "element does not exist") {
			// Already gone from the set — nothing left to undo.
			return nil
		}
		return err
	})
}
// UnblockAllIPs walks every blocking record, removes the corresponding
// elements from the firewall sets (tolerating entries that nftables no
// longer knows about), and finally clears the database. The database is
// cleared even when a firewall deletion fails, best effort.
func (b *blocking) UnblockAllIPs() error {
	err := b.blockingRepository.List(func(e entity.Blocking) error {
		ip := net.ParseIP(e.IP)
		if ip == nil {
			return fmt.Errorf("failed to parse IP address: %s", e.IP)
		}
		if e.IsPorts() {
			l4Ports, portsErr := e.ToL4Ports()
			if portsErr != nil {
				return portsErr
			}
			for _, port := range l4Ports {
				delErr := b.blockListIPWithPort.DeleteIP(ip, port)
				if delErr == nil {
					continue
				}
				msg := delErr.Error()
				if strings.Contains(msg, "element does not exist") ||
					strings.Contains(msg, "Error: Could not process rule: No such file or directory") {
					// Element was already gone — keep going.
					continue
				}
				return delErr
			}
		}
		if delErr := b.blockListIP.DeleteIP(ip); delErr != nil {
			if strings.Contains(delErr.Error(), "element does not exist") {
				return nil
			}
			return delErr
		}
		return nil
	})
	if err != nil {
		// Best effort: drop the DB records even though the firewall
		// cleanup did not finish; the error is deliberately ignored so
		// the original failure is the one reported.
		_ = b.blockingRepository.Clear()
		return err
	}
	return b.blockingRepository.Clear()
}
// ClearDBData removes every blocking record from the database without
// touching the nftables sets themselves.
func (b *blocking) ClearDBData() error {
	return b.blockingRepository.Clear()
}
// removeIPWithPorts deletes every (ip, port) element from the per-port
// firewall block list, tolerating elements that nftables has already
// dropped (expired timeout or prior delete).
func (b *blocking) removeIPWithPorts(ip net.IP, l4Ports []types.L4Port) error {
	for _, port := range l4Ports {
		err := b.blockListIPWithPort.DeleteIP(ip, port)
		if err == nil {
			continue
		}
		msg := err.Error()
		if strings.Contains(msg, "element does not exist") ||
			strings.Contains(msg, "Error: Could not process rule: No such file or directory") {
			continue
		}
		return err
	}
	return nil
}

View File

@@ -0,0 +1,41 @@
package chain
import (
nft "git.kor-elf.net/kor-elf-shield/go-nftables-client"
nftChain "git.kor-elf.net/kor-elf-shield/go-nftables-client/chain"
"git.kor-elf.net/kor-elf-shield/go-nftables-client/family"
)
type AfterLocalInput interface {
AddRule(expr ...string) error
AddRuleIn(AddRuleFunc func(expr ...string) error) error
}
type afterLocalInput struct {
nft nft.NFT
family family.Type
table string
chain string
}
func newAfterLocalInput(nft nft.NFT, family family.Type, table string) (LocalInput, error) {
chain := "after-local-input"
if err := nft.Chain().Add(family, table, chain, nftChain.TypeNone); err != nil {
return nil, err
}
return &afterLocalInput{
nft: nft,
family: family,
table: table,
chain: chain,
}, nil
}
func (l *afterLocalInput) AddRule(expr ...string) error {
return l.nft.Rule().Add(l.family, l.table, l.chain, expr...)
}
func (l *afterLocalInput) AddRuleIn(AddRuleFunc func(expr ...string) error) error {
return AddRuleFunc("iifname != \"lo\" counter jump " + l.chain)
}

View File

@@ -0,0 +1,41 @@
package chain
import (
nft "git.kor-elf.net/kor-elf-shield/go-nftables-client"
nftChain "git.kor-elf.net/kor-elf-shield/go-nftables-client/chain"
"git.kor-elf.net/kor-elf-shield/go-nftables-client/family"
)
type BeforeLocalInput interface {
AddRule(expr ...string) error
AddRuleIn(AddRuleFunc func(expr ...string) error) error
}
type beforeLocalInput struct {
nft nft.NFT
family family.Type
table string
chain string
}
func newBeforeLocalInput(nft nft.NFT, family family.Type, table string) (LocalInput, error) {
chain := "before-local-input"
if err := nft.Chain().Add(family, table, chain, nftChain.TypeNone); err != nil {
return nil, err
}
return &beforeLocalInput{
nft: nft,
family: family,
table: table,
chain: chain,
}, nil
}
func (l *beforeLocalInput) AddRule(expr ...string) error {
return l.nft.Rule().Add(l.family, l.table, l.chain, expr...)
}
func (l *beforeLocalInput) AddRuleIn(AddRuleFunc func(expr ...string) error) error {
return AddRuleFunc("iifname != \"lo\" counter jump " + l.chain)
}

View File

@@ -0,0 +1,59 @@
package block

import (
	"fmt"

	nft "git.kor-elf.net/kor-elf-shield/go-nftables-client"
	"git.kor-elf.net/kor-elf-shield/go-nftables-client/family"
)

// List is a named nftables set whose string elements can be added and
// deleted individually.
type List interface {
	Name() string
	AddElement(element string) error
	DeleteElement(element string) error
}

type list struct {
	nft    nft.NFT
	family family.Type
	table  string
	name   string
}

// newList creates the nftables set described by params inside the given
// table and returns a handle for element management.
func newList(nftClient nft.NFT, setFamily family.Type, table string, name string, params string) (List, error) {
	if err := nftClient.Command().Run("add set", setFamily.String(), table, name, "{ "+params+" }"); err != nil {
		return nil, err
	}
	return &list{
		nft:    nftClient,
		family: setFamily,
		table:  table,
		name:   name,
	}, nil
}

// Name returns the nftables set name.
func (l *list) Name() string {
	return l.name
}

// AddElement adds one element expression to the set.
func (l *list) AddElement(element string) error {
	return l.nft.Command().Run("add element", l.family.String(), l.table, l.name, fmt.Sprintf("{ %s }", element))
}

// DeleteElement removes one element expression from the set.
func (l *list) DeleteElement(element string) error {
	return l.nft.Command().Run("delete element", l.family.String(), l.table, l.name, fmt.Sprintf("{ %s }", element))
}

View File

@@ -0,0 +1,87 @@
package block

import (
	"fmt"
	"net"
	"strings"

	nft "git.kor-elf.net/kor-elf-shield/go-nftables-client"
	"git.kor-elf.net/kor-elf-shield/go-nftables-client/family"
)

// ListIP is a pair of nftables sets (IPv4 and IPv6) holding blocked IP
// addresses, each optionally carrying an nftables timeout.
type ListIP interface {
	// AddIP Add an IP address to the list.
	AddIP(addr net.IP, banSeconds uint32) error
	// DeleteIP Delete an IP address from the list.
	DeleteIP(addr net.IP) error
	// AddRuleToChain Add a rule to the parent chain.
	AddRuleToChain(chainAddRuleFunc func(expr ...string) error, action string) error
}

type listIP struct {
	listIPv4 List
	listIPv6 List
}

// NewListIP creates the interval+timeout sets "<name>_ip4" and
// "<name>_ip6" in the given table and returns a combined handle.
func NewListIP(nft nft.NFT, family family.Type, table string, name string) (ListIP, error) {
	params := "type ipv4_addr; flags interval, timeout;"
	listName := name + "_ip4"
	listIPv4, err := newList(nft, family, table, listName, params)
	if err != nil {
		return nil, err
	}
	params = "type ipv6_addr; flags interval, timeout;"
	listName = name + "_ip6"
	listIPv6, err := newList(nft, family, table, listName, params)
	if err != nil {
		return nil, err
	}
	return &listIP{
		listIPv4: listIPv4,
		listIPv6: listIPv6,
	}, nil
}

// AddIP adds the address to the matching (v4/v6) set; banSeconds > 0
// attaches an nftables timeout so the element expires on its own.
func (l *listIP) AddIP(addr net.IP, banSeconds uint32) error {
	// Fix: mirror DeleteIP's nil guard; addr.String() on a nil IP would
	// otherwise emit the invalid set element "<nil>".
	if addr == nil {
		return fmt.Errorf("IP address cannot be nil")
	}
	el := []string{addr.String()}
	if banSeconds > 0 {
		el = append(el, "timeout", fmt.Sprintf("%ds", banSeconds))
	}
	element := strings.Join(el, " ")
	if addr.To4() != nil {
		return l.listIPv4.AddElement(element)
	}
	return l.listIPv6.AddElement(element)
}

// DeleteIP removes the address from the matching (v4/v6) set.
func (l *listIP) DeleteIP(addr net.IP) error {
	if addr == nil {
		return fmt.Errorf("IP address cannot be nil")
	}
	if addr.To4() != nil {
		return l.listIPv4.DeleteElement(addr.String())
	}
	return l.listIPv6.DeleteElement(addr.String())
}

// AddRuleToChain installs, via chainAddRuleFunc, one rule per set that
// applies action to any source address contained in the set.
func (l *listIP) AddRuleToChain(chainAddRuleFunc func(expr ...string) error, action string) error {
	rule := "ip saddr @" + l.listIPv4.Name() + " " + action
	if err := chainAddRuleFunc(rule); err != nil {
		return err
	}
	rule = "ip6 saddr @" + l.listIPv6.Name() + " " + action
	if err := chainAddRuleFunc(rule); err != nil {
		return err
	}
	return nil
}

View File

@@ -0,0 +1,102 @@
package block

import (
	"fmt"
	"net"
	"strings"

	nft "git.kor-elf.net/kor-elf-shield/go-nftables-client"
	"git.kor-elf.net/kor-elf-shield/go-nftables-client/family"

	"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/types"
)

// ListIPWithPort is a pair of nftables concatenation sets (IPv4 and
// IPv6) holding blocked (address, protocol, port) tuples, each
// optionally carrying an nftables timeout.
type ListIPWithPort interface {
	// AddIP Add an IP address to the list.
	AddIP(addr net.IP, ports []types.L4Port, banSeconds uint32) error
	// DeleteIP Delete an IP address from the list.
	DeleteIP(addr net.IP, port types.L4Port) error
	// AddRuleToChain Add a rule to the parent chain.
	AddRuleToChain(chainAddRuleFunc func(expr ...string) error, action string) error
}

type listIPWithPort struct {
	listIPv4 List
	listIPv6 List
}

// NewListIPWithPort creates the interval+timeout concatenation sets
// "<name>_ip4" and "<name>_ip6" in the given table and returns a
// combined handle.
func NewListIPWithPort(nft nft.NFT, family family.Type, table string, name string) (ListIPWithPort, error) {
	params := "type ipv4_addr . inet_proto . inet_service; flags interval, timeout;"
	listName := name + "_ip4"
	listIPv4, err := newList(nft, family, table, listName, params)
	if err != nil {
		return nil, err
	}
	params = "type ipv6_addr . inet_proto . inet_service; flags interval, timeout;"
	listName = name + "_ip6"
	listIPv6, err := newList(nft, family, table, listName, params)
	if err != nil {
		return nil, err
	}
	return &listIPWithPort{
		listIPv4: listIPv4,
		listIPv6: listIPv6,
	}, nil
}

// AddIP adds one (addr, protocol, port) tuple per port to the matching
// (v4/v6) set; banSeconds > 0 attaches an nftables timeout to each
// element so it expires on its own.
func (l *listIPWithPort) AddIP(addr net.IP, ports []types.L4Port, banSeconds uint32) error {
	// Fix: mirror DeleteIP's nil guard; addr.String() on a nil IP would
	// otherwise emit invalid "<nil> . proto . port" set elements.
	if addr == nil {
		return fmt.Errorf("IP address cannot be nil")
	}
	if len(ports) == 0 {
		return fmt.Errorf("ports is empty")
	}
	var elements []string
	for _, port := range ports {
		el := []string{fmt.Sprintf("%s . %s . %d", addr.String(), port.ProtocolString(), port.Number())}
		if banSeconds > 0 {
			el = append(el, "timeout", fmt.Sprintf("%ds", banSeconds))
		}
		elements = append(elements, strings.Join(el, " "))
	}
	element := strings.Join(elements, ",")
	if addr.To4() != nil {
		return l.listIPv4.AddElement(element)
	}
	return l.listIPv6.AddElement(element)
}

// DeleteIP removes one (addr, protocol, port) tuple from the matching
// (v4/v6) set.
func (l *listIPWithPort) DeleteIP(addr net.IP, port types.L4Port) error {
	if addr == nil {
		return fmt.Errorf("IP address cannot be nil")
	}
	if port.ToString() == "" {
		return fmt.Errorf("port cannot be empty")
	}
	element := fmt.Sprintf("%s . %s . %d", addr.String(), port.ProtocolString(), port.Number())
	if addr.To4() != nil {
		return l.listIPv4.DeleteElement(element)
	}
	return l.listIPv6.DeleteElement(element)
}

// AddRuleToChain installs, via chainAddRuleFunc, one rule per set that
// applies action when (saddr, l4proto, dport) matches a set element.
func (l *listIPWithPort) AddRuleToChain(chainAddRuleFunc func(expr ...string) error, action string) error {
	rule := "ip saddr . meta l4proto . th dport @" + l.listIPv4.Name() + " " + action
	if err := chainAddRuleFunc(rule); err != nil {
		return err
	}
	rule = "ip6 saddr . meta l4proto . th dport @" + l.listIPv6.Name() + " " + action
	if err := chainAddRuleFunc(rule); err != nil {
		return err
	}
	return nil
}

View File

@@ -0,0 +1,67 @@
package chain

import (
	"encoding/json"

	nft "git.kor-elf.net/kor-elf-shield/go-nftables-client"
	"git.kor-elf.net/kor-elf-shield/go-nftables-client/family"
)

// Chain is a handle to a single nftables chain supporting rule
// management and inspection.
type Chain interface {
	AddRule(expr ...string) error
	ListRules() ([]Rule, error)
	RemoveRuleByHandle(handle uint64) error
	Clear() error
}

type chain struct {
	nft    nft.NFT
	family family.Type
	table  string
	chain  string
}

// NftOutput mirrors the top-level JSON document emitted by `nft -j`.
type NftOutput struct {
	Nftables []NftElement `json:"nftables"`
}

// NftElement is one entry of the `nftables` array; only rule entries
// are of interest here.
type NftElement struct {
	Rule *Rule `json:"rule,omitempty"`
}

// Rule carries the subset of nftables rule fields this package uses.
type Rule struct {
	Handle  uint64 `json:"handle"`
	Comment string `json:"comment"`
}

// AddRule appends a rule to this chain.
func (c *chain) AddRule(expr ...string) error {
	return c.nft.Rule().Add(c.family, c.table, c.chain, expr...)
}

// ListRules lists the chain with handles ("-a") in JSON form ("-j") and
// returns every rule found in the output.
func (c *chain) ListRules() ([]Rule, error) {
	jsonData, err := c.nft.Command().RunWithOutput("-a", "-j", "list", "chain", c.family.String(), c.table, c.chain)
	if err != nil {
		return nil, err
	}
	var parsed NftOutput
	if err := json.Unmarshal([]byte(jsonData), &parsed); err != nil {
		return nil, err
	}
	var rules []Rule
	for _, element := range parsed.Nftables {
		if element.Rule == nil {
			continue
		}
		rules = append(rules, *element.Rule)
	}
	return rules, nil
}

// RemoveRuleByHandle deletes the rule identified by its nftables handle.
func (c *chain) RemoveRuleByHandle(handle uint64) error {
	return c.nft.Rule().Delete(c.family, c.table, c.chain, handle)
}

// Clear removes every rule from this chain.
func (c *chain) Clear() error {
	return c.nft.Chain().Clear(c.family, c.table, c.chain)
}

View File

@@ -1,28 +1,48 @@
package chain
import (
"strings"
nft "git.kor-elf.net/kor-elf-shield/go-nftables-client"
nftChain "git.kor-elf.net/kor-elf-shield/go-nftables-client/chain"
nftFamily "git.kor-elf.net/kor-elf-shield/go-nftables-client/family"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/chain/block"
)
type Chains interface {
NewPacketFilter(enable bool) error
PacketFilter() PacketFilter
NewInput(chain string, defaultAllow bool) error
NewInput(chain string, defaultAllow bool, priority int) error
Input() Input
NewOutput(chain string, defaultAllow bool) error
NewOutput(chain string, defaultAllow bool, priority int) error
Output() Output
NewForward(chain string, defaultAllow bool) error
NewForward(chain string, defaultAllow bool, priority int) error
Forward() Forward
NewBeforeLocalInput() error
BeforeLocalInput() BeforeLocalInput
NewLocalInput() error
LocalInput() LocalInput
NewAfterLocalInput() error
AfterLocalInput() AfterLocalInput
NewLocalOutput() error
LocalOutput() LocalOutput
NewLocalForward() error
LocalForward() LocalForward
ClearRules() error
NewNoneChain(chain string) (Chain, error)
NewChain(chain string, baseChain nftChain.ChainOptions) (Chain, error)
NewBlockListIP(name string) (block.ListIP, error)
NewBlockListIPWithPort(name string) (block.ListIPWithPort, error)
}
type chains struct {
@@ -31,8 +51,12 @@ type chains struct {
forward Forward
packetFilter PacketFilter
localInput LocalInput
localOutput LocalOutput
beforeLocalInput BeforeLocalInput
localInput LocalInput
afterLocalInput AfterLocalInput
localOutput LocalOutput
localForward LocalForward
family nftFamily.Type
table string
@@ -40,11 +64,12 @@ type chains struct {
}
func NewChains(nft nft.NFT, table string) (Chains, error) {
if err := nft.Clear(); err != nil {
family := nftFamily.INET
if err := clearRules(nft, family, table); err != nil {
return nil, err
}
family := nftFamily.INET
if err := nft.Table().Add(family, table); err != nil {
return nil, err
}
@@ -70,8 +95,8 @@ func (c *chains) PacketFilter() PacketFilter {
return c.packetFilter
}
func (c *chains) NewInput(chain string, defaultAllow bool) error {
input, err := newInput(c.nft, c.family, c.table, chain, defaultAllow)
func (c *chains) NewInput(chain string, defaultAllow bool, priority int) error {
input, err := newInput(c.nft, c.family, c.table, chain, defaultAllow, priority)
if err != nil {
return err
}
@@ -84,8 +109,8 @@ func (c *chains) Input() Input {
return c.input
}
func (c *chains) NewOutput(chain string, defaultAllow bool) error {
output, err := newOutput(c.nft, c.family, c.table, chain, defaultAllow)
func (c *chains) NewOutput(chain string, defaultAllow bool, priority int) error {
output, err := newOutput(c.nft, c.family, c.table, chain, defaultAllow, priority)
if err != nil {
return err
}
@@ -98,8 +123,8 @@ func (c *chains) Output() Output {
return c.output
}
func (c *chains) NewForward(chain string, defaultAllow bool) error {
forward, err := newForward(c.nft, c.family, c.table, chain, defaultAllow)
func (c *chains) NewForward(chain string, defaultAllow bool, priority int) error {
forward, err := newForward(c.nft, c.family, c.table, chain, defaultAllow, priority)
if err != nil {
return err
}
@@ -112,6 +137,19 @@ func (c *chains) Forward() Forward {
return c.forward
}
func (c *chains) NewBeforeLocalInput() error {
newChain, err := newBeforeLocalInput(c.nft, c.family, c.table)
if err != nil {
return err
}
c.beforeLocalInput = newChain
return nil
}
func (c *chains) BeforeLocalInput() BeforeLocalInput {
return c.beforeLocalInput
}
func (c *chains) NewLocalInput() error {
localInput, err := newLocalInput(c.nft, c.family, c.table)
if err != nil {
@@ -125,6 +163,19 @@ func (c *chains) LocalInput() LocalInput {
return c.localInput
}
func (c *chains) NewAfterLocalInput() error {
newChain, err := newAfterLocalInput(c.nft, c.family, c.table)
if err != nil {
return err
}
c.afterLocalInput = newChain
return nil
}
func (c *chains) AfterLocalInput() AfterLocalInput {
return c.afterLocalInput
}
func (c *chains) NewLocalOutput() error {
localOutput, err := newLocalOutput(c.nft, c.family, c.table)
if err != nil {
@@ -137,3 +188,65 @@ func (c *chains) NewLocalOutput() error {
func (c *chains) LocalOutput() LocalOutput {
return c.localOutput
}
func (c *chains) NewLocalForward() error {
localForward, err := newLocalForward(c.nft, c.family, c.table)
if err != nil {
return err
}
c.localForward = localForward
return nil
}
func (c *chains) LocalForward() LocalForward {
return c.localForward
}
func (c *chains) ClearRules() error {
return clearRules(c.nft, c.family, c.table)
}
func (c *chains) NewNoneChain(chainName string) (Chain, error) {
return c.NewChain(chainName, nftChain.TypeNone)
}
func (c *chains) NewChain(chainName string, baseChain nftChain.ChainOptions) (Chain, error) {
if err := c.nft.Chain().Add(c.family, c.table, chainName, baseChain); err != nil {
return nil, err
}
return &chain{
nft: c.nft,
family: c.family,
table: c.table,
chain: chainName,
}, nil
}
func (c *chains) NewBlockListIP(name string) (block.ListIP, error) {
blockList, err := block.NewListIP(c.nft, c.family, c.table, name)
if err != nil {
return nil, err
}
return blockList, nil
}
func (c *chains) NewBlockListIPWithPort(name string) (block.ListIPWithPort, error) {
blockList, err := block.NewListIPWithPort(c.nft, c.family, c.table, name)
if err != nil {
return nil, err
}
return blockList, nil
}
func clearRules(nft nft.NFT, family nftFamily.Type, table string) error {
if err := nft.Table().Delete(family, table); err != nil {
if !strings.Contains(string(err.Error()), "delete table "+family.String()+" "+table) {
return err
}
}
return nil
}

View File

@@ -17,7 +17,7 @@ type forward struct {
chain string
}
func newForward(nft nft.NFT, family family.Type, table string, chain string, defaultAllow bool) (Forward, error) {
func newForward(nft nft.NFT, family family.Type, table string, chain string, defaultAllow bool, priority int) (Forward, error) {
policy := nftChain.PolicyDrop
if defaultAllow {
policy = nftChain.PolicyAccept
@@ -26,7 +26,7 @@ func newForward(nft nft.NFT, family family.Type, table string, chain string, def
baseChain := nftChain.BaseChainOptions{
Type: nftChain.TypeFilter,
Hook: nftChain.HookForward,
Priority: 0,
Priority: int32(priority),
Policy: policy,
Device: "",
}

View File

@@ -17,7 +17,7 @@ type input struct {
chain string
}
func newInput(nft nft.NFT, family family.Type, table string, chain string, defaultAllow bool) (Input, error) {
func newInput(nft nft.NFT, family family.Type, table string, chain string, defaultAllow bool, priority int) (Input, error) {
policy := nftChain.PolicyDrop
if defaultAllow {
policy = nftChain.PolicyAccept
@@ -26,7 +26,7 @@ func newInput(nft nft.NFT, family family.Type, table string, chain string, defau
baseChain := nftChain.BaseChainOptions{
Type: nftChain.TypeFilter,
Hook: nftChain.HookInput,
Priority: 0,
Priority: int32(priority),
Policy: policy,
Device: "",
}

View File

@@ -0,0 +1,41 @@
package chain
import (
nft "git.kor-elf.net/kor-elf-shield/go-nftables-client"
nftChain "git.kor-elf.net/kor-elf-shield/go-nftables-client/chain"
"git.kor-elf.net/kor-elf-shield/go-nftables-client/family"
)
type LocalForward interface {
AddRule(expr ...string) error
AddRuleIn(AddRuleFunc func(expr ...string) error) error
}
type localForward struct {
nft nft.NFT
family family.Type
table string
chain string
}
func newLocalForward(nft nft.NFT, family family.Type, table string) (LocalForward, error) {
chain := "local-forward"
if err := nft.Chain().Add(family, table, chain, nftChain.TypeNone); err != nil {
return nil, err
}
return &localForward{
nft: nft,
family: family,
table: table,
chain: chain,
}, nil
}
func (l *localForward) AddRule(expr ...string) error {
return l.nft.Rule().Add(l.family, l.table, l.chain, expr...)
}
func (l *localForward) AddRuleIn(AddRuleFunc func(expr ...string) error) error {
return AddRuleFunc("iifname != \"lo\" counter jump " + l.chain)
}

View File

@@ -32,10 +32,10 @@ func newLocalInput(nft nft.NFT, family family.Type, table string) (LocalInput, e
}, nil
}
func (c *localInput) AddRule(expr ...string) error {
return c.nft.Rule().Add(c.family, c.table, c.chain, expr...)
func (l *localInput) AddRule(expr ...string) error {
return l.nft.Rule().Add(l.family, l.table, l.chain, expr...)
}
func (f *localInput) AddRuleIn(AddRuleFunc func(expr ...string) error) error {
return AddRuleFunc("iifname != \"lo\" counter jump " + f.chain)
func (l *localInput) AddRuleIn(AddRuleFunc func(expr ...string) error) error {
return AddRuleFunc("iifname != \"lo\" counter jump " + l.chain)
}

View File

@@ -32,10 +32,10 @@ func newLocalOutput(nft nft.NFT, family family.Type, table string) (LocalOutput,
}, nil
}
func (c *localOutput) AddRule(expr ...string) error {
return c.nft.Rule().Add(c.family, c.table, c.chain, expr...)
func (l *localOutput) AddRule(expr ...string) error {
return l.nft.Rule().Add(l.family, l.table, l.chain, expr...)
}
func (f *localOutput) AddRuleOut(AddRuleFunc func(expr ...string) error) error {
return AddRuleFunc("oifname != \"lo\" counter jump " + f.chain)
func (l *localOutput) AddRuleOut(AddRuleFunc func(expr ...string) error) error {
return AddRuleFunc("oifname != \"lo\" counter jump " + l.chain)
}

View File

@@ -17,7 +17,7 @@ type output struct {
chain string
}
func newOutput(nft nft.NFT, family family.Type, table string, chain string, defaultAllow bool) (Output, error) {
func newOutput(nft nft.NFT, family family.Type, table string, chain string, defaultAllow bool, priority int) (Output, error) {
policy := nftChain.PolicyDrop
if defaultAllow {
policy = nftChain.PolicyAccept
@@ -26,7 +26,7 @@ func newOutput(nft nft.NFT, family family.Type, table string, chain string, defa
baseChain := nftChain.BaseChainOptions{
Type: nftChain.TypeFilter,
Hook: nftChain.HookOutput,
Priority: 0,
Priority: int32(priority),
Policy: policy,
Device: "",
}

View File

@@ -1,6 +1,8 @@
package firewall
import "fmt"
import (
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/types"
)
type Config struct {
InPorts []ConfigPort
@@ -13,11 +15,13 @@ type Config struct {
}
type ConfigOptions struct {
ClearMode ClearMode
SavesRules bool
SavesRulesPath string
DnsStrict bool
DnsStrictNs bool
PacketFilter bool
DockerSupport bool
}
type ConfigMetadata struct {
@@ -31,27 +35,12 @@ type ConfigPolicy struct {
DefaultAllowInput bool
DefaultAllowOutput bool
DefaultAllowForward bool
InputDrop PolicyDrop
OutputDrop PolicyDrop
ForwardDrop PolicyDrop
}
type PolicyDrop int8
const (
Drop PolicyDrop = iota + 1
Reject
)
func (p PolicyDrop) String() string {
switch p {
case Drop:
return "drop"
case Reject:
return "reject"
default:
return "drop"
}
InputDrop types.PolicyDrop
InputPriority int
OutputDrop types.PolicyDrop
OutputPriority int
ForwardDrop types.PolicyDrop
ForwardPriority int
}
type ConfigIP4 struct {
@@ -72,74 +61,22 @@ type ConfigIP6 struct {
}
type ConfigPort struct {
Number uint16
Protocol Protocol
Action Action
Port types.L4Port
Action types.Action
LimitRate string
}
type ConfigIP struct {
IP string
OnlyIP bool // Port is not taken into account
Port uint16
Action Action
Protocol Protocol
Port types.L4Port
Action types.Action
LimitRate string
}
type Action int8
type ClearMode int8
const (
ActionAccept Action = iota + 1
ActionReject
ActionDrop
ClearModeGlobal ClearMode = iota + 1
ClearModeOwn
)
func (a Action) String() string {
switch a {
case ActionAccept:
return "accept"
case ActionReject:
return "reject"
case ActionDrop:
return "drop"
default:
return "drop"
}
}
type Protocol int8
const (
ProtocolTCP Protocol = iota + 1
ProtocolUDP
)
func (p Protocol) String() string {
switch p {
case ProtocolTCP:
return "tcp"
case ProtocolUDP:
return "udp"
default:
return fmt.Sprintf("Protocol(%d)", p)
}
}
type Direction int8
const (
DirectionIn Direction = iota + 1
DirectionOut
)
func (d Direction) String() string {
switch d {
case DirectionIn:
return "in"
case DirectionOut:
return "out"
default:
return fmt.Sprintf("Direction(%d)", d)
}
}

View File

@@ -0,0 +1,72 @@
package firewall
import nftChain "git.kor-elf.net/kor-elf-shield/go-nftables-client/chain"
func (f *firewall) reloadDocker() error {
f.logger.Debug("Reload docker rules")
if err := f.reloadDockerPrerouting(); err != nil {
return err
}
return nil
}
func (f *firewall) reloadDockerPrerouting() error {
preroutingNat, err := f.chains.NewChain("prerouting_nat", nftChain.BaseChainOptions{
Type: nftChain.TypeNat,
Hook: nftChain.HookPrerouting,
Priority: -100,
Policy: nftChain.PolicyAccept,
Device: "",
})
if err != nil {
return err
}
if err := f.docker.NftChains().PreroutingNatJump(preroutingNat.AddRule); err != nil {
return err
}
preroutingFilter, err := f.chains.NewChain("prerouting_filter", nftChain.BaseChainOptions{
Type: nftChain.TypeFilter,
Hook: nftChain.HookPrerouting,
Priority: -300,
Policy: nftChain.PolicyAccept,
Device: "",
})
if err != nil {
return err
}
if err := f.docker.NftChains().PreroutingFilterJump(preroutingFilter.AddRule); err != nil {
return err
}
outputNat, err := f.chains.NewChain("output_nat", nftChain.BaseChainOptions{
Type: nftChain.TypeNat,
Hook: nftChain.HookOutput,
Priority: -100,
Policy: nftChain.PolicyAccept,
Device: "",
})
if err != nil {
return err
}
if err := f.docker.NftChains().OutputNatJump(outputNat.AddRule); err != nil {
return err
}
postroutingNat, err := f.chains.NewChain("postrouting_nat", nftChain.BaseChainOptions{
Type: nftChain.TypeNat,
Hook: nftChain.HookPostrouting,
Priority: 300,
Policy: nftChain.PolicyAccept,
Device: "",
})
if err != nil {
return err
}
if err := f.docker.NftChains().PostroutingNatJump(postroutingNat.AddRule); err != nil {
return err
}
return nil
}

View File

@@ -2,8 +2,11 @@ package firewall
import (
"fmt"
"net"
"os"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/docker_monitor"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/blocking"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/chain"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
@@ -19,35 +22,68 @@ type API interface {
// ClearRules Clear all rules.
ClearRules()
// BlockIP Block IP address.
BlockIP(blockIP blocking.BlockIP) (bool, error)
// BlockIPWithPorts Block IP address with ports.
BlockIPWithPorts(blockIP blocking.BlockIPWithPorts) (bool, error)
// UnblockAllIPs Unblock all IP addresses.
UnblockAllIPs() error
// UnblockIP Unblock IP address.
UnblockIP(ip net.IP) error
// ClearDBData Clear all data from DB
ClearDBData() error
// DockerSupport Return true if docker support
DockerSupport() bool
}
type firewall struct {
nft nftables.NFT
logger log.Logger
config *Config
chains chain.Chains
nft nftables.NFT
logger log.Logger
config *Config
blockingService blocking.API
chains chain.Chains
docker docker_monitor.Docker
}
func New(pathNFT string, logger log.Logger, config Config) (API, error) {
func New(pathNFT string, blockingService blocking.API, logger log.Logger, config Config, docker docker_monitor.Docker) (API, error) {
nft, err := nftables.NewWithPath(pathNFT)
if err != nil {
return nil, fmt.Errorf("failed to create nft client: %w %s", err, pathNFT)
}
return &firewall{
nft: nft,
logger: logger,
config: &config,
nft: nft,
logger: logger,
config: &config,
blockingService: blockingService,
docker: docker,
}, nil
}
func (f *firewall) Reload() error {
f.logger.Debug("Reload nftables rules")
if f.config.Options.ClearMode == ClearModeGlobal {
if err := f.nft.Clear(); err != nil {
return err
}
}
chains, err := chain.NewChains(f.nft, f.config.MetadataNaming.TableName)
if err != nil {
return err
}
f.chains = chains
if err := f.docker.NftReload(f.chains.NewNoneChain); err != nil {
return err
}
if err := f.chains.NewPacketFilter(f.config.Options.PacketFilter); err != nil {
return err
}
@@ -60,6 +96,15 @@ func (f *firewall) Reload() error {
if err := f.reloadForward(); err != nil {
return err
}
if f.config.Options.DockerSupport {
if err := f.reloadDocker(); err != nil {
return err
}
}
if err := f.reloadBlockList(); err != nil {
return err
}
f.logger.Debug("Reload nftables rules done")
return nil
@@ -67,12 +112,35 @@ func (f *firewall) Reload() error {
func (f *firewall) ClearRules() {
f.logger.Debug("Clear nftables rules")
if err := f.nft.Clear(); err != nil {
f.logger.Error(fmt.Sprintf("Failed to clear rules: %s", err))
switch f.config.Options.ClearMode {
case ClearModeGlobal:
if err := f.nft.Clear(); err != nil {
f.logger.Error(fmt.Sprintf("Failed to clear rules: %s", err))
}
break
case ClearModeOwn:
if err := f.chains.ClearRules(); err != nil {
f.logger.Error(fmt.Sprintf("Failed to clear rules: %s", err))
}
break
}
f.logger.Debug("Clear nftables rules done")
}
// UnblockAllIPs delegates to the blocking service, which removes every
// blocked IP from the firewall and clears the database records.
func (f *firewall) UnblockAllIPs() error {
	return f.blockingService.UnblockAllIPs()
}
// UnblockIP delegates to the blocking service, which removes the IP
// from the firewall block lists and from the database.
func (f *firewall) UnblockIP(ip net.IP) error {
	return f.blockingService.UnblockIP(ip)
}
// ClearDBData delegates to the blocking service, which clears the
// blocking records from the database (firewall rules are untouched).
func (f *firewall) ClearDBData() error {
	return f.blockingService.ClearDBData()
}
func (f *firewall) SavesRules() {
if !f.config.Options.SavesRules {
f.logger.Debug("SavesRules is false, skip")
@@ -100,3 +168,25 @@ func (f *firewall) SavesRules() {
f.logger.Info("Save nftables rules")
}
// BlockIP delegates to the blocking service and logs a warning when the
// operation reports an error. Both the bool and the error are passed
// through unchanged: the bool can be true even on error (IP blocked in
// the firewall but not recorded in the database).
func (f *firewall) BlockIP(blockIP blocking.BlockIP) (bool, error) {
	isBanned, err := f.blockingService.BlockIP(blockIP)
	if err != nil {
		f.logger.Warn(fmt.Sprintf("Failed to block ip %s: %s", blockIP.IP.String(), err))
	}
	return isBanned, err
}
// BlockIPWithPorts delegates to the blocking service and logs a warning
// when the operation reports an error. Both the bool and the error are
// passed through unchanged: the bool can be true even on error (IP
// blocked in the firewall but not recorded in the database).
func (f *firewall) BlockIPWithPorts(blockIP blocking.BlockIPWithPorts) (bool, error) {
	isBanned, err := f.blockingService.BlockIPWithPorts(blockIP)
	if err != nil {
		f.logger.Warn(fmt.Sprintf("Failed to block ip %s: %s", blockIP.IP.String(), err))
	}
	return isBanned, err
}
// DockerSupport reports whether Docker integration is enabled in the
// firewall configuration.
func (f *firewall) DockerSupport() bool {
	return f.config.Options.DockerSupport
}

View File

@@ -0,0 +1,25 @@
package firewall

// reloadBlockList re-creates the two firewall block sets (plain IP and
// IP+port), hooks a drop rule for each into the before-local-input
// chain, and hands both sets to the blocking service so it can
// re-populate them from the database.
func (f *firewall) reloadBlockList() error {
	addRule := f.chains.BeforeLocalInput().AddRule
	listBlockedIP, err := f.chains.NewBlockListIP("blocked_ip")
	if err != nil {
		return err
	}
	if err := listBlockedIP.AddRuleToChain(addRule, "drop"); err != nil {
		return err
	}
	listBlockedIPWithPort, err := f.chains.NewBlockListIPWithPort("blocked_ip_port")
	if err != nil {
		return err
	}
	if err := listBlockedIPWithPort.AddRuleToChain(addRule, "drop"); err != nil {
		return err
	}
	return f.blockingService.NftReload(listBlockedIP, listBlockedIPWithPort)
}

View File

@@ -1,13 +1,25 @@
package firewall
import "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/types"
func (f *firewall) reloadForward() error {
f.logger.Debug("Reloading forward chain")
err := f.chains.NewForward(f.config.MetadataNaming.ChainForwardName, f.config.Policy.DefaultAllowForward)
err := f.chains.NewForward(f.config.MetadataNaming.ChainForwardName, f.config.Policy.DefaultAllowForward, f.config.Policy.ForwardPriority)
if err != nil {
return err
}
chain := f.chains.Forward()
if err := f.reloadForwardAddIPs(); err != nil {
return err
}
if f.config.Options.DockerSupport {
if err := f.docker.NftChains().ForwardFilterJump(chain.AddRule); err != nil {
return err
}
}
if f.config.Policy.DefaultAllowForward == false {
drop := f.config.Policy.ForwardDrop.String()
if err := chain.AddRule(drop); err != nil {
@@ -17,3 +29,53 @@ func (f *firewall) reloadForward() error {
return nil
}
// reloadForwardAddIPs creates the local-forward chain, jumps into it
// from the forward chain, and installs a rule for every configured
// inbound IP whose action is drop or reject (IPv4 always; IPv6 only
// when enabled).
func (f *firewall) reloadForwardAddIPs() error {
	if err := f.chains.NewLocalForward(); err != nil {
		return err
	}
	localForward := f.chains.LocalForward()
	if err := localForward.AddRuleIn(f.chains.Forward().AddRule); err != nil {
		return err
	}
	addBlockedIPs := func(ipConfigs []ConfigIP, ipMatch string) error {
		for _, ipConfig := range ipConfigs {
			// Only blocking actions are mirrored into the forward path.
			if ipConfig.Action != types.ActionDrop && ipConfig.Action != types.ActionReject {
				continue
			}
			if err := forwardAddIP(localForward.AddRule, ipConfig, ipMatch); err != nil {
				return err
			}
		}
		return nil
	}
	if err := addBlockedIPs(f.config.IP4.InIPs, "ip"); err != nil {
		return err
	}
	if !f.config.IP6.Enable {
		return nil
	}
	return addBlockedIPs(f.config.IP6.InIPs, "ip6")
}
// forwardAddIP adds a single forward-chain rule matching traffic whose
// source is config.IP (ipMatch is "ip" or "ip6") on any non-loopback
// interface, counting packets and applying the configured action.
//
// Ports are deliberately not matched here: during routing the port can be
// rewritten, so a port-qualified IP-blocking rule would stop matching.
func forwardAddIP(addRuleFunc func(expr ...string) error, config ConfigIP, ipMatch string) error {
	rule := ipMatch + " saddr " + config.IP + " iifname != \"lo\"" +
		" counter " + config.Action.String()
	return addRuleFunc(rule)
}

View File

@@ -3,14 +3,13 @@ package firewall
import (
"fmt"
"net"
"strconv"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/pkg"
)
func (f *firewall) reloadInput() error {
f.logger.Debug("Reloading input chain")
err := f.chains.NewInput(f.config.MetadataNaming.ChainInputName, f.config.Policy.DefaultAllowInput)
err := f.chains.NewInput(f.config.MetadataNaming.ChainInputName, f.config.Policy.DefaultAllowInput, f.config.Policy.InputPriority)
if err != nil {
return err
}
@@ -24,10 +23,24 @@ func (f *firewall) reloadInput() error {
return err
}
if err := f.chains.NewBeforeLocalInput(); err != nil {
return err
}
if err := f.chains.BeforeLocalInput().AddRuleIn(chain.AddRule); err != nil {
return err
}
if err := f.reloadInputAddIPs(); err != nil {
return err
}
if err := f.chains.NewAfterLocalInput(); err != nil {
return err
}
if err := f.chains.AfterLocalInput().AddRuleIn(chain.AddRule); err != nil {
return err
}
if err := f.chains.PacketFilter().AddRuleIn(chain.AddRule); err != nil {
return err
}
@@ -201,8 +214,8 @@ func (f *firewall) reloadInputICMP6Strict() error {
func (f *firewall) reloadInputPorts() error {
chain := f.chains.Input()
for _, port := range f.config.InPorts {
protocol := port.Protocol.String()
number := strconv.Itoa(int(port.Number))
protocol := port.Port.ProtocolString()
number := port.Port.NumberString()
baseRule := "iifname != \"lo\" meta l4proto " + protocol + " ct state new " + protocol + " dport " + number
@@ -256,7 +269,7 @@ func inputAddIP(addRuleFunc func(expr ...string) error, config ConfigIP, ipMatch
rule := ipMatch + " saddr " + config.IP + " iifname != \"lo\""
if !config.OnlyIP {
rule += " " + config.Protocol.String() + " dport " + strconv.Itoa(int(config.Port))
rule += " " + config.Port.ProtocolString() + " dport " + config.Port.NumberString()
}
if config.LimitRate != "" {
rule += " limit rate " + config.LimitRate

View File

@@ -3,14 +3,13 @@ package firewall
import (
"fmt"
"net"
"strconv"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/pkg"
)
func (f *firewall) reloadOutput() error {
f.logger.Debug("Reloading output chain")
err := f.chains.NewOutput(f.config.MetadataNaming.ChainOutputName, f.config.Policy.DefaultAllowOutput)
err := f.chains.NewOutput(f.config.MetadataNaming.ChainOutputName, f.config.Policy.DefaultAllowOutput, f.config.Policy.OutputPriority)
if err != nil {
return err
}
@@ -176,8 +175,8 @@ func (f *firewall) reloadOutputICMPAfter() error {
func (f *firewall) reloadOutputPorts() error {
chain := f.chains.Output()
for _, port := range f.config.OutPorts {
protocol := port.Protocol.String()
number := strconv.Itoa(int(port.Number))
protocol := port.Port.ProtocolString()
number := port.Port.NumberString()
baseRule := "oifname != \"lo\" meta l4proto " + protocol + " ct state new " + protocol + " dport " + number
if port.LimitRate != "" {
@@ -231,7 +230,7 @@ func outputAddIP(addRuleFunc func(expr ...string) error, config ConfigIP, ipMatc
rule := ipMatch + " daddr " + config.IP + " oifname != \"lo\""
if !config.OnlyIP {
rule += " " + config.Protocol.String() + " dport " + strconv.Itoa(int(config.Port))
rule += " " + config.Port.ProtocolString() + " dport " + config.Port.NumberString()
}
if config.LimitRate != "" {
rule += " limit rate " + config.LimitRate

View File

@@ -0,0 +1,43 @@
package types
import (
"errors"
"strconv"
)
// L4Port describes a transport-layer (L4) port bound to a specific
// protocol, with string helpers for building nft rules and for display.
type L4Port interface {
	// Number returns the numeric port value.
	Number() uint16
	// NumberString returns the port number in decimal form.
	NumberString() string
	// ProtocolString returns the protocol name (e.g. "tcp" or "udp").
	ProtocolString() string
	// ToString returns the combined "number/protocol" form, e.g. "80/tcp".
	ToString() string
}
// l4Port is the default L4Port implementation. The protocol is stored
// pre-rendered as its string form by NewL4Port.
type l4Port struct {
	number   uint16
	protocol string
}
// NewL4Port builds an L4Port from a port number and protocol. Only
// ProtocolTCP and ProtocolUDP are accepted; any other protocol value
// yields an "invalid protocol" error.
func NewL4Port(number uint16, protocol Protocol) (L4Port, error) {
	switch protocol {
	case ProtocolTCP, ProtocolUDP:
		return &l4Port{number: number, protocol: protocol.String()}, nil
	default:
		return nil, errors.New("invalid protocol")
	}
}
// Number returns the raw numeric port value.
func (p *l4Port) Number() uint16 {
	return p.number
}
// NumberString renders the port number as a decimal string.
func (p *l4Port) NumberString() string {
	return strconv.Itoa(int(p.Number()))
}
// ProtocolString returns the stored protocol name ("tcp" or "udp",
// as validated and rendered by NewL4Port).
func (p *l4Port) ProtocolString() string {
	return p.protocol
}

// ToString renders the port in "number/protocol" form, e.g. "443/tcp".
func (p *l4Port) ToString() string {
	return p.NumberString() + "/" + p.ProtocolString()
}

View File

@@ -0,0 +1,78 @@
package types
import "fmt"
// PolicyDrop selects how denied traffic is terminated: silently dropped
// or actively rejected.
type PolicyDrop int8

const (
	Drop PolicyDrop = iota + 1
	Reject
)

// String returns the nft verdict keyword for the policy. Any value other
// than Reject — including unknown values — falls back to the safe
// default "drop".
func (p PolicyDrop) String() string {
	if p == Reject {
		return "reject"
	}
	return "drop"
}
// Action is the nft verdict applied by a generated rule.
type Action int8

const (
	ActionAccept Action = iota + 1
	ActionReject
	ActionDrop
)

// String returns the nft verdict keyword. Unknown values default to
// "drop", the safest behaviour for a firewall.
func (a Action) String() string {
	switch {
	case a == ActionAccept:
		return "accept"
	case a == ActionReject:
		return "reject"
	default:
		return "drop"
	}
}
// Protocol identifies a transport protocol used in port rules.
type Protocol int8

const (
	ProtocolTCP Protocol = iota + 1
	ProtocolUDP
)

// String returns "tcp" or "udp"; any other value renders as
// "Protocol(n)" for diagnostics.
func (p Protocol) String() string {
	if p == ProtocolTCP {
		return "tcp"
	}
	if p == ProtocolUDP {
		return "udp"
	}
	return fmt.Sprintf("Protocol(%d)", p)
}
// Direction marks whether a rule applies to inbound or outbound traffic.
type Direction int8

const (
	DirectionIn Direction = iota + 1
	DirectionOut
)

// String returns "in" or "out"; any other value renders as
// "Direction(n)" for diagnostics.
func (d Direction) String() string {
	if d == DirectionIn {
		return "in"
	}
	if d == DirectionOut {
		return "out"
	}
	return fmt.Sprintf("Direction(%d)", d)
}

View File

@@ -0,0 +1,45 @@
package notifications
import (
"github.com/wneessen/go-mail"
)
// Config holds the notification subsystem settings.
type Config struct {
	Enabled       bool   // master switch; when false, emails are not actually sent
	EnableRetries bool   // when true, failed sends are queued in the DB and retried
	RetryInterval uint16 // seconds between retry passes over the DB queue
	ServerName    string // server identifier appended to subjects and bodies
	Email         Email  // SMTP delivery settings
}
// Email describes the SMTP account and connection used for delivery.
type Email struct {
	Host     string            // SMTP server hostname
	Port     uint              // SMTP server port
	Username string            // SMTP auth user (unused when AuthType is SMTPAuthNoAuth)
	Password string            // SMTP auth password (unused when AuthType is SMTPAuthNoAuth)
	AuthType mail.SMTPAuthType // go-mail SMTP auth mechanism
	TLS      TLS               // transport security settings
	From     string            // sender address
	To       string            // recipient address
}
// TLS controls transport security for the SMTP connection.
type TLS struct {
	Mode   TLSMode   // how TLS is established (none / STARTTLS / implicit)
	Policy TLSPolicy // STARTTLS strictness (only meaningful with TLSModeStartTLS)
	Verify bool      // when false, the server certificate is not verified
}
// TLSMode selects how TLS is established on the SMTP connection.
type TLSMode string

const (
	TLSModeNone     TLSMode = "NONE"     // plaintext connection
	TLSModeStartTLS TLSMode = "STARTTLS" // upgrade the connection via STARTTLS
	TLSModeImplicit TLSMode = "IMPLICIT" // TLS from the first byte (SMTPS)
)

// TLSPolicy sets how strictly STARTTLS is required
// (maps onto go-mail's TLSMandatory / TLSOpportunistic policies).
type TLSPolicy string

const (
	TLSPolicyMandatory     TLSPolicy = "MANDATORY"     // require STARTTLS
	TLSPolicyOpportunistic TLSPolicy = "OPPORTUNISTIC" // use STARTTLS when offered
)

View File

@@ -0,0 +1,221 @@
package notifications
import (
"context"
"crypto/tls"
"fmt"
"sync"
"time"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/db/entity"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/db/repository"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
"github.com/wneessen/go-mail"
)
// Message is a single notification: an email subject and plain-text body.
type Message struct {
	Subject string
	Body    string
}
// Notifications sends alert emails asynchronously, falling back to a
// database-backed queue for retries when immediate delivery fails.
type Notifications interface {
	// Run starts the background worker that delivers enqueued messages
	// and periodically retries messages stored in the DB queue.
	Run()
	// SendAsync enqueues a message for delivery without blocking the caller.
	SendAsync(message Message)
	// DBQueueSize - return size of notifications queue in db
	DBQueueSize() int
	// DBQueueClear removes all pending messages from the DB retry queue.
	DBQueueClear() error
	// Close stops accepting messages and waits for the worker to finish.
	Close() error
}
// notifications is the concrete Notifications implementation.
type notifications struct {
	config          Config
	queueRepository repository.NotificationsQueueRepository // DB-backed retry queue
	logger          log.Logger
	msgQueue        chan Message   // buffered in-memory delivery queue
	wg              sync.WaitGroup // tracks the Run goroutine so Close can wait for it
}
// New constructs the Notifications service with an in-memory delivery
// queue buffered to 100 messages. Call Run to start background delivery.
func New(config Config, queueRepository repository.NotificationsQueueRepository, logger log.Logger) Notifications {
	n := &notifications{
		config:          config,
		queueRepository: queueRepository,
		logger:          logger,
	}
	n.msgQueue = make(chan Message, 100)
	return n
}
// Run starts the background delivery goroutine. The goroutine drains
// msgQueue even when notifications are disabled (sendEmail is a no-op
// then), so SendAsync keeps working and Close can complete. On every
// retry tick it re-sends up to 10 messages stored in the DB queue,
// deleting each one that was delivered. The goroutine exits when
// msgQueue is closed by Close.
func (n *notifications) Run() {
	if !n.config.Enabled {
		n.logger.Info("Notifications are disabled")
	}
	// time.NewTicker panics on a non-positive duration, so a zero
	// RetryInterval must not reach it; fall back to a one-minute cadence.
	interval := time.Duration(n.config.RetryInterval) * time.Second
	if interval <= 0 {
		interval = time.Minute
	}
	n.wg.Add(1)
	go func() {
		defer n.wg.Done()
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case msg, ok := <-n.msgQueue:
				if !ok {
					// Close() closed the queue; all pending messages processed.
					return
				}
				if err := n.sendEmail(msg); err != nil {
					n.logger.Error(fmt.Sprintf("failed to send email: %v", err))
					n.addNotificationsQueue(msg)
				} else if n.config.Enabled {
					n.logger.Debug(fmt.Sprintf("email sent: Subject %s, Body %s", msg.Subject, msg.Body))
				}
			case <-ticker.C:
				if !n.config.Enabled || !n.config.EnableRetries {
					continue
				}
				items, err := n.queueRepository.Get(10)
				if err != nil {
					n.logger.Error(fmt.Sprintf("failed to get notifications from the queue: %v", err))
					continue
				}
				for id, item := range items {
					if err := n.sendEmail(Message{Subject: item.Subject, Body: item.Body}); err != nil {
						n.logger.Error(fmt.Sprintf("failed to send queued email: %v", err))
						// The SMTP server is likely unreachable; retry on the next tick.
						break
					}
					if err := n.queueRepository.Delete(id); err != nil {
						n.logger.Error(fmt.Sprintf("failed to delete queued email from the queue: %v", err))
					}
				}
			}
		}
	}()
}
// SendAsync queues a message for background delivery without blocking.
// If the in-memory queue is full, the message is written straight to the
// DB-backed retry queue (when notifications and retries are enabled).
func (n *notifications) SendAsync(message Message) {
	select {
	case n.msgQueue <- message:
		if !n.config.Enabled {
			n.logger.Debug(fmt.Sprintf("email sending is disabled, message was added to the queue: Subject %s, Body %s", message.Subject, message.Body))
		} else {
			n.logger.Debug(fmt.Sprintf("added to the mail sending queue: Subject %s, Body %s", message.Subject, message.Body))
		}
	default:
		// Constant message: no fmt.Sprintf needed (go vet flags Sprintf
		// calls without formatting directives).
		n.logger.Error("failed to send email: queue is full")
		n.addNotificationsQueue(message)
	}
}
// DBQueueSize reports how many notifications are waiting in the DB retry
// queue. Repository errors are logged and reported as a size of 0.
func (n *notifications) DBQueueSize() int {
	count, err := n.queueRepository.Count()
	if err == nil {
		return count
	}
	n.logger.Error(fmt.Sprintf("failed to get notifications queue size: %v", err))
	return 0
}
// DBQueueClear empties the DB retry queue, logging any failure before
// returning it to the caller.
func (n *notifications) DBQueueClear() error {
	if err := n.queueRepository.Clear(); err != nil {
		n.logger.Error(fmt.Sprintf("failed to clear notifications queue: %v", err))
		return err
	}
	return nil
}
// Close shuts the in-memory queue and blocks until the Run goroutine has
// processed everything already enqueued.
//
// NOTE(review): after Close, any further SendAsync call panics (send on a
// closed channel), and a second Close panics too — confirm the daemon
// guarantees producers are stopped first and Close runs exactly once.
func (n *notifications) Close() error {
	close(n.msgQueue)
	n.logger.Debug("We are waiting for all notifications to be sent")
	n.wg.Wait()
	n.logger.Debug("Notifications queue processed and closed")
	return nil
}
// sendEmail synchronously delivers one message over SMTP. It is a no-op
// (returns nil) when notifications are disabled. The server name is
// appended to the subject and prefixed to the plain-text body, and the
// whole dial+send is bounded by a 10-second timeout.
func (n *notifications) sendEmail(message Message) error {
	if n.config.Enabled == false {
		return nil
	}
	m := mail.NewMsg()
	if err := m.From(n.config.Email.From); err != nil {
		return err
	}
	if err := m.To(n.config.Email.To); err != nil {
		return err
	}
	m.Subject(message.Subject + " (" + n.config.ServerName + ")")
	m.SetBodyString(mail.TypeTextPlain, "Server: "+n.config.ServerName+"\n"+message.Body)
	// A fresh client is created per message and closed via the defer below.
	client, err := newClient(n.config.Email)
	if err != nil {
		return err
	}
	defer func() { _ = client.Close() }()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return client.DialAndSendWithContext(ctx, m)
}
// addNotificationsQueue persists a message to the DB retry queue for a
// later send attempt. It does nothing when notifications or retries are
// disabled.
func (n *notifications) addNotificationsQueue(message Message) {
	if !n.config.Enabled || !n.config.EnableRetries {
		return
	}
	item := entity.NotificationsQueue{Subject: message.Subject, Body: message.Body}
	if err := n.queueRepository.Add(item); err != nil {
		n.logger.Error(fmt.Sprintf("failed to save email to the queue: %v", err))
	}
}
// newClient builds a go-mail SMTP client from the Email configuration.
//
// Fixes: the previous version unconditionally appended mail.WithSSL()
// AFTER the TLS-mode switch, forcing implicit SSL even when the mode was
// STARTTLS or NONE — that stray option is removed. Verify == false
// (InsecureSkipVerify) is now honored for both STARTTLS and implicit TLS
// instead of only STARTTLS, and the redundant `break` statements (Go
// switch cases do not fall through) are gone.
func newClient(config Email) (*mail.Client, error) {
	options := []mail.Option{
		mail.WithPort(int(config.Port)),
		mail.WithSMTPAuth(config.AuthType),
	}
	if config.AuthType != mail.SMTPAuthNoAuth {
		options = append(options, mail.WithUsername(config.Username), mail.WithPassword(config.Password))
	}
	switch config.TLS.Mode {
	case TLSModeImplicit:
		options = append(options, mail.WithSSL())
	case TLSModeStartTLS:
		switch config.TLS.Policy {
		case TLSPolicyMandatory:
			options = append(options, mail.WithTLSPolicy(mail.TLSMandatory))
		case TLSPolicyOpportunistic:
			options = append(options, mail.WithTLSPolicy(mail.TLSOpportunistic))
		default:
			return nil, fmt.Errorf("unknown tls policy: %s", config.TLS.Policy)
		}
	case TLSModeNone:
		// Plaintext connection: nothing to add.
	default:
		return nil, fmt.Errorf("unknown tls mode: %s", config.TLS.Mode)
	}
	// Disable certificate verification only when explicitly configured and
	// some form of TLS is actually in use.
	if config.TLS.Mode != TLSModeNone && !config.TLS.Verify {
		options = append(options, mail.WithTLSConfig(&tls.Config{
			InsecureSkipVerify: true,
		}))
	}
	return mail.NewClient(config.Host, options...)
}

View File

@@ -1,10 +1,17 @@
package daemon
import "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall"
import (
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/config"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/db"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall"
)
// DaemonOptions aggregates everything needed to construct the daemon:
// runtime file locations, the nftables binary path, and the firewall,
// analyzer and repository configuration.
type DaemonOptions struct {
	PathPidFile    string          // path of the daemon's PID file
	PathSocketFile string          // unix socket used for CLI commands
	DataDir        string          // base directory for daemon data
	PathNftables   string          // path to the nft binary
	ConfigFirewall firewall.Config // firewall rules/policy configuration
	ConfigAnalyzer config.Config   // log-analyzer configuration
	Repositories   db.Repositories // database repositories
}

View File

@@ -3,13 +3,18 @@ package daemon
import (
"errors"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/analyzer/log/analysis/brute_force_protection_group"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/docker_monitor"
firewall2 "git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/blocking"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/notifications"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/pidfile"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/socket"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
)
func NewDaemon(opts DaemonOptions, logger log.Logger) (Daemon, error) {
func NewDaemon(opts DaemonOptions, logger log.Logger, notifications notifications.Notifications, docker docker_monitor.Docker) (Daemon, error) {
if logger == nil {
return nil, errors.New("logger is nil")
}
@@ -24,12 +29,19 @@ func NewDaemon(opts DaemonOptions, logger log.Logger) (Daemon, error) {
return nil, err
}
firewall, err := firewall2.New(opts.PathNftables, logger, opts.ConfigFirewall)
blockingService := blocking.New(opts.Repositories.Blocking(), logger)
firewall, err := firewall2.New(opts.PathNftables, blockingService, logger, opts.ConfigFirewall, docker)
blockService := brute_force_protection_group.NewBlockService(firewall.BlockIP, firewall.BlockIPWithPorts)
analyzerService := analyzer.New(opts.ConfigAnalyzer, blockService, opts.Repositories, logger, notifications)
return &daemon{
pidFile: pidFile,
socket: sock,
logger: logger,
firewall: firewall,
pidFile: pidFile,
socket: sock,
logger: logger,
firewall: firewall,
notifications: notifications,
analyzer: analyzerService,
docker: docker,
}, nil
}

View File

@@ -2,6 +2,7 @@ package socket
import (
"context"
"encoding/json"
"errors"
"fmt"
"net"
@@ -11,7 +12,12 @@ import (
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/log"
)
type HandleCommand func(command string, socket Connect) error
// Message is the JSON envelope exchanged over the control socket:
// a command name plus optional string arguments.
type Message struct {
	Command string            `json:"command"`
	Args    map[string]string `json:"args"`
}

// HandleCommand processes one parsed command with its arguments, using
// the given socket connection to reply to the client.
type HandleCommand func(command string, args map[string]string, socket Connect) error
type Socket interface {
EnsureNoOtherProcess() error
@@ -121,13 +127,19 @@ func (s *socket) handleAction(conn net.Conn, handleCommand HandleCommand) {
_ = sock.Close()
}()
cmd, err := sock.Read()
raw, err := sock.Read()
if err != nil {
s.logger.Error(fmt.Sprintf("Failed to read command: %s", err))
return
}
if err := handleCommand(cmd, sock); err != nil {
cmd, args, err := parseCommand(raw)
if err != nil {
s.logger.Error(fmt.Sprintf("Failed to parse command: %s", err))
return
}
if err := handleCommand(cmd, args, sock); err != nil {
s.logger.Error(fmt.Sprintf("Failed to handle command: %s", err))
}
}
@@ -147,3 +159,21 @@ func canConnect(path string) bool {
func isUseOfClosedNetworkError(err error) bool {
return err != nil && strings.Contains(err.Error(), "use of closed network connection")
}
// parseCommand decodes a raw JSON payload into a command name and its
// argument map. The command must be non-empty; a missing "args" object is
// normalized to an empty map so handlers never receive nil.
func parseCommand(raw string) (string, map[string]string, error) {
	var msg struct {
		Command string            `json:"command"`
		Args    map[string]string `json:"args"`
	}
	if err := json.Unmarshal([]byte(raw), &msg); err != nil {
		return "", nil, err
	}
	if msg.Command == "" {
		return "", nil, errors.New("command is empty")
	}
	args := msg.Args
	if args == nil {
		args = map[string]string{}
	}
	return msg.Command, args, nil
}

View File

@@ -15,6 +15,33 @@
"cmd.daemon.reopen_logger.Usage": "Reopen the file for logging",
"cmd.daemon.reopen_logger.Description": "Reopen the file where the daemon's logs are written",
"cmd.daemon.notifications.Usage": "Notifications",
"cmd.daemon.notifications.queue.Usage": "Notification queue",
"cmd.daemon.notifications.queue.count.Usage": "Number of notifications in the pending queue",
"cmd.daemon.notifications.queue.count.Description": "The number of notifications waiting to be sent after an error.",
"cmd.daemon.notifications.queue.count.result": "Number in backlog queue: {{.Count}}",
"cmd.daemon.notifications.queue.clear.Usage": "Clear the notification queue",
"cmd.daemon.notifications.queue.clear.Description": "Clear the queue of notifications waiting to be sent after an error.",
"notifications_queue_clear_error": "Failed to clear notification queue",
"notifications_queue_clear_success": "The notification queue has been cleared.",
"cmd.daemon.block.Usage": "Blocking",
"cmd.daemon.block.clear.Usage": "Unblock all banned IP addresses",
"cmd.daemon.block.clear.Description": "Unblock all banned IP addresses.",
"block_clear_error": "Unable to unblock all IP addresses",
"block_clear_success": "The request was successfully completed",
"cmd.daemon.block.add.Usage": "Add IP address to block list",
"cmd.daemon.block.add.Description": "Add an IP address to the block list. \nExamples: \nkor-elf-shield block add 192.168.1.1 \nkor-elf-shield block add 192.168.1.1 --seconds=600 \nkor-elf-shield block add 192.168.1.1 --port 80/tcp",
"cmd.daemon.block.add.FlagUsage.port": "The port to be blocked. If not specified, all ports will be blocked. \nExamples: \n--port=80/tcp \n--port=1000/udp",
"cmd.daemon.block.add.FlagUsage.seconds": "The blocking time in seconds. If not specified, the blocking will be permanent.",
"cmd.daemon.block.add.FlagUsage.reason": "Reason for blocking.",
"block_add_ip_success": "The IP address has been successfully added to the block list.",
"cmd.daemon.block.delete.Usage": "Remove IP address from block list",
"cmd.daemon.block.delete.Description": "Remove an IP address from the block list. \nExample: \nkor-elf-shield block delete 192.168.1.1",
"block_delete_ip_success": "The IP address has been successfully removed from the block list.",
"Command error": "Command error",
"invalid log level": "The log level specified in the settings is invalid. It is currently set to: {{.Level}}. Valid values: {{.Levels}}",
"invalid log encoding": "Invalid encoding setting. Currently set to: {{.Encoding}}. Valid values: {{.Encodings}}",
@@ -25,5 +52,27 @@
"daemon stopped": "Daemon stopped",
"daemon stop failed": "Daemon stop failed",
"daemon is not running": "Daemon is not running",
"daemon is not reopening logger": "The daemon did not reopen the log"
"daemon is not reopening logger": "The daemon did not reopen the log",
"time": "Time: {{.Time}}",
"log": "Log: ",
"user": "User",
"access to user has been gained": "Access to user has been gained",
"unknown": "unknown",
"blockSec": "Blocked for {{.BlockSec}} seconds",
"ports": "Ports: {{.Ports}}",
"alert.subject": "Alert detected ({{.Name}}) (group:{{.GroupName}})",
"alert.login.ssh.message": "Logged into the OS via ssh.",
"alert.login.local.message": "Logged into the OS via TTY.",
"alert.login.su.message": "Gained access to another user via su.",
"alert.login.sudo.message": "Gained access to another user via sudo.",
"alert.bruteForceProtection.subject": "A hacking attempt was detected and IP {{.IP}} was blocked. Alert ({{.Name}}) (Group:{{.GroupName}})",
"alert.bruteForceProtection.subject-error": "A hacking attempt was detected, but the IP {{.IP}} is not blocked. Alert ({{.Name}}) (group:{{.GroupName}})",
"alert.bruteForceProtection.error": "Error: {{.Error}}",
"alert.bruteForceProtection.ssh.message": "An attempt to brute-force SSH was detected.",
"alert.bruteForceProtection.group._default.message": "Default group.",
"cmd.error": "Command error: {{.Error}}"
}

View File

@@ -15,6 +15,33 @@
"cmd.daemon.reopen_logger.Usage": "Файлды тіркеу үшін қайта ашыңыз",
"cmd.daemon.reopen_logger.Description": "Демонның журналдары жазылған файлды қайта ашыңыз.",
"cmd.daemon.notifications.Usage": "Хабарландырулар",
"cmd.daemon.notifications.queue.Usage": "Хабарландыру кезегі",
"cmd.daemon.notifications.queue.count.Usage": "Күтудегі кезектегі хабарландырулар саны",
"cmd.daemon.notifications.queue.count.Description": "Қатеден кейін жіберуді күтіп тұрған хабарландырулар саны.",
"cmd.daemon.notifications.queue.count.result": "Кезекте тұрған нөмір: {{.Count}}",
"cmd.daemon.notifications.queue.clear.Usage": "Хабарландыру кезегін тазалау",
"cmd.daemon.notifications.queue.clear.Description": "Қатеден кейін жіберуді күтіп тұрған хабарландырулар кезегін тазалаңыз.",
"notifications_queue_clear_error": "Хабарландыру кезегі тазаланбады",
"notifications_queue_clear_success": "Хабарландыру кезегі тазартылды",
"cmd.daemon.block.Usage": "Бұғаттау",
"cmd.daemon.block.clear.Usage": "Барлық тыйым салынған IP мекенжайларын бұғаттан шығарыңыз",
"cmd.daemon.block.clear.Description": "Барлық тыйым салынған IP мекенжайларын бұғаттан шығарыңыз.",
"block_clear_error": "Барлық IP мекенжайларын бұғаттан шығару мүмкін емес",
"block_clear_success": "Сұраныс сәтті орындалды",
"cmd.daemon.block.add.Usage": "Блоктау тізіміне IP мекенжайын қосу",
"cmd.daemon.block.add.Description": "Блоктау тізіміне IP мекенжайын қосыңыз. \nМысалдар: \nkor-elf-shield block add 192.168.1.1 \nkor-elf-shield block add 192.168.1.1 --seconds=600 \nkor-elf-shield block add 192.168.1.1 --port 80/tcp",
"cmd.daemon.block.add.FlagUsage.port": "Блокталатын порт. Егер көрсетілмесе, барлық порттар бұғатталады. \nМысалдар: \n--port=80/tcp \n--port=1000/udp",
"cmd.daemon.block.add.FlagUsage.seconds": "Блоктау уақыты секундпен. Егер көрсетілмесе, блоктау тұрақты болады.",
"cmd.daemon.block.add.FlagUsage.reason": "Блоктау себебі.",
"block_add_ip_success": "IP мекенжайы блоктау тізіміне сәтті қосылды.",
"cmd.daemon.block.delete.Usage": "IP мекенжайын блоктау тізімінен алып тастаңыз",
"cmd.daemon.block.delete.Description": "IP мекенжайын блоктау тізімінен алып тастаңыз. \nМысал: \nkor-elf-shield block delete 192.168.1.1",
"block_delete_ip_success": "IP мекенжайы блоктау тізімінен сәтті жойылды.",
"Command error": "Командалық қате",
"invalid log level": "Параметрлерде көрсетілген журнал деңгейі жарамсыз. Ол қазір мына күйге орнатылған: {{.Level}}. Жарамды мәндер: {{.Levels}}",
"invalid log encoding": "Жарамсыз кодтау параметрі. Қазіргі уақытта орнатылған: {{.Encoding}}. Жарамды мәндер: {{.Encodings}}",
@@ -25,5 +52,27 @@
"daemon stopped": "Демон тоқтатылды",
"daemon stop failed": "Демонды тоқтату сәтсіз аяқталды",
"daemon is not running": "Демон жұмыс істемейді",
"daemon is not reopening logger": "Демон журналды қайта ашпады"
"daemon is not reopening logger": "Демон журналды қайта ашпады",
"time": "Уақыт: {{.Time}}",
"log": "Лог: ",
"user": "Пайдаланушы",
"access to user has been gained": "Пайдаланушыға кіру мүмкіндігі алынды",
"unknown": "белгісіз",
"blockSec": "{{.BlockSec}} секундқа блокталды",
"ports": "Порттар: {{.Ports}}",
"alert.subject": "Ескерту анықталды ({{.Name}}) (топ:{{.GroupName}})",
"alert.login.ssh.message": "ОС-қа ssh арқылы кірді.",
"alert.login.local.message": "ОЖ-ға TTY арқылы кірдіңіз.",
"alert.login.su.message": "su арқылы басқа пайдаланушыға кіру мүмкіндігі алынды.",
"alert.login.sudo.message": "sudo арқылы басқа пайдаланушыға кіру мүмкіндігі алынды.",
"alert.bruteForceProtection.subject": "Хакерлік әрекет анықталды және IP мекенжайы {{.IP}} бұғатталды. Ескерту ({{.Name}}) (Топ:{{.GroupName}})",
"alert.bruteForceProtection.subject-error": "Хакерлік әрекет анықталды, бірақ IP мекенжайы {{.IP}} бұғатталмаған. Ескерту ({{.Name}}) (Топ:{{.GroupName}})",
"alert.bruteForceProtection.error": "Қате: {{.Error}}",
"alert.bruteForceProtection.ssh.message": "SSH-ті күштеп қолдану әрекеті анықталды.",
"alert.bruteForceProtection.group._default.message": "Әдепкі топ.",
"cmd.error": "Команда қатесі: {{.Error}}"
}

View File

@@ -15,6 +15,33 @@
"cmd.daemon.reopen_logger.Usage": "Переоткрыть файл для логирования",
"cmd.daemon.reopen_logger.Description": "Переоткроет файл, куда пишутся логи от демона",
"cmd.daemon.notifications.Usage": "Уведомления",
"cmd.daemon.notifications.queue.Usage": "Очередь уведомлений",
"cmd.daemon.notifications.queue.count.Usage": "Количество уведомлений в отложенной очереди",
"cmd.daemon.notifications.queue.count.Description": "Количество уведомлений, ожидающих отправки после ошибки.",
"cmd.daemon.notifications.queue.count.result": "Количество в отложенной очереди: {{.Count}}",
"cmd.daemon.notifications.queue.clear.Usage": "Очистить очередь уведомлений",
"cmd.daemon.notifications.queue.clear.Description": "Очистить очередь уведомлений, ожидающих отправки после ошибки.",
"notifications_queue_clear_error": "Не удалось очистить очередь уведомлений",
"notifications_queue_clear_success": "Очередь уведомлений очищена",
"cmd.daemon.block.Usage": "Блокировка",
"cmd.daemon.block.clear.Usage": "Разблокировать все забаненные IP адреса",
"cmd.daemon.block.clear.Description": "Разблокировать все забаненные IP адреса.",
"block_clear_error": "Не смогли разблокировать все IP адреса",
"block_clear_success": "Запрос успешно выполнен",
"cmd.daemon.block.add.Usage": "Добавить IP адрес в список заблокированных",
"cmd.daemon.block.add.Description": "Добавить IP адрес в список заблокированных. \nПримеры: \nkor-elf-shield block add 192.168.1.1 \nkor-elf-shield block add 192.168.1.1 --seconds=600 \nkor-elf-shield block add 192.168.1.1 --port 80/tcp",
"cmd.daemon.block.add.FlagUsage.port": "Порт, который будет заблокирован. Если не указать, то заблокируются все порты. \nПримеры: \n--port=80/tcp \n--port=1000/udp",
"cmd.daemon.block.add.FlagUsage.seconds": "Время блокировки в секундах. Если не указать, то блокировка будет вечной.",
"cmd.daemon.block.add.FlagUsage.reason": "Причина блокировки.",
"block_add_ip_success": "IP адрес успешно добавлен в список заблокированных.",
"cmd.daemon.block.delete.Usage": "Удалить IP адрес из списка заблокированных",
"cmd.daemon.block.delete.Description": "Удалить IP адрес из списка заблокированных. \nПример: \nkor-elf-shield block delete 192.168.1.1",
"block_delete_ip_success": "IP адрес успешно удален из списка заблокированных.",
"Command error": "Ошибка команды",
"invalid log level": "В настройках указан не верный уровень log. Сейчас указан: {{.Level}}. Допустимые значения: {{.Levels}}",
"invalid log encoding": "Неверная настройка encoding. Сейчас указан: {{.Encoding}}. Допустимые значения: {{.Encodings}}",
@@ -25,5 +52,27 @@
"daemon stopped": "Демон остановлен",
"daemon stop failed": "Остановка демона не удалась",
"daemon is not running": "Демон не запущен",
"daemon is not reopening logger": "Демон не открыл журнал повторно"
"daemon is not reopening logger": "Демон не открыл журнал повторно",
"time": "Время: {{.Time}}",
"log": "Лог: ",
"user": "Пользователь",
"access to user has been gained": "Получен доступ к пользователю",
"unknown": "неизвестный",
"blockSec": "Блокировка на {{.BlockSec}} секунд",
"ports": "Порты: {{.Ports}}",
"alert.subject": "Обнаружено оповещение ({{.Name}}) (группа:{{.GroupName}})",
"alert.login.ssh.message": "Вошли в ОС через ssh.",
"alert.login.local.message": "Вошли в ОС через TTY.",
"alert.login.su.message": "Получили доступ к другому пользователю через su.",
"alert.login.sudo.message": "Получили доступ к другому пользователю через sudo.",
"alert.bruteForceProtection.subject": "Обнаружена попытка взлома, IP {{.IP}} заблокирован. Оповещение ({{.Name}}) (группа:{{.GroupName}})",
"alert.bruteForceProtection.subject-error": "Обнаружена попытка взлома, но IP {{.IP}} не заблокирован. Оповещение ({{.Name}}) (группа:{{.GroupName}})",
"alert.bruteForceProtection.error": "Ошибка: {{.Error}}",
"alert.bruteForceProtection.ssh.message": "Обнаружена попытка атаки на SSH методом перебора паролей.",
"alert.bruteForceProtection.group._default.message": "Группа по умолчанию.",
"cmd.error": "Ошибка команды: {{.Error}}"
}

View File

@@ -12,6 +12,17 @@ const (
IPv6
)
// ToNft maps the IP version to its nftables family keyword: "ip" for
// IPv4 and "ip6" for IPv6. Any other value yields "unknown".
func (v Version) ToNft() string {
	if v == IPv4 {
		return "ip"
	}
	if v == IPv6 {
		return "ip6"
	}
	return "unknown"
}
func DetermineIPVersion(ip string) (ipNet string, version Version, err error) {
ipNet, version, err = parseCIDR(ip)
if err != nil {
@@ -21,6 +32,11 @@ func DetermineIPVersion(ip string) (ipNet string, version Version, err error) {
return
}
// IPVersion determines whether ip is an IPv4 or IPv6 address.
// Unlike DetermineIPVersion it takes a plain address rather than CIDR
// notation — presumably parseIP wraps net.ParseIP; confirm against its
// definition.
func IPVersion(ip string) (Version, error) {
	_, version, err := parseIP(ip)
	return version, err
}
func parseCIDR(parseIP string) (ipNet string, version Version, err error) {
_, parseIPNet, err := net.ParseCIDR(parseIP)
if err != nil {

View File

@@ -4,40 +4,40 @@ import (
"errors"
"strings"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall"
"git.kor-elf.net/kor-elf-shield/kor-elf-shield/internal/daemon/firewall/types"
)
func ToDirection(direction string) (firewall.Direction, error) {
func ToDirection(direction string) (types.Direction, error) {
switch strings.ToLower(direction) {
case "in":
return firewall.DirectionIn, nil
return types.DirectionIn, nil
case "out":
return firewall.DirectionOut, nil
return types.DirectionOut, nil
default:
return firewall.DirectionIn, errors.New("invalid direction. Must be in or out")
return types.DirectionIn, errors.New("invalid direction. Must be in or out")
}
}
func ToProtocol(protocol string) (firewall.Protocol, error) {
func ToProtocol(protocol string) (types.Protocol, error) {
switch strings.ToLower(protocol) {
case "tcp":
return firewall.ProtocolTCP, nil
return types.ProtocolTCP, nil
case "udp":
return firewall.ProtocolUDP, nil
return types.ProtocolUDP, nil
default:
return firewall.ProtocolTCP, errors.New("invalid protocol. Must be tcp or udp")
return types.ProtocolTCP, errors.New("invalid protocol. Must be tcp or udp")
}
}
func ToAction(action string) (firewall.Action, error) {
func ToAction(action string) (types.Action, error) {
switch strings.ToLower(action) {
case "accept":
return firewall.ActionAccept, nil
return types.ActionAccept, nil
case "drop":
return firewall.ActionDrop, nil
return types.ActionDrop, nil
case "reject":
return firewall.ActionReject, nil
return types.ActionReject, nil
default:
return firewall.ActionAccept, errors.New("invalid action. Must be accept, drop or reject")
return types.ActionAccept, errors.New("invalid action. Must be accept, drop or reject")
}
}

Some files were not shown because too many files have changed in this diff Show More