diff --git a/README.md b/README.md
index 08bc191..3773664 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,13 @@
# FSBenchmarks-module
+Filesystem benchmark module: measures create/scan IOPS and sequential read/write speed.
+
+loguru is configured for the following log format:
+```python
+fmt = "{elapsed} -- {time:YYYY-MM-DD HH:mm:ss.SSS} | {level:<8} | {extra[module]:^12} | {extra[prefix]:<12} | {message}"
+```
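+
+A minimal sketch of attaching a loguru sink with this format (the sink target here is an assumption; the `module`/`prefix` values mirror the `logger.bind(...)` call in `benchmarks/speed.py`):
+```python
+import sys
+
+from loguru import logger
+
+fmt = "{elapsed} -- {time:YYYY-MM-DD HH:mm:ss.SSS} | {level:<8} | {extra[module]:^12} | {extra[prefix]:<12} | {message}"
+
+logger.remove()                     # drop the default sink
+logger.add(sys.stderr, format=fmt)  # add a sink that uses the format above
+log = logger.bind(module="FSBench", prefix="test")  # fills the {extra[...]} fields
+log.info("benchmark started")
+```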
+
+Adding the module to a project:
+```bash
+git submodule add ssh://git@git.anidev.ru:222/SantaSpeen/FSBenchmarks-module.git src/modules/FSBenchmarks
+```
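+
+Once the submodule is in place, the benchmarks can be invoked roughly like this (the import path below is a sketch and depends on how `src/modules/FSBenchmarks` is exposed to your package root):
+```python
+from pathlib import Path
+
+# hypothetical import path, mirroring the submodule location above
+from src.modules.FSBenchmarks.benchmarks import test_iops, test_speed
+
+target = Path("/tmp")                       # directory to benchmark
+create_iops, scan_iops = test_iops(target)  # file-creation and metadata-scan IOPS
+write_mbs, read_mbs = test_speed(target)    # average sequential write/read speed, MB/s
+print(create_iops, scan_iops, write_mbs, read_mbs)
+```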
diff --git a/benchmarks/__init__.py b/benchmarks/__init__.py
new file mode 100644
index 0000000..483ec15
--- /dev/null
+++ b/benchmarks/__init__.py
@@ -0,0 +1,2 @@
+from .iops import test_iops
+from .speed import test_speed
diff --git a/benchmarks/iops.py b/benchmarks/iops.py
new file mode 100644
index 0000000..fa54c93
--- /dev/null
+++ b/benchmarks/iops.py
@@ -0,0 +1,58 @@
+import os
+import shutil
+import time
+from pathlib import Path
+
+# Test settings
+DEPTH = 3  # Nesting depth
+FILES_PER_LAYER = 3  # Number of files per layer
+FILE_SIZE = 1024  # File size in bytes
+
+
+def create_nested_structure(base_dir, depth, files_per_layer, file_size):
+ """Создаёт вложенную структуру с именами layerX_Y и файлами file_X_Y"""
+ start_time = time.time()
+ total_files = 0
+
+ for layer in range(depth):
+ for sublayer in range(files_per_layer):
+ layer_dir = os.path.join(base_dir, f"layer{layer}_{sublayer}")
+ os.makedirs(layer_dir, exist_ok=True)
+
+ for file_index in range(files_per_layer):
+ file_path = os.path.join(layer_dir, f"file_{layer}_{sublayer}_{file_index}.txt")
+ with open(file_path, "wb") as f:
+ f.write(os.urandom(file_size))
+ total_files += 1
+
+ elapsed_time = time.time() - start_time
+    iops = total_files / max(elapsed_time, 1e-9)  # Guard against division by zero
+ return elapsed_time, iops, total_files
+
+
+def scan_files(base_dir):
+ """Сканирует все файлы и возвращает скорость доступа"""
+ start_time = time.time()
+ total_files = 0
+
+ for root, dirs, files in os.walk(base_dir):
+ for file in files:
+ file_path = os.path.join(root, file)
+            stat = os.stat(file_path)  # Fetch file metadata
+            _ = (stat.st_size, stat.st_ctime, file)  # Size, creation time, file name
+ total_files += 1
+
+ elapsed_time = time.time() - start_time
+    iops = total_files / max(elapsed_time, 1e-9)  # Guard against division by zero
+ return elapsed_time, iops, total_files
+
+
+def test_iops(path: Path):
+ os.makedirs(path / ".tests", exist_ok=True)
+    base_dir = (path / ".tests" / "iops").as_posix()
+    _, create_iops, _ = create_nested_structure(
+        base_dir, DEPTH, FILES_PER_LAYER, FILE_SIZE
+    )
+    _, scan_iops, _ = scan_files(base_dir)
+    shutil.rmtree(base_dir)
+ return create_iops, scan_iops
diff --git a/benchmarks/speed.py b/benchmarks/speed.py
new file mode 100644
index 0000000..11d7a2e
--- /dev/null
+++ b/benchmarks/speed.py
@@ -0,0 +1,62 @@
+import os
+import random
+import sys
+import time
+from pathlib import Path
+
+from loguru import logger as llogger
+
+logger = llogger.bind(module="FSBench", prefix="test")
+
+b = 1
+kb = 1024 * b
+mb = 1024 * kb
+rsize = [
+ # 100 * mb, 90 * mb, 80 * mb, 70 * mb, 60 * mb, 50 * mb, 40 * mb, 30 * mb, 20 * mb, 10 * mb, # 100mb -> 10mb
+ 5 * mb, 4.5 * mb, 4 * mb, 3.5 * mb, 3 * mb, 2.5 * mb, 2 * mb, 1.5 * mb, 1 * mb, # 5mb -> 1mb
+ 500 * kb, 450 * kb, 400 * kb, 350 * kb, 300 * kb, 250 * kb, 200 * kb, 150 * kb, 100 * kb, # 500kb -> 100kb
+ # 50 * kb, 45 * kb, 40 * kb, 35 * kb, 30 * kb, 25 * kb, 20 * kb, 15 * kb, 10 * kb # 50kb -> 10kb
+]
+
+min_float = sys.float_info.min * sys.float_info.epsilon
+
+def _test(path: Path, size: int):
+ size = int(size)
+ path = path / ".tests" / "speed"
+ logger.debug(f"Speed testing. Path: {path}, Size: {size / mb:.4f}MB")
+ os.makedirs(path, exist_ok=True)
+ _test_file = path / ".test_speed"
+ if os.path.exists(_test_file):
+ os.remove(_test_file)
+
+    # Write the file
+ t1 = time.perf_counter()
+ with open(_test_file, 'wb') as f:
+ f.write(b'\0' * size)
+ t2 = time.perf_counter()
+
+    # Read the file back
+ t3 = time.perf_counter()
+ with open(_test_file, 'rb') as f:
+        while f.read(1024 * 1024):  # Read in 1 MB blocks
+ pass
+ t4 = time.perf_counter()
+
+ os.remove(_test_file)
+
+    # Guard against division by zero
+ write_time = max(t2 - t1, 1e-9)
+ read_time = max(t4 - t3, 1e-9)
+
+    return size / write_time / mb, size / read_time / mb  # MB/s write ("upload"), MB/s read ("download")
+
+def test_speed(path: Path, iterations: int = 5):
+ data_upload = []
+ data_download = []
+ for _ in range(iterations):
+ u, d = _test(path, random.choice(rsize))
+ data_upload.append(u)
+ data_download.append(d)
+ avg_uspeed = sum(data_upload) / len(data_upload)
+ avg_dspeed = sum(data_download) / len(data_download)
+ return avg_uspeed, avg_dspeed