Skip to content

Arprax Algorithms Library Reference

Welcome to the technical heart of the Arprax ecosystem. This section provides the low-level API details for the arprax-algorithms Python package.


🚀 Core Modules (In Development)

The library is organized into three industrial pillars:

Module Purpose Status
arprax.algos.algorithms High-performance sorting and searching. 🟢 Active
arprax.algos.utils Data generators and helper logic. 🟢 Active
arprax.algos.profiler Time and Space complexity analysis. 🟡 Beta

📦 Installation & Usage

To use these modules in your local environment:

# Core only
pip install arprax-algorithms

# With visual tools
pip install arprax-algorithms[visuals]

# With research tools
pip install arprax-algorithms[research]
# Example usage
from arprax.algos.algorithms import bubble_sort, selection_sort
from arprax.algos.utils import random_array

📖 API Documentation (Live)

The documentation below is automatically pulled from the source code.


📊 Profiler Logic

Arprax Algorithms: Performance Profiling Tools. Provides precision timing and algorithmic complexity analysis for research.

Profiler

Industrial-grade performance analyzer for Arprax Lab.

Provides precision timing, statistical analysis, and doubling-test complexity estimation. Designed to work without external dependencies.

Attributes:

Name Type Description
repeats int

Number of times to run each benchmark.

warmup int

Number of discarded runs to prime the CPU cache.

mode str

Statistical mode for final result ('min', 'mean', 'median').

Source code in src/arprax/algos/profiler.py
class Profiler:
    """
    Industrial-grade performance analyzer for Arprax Lab.

    Provides precision timing, statistical analysis, and doubling-test
    complexity estimation. Designed to work without external dependencies.

    Attributes:
        repeats (int): Number of times to run each benchmark.
        warmup (int): Number of discarded runs to prime the CPU cache.
        mode (str): Statistical mode for final result ('min', 'mean', 'median').
    """

    def __init__(self, repeats: int = 5, warmup: int = 1, mode: str = "min"):
        """Initializes the Profiler with user-defined benchmark settings."""
        self.repeats = repeats
        self.warmup = warmup
        self.mode = mode
        # Maps label/function name -> list of elapsed times (seconds).
        self._profile_stats: Dict[str, List[float]] = {}

    @contextmanager
    def stopwatch(self, label: str = "Block") -> Generator[None, None, None]:
        """
        Context manager for precision timing of a specific code block.

        Args:
            label (str): Name of the block for the final report.
        """
        start = timeit.default_timer()
        try:
            yield
        finally:
            # Record in the finally clause so timings survive exceptions.
            elapsed = timeit.default_timer() - start
            self._profile_stats.setdefault(label, []).append(elapsed)

    def benchmark(self, func: Callable, *args: Any) -> float:
        """
        Runs a function with garbage collection disabled to ensure timing purity.

        Args:
            func (Callable): The function to measure.
            *args (Any): Arguments to pass to the function.

        Returns:
            float: The measured time in seconds based on the 'mode' setting.
        """
        # Warmup runs (not timed).
        for _ in range(self.warmup):
            func(*copy.deepcopy(args))

        times: List[float] = []
        gc_old = gc.isenabled()
        gc.disable()
        try:
            for _ in range(self.repeats):
                # Deepcopy used to ensure input data isn't pre-sorted by previous runs
                safe_args = copy.deepcopy(args)
                start = timeit.default_timer()
                func(*safe_args)
                times.append(timeit.default_timer() - start)
        finally:
            # Restore GC only if it was enabled when we entered.
            if gc_old:
                gc.enable()

        if self.mode == "median":
            return statistics.median(times)
        if self.mode == "mean":
            return statistics.mean(times)
        return min(times)

    def run_doubling_test(
        self,
        func: Callable,
        input_gen: Callable[[int], Any],
        start_n: int = 250,
        rounds: int = 6,
    ) -> List[Dict[str, Any]]:
        """
        Performs doubling analysis to estimate Big O complexity.

        Args:
            func (Callable): Algorithm to analyze.
            input_gen (Callable): Function that generates data for size N.
            start_n (int): Initial input size.
            rounds (int): How many times to double N.

        Returns:
            List[Dict[str, Any]]: A log of N, Time, Ratio, and estimated Complexity.
        """
        # Recursive algorithms (e.g. quick sort) may need extra stack depth.
        sys.setrecursionlimit(max(3000, sys.getrecursionlimit()))
        results: List[Dict[str, Any]] = []
        prev_time = 0.0
        n = start_n

        for _ in range(rounds):
            data = input_gen(n)
            # Generators may return a tuple of positional args or a single value.
            args = data if isinstance(data, tuple) else (data,)
            curr_time = self.benchmark(func, *args)

            # First round has no predecessor; report ratio 0.0.
            ratio = curr_time / prev_time if prev_time > 0 else 0.0
            complexity = self._guess_complexity(ratio)

            results.append(
                {"N": n, "Time": curr_time, "Ratio": ratio, "Complexity": complexity}
            )
            prev_time = curr_time
            n *= 2
        return results

    def profile(self, func: Callable) -> Callable:
        """
        Decorator to track function execution time during normal program flow.

        Args:
            func (Callable): The function to be decorated.

        Returns:
            Callable: The wrapped function.
        """

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            start = timeit.default_timer()
            result = func(*args, **kwargs)
            elapsed = timeit.default_timer() - start
            self._profile_stats.setdefault(func.__name__, []).append(elapsed)
            return result

        return wrapper

    def print_decorator_report(self) -> None:
        """Prints a summary table of all tracked functions and stopwatch blocks."""
        print("\n📝 ARPRAX PROFILE REPORT")
        print(
            f"{'Label/Function':<20} | {'Calls':<6} | {'Avg Time (s)':<12} | {'Total Time'}"
        )
        print("-" * 65)
        for fname, times in self._profile_stats.items():
            avg_t = statistics.mean(times)
            total_t = sum(times)
            print(f"{fname:<20} | {len(times):<6} | {avg_t:<12.5f} | {total_t:.5f}")

    def _guess_complexity(self, ratio: float) -> str:
        """
        Heuristic to map doubling ratios to Big O notations.

        Bands are contiguous. Previously there were gaps between bands
        (1.4-1.6, 2.5-3.5, 4.8-7.0) so ratios such as 1.5 or 3.0 fell
        through to "High Growth", misreporting modest growth as high.

        Args:
            ratio (float): The ratio of T(2N) / T(N).

        Returns:
            str: The guessed complexity string.
        """
        if ratio < 1.4:
            return "O(1) / O(log N)"
        if ratio <= 3.0:  # doubling time ~2x => linear-ish growth
            return "O(N)"
        if ratio <= 5.9:  # ~4x => quadratic
            return "O(N^2)"
        if ratio <= 9.0:  # ~8x => cubic
            return "O(N^3)"
        return "High Growth"

    def print_analysis(self, func_name: str, results: List[Dict[str, Any]]) -> None:
        """
        Prints a formatted results table from a doubling test.

        Args:
            func_name (str): Name of the analyzed algorithm.
            results (List[Dict]): Data from run_doubling_test.
        """
        print(f"\n🔬 ANALYSIS: {func_name} (Mode: {self.mode})")
        print(f"{'N':<10} | {'Time (s)':<12} | {'Ratio':<8} | {'Est. Complexity':<15}")
        print("-" * 55)
        for row in results:
            r_str = f"{row['Ratio']:.2f}" if row["Ratio"] > 0 else "-"
            print(
                f"{row['N']:<10} | {row['Time']:<12.5f} | {r_str:<8} | {row['Complexity']:<15}"
            )

    def run_stress_suite(
        self,
        funcs: Dict[str, Callable],
        input_gen: Callable[[int], Any],
        n_values: "List[int] | None" = None,
    ) -> Dict[int, Dict[str, float]]:
        """
        Runs multiple algorithms against multiple input sizes for head-to-head comparison.

        Args:
            funcs (Dict): Map of {'Name': Function}.
            input_gen (Callable): Data generator function.
            n_values (List[int] | None): N sizes to test; defaults to
                [1000, 2000, 4000].

        Returns:
            Dict[int, Dict[str, float]]: Nested mapping of {N: {Name: Time}}.
        """
        # Default resolved here rather than as a mutable default argument,
        # which would be shared across calls (classic Python pitfall).
        if n_values is None:
            n_values = [1000, 2000, 4000]
        suite_results: Dict[int, Dict[str, float]] = {}
        for n in n_values:
            suite_results[n] = {}
            data = input_gen(n)
            args = data if isinstance(data, tuple) else (data,)

            for name, func in funcs.items():
                suite_results[n][name] = self.benchmark(func, *args)
        return suite_results
__init__(repeats=5, warmup=1, mode='min')

Initializes the Profiler with user-defined benchmark settings.

Source code in src/arprax/algos/profiler.py
def __init__(self, repeats: int = 5, warmup: int = 1, mode: str = "min"):
    """Initializes the Profiler with user-defined benchmark settings."""
    self.repeats = repeats
    self.warmup = warmup
    self.mode = mode
    self._profile_stats = {}
benchmark(func, *args)

Runs a function with garbage collection disabled to ensure timing purity.

Parameters:

Name Type Description Default
func Callable

The function to measure.

required
*args Any

Arguments to pass to the function.

()

Returns:

Name Type Description
float float

The measured time in seconds based on the 'mode' setting.

Source code in src/arprax/algos/profiler.py
def benchmark(self, func: Callable, *args: Any) -> float:
    """
    Measure func(*args) with garbage collection paused for timing purity.

    Args:
        func (Callable): The function to measure.
        *args (Any): Arguments to pass to the function.

    Returns:
        float: Elapsed seconds, aggregated per the 'mode' setting.
    """
    # Un-timed warmup passes to prime caches before measurement.
    for _ in range(self.warmup):
        func(*copy.deepcopy(args))

    samples = []
    was_enabled = gc.isenabled()
    gc.disable()
    try:
        for _ in range(self.repeats):
            # Fresh deep copy each run so in-place mutation (e.g. sorting)
            # cannot skew later iterations.
            fresh = copy.deepcopy(args)
            t0 = timeit.default_timer()
            func(*fresh)
            samples.append(timeit.default_timer() - t0)
    finally:
        # Only re-enable GC if it was on when we started.
        if was_enabled:
            gc.enable()

    if self.mode == "median":
        return statistics.median(samples)
    if self.mode == "mean":
        return statistics.mean(samples)
    return min(samples)
print_analysis(func_name, results)

Prints a formatted results table from a doubling test.

Parameters:

Name Type Description Default
func_name str

Name of the analyzed algorithm.

required
results List[Dict]

Data from run_doubling_test.

required
Source code in src/arprax/algos/profiler.py
def print_analysis(self, func_name: str, results: List[Dict[str, Any]]) -> None:
    """
    Print a formatted results table for a doubling test.

    Args:
        func_name (str): Name of the analyzed algorithm.
        results (List[Dict]): Rows produced by run_doubling_test.
    """
    print(f"\n🔬 ANALYSIS: {func_name} (Mode: {self.mode})")
    header = f"{'N':<10} | {'Time (s)':<12} | {'Ratio':<8} | {'Est. Complexity':<15}"
    print(header)
    print("-" * 55)
    for entry in results:
        # The first round has no predecessor, so its ratio prints as '-'.
        ratio_txt = "-" if entry["Ratio"] <= 0 else f"{entry['Ratio']:.2f}"
        row = (
            f"{entry['N']:<10} | {entry['Time']:<12.5f} | "
            f"{ratio_txt:<8} | {entry['Complexity']:<15}"
        )
        print(row)
print_decorator_report()

Prints a summary table of all tracked functions and stopwatch blocks.

Source code in src/arprax/algos/profiler.py
def print_decorator_report(self) -> None:
    """Print a summary table of all tracked functions and stopwatch blocks."""
    print("\n📝 ARPRAX PROFILE REPORT")
    print(
        f"{'Label/Function':<20} | {'Calls':<6} | {'Avg Time (s)':<12} | {'Total Time'}"
    )
    print("-" * 65)
    for label, samples in self._profile_stats.items():
        # One row per tracked label: call count, mean, and cumulative time.
        mean_t = statistics.mean(samples)
        print(
            f"{label:<20} | {len(samples):<6} | {mean_t:<12.5f} | {sum(samples):.5f}"
        )
profile(func)

Decorator to track function execution time during normal program flow.

Parameters:

Name Type Description Default
func Callable

The function to be decorated.

required

Returns:

Name Type Description
Callable Callable

The wrapped function.

Source code in src/arprax/algos/profiler.py
def profile(self, func: Callable) -> Callable:
    """
    Decorator that records each call's wall-clock time under the function's name.

    Args:
        func (Callable): The function to be decorated.

    Returns:
        Callable: The wrapped function.
    """

    @functools.wraps(func)
    def timed(*args, **kwargs):
        t0 = timeit.default_timer()
        out = func(*args, **kwargs)
        elapsed = timeit.default_timer() - t0
        # setdefault creates the history list on first call.
        self._profile_stats.setdefault(func.__name__, []).append(elapsed)
        return out

    return timed
run_doubling_test(func, input_gen, start_n=250, rounds=6)

Performs doubling analysis to estimate Big O complexity.

Parameters:

Name Type Description Default
func Callable

Algorithm to analyze.

required
input_gen Callable

Function that generates data for size N.

required
start_n int

Initial input size.

250
rounds int

How many times to double N.

6

Returns:

Type Description
List[Dict[str, Any]]

List[Dict[str, Any]]: A log of N, Time, Ratio, and estimated Complexity.

Source code in src/arprax/algos/profiler.py
def run_doubling_test(
    self,
    func: Callable,
    input_gen: Callable[[int], Any],
    start_n: int = 250,
    rounds: int = 6,
) -> List[Dict[str, Any]]:
    """
    Doubling analysis: time func on inputs of size N, 2N, 4N, ... and
    estimate Big O from the growth ratio between consecutive rounds.

    Args:
        func (Callable): Algorithm to analyze.
        input_gen (Callable): Function that generates data for size N.
        start_n (int): Initial input size.
        rounds (int): How many times to double N.

    Returns:
        List[Dict[str, Any]]: A log of N, Time, Ratio, and estimated Complexity.
    """
    # Recursive algorithms may need extra stack depth at large N.
    sys.setrecursionlimit(max(3000, sys.getrecursionlimit()))
    log: List[Dict[str, Any]] = []
    last = 0.0
    size = start_n

    for _ in range(rounds):
        payload = input_gen(size)
        # A tuple payload is splatted as positional args; anything else is one arg.
        call_args = payload if isinstance(payload, tuple) else (payload,)
        elapsed = self.benchmark(func, *call_args)

        # First round has no predecessor, so its ratio is reported as 0.0.
        growth = elapsed / last if last > 0 else 0.0
        log.append(
            {
                "N": size,
                "Time": elapsed,
                "Ratio": growth,
                "Complexity": self._guess_complexity(growth),
            }
        )
        last, size = elapsed, size * 2
    return log
run_stress_suite(funcs, input_gen, n_values=[1000, 2000, 4000])

Runs multiple algorithms against multiple input sizes for head-to-head comparison.

Parameters:

Name Type Description Default
funcs Dict

Map of {'Name': Function}.

required
input_gen Callable

Data generator function.

required
n_values List[int]

List of N sizes to test.

[1000, 2000, 4000]

Returns:

Type Description
Dict[int, Dict[str, float]]

Dict[int, Dict[str, float]]: Nested mapping of {N: {Name: Time}}.

Source code in src/arprax/algos/profiler.py
def run_stress_suite(
    self,
    funcs: Dict[str, Callable],
    input_gen: Callable[[int], Any],
    n_values: "List[int] | None" = None,
) -> Dict[int, Dict[str, float]]:
    """
    Runs multiple algorithms against multiple input sizes for head-to-head comparison.

    Args:
        funcs (Dict): Map of {'Name': Function}.
        input_gen (Callable): Data generator function.
        n_values (List[int] | None): N sizes to test; defaults to
            [1000, 2000, 4000].

    Returns:
        Dict[int, Dict[str, float]]: Nested mapping of {N: {Name: Time}}.
    """
    # Default resolved here rather than as a mutable default argument
    # ([1000, 2000, 4000] in the signature), which is shared between
    # calls and a classic Python pitfall.
    if n_values is None:
        n_values = [1000, 2000, 4000]
    suite_results: Dict[int, Dict[str, float]] = {}
    for n in n_values:
        suite_results[n] = {}
        data = input_gen(n)
        args = data if isinstance(data, tuple) else (data,)

        for name, func in funcs.items():
            suite_results[n][name] = self.benchmark(func, *args)
    return suite_results
stopwatch(label='Block')

Context manager for precision timing of a specific code block.

Parameters:

Name Type Description Default
label str

Name of the block for the final report.

'Block'
Source code in src/arprax/algos/profiler.py
@contextmanager
def stopwatch(self, label: str = "Block") -> Generator[None, None, None]:
    """
    Time a with-block and record the elapsed seconds under `label`.

    Args:
        label (str): Name of the block for the final report.
    """
    begin = timeit.default_timer()
    try:
        yield
    finally:
        # finally-clause ensures the timing is recorded even on exceptions.
        duration = timeit.default_timer() - begin
        self._profile_stats.setdefault(label, []).append(duration)

🛠️ Data Generators

Arprax Algorithms: Data Generators. Provides industrial-grade tools for creating research-ready datasets.

large_scale_dataset(n)

High-performance data generator for large-scale research.

Attempts to use NumPy for speed if available; otherwise, falls back to standard Python random generation.

Parameters:

Name Type Description Default
n int

The number of elements to generate.

required

Returns:

Type Description
List[int]

List[int]: A list of random integers.

Source code in src/arprax/algos/utils/generators.py
def large_scale_dataset(n: int) -> List[int]:
    """
    High-performance data generator for large-scale research.

    Uses NumPy when it is installed (much faster for millions of values);
    otherwise falls back to the dependency-free random_array().

    Args:
        n (int): The number of elements to generate.

    Returns:
        List[int]: A list of random integers.
    """
    try:
        import numpy as np
    except ImportError:
        # Graceful fallback for the 'Ultra-Lean' configuration.
        return random_array(n)
    return np.random.randint(0, 1000, n).tolist()  # pragma: no cover
random_array(n, lo=0, hi=1000)

Generates an array of n random integers using Python's built-in random module.

This serves as the default, dependency-free generator for the library.

Parameters:

Name Type Description Default
n int

The number of elements to generate.

required
lo int

The lower bound of the random range (inclusive).

0
hi int

The upper bound of the random range (inclusive).

1000

Returns:

Type Description
List[int]

List[int]: A list of n random integers.

Source code in src/arprax/algos/utils/generators.py
def random_array(n: int, lo: int = 0, hi: int = 1000) -> List[int]:
    """
    Build a list of n random integers drawn uniformly from [lo, hi].

    This is the default, dependency-free generator for the library.

    Args:
        n (int): The number of elements to generate.
        lo (int): Lower bound of the random range (inclusive).
        hi (int): Upper bound of the random range (inclusive).

    Returns:
        List[int]: A list of n random integers.
    """
    draw = random.randint  # hoist the attribute lookup out of the loop
    return [draw(lo, hi) for _ in range(n)]
reverse_sorted_array(n)

Legacy wrapper for descending order.

Frequently used for 'Worst Case' algorithm testing in the Arprax Lab.

Parameters:

Name Type Description Default
n int

The number of elements to generate.

required

Returns:

Type Description
List[int]

List[int]: A list containing integers from n-1 down to 0.

Source code in src/arprax/algos/utils/generators.py
def reverse_sorted_array(n: int) -> List[int]:
    """
    Descending-order array: the classic 'Worst Case' input in the Arprax Lab.

    Args:
        n (int): The number of elements to generate.

    Returns:
        List[int]: A list containing integers from n-1 down to 0.
    """
    # Equivalent to sorted_array(n, reverse=True), built directly.
    return list(range(n - 1, -1, -1))
sorted_array(n, reverse=False)

Generates an array of n integers in sorted order.

Parameters:

Name Type Description Default
n int

The number of elements to generate.

required
reverse bool

If True, returns descending order (Worst Case).

False

Returns:

Type Description
List[int]

List[int]: A list containing integers from 0 to n-1 (or reversed).

Source code in src/arprax/algos/utils/generators.py
def sorted_array(n: int, reverse: bool = False) -> List[int]:
    """
    Array of the integers 0..n-1 in ascending (or descending) order.

    Args:
        n (int): The number of elements to generate.
        reverse (bool): If True, returns descending order (Worst Case).

    Returns:
        List[int]: Integers from 0 to n-1, reversed when requested.
    """
    return list(range(n - 1, -1, -1)) if reverse else list(range(n))

🧬 Algorithms

Arprax Algorithms: Sorting Module.

Provides industrial-grade implementations of fundamental sorting algorithms. Includes elementary sorts (Selection, Insertion, Shell) and advanced sorts (Merge, Quick, Heap).

Features
  • Generator Support: All functions support a 'visualize=True' flag to yield intermediate states for animation.
  • Optimization: Quick Sort uses 3-way partitioning (Dijkstra) for duplicate handling.
  • Efficiency: Merge Sort uses a single auxiliary array to reduce memory overhead.
Reference

Algorithms, 4th Edition by Sedgewick and Wayne, Chapter 2.

heap_sort(arr, visualize=False)

Heap Sort: Builds a binary heap and sorts with in-place sift operations on a working copy of the input. Guarantees O(N log N) time with O(1) auxiliary space beyond that copy.

Source code in src/arprax/algos/algorithms/sorting.py
def heap_sort(arr: List[Any], visualize: bool = False) -> Union[List[Any], Generator]:
    """
    Heap Sort: build a max-heap bottom-up, then repeatedly swap the root
    to the end of the shrinking heap (sort-down). O(N log N) time; the
    input is copied, and sorting happens in place on that copy.
    """
    items = list(arr)
    size = len(items)

    def _push_down(parent: int, limit: int):
        # Sift items[parent] down until the heap property holds within limit.
        child = 2 * parent + 1
        while child < limit:
            if child + 1 < limit and items[child] < items[child + 1]:
                child += 1  # pick the larger of the two children
            if items[parent] >= items[child]:
                return
            items[parent], items[child] = items[child], items[parent]
            parent, child = child, 2 * child + 1
            if visualize:
                yield list(items)

    def _steps():
        # Phase 1: bottom-up heap construction.
        for root in range(size // 2 - 1, -1, -1):
            yield from _push_down(root, size)

        # Phase 2: sort-down — move current max to the end, then re-heapify.
        for end in range(size - 1, 0, -1):
            items[0], items[end] = items[end], items[0]
            if visualize:
                yield list(items)
            yield from _push_down(0, end)

        if not visualize:
            yield list(items)

    steps = _steps()
    return steps if visualize else list(steps)[-1]
insertion_sort(arr, visualize=False)

Insertion Sort: Builds the sort by moving elements one at a time. Excellent for partially sorted arrays or small subarrays.

Complexity: Time O(N^2) (O(N) best case) | Space O(1)

Source code in src/arprax/algos/algorithms/sorting.py
def insertion_sort(
    arr: List[Any], visualize: bool = False
) -> Union[List[Any], Generator]:
    """
    Insertion Sort: grows a sorted prefix by sliding each new element
    leftward into position. Excellent for nearly-sorted or small inputs.

    Complexity: Time O(N^2) (O(N) best case) | Space O(1)
    """
    items = list(arr)

    def _steps():
        for idx in range(1, len(items)):
            pos = idx
            # Swap leftward while out of order; stop at the first ordered pair.
            while pos > 0 and items[pos] < items[pos - 1]:
                items[pos - 1], items[pos] = items[pos], items[pos - 1]
                if visualize:
                    yield list(items)
                pos -= 1
        if not visualize:
            yield list(items)

    gen = _steps()
    return gen if visualize else list(gen)[-1]
merge_sort(arr, visualize=False)

Merge Sort: Recursive divide-and-conquer. Guarantees O(N log N) time, but requires O(N) auxiliary space.

Source code in src/arprax/algos/algorithms/sorting.py
def merge_sort(arr: List[Any], visualize: bool = False) -> Union[List[Any], Generator]:
    """
    Merge Sort: recursive divide-and-conquer with a single shared
    auxiliary array. Guaranteed O(N log N) time, O(N) extra space.
    """
    result = list(arr)
    scratch = list(arr)  # one shared auxiliary buffer for all merges

    def _combine(lo: int, mid: int, hi: int):
        # Snapshot the range, then merge the two sorted halves back.
        scratch[lo : hi + 1] = result[lo : hi + 1]
        left, right = lo, mid + 1
        for out in range(lo, hi + 1):
            if left > mid:
                result[out] = scratch[right]
                right += 1
            elif right > hi:
                result[out] = scratch[left]
                left += 1
            elif scratch[right] < scratch[left]:
                result[out] = scratch[right]
                right += 1
            else:
                # Taking from the left on ties keeps the sort stable.
                result[out] = scratch[left]
                left += 1

            if visualize:
                yield list(result)

    def _divide(lo: int, hi: int):
        if lo >= hi:
            return
        mid = (lo + hi) // 2
        yield from _divide(lo, mid)
        yield from _divide(mid + 1, hi)
        yield from _combine(lo, mid, hi)

    def _driver():
        yield from _divide(0, len(result) - 1)
        if not visualize:
            yield result

    seq = _driver()
    return seq if visualize else list(seq)[-1]
quick_sort(arr, visualize=False)

Quick Sort (3-Way Partition): The standard for general purpose sorting. Uses Dijkstra's 3-way partitioning to handle duplicate keys efficiently.

Complexity: Time O(N log N) average | Space O(log N) recursion

Source code in src/arprax/algos/algorithms/sorting.py
def quick_sort(arr: List[Any], visualize: bool = False) -> Union[List[Any], Generator]:
    """
    Quick Sort with Dijkstra's 3-way partition: splits each range into
    < pivot, == pivot, > pivot, so duplicate keys never recurse.

    Complexity: Time O(N log N) average | Space O(log N) recursion
    """
    items = list(arr)

    def _partition_sort(lo: int, hi: int):
        if lo >= hi:
            return

        # Invariant: [lo, less) < pivot, [less, cursor) == pivot,
        # (greater, hi] > pivot; [cursor, greater] still unexamined.
        pivot = items[lo]
        less, cursor, greater = lo, lo + 1, hi

        while cursor <= greater:
            if items[cursor] < pivot:
                items[less], items[cursor] = items[cursor], items[less]
                less += 1
                cursor += 1
                if visualize:
                    yield list(items)
            elif items[cursor] > pivot:
                items[cursor], items[greater] = items[greater], items[cursor]
                greater -= 1
                if visualize:
                    yield list(items)
            else:
                cursor += 1

        # Recurse only on the strictly-less and strictly-greater ranges.
        yield from _partition_sort(lo, less - 1)
        yield from _partition_sort(greater + 1, hi)

    def _driver():
        yield from _partition_sort(0, len(items) - 1)
        if not visualize:
            yield items

    seq = _driver()
    return seq if visualize else list(seq)[-1]
selection_sort(arr, visualize=False)

Selection Sort: Scans for the minimum item and swaps it into place.

Complexity: Time O(N^2) | Space O(1)

Source code in src/arprax/algos/algorithms/sorting.py
def selection_sort(
    arr: List[Any], visualize: bool = False
) -> Union[List[Any], Generator]:
    """
    Selection Sort: repeatedly selects the minimum of the unsorted
    suffix and swaps it to the front.

    Complexity: Time O(N^2) | Space O(1)
    """
    items = list(arr)
    count = len(items)

    def _steps():
        for left in range(count):
            # min() returns the first index of the smallest value, exactly
            # matching a strict-< scan.
            smallest = min(range(left, count), key=items.__getitem__)
            items[left], items[smallest] = items[smallest], items[left]
            if visualize:
                yield list(items)
        if not visualize:
            yield list(items)

    gen = _steps()
    return gen if visualize else list(gen)[-1]
shell_sort(arr, visualize=False)

Shell Sort: An optimized Insertion Sort using 'h-gaps'. Moves elements long distances to produce a partially sorted array, then finishes with standard insertion sort.

Complexity: Time O(N^1.5) approx | Space O(1)

Source code in src/arprax/algos/algorithms/sorting.py
def shell_sort(arr: List[Any], visualize: bool = False) -> Union[List[Any], Generator]:
    """
    Shell Sort: An optimized Insertion Sort using 'h-gaps'.
    Moves elements long distances to produce a partially sorted array,
    then finishes with standard insertion sort.

    Complexity: Time O(N^1.5) approx | Space O(1)

    Args:
        arr (List[Any]): Input sequence (left unmodified; a copy is sorted).
        visualize (bool): If True, return a generator yielding the array
            state after every swap; otherwise return the sorted list.

    Returns:
        Union[List[Any], Generator]: Sorted list, or generator of states.
    """
    data = list(arr)
    n = len(data)

    def _algo():
        # 1. Compute max h-sequence (Knuth's: 1, 4, 13, 40...)
        h = 1
        while h < n // 3:
            h = 3 * h + 1

        # 2. Sort
        while h >= 1:
            for i in range(h, n):
                # Gap-insertion: follow the chain i, i-h, i-2h, ...
                # BUG FIX: step must be -h (was -1). With -1 the loop
                # compared unrelated pairs during the h>1 phases; only the
                # final h=1 pass (plain insertion sort) masked the defect.
                for j in range(i, h - 1, -h):
                    if data[j] < data[j - h]:
                        data[j], data[j - h] = data[j - h], data[j]
                        if visualize:
                            yield list(data)
                    else:
                        break
            h //= 3
        if not visualize:
            yield list(data)

    gen = _algo()
    return gen if visualize else list(gen)[-1]