Demo benchmark with R/Python
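
This example runs the Lasso benchmark on a simulated dataset with a Python proximal gradient solver and its R counterpart, saves the raw results, and generates the standard benchopt plots.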

[Result figures: Lasso Regression[fit_intercept=False,reg=0.5], Data: Simulated[n_features=5000,n_samples=100,rho=0]]
/home/tom/.local/miniconda/lib/python3.12/site-packages/rpy2/rinterface/__init__.py:1211: UserWarning: Environment variable "PWD" redefined by R and overriding existing variable. Current: "/home/tom/Work/prog/benchopt/doc", R: "/home/tom/Work/prog/benchopt/examples"
  warnings.warn(
/home/tom/.local/miniconda/lib/python3.12/site-packages/rpy2/rinterface/__init__.py:1211: UserWarning: Environment variable "R_SESSION_TMPDIR" redefined by R and overriding existing variable. Current: "/tmp/RtmpxzQQXG", R: "/tmp/RtmpTHpABK"
  warnings.warn(
Simulated[n_features=5000,n_samples=100,rho=0]
  |--Lasso Regression[fit_intercept=False,reg=0.5]
    |--Python-PGD[use_acceleration=False]: done
Failed to import Solver from /home/tom/Work/prog/benchopt/benchmarks/benchmark_lasso/solvers/spams.py. Please fix the following error to use this file with benchopt:
Traceback (most recent call last):
  File "/home/tom/Work/prog/benchopt/benchopt/benchmark.py", line 201, in _list_benchmark_classes
    cls = _load_class_from_module(
          ^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/tom/Work/prog/benchopt/benchopt/utils/dynamic_modules.py", line 67, in _load_class_from_module
    module = _get_module_from_file(module_filename, benchmark_dir)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/tom/Work/prog/benchopt/benchopt/utils/dynamic_modules.py", line 35, in _get_module_from_file
    spec.loader.exec_module(module)
  File "<frozen importlib._bootstrap_external>", line 999, in exec_module
  File "<frozen importlib._bootstrap>", line 488, in _call_with_frames_removed
  File "/home/tom/Work/prog/benchopt/benchmarks/lasso/solvers/spams.py", line 5, in <module>
    from spams import lasso, fistaFlat
ModuleNotFoundError: No module named 'spams'

    |--R-PGD: not installed
Saving result in: /home/tom/Work/prog/benchopt/benchmarks/benchmark_lasso/outputs/benchopt_run_2025-09-25_11h24m38.parquet
Save objective_curve plot of objective_value for Simulated[n_features=5000,n_samples=100,rho=0] and Lasso Regression[fit_intercept=False,reg=0.5] as: /home/tom/Work/prog/benchopt/benchmarks/benchmark_lasso/outputs/2af7dcbf2df6f8e0e43f36411ae0b7e2_objective_value_objective_curve.pdf
Save objective_curve plot of objective_support_size for Simulated[n_features=5000,n_samples=100,rho=0] and Lasso Regression[fit_intercept=False,reg=0.5] as: /home/tom/Work/prog/benchopt/benchmarks/benchmark_lasso/outputs/2af7dcbf2df6f8e0e43f36411ae0b7e2_objective_support_size_objective_curve.pdf
Save objective_curve plot of objective_duality_gap for Simulated[n_features=5000,n_samples=100,rho=0] and Lasso Regression[fit_intercept=False,reg=0.5] as: /home/tom/Work/prog/benchopt/benchmarks/benchmark_lasso/outputs/2af7dcbf2df6f8e0e43f36411ae0b7e2_objective_duality_gap_objective_curve.pdf
Save suboptimality_curve plot of objective_value for Simulated[n_features=5000,n_samples=100,rho=0] and Lasso Regression[fit_intercept=False,reg=0.5] as: /home/tom/Work/prog/benchopt/benchmarks/benchmark_lasso/outputs/2af7dcbf2df6f8e0e43f36411ae0b7e2_objective_value_suboptimality_curve.pdf
Save relative_suboptimality_curve plot of objective_value for Simulated[n_features=5000,n_samples=100,rho=0] and Lasso Regression[fit_intercept=False,reg=0.5] as: /home/tom/Work/prog/benchopt/benchmarks/benchmark_lasso/outputs/2af7dcbf2df6f8e0e43f36411ae0b7e2_objective_value_relative_suboptimality_curve.pdf
Save bar_chart plot of objective_value for Simulated[n_features=5000,n_samples=100,rho=0] and Lasso Regression[fit_intercept=False,reg=0.5] as: /home/tom/Work/prog/benchopt/benchmarks/benchmark_lasso/outputs/2af7dcbf2df6f8e0e43f36411ae0b7e2_objective_value_bar_chart.pdf
Save boxplot plot of objective_value for Simulated[n_features=5000,n_samples=100,rho=0] and Lasso Regression[fit_intercept=False,reg=0.5] as: /home/tom/Work/prog/benchopt/benchmarks/benchmark_lasso/outputs/2af7dcbf2df6f8e0e43f36411ae0b7e2_objective_value_boxplot.pdf
Save boxplot plot of objective_support_size for Simulated[n_features=5000,n_samples=100,rho=0] and Lasso Regression[fit_intercept=False,reg=0.5] as: /home/tom/Work/prog/benchopt/benchmarks/benchmark_lasso/outputs/2af7dcbf2df6f8e0e43f36411ae0b7e2_objective_support_size_boxplot.pdf
Save boxplot plot of objective_duality_gap for Simulated[n_features=5000,n_samples=100,rho=0] and Lasso Regression[fit_intercept=False,reg=0.5] as: /home/tom/Work/prog/benchopt/benchmarks/benchmark_lasso/outputs/2af7dcbf2df6f8e0e43f36411ae0b7e2_objective_duality_gap_boxplot.pdf
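
The spams import failure and the "R-PGD: not installed" line above come from benchopt's solver discovery: each solver module guards its optional dependencies with safe_import_context, so a missing package marks the solver as "not installed" instead of aborting the whole run. The snippet below is a minimal sketch of that pattern; the class layout follows benchopt's solver API, while the solver name, the conda requirement and the objective parameters (mirroring the Lasso benchmark) are assumptions for illustration.

from benchopt import BaseSolver, safe_import_context

# Optional dependencies go inside safe_import_context: if the import
# fails, benchopt reports the solver as "not installed" instead of
# raising an error when collecting the benchmark.
with safe_import_context() as import_ctx:
    import numpy as np
    from spams import fistaFlat  # the import that fails in the log above


class Solver(BaseSolver):
    # The class must be named "Solver". name, install_cmd and
    # requirements tell benchopt what to display and how to install the
    # missing dependency (package name assumed here).
    name = 'spams'
    install_cmd = 'conda'
    requirements = ['python-spams']

    def set_objective(self, X, y, lmbd, fit_intercept):
        # Parameter names mirror the Lasso benchmark's objective
        # (assumption for this sketch).
        self.X, self.y, self.lmbd = X, y, lmbd

    def run(self, n_iter):
        # Placeholder: a real solver would run n_iter iterations of
        # fistaFlat here and store the estimated coefficients.
        self.beta = np.zeros(self.X.shape[1])

    def get_result(self):
        # benchopt expects a dict of result values.
        return dict(beta=self.beta)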

from pathlib import Path
import matplotlib.pyplot as plt
from benchopt import run_benchmark
from benchopt.benchmark import Benchmark
from benchopt.plotting import plot_benchmark, PLOT_KINDS
from benchopt.plotting.plot_objective_curve import reset_solver_styles_idx


BENCHMARK_PATH = Path().resolve().parent / 'benchmarks' / 'benchmark_lasso'

if not BENCHMARK_PATH.exists():
    raise RuntimeError(
        "This example can only work when Lasso benchmark is cloned in the "
        "example folder. Please run:\n"
        "$ git clone https://github.com/benchopt/benchmark_lasso "
        f"{BENCHMARK_PATH.resolve()}"
    )

# Run the benchmark: compare the Python and R PGD solvers on the
# simulated dataset for the Lasso objective with reg=0.5 and no
# intercept. The raw results are written to a parquet file whose
# path is returned.
save_file = run_benchmark(
    BENCHMARK_PATH,
    solver_names=['Python-PGD[use_acceleration=False]', 'R-PGD'],
    dataset_names=["Simulated[n_features=5000,n_samples=100,rho=0]"],
    objective_filters=['*[fit_intercept=False,reg=0.5]'],
    max_runs=100, timeout=100, n_repetitions=5,
    plot_result=False, show_progress=False
)


# Plot the results: reset the solver style cycle, then generate one
# figure per available plot kind from the saved results.
kinds = list(PLOT_KINDS.keys())
reset_solver_styles_idx()
figs = plot_benchmark(
    save_file, benchmark=Benchmark(BENCHMARK_PATH), kinds=kinds, html=False
)
plt.show()
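
run_benchmark returns the path of the parquet file listed in the output above, so the raw measurements can also be inspected without benchopt's plotting helpers. Below is a minimal sketch using pandas; the column names are the ones benchopt usually records and should be checked against df.columns before relying on them.

import pandas as pd

# Load the raw results written by run_benchmark.
df = pd.read_parquet(save_file)

# See what was actually recorded by this benchopt version.
print(df.columns.tolist())

# Example: best objective value reached by each solver
# ('solver_name' and 'objective_value' are assumed column names).
print(df.groupby('solver_name')['objective_value'].min())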

Total running time of the script: (0 minutes 2.928 seconds)

Gallery generated by Sphinx-Gallery