Remove deprecated max_workers
khk-globus committed Sep 4, 2024
1 parent 3f2bf18 commit 94cbf8d
Showing 5 changed files with 18 additions and 45 deletions.
parsl/executors/high_throughput/executor.py (1 addition & 17 deletions)
@@ -199,9 +199,6 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
will check the available memory at startup and limit the number of workers such that
the there's sufficient memory for each worker. Default: None
-max_workers : int
-    Deprecated. Please use max_workers_per_node instead.
max_workers_per_node : int
Caps the number of workers launched per node. Default: None
@@ -239,7 +236,6 @@ def __init__(self,
worker_debug: bool = False,
cores_per_worker: float = 1.0,
mem_per_worker: Optional[float] = None,
-max_workers: Optional[Union[int, float]] = None,
max_workers_per_node: Optional[Union[int, float]] = None,
cpu_affinity: str = 'none',
available_accelerators: Union[int, Sequence[str]] = (),
@@ -272,9 +268,7 @@ def __init__(self,
else:
self.all_addresses = ','.join(get_all_addresses())

-if max_workers:
-    self._warn_deprecated("max_workers", "max_workers_per_node")
-self.max_workers_per_node = max_workers_per_node or max_workers or float("inf")
+self.max_workers_per_node = max_workers_per_node or float("inf")

mem_slots = self.max_workers_per_node
cpu_slots = self.max_workers_per_node
@@ -335,16 +329,6 @@ def _warn_deprecated(self, old: str, new: str):
stacklevel=2
)

-@property
-def max_workers(self):
-    self._warn_deprecated("max_workers", "max_workers_per_node")
-    return self.max_workers_per_node
-
-@max_workers.setter
-def max_workers(self, val: Union[int, float]):
-    self._warn_deprecated("max_workers", "max_workers_per_node")
-    self.max_workers_per_node = val
-
@property
def logdir(self):
return "{}/{}".format(self.run_dir, self.label)
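
With the deprecated keyword gone, configurations must name the per-node limit directly. The following is a minimal sketch of the updated construction; it is not taken from this commit, but the Config/LocalProvider/LocalChannel wiring and the label and address values mirror the test configs changed further down in this diff.

from parsl.channels import LocalChannel
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.providers import LocalProvider

# Sketch only: passing max_workers=... would now be rejected; the per-node
# cap is expressed solely through max_workers_per_node.
config = Config(
    executors=[
        HighThroughputExecutor(
            label="htex_local",
            address="127.0.0.1",
            max_workers_per_node=1,   # replaces the removed max_workers=1
            encrypted=True,
            provider=LocalProvider(channel=LocalChannel()),
        )
    ]
)
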
parsl/tests/site_tests/README.rst (15 additions & 14 deletions)
@@ -46,27 +46,28 @@ Adding a new site
1. Specialized python builds for the system (for eg, Summit)
2. Anaconda available via modules
3. User's conda installation
-* Add a new block to `conda_setup.sh` that installs a fresh environment and writes out
-  the activation commands to `~/setup_parsl_test_env.sh`
-* Add a site config to `parsl/tests/configs/<SITE.py>` and add your local user options
-  to `parsl/tests/configs/local_user_opts.py`. For eg, `here's mine<https://gist.github.com/yadudoc/b71765284d2db0706c4f43605dd8b8d6>`_
-  Make sure that the site config uses the `fresh_config` pattern.
+* Add a new block to ``conda_setup.sh`` that installs a fresh environment and writes out
+  the activation commands to ``~/setup_parsl_test_env.sh``
+* Add a site config to ``parsl/tests/configs/<SITE.py>`` and add your local user options
+  to ``parsl/tests/configs/local_user_opts.py``. For example,
+  `here's mine<https://gist.github.com/yadudoc/b71765284d2db0706c4f43605dd8b8d6>`_
+  Make sure that the site config uses the ``fresh_config`` pattern.
Please ensure that the site config uses:
-* max_workers = 1
-* init_blocks = 1
-* min_blocks = 0
+* ``max_workers_per_node = 1``
+* ``init_blocks = 1``
+* ``min_blocks = 0``

-* Add this site config to `parsl/tests/site_tests/site_config_selector.py`
-* Reinstall parsl, using `pip install .`
-* Test a single test: `python3 test_site.py -d` to confirm that the site works correctly.
-* Once tests are passing run the whole site_test with `make site_test`
+* Add this site config to ``parsl/tests/site_tests/site_config_selector.py``
+* Reinstall parsl, using ``pip install .``
+* Test a single test: ``python3 test_site.py -d`` to confirm that the site works correctly.
+* Once tests are passing run the whole site_test with ``make site_test``


Shared filesystem option
------------------------

-There is a new env variable "SHARED_FS_OPTIONS" to pass markers to pytest to skip certain tests.
+There is a new env variable ``SHARED_FS_OPTIONS`` to pass markers to pytest to skip certain tests.

When there's a shared-FS, the default NoOpStaging works. However, when there's no shared-FS some tests
that uses File objects require a staging provider (eg. rsync). These tests can be turned off with
-`-k "not staging_required"`
+``-k "not staging_required"``
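
As a concrete illustration of the three settings the README calls out, here is a hypothetical sketch of a site config written in the ``fresh_config`` pattern. The label is a placeholder and ``LocalProvider`` is only a stand-in; a real site would use its own scheduler's provider and pull machine-specific details from ``local_user_opts``.

from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.providers import LocalProvider


def fresh_config():
    # Stand-in provider: a real site config would substitute its scheduler's provider.
    return Config(
        executors=[
            HighThroughputExecutor(
                label="site_htex",          # placeholder label
                max_workers_per_node=1,     # one worker per node, as required above
                provider=LocalProvider(
                    init_blocks=1,          # start with a single block
                    min_blocks=0,           # allow scale-in to zero
                ),
            )
        ]
    )
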
parsl/tests/test_htex/test_htex.py (0 additions & 12 deletions)
@@ -126,18 +126,6 @@ def kill_interchange(*args, **kwargs):
assert "HighThroughputExecutor has not started" in caplog.text


-@pytest.mark.local
-def test_max_workers_per_node():
-    with pytest.warns(DeprecationWarning) as record:
-        htex = HighThroughputExecutor(max_workers_per_node=1, max_workers=2)
-
-    warning_msg = "max_workers is deprecated"
-    assert any(warning_msg in str(warning.message) for warning in record)
-
-    # Ensure max_workers_per_node takes precedence
-    assert htex.max_workers_per_node == htex.max_workers == 1
-

@pytest.mark.local
@pytest.mark.parametrize("cmd", (None, "custom-launch-cmd"))
def test_htex_worker_pool_launch_cmd(cmd: Optional[str]):
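
Since the keyword no longer exists on ``__init__``, a natural follow-up check, not part of this commit and shown here only as a hypothetical sketch, is that the old spelling is rejected outright rather than warned about:

import pytest

from parsl.executors import HighThroughputExecutor


@pytest.mark.local
def test_max_workers_rejected():
    # Hypothetical: with the parameter removed, the old keyword should surface
    # as an unexpected-keyword TypeError instead of a DeprecationWarning.
    with pytest.raises(TypeError):
        HighThroughputExecutor(max_workers=2)
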
(Fourth changed file; name not shown)
@@ -23,7 +23,7 @@ def local_config():
poll_period=100,
label="htex_local",
address="127.0.0.1",
-max_workers=1,
+max_workers_per_node=1,
encrypted=True,
provider=LocalProvider(
channel=LocalChannel(),
(Fifth changed file; name not shown)
@@ -27,7 +27,7 @@ def local_config():
poll_period=100,
label="htex_local",
address="127.0.0.1",
-max_workers=1,
+max_workers_per_node=1,
encrypted=True,
launch_cmd="sleep inf",
provider=LocalProvider(
