Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/workflows/deploy_to_pypi.yml
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ jobs:
env:
TWINE_USERNAME: "__token__"
TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
TORCHQUAD_RELEASE_BUILD: "True"
run: |
python setup.py sdist bdist_wheel
twine upload dist/*
1 change: 1 addition & 0 deletions .github/workflows/deploy_to_test_pypi.yml
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ jobs:
env:
TWINE_USERNAME: "__token__"
TWINE_PASSWORD: ${{ secrets.TEST_PYPI_TOKEN }}
TORCHQUAD_RELEASE_BUILD: "True"
run: |
python setup.py sdist bdist_wheel
twine upload -r testpypi dist/*
20 changes: 18 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -193,8 +193,24 @@ integral_value = mc.integrate(
backend="torch",
)
```
To change the logger verbosity, set the `TORCHQUAD_LOG_LEVEL` environment
variable; for example `export TORCHQUAD_LOG_LEVEL=DEBUG`.

## Logging Configuration

By default, torchquad disables its internal logging when installed from PyPI to avoid interfering with other loggers in your application. To enable logging:

1. **Set the log level**: Use the `TORCHQUAD_LOG_LEVEL` environment variable:
```bash
export TORCHQUAD_LOG_LEVEL=DEBUG # For detailed debugging
export TORCHQUAD_LOG_LEVEL=INFO # For general information
export TORCHQUAD_LOG_LEVEL=WARNING # For warnings only (default when enabled)
```

2. **Enable logging programmatically**:
```python
import torchquad
torchquad.set_log_level("DEBUG") # This will enable and configure logging
```

Note: When developing from a git clone, logging is enabled by default. Setting the `TORCHQUAD_RELEASE_BUILD` environment variable to `"True"` (as the PyPI release workflows do) disables this default so that release builds do not interfere with other loggers.

You can find all available integrators [here](https://torchquad.readthedocs.io/en/main/integration_methods.html).

Expand Down
11 changes: 8 additions & 3 deletions torchquad/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import os
from loguru import logger

# TODO: Currently this is the way to expose to the docs
# hopefully changes with setup.py
Expand Down Expand Up @@ -42,5 +41,11 @@
"_deployment_test",
]

set_log_level(os.environ.get("TORCHQUAD_LOG_LEVEL", "WARNING"))
logger.info("Initializing torchquad.")
# Check for release build flag to avoid interfering with other loggers
TORCHQUAD_RELEASE_BUILD = os.environ.get("TORCHQUAD_RELEASE_BUILD", "False").lower() == "true"

if not TORCHQUAD_RELEASE_BUILD:
from loguru import logger

set_log_level(os.environ.get("TORCHQUAD_LOG_LEVEL", "WARNING"))
logger.info("Initializing torchquad.")
4 changes: 3 additions & 1 deletion torchquad/integration/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,9 @@ def _setup_integration_domain(dim, integration_domain, backend):
domain_arg_backend = infer_backend(integration_domain)
convert_to_tensor = domain_arg_backend == "builtins"
if not convert_to_tensor and backend is not None and domain_arg_backend != backend:
logger.warning("integration_domain should be a list when the backend argument is set.")
warning_msg = "integration_domain should be a list when the backend argument is set."
logger.warning(warning_msg)
warnings.warn(warning_msg, RuntimeWarning)
convert_to_tensor = True

# Convert integration_domain to a tensor if needed
Expand Down
11 changes: 7 additions & 4 deletions torchquad/integration/vegas_map.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from autoray import numpy as anp
from autoray import astype, to_backend_dtype
from loguru import logger
import warnings

from .utils import _add_at_indices

Expand Down Expand Up @@ -185,10 +186,12 @@ def update_map(self):
"""Update the adaptive map, Section II C."""
smoothed_weights = self._smooth_map(self.weights, self.counts, self.alpha)
if smoothed_weights is None:
logger.warning(
warning_msg = (
"Cannot update the VEGASMap. This can happen with an integrand "
"which evaluates to zero everywhere."
)
logger.warning(warning_msg)
warnings.warn(warning_msg, RuntimeWarning)
self._reset_weight()
return

Expand Down Expand Up @@ -242,9 +245,9 @@ def update_map(self):
# smoothed_weights[i][indices] can be zero, which leads to
# invalid edges.
num_edges = self.x_edges.shape[1]
logger.warning(
f"{num_edges - anp.sum(finite_edges)} out of {num_edges} calculated VEGASMap edges were infinite"
)
warning_msg = f"{num_edges - anp.sum(finite_edges)} out of {num_edges} calculated VEGASMap edges were infinite"
logger.warning(warning_msg)
warnings.warn(warning_msg, RuntimeWarning)
# Replace inf edges with the average of their two neighbours
middle_edges = 0.5 * (self.x_edges[i][:-2] + self.x_edges[i][2:])
self.x_edges[i][1:-1] = anp.where(
Expand Down
5 changes: 4 additions & 1 deletion torchquad/utils/enable_cuda.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
from loguru import logger
import warnings

from .set_precision import set_precision

Expand All @@ -20,4 +21,6 @@ def enable_cuda(data_type="float32"):
if data_type is not None:
set_precision(data_type)
else:
logger.warning("Error enabling CUDA. cuda.is_available() returned False. CPU will be used.")
warning_msg = "Error enabling CUDA. cuda.is_available() returned False. CPU will be used."
logger.warning(warning_msg)
warnings.warn(warning_msg, RuntimeWarning)
11 changes: 7 additions & 4 deletions torchquad/utils/set_precision.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from loguru import logger
import os
import sys


def _get_precision(backend):
Expand Down Expand Up @@ -31,9 +32,9 @@ def set_precision(data_type="float32", backend="torch"):
# upper-case letters
data_type = {"float": "float32", "double": "float64"}.get(data_type.lower(), data_type)
if data_type not in ["float32", "float64"]:
logger.error(
f'Invalid data type "{data_type}". Only float32 and float64 are supported. Setting the data type to float32.'
)
error_msg = f'Invalid data type "{data_type}". Only float32 and float64 are supported. Setting the data type to float32.'
logger.error(error_msg)
print(f"ERROR: {error_msg}", file=sys.stderr)
data_type = "float32"

if backend == "torch":
Expand Down Expand Up @@ -67,4 +68,6 @@ def set_precision(data_type="float32", backend="torch"):
os.environ["TORCHQUAD_DTYPE_NUMPY"] = data_type
logger.info(f"NumPy default dtype set to {_get_precision('numpy')}")
else:
logger.error(f"Changing the data type is not supported for backend {backend}")
error_msg = f"Changing the data type is not supported for backend {backend}"
logger.error(error_msg)
print(f"ERROR: {error_msg}", file=sys.stderr)
Loading