
Commit d3ce48c

Merge pull request #1 from mpes-kit/release
Release
2 parents 701eb9e + c3780bf commit d3ce48c

12 files changed

Lines changed: 347 additions & 96 deletions


.cspell/custom-dictionary.txt

Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+accum
+clahe
+coeff
+inds
+keepdims
+mclahe
+nbins
+ndims
+sess

.github/workflows/release.yml

Lines changed: 50 additions & 0 deletions
@@ -0,0 +1,50 @@
+name: Publish to PyPI
+
+# Workflow runs a release job on every published tag.
+on:
+  release:
+    types: [published]
+
+# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
+# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
+concurrency:
+  group: "release"
+  cancel-in-progress: false
+
+env:
+  UV_SYSTEM_PYTHON: true
+
+jobs:
+  release:
+    name: Upload release to PyPI
+    runs-on: ubuntu-latest
+    environment:
+      name: pypi
+      url: https://pypi.org/p/mclahe
+    permissions:
+      id-token: write
+
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.x"
+
+      - name: Install dependencies
+        run: |
+          curl -LsSf https://astral.sh/uv/install.sh | sh
+          uv pip install build
+
+      - name: Build package
+        run: |
+          git reset --hard HEAD
+          python -m build
+
+      - name: Publish package distributions to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          verbose: true
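
The job runs only when a GitHub release is published, builds the sdist and wheel with python -m build, and hands the dist/ artefacts to the pypa publish action under trusted publishing (hence the id-token: write permission). A rough local dry run of the build step, as a sketch that assumes the build package is installed in the current environment:

import pathlib
import subprocess

# Same command the "Build package" step runs in CI; artefacts land in dist/.
subprocess.run(["python", "-m", "build"], check=True)
for artefact in sorted(pathlib.Path("dist").iterdir()):
    print(artefact.name)  # expect one sdist (.tar.gz) and one wheel (.whl)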

.gitignore

Lines changed: 49 additions & 0 deletions
@@ -0,0 +1,49 @@
+__pycache__/
+
+# C extensions
+*.so
+
+# pycharm
+.idea/
+.idea
+
+# Packages
+*.egg
+*.egg-info
+.pypirc
+build
+eggs
+parts
+bin
+var
+sdist
+develop-eggs
+.installed.cfg
+lib
+lib64
+dist/
+
+# Installer logs
+pip-log.txt
+
+# Unit test / coverage reports
+.coverage
+.tox
+nosetests.xml
+
+# Complexity
+output/*.html
+output/*/index.html
+
+# Sphinx
+docs/_build
+
+# Cookiecutter
+output/
+# data
+data/
+*.zip
+*.h5
+**/plots/
+
+.python-version

.pre-commit-config.yaml

Lines changed: 48 additions & 0 deletions
@@ -0,0 +1,48 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.3.0
+    hooks:
+      - id: trailing-whitespace
+      - id: end-of-file-fixer
+      - id: check-yaml
+      - id: check-added-large-files
+      - id: check-ast
+      - id: check-docstring-first
+
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    # Ruff version.
+    rev: v0.2.2
+    hooks:
+      # Run the formatter.
+      - id: ruff-format
+      # Run the linter.
+      - id: ruff
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: v1.7.1
+    hooks:
+      - id: mypy
+  - repo: https://github.com/asottile/reorder_python_imports
+    rev: v3.8.2
+    hooks:
+      - id: reorder-python-imports
+        args: [--application-directories, '.:src', --py39-plus]
+  - repo: https://github.com/asottile/pyupgrade
+    rev: v3.16.0
+    hooks:
+      - id: pyupgrade
+        args: [--py39-plus]
+  - repo: https://github.com/asottile/add-trailing-comma
+    rev: v2.2.3
+    hooks:
+      - id: add-trailing-comma
+        args: [--py36-plus]
+  - repo: https://github.com/kynan/nbstripout
+    rev: 0.6.0
+    hooks:
+      - id: nbstripout
+  - repo: https://github.com/streetsidesoftware/cspell-cli
+    rev: v6.31.1
+    hooks:
+      - id: cspell

MANIFEST.in

Lines changed: 4 additions & 2 deletions
@@ -1,2 +1,4 @@
-include README.md
-include requirements.txt
+prune *
+exclude *
+recursive-include mclahe *.py
+include pyproject.toml README.md
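
The rewritten MANIFEST.in prunes and excludes everything first, then re-adds only the mclahe Python sources plus pyproject.toml and README.md, keeping the sdist minimal. One way to check what actually ends up in the archive after a build, as a sketch (the dist/*.tar.gz glob and the expected member list are assumptions):

import glob
import tarfile

# Inspect the newest sdist produced by `python -m build`.
sdist = sorted(glob.glob("dist/*.tar.gz"))[-1]
with tarfile.open(sdist) as archive:
    for member in archive.getnames():
        print(member)  # expect mclahe/*.py, pyproject.toml, README.md, plus backend-generated metadata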

cspell.json

Lines changed: 23 additions & 0 deletions
@@ -0,0 +1,23 @@
+{
+    "version": "0.2",
+    "ignorePaths": [
+        "./tests/data/*",
+        "*.toml",
+        "Makefile",
+        "*.bat",
+        "*.egg-info",
+    ],
+    "dictionaryDefinitions": [
+        {
+            "name": "custom-dictionary",
+            "path": "./.cspell/custom-dictionary.txt",
+            "addWords": true
+        }
+    ],
+    "dictionaries": [ "custom-dictionary"
+    ],
+    "words": [],
+    "ignoreWords": [],
+    "import": [],
+    "language": "en-GB, en-US"
+}

mclahe/core.py

Lines changed: 52 additions & 26 deletions
@@ -1,11 +1,11 @@
 #! /usr/bin/env python
-# -*- coding: utf-8 -*-
+from itertools import product

 import numpy as np
 import tensorflow as tf
+
 from .utils import *

-from itertools import product

 def mclahe(x, kernel_size=None, n_bins=128, clip_limit=0.01, adaptive_hist_range=False, use_gpu=True):
     """
@@ -37,10 +37,10 @@ def mclahe(x, kernel_size=None, n_bins=128, clip_limit=0.01, adaptive_hist_range
     padding_x_length = kernel_size - 1 - ((x_shape - 1) % kernel_size)
     padding_x = np.column_stack(((padding_x_length + 1) // 2, padding_x_length // 2))
     padding_hist = np.column_stack((kernel_size // 2, (kernel_size + 1) // 2)) + padding_x
-    x_hist_padded = np.pad(x, padding_hist, 'symmetric')
+    x_hist_padded = np.pad(x, padding_hist, "symmetric")

     # Set up tf graph
-    with tf.variable_scope("clahe") as scope:
+    with tf.variable_scope("clahe"):
         tf_x_hist_padded_init = tf.placeholder(tf.float32, shape=x_hist_padded.shape)
         tf_x_hist_padded = tf.Variable(tf_x_hist_padded_init)
         tf_x_padded = tf.slice(tf_x_hist_padded, kernel_size // 2, x_shape + padding_x_length)
@@ -67,15 +67,22 @@ def mclahe(x, kernel_size=None, n_bins=128, clip_limit=0.01, adaptive_hist_range
             tf_x_hist_max = tf.reduce_max(tf_x_hist, np.arange(-dim, 0))
             tf_x_hist_norm = tf.Variable(tf_x_hist_ex_init, dtype=tf.float32)
             tf_get_hist_min = tf.assign(tf_x_hist_min, tf.reduce_min(tf_x_hist, np.arange(-dim, 0)))
-            tf_get_hist_norm = tf.assign(tf_x_hist_norm, tf.where(tf.equal(tf_x_hist_min, tf_x_hist_max),
-                                                                  tf.ones_like(tf_x_hist_min),
-                                                                  tf_x_hist_max - tf_x_hist_min))
-
-            tf_x_hist_scaled = (tf_x_hist - tf.reshape(tf_x_hist_min, hist_ex_shape))\
-                               / tf.reshape(tf_x_hist_norm, hist_ex_shape)
+            tf_get_hist_norm = tf.assign(
+                tf_x_hist_norm,
+                tf.where(
+                    tf.equal(tf_x_hist_min, tf_x_hist_max),
+                    tf.ones_like(tf_x_hist_min),
+                    tf_x_hist_max - tf_x_hist_min,
+                ),
+            )
+
+            tf_x_hist_scaled = (tf_x_hist - tf.reshape(tf_x_hist_min, hist_ex_shape)) / tf.reshape(
+                tf_x_hist_norm,
+                hist_ex_shape,
+            )
         else:
             tf_x_hist_scaled = tf_x_hist
-        tf_hist = tf.cast(tf_batch_histogram(tf_x_hist_scaled, [0., 1.], dim, nbins=n_bins), tf.float32)
+        tf_hist = tf.cast(tf_batch_histogram(tf_x_hist_scaled, [0.0, 1.0], dim, nbins=n_bins), tf.float32)
         # Clip histogram
         tf_n_to_high = tf.reduce_sum(tf.nn.relu(tf_hist - np.prod(kernel_size) * clip_limit), -1, keepdims=True)
         tf_hist_clipped = tf.minimum(tf_hist, np.prod(kernel_size) * clip_limit) + tf_n_to_high / n_bins
@@ -105,13 +112,15 @@ def mclahe(x, kernel_size=None, n_bins=128, clip_limit=0.01, adaptive_hist_range
             tf_hist_norm_slice_shape = np.concatenate((n_blocks, [1] * dim))
             tf_x_hist_min_sub = tf.slice(tf_x_hist_min, tf_slice_begin, n_blocks)
             tf_x_hist_norm_sub = tf.slice(tf_x_hist_norm, tf_slice_begin, n_blocks)
-            tf_x_block_scaled = (tf_x_block - tf.reshape(tf_x_hist_min_sub, tf_hist_norm_slice_shape))\
-                                / tf.reshape(tf_x_hist_norm_sub, tf_hist_norm_slice_shape)
-            tf_bin = tf.histogram_fixed_width_bins(tf_x_block_scaled, [0., 1.], nbins=n_bins)
+            tf_x_block_scaled = (tf_x_block - tf.reshape(tf_x_hist_min_sub, tf_hist_norm_slice_shape)) / tf.reshape(
+                tf_x_hist_norm_sub,
+                tf_hist_norm_slice_shape,
+            )
+            tf_bin = tf.histogram_fixed_width_bins(tf_x_block_scaled, [0.0, 1.0], nbins=n_bins)
         else:
             # Global bins
             tf_bin = tf.Variable(tf.cast(tf_x_block_init, tf.int32), dtype=tf.int32)
-            tf_get_bin = tf.assign(tf_bin, tf.histogram_fixed_width_bins(tf_x_block, [0., 1.], nbins=n_bins))
+            tf_get_bin = tf.assign(tf_bin, tf.histogram_fixed_width_bins(tf_x_block, [0.0, 1.0], nbins=n_bins))
         # Apply map
         tf_mapped_sub = tf_batch_gather(tf_map_slice, tf_bin, dim)
         # Apply coefficients
@@ -132,32 +141,49 @@ def mclahe(x, kernel_size=None, n_bins=128, clip_limit=0.01, adaptive_hist_range
         new_shape = tuple((axis, axis + dim) for axis in range(dim))
         new_shape = tuple(j for i in new_shape for j in i)
         tf_res_transposed = tf.transpose(tf_res, new_shape)
-        tf_res_reshaped = tf.reshape(tf_res_transposed, tuple(n_blocks[axis] * kernel_size[axis] for axis in range(dim)))
+        tf_res_reshaped = tf.reshape(
+            tf_res_transposed,
+            tuple(n_blocks[axis] * kernel_size[axis] for axis in range(dim)),
+        )

         # Recover original size
         tf_res_cropped = tf.slice(tf_res_reshaped, padding_x[:, 0], x.shape)

     # Setting up tf session
     if use_gpu:
-        config = None
+        config = tf.ConfigProto()
+        config.gpu_options.allow_growth = True
     else:
-        config = tf.ConfigProto(device_count={'GPU': 0})
+        config = tf.ConfigProto(device_count={"GPU": 0})

     with tf.Session(config=config) as sess:
         map_init = np.zeros(map_shape, dtype=np.float32)
         x_block_init = np.zeros(shape_x_block, dtype=np.float32)
         # Initialize vars for local hist range if needed
         if adaptive_hist_range:
             x_hist_ex_init = np.zeros(n_blocks_hist, dtype=np.float32)
-            tf_var_init = tf.initializers.variables([tf_x_hist_padded, tf_map, tf_res, tf_res_sub,
-                                                     tf_x_hist_min, tf_x_hist_norm])
-            sess.run(tf_var_init, feed_dict={tf_x_hist_padded_init: x_hist_padded,
-                                             tf_map_init: map_init, tf_x_block_init: x_block_init,
-                                             tf_x_hist_ex_init: x_hist_ex_init})
+            tf_var_init = tf.initializers.variables(
+                [tf_x_hist_padded, tf_map, tf_res, tf_res_sub, tf_x_hist_min, tf_x_hist_norm],
+            )
+            sess.run(
+                tf_var_init,
+                feed_dict={
+                    tf_x_hist_padded_init: x_hist_padded,
+                    tf_map_init: map_init,
+                    tf_x_block_init: x_block_init,
+                    tf_x_hist_ex_init: x_hist_ex_init,
+                },
+            )
         else:
             tf_var_init = tf.initializers.variables([tf_x_hist_padded, tf_map, tf_bin, tf_res, tf_res_sub])
-            sess.run(tf_var_init, feed_dict={tf_x_hist_padded_init: x_hist_padded, tf_map_init: map_init,
-                                             tf_x_block_init: x_block_init})
+            sess.run(
+                tf_var_init,
+                feed_dict={
+                    tf_x_hist_padded_init: x_hist_padded,
+                    tf_map_init: map_init,
+                    tf_x_block_init: x_block_init,
+                },
+            )

         # Run calculations
         # Normalize histogram data if needed
@@ -178,7 +204,7 @@ def mclahe(x, kernel_size=None, n_bins=128, clip_limit=0.01, adaptive_hist_range
                 if kernel_size[axis] % 2 == 0:
                     coeff = 0.5 / kernel_size[axis] + coeff
                 if ind_map[axis] == 0:
-                    coeff = 1. - coeff
+                    coeff = 1.0 - coeff
                 new_shape = [1] * (dim + axis) + [kernel_size[axis]] + [1] * (dim - 1 - axis)
                 coeff = np.reshape(coeff, new_shape)
                 sess.run(tf_apply_coeff, feed_dict={tf_coeff: coeff})
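
The core.py changes are reformatting only: the mclahe signature is unchanged, so calling code is unaffected. A minimal usage sketch, assuming the package exposes mclahe at the top level and a TensorFlow 1.x backend is installed; the input array and parameter values are illustrative only:

import numpy as np
from mclahe import mclahe

# Any float ndarray works; a small synthetic 3D volume for illustration.
volume = np.random.rand(64, 64, 32).astype(np.float32)

# Defaults match the signature above; use_gpu=False takes the CPU-only
# tf.ConfigProto branch touched by this commit.
equalized = mclahe(
    volume,
    kernel_size=(16, 16, 8),
    n_bins=128,
    clip_limit=0.01,
    adaptive_hist_range=False,
    use_gpu=False,
)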
