Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ Please [make the pull request a draft](https://github.blog/2019-02-14-introducin

Currently, we have two regular reviewers of pull requests:

* Kevin Santana ([kevinsantana11](https://github.com/kevinsantana11))
* Kevin Shuman ([KevinShuman](https://github.com/KevinShuman))
* Shane Elipot ([selipot](https://github.com/selipot))

You can request a review from one of us or just comment in GitHub that you want a review and we'll see it. Only one review is required to be allowed to merge a pull request. We'll work with you to get it into shape.
Expand Down Expand Up @@ -228,7 +228,7 @@ make html

Currently, only one person can deploy releases:

* Kevin Santana ([kevinsantana11](https://github.com/kevinsantana11))
* Kevin Shuman ([KevinShuman](https://github.com/KevinShuman))

If you need your merged pull request to be deployed in a release, just ask!

Expand Down
1 change: 0 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,6 @@ cd clouddrift/
2. Create an environment as specified in the yml file with the required library dependencies:
```bash
conda env create -f environment.yml # creates a new env with the dependencies
conda env update -f environment.yml # install dependencies in current environment
```

2a. Make sure you **created** the environment by activating it:
Expand Down
2 changes: 1 addition & 1 deletion clouddrift/adapters/gdp/gdpsource.py
Original file line number Diff line number Diff line change
Expand Up @@ -390,7 +390,7 @@

# Transform the initial dataframe, filtering out rows with really anomalous values
# examples include: years in the future, years way in the past before GDP program, etc...
preremove_df_chunk = df_chunk.assign(obs_index=range(start_idx, end_idx))
preremove_df_chunk = df_chunk.assign(obs_index=np.arange(start_idx, end_idx))

Check warning on line 393 in clouddrift/adapters/gdp/gdpsource.py

View check run for this annotation

Codecov / codecov/patch

clouddrift/adapters/gdp/gdpsource.py#L393

Added line #L393 was not covered by tests
df_chunk = _apply_remove(
preremove_df_chunk,
filters=[
Expand Down
24 changes: 20 additions & 4 deletions clouddrift/adapters/subsurface_floats.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,8 @@

metadata = {}
for var in meta_variables:
metadata[var] = np.array([v.flatten()[0] for v in source_data[var].flatten()])
arrs = _to_dense_flatten(source_data[str(var)])
metadata[var] = np.array([_flatten_array(v)[0] for v in arrs])

# bring the expList to the "traj" dimension
_, float_per_exp = np.unique(metadata["indexExp"], return_counts=True)
Expand All @@ -71,11 +72,12 @@
data_variables = ["dtnum", "lon", "lat", "p", "t", "u", "v"]
data = {}
for var in data_variables:
data[var] = np.concatenate([v.flatten() for v in source_data[var].flatten()])
arrs = _to_dense_flatten(source_data[str(var)])
data[var] = np.concatenate([_flatten_array(v) for v in arrs])

# create rowsize variable
rowsize = np.array([len(v) for v in source_data["dtnum"].flatten()])
assert np.sum(rowsize) == len(data["dtnum"])
arrs = _to_dense_flatten(source_data["dtnum"])
rowsize = np.array([len(_flatten_array(v)) for v in arrs])

# Unix epoch start (1970-01-01)
origin_datenum = 719529
Expand Down Expand Up @@ -197,3 +199,17 @@
ds = ds.set_coords(["time", "id"])

return ds


def _flatten_array(arr):
# Convert sparse to dense if needed, then flatten
if hasattr(arr, "toarray"):
arr = arr.toarray()

Check warning on line 207 in clouddrift/adapters/subsurface_floats.py

View check run for this annotation

Codecov / codecov/patch

clouddrift/adapters/subsurface_floats.py#L207

Added line #L207 was not covered by tests
return np.array(arr).flatten()


def _to_dense_flatten(arr):
    """Convert a possibly sparse array to dense and flatten it.

    NOTE(review): this was a byte-for-byte duplicate of ``_flatten_array``;
    it now delegates to that helper so the two implementations cannot drift
    apart. Callers and behavior are unchanged.
    """
    return _flatten_array(arr)
77 changes: 43 additions & 34 deletions clouddrift/transfer.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
"""

import numpy as np
from numpy import floating as _Floating
from numpy.lib.scimath import sqrt
from scipy.special import factorial, iv, kv # type: ignore

Expand Down Expand Up @@ -63,7 +64,11 @@
boundary_condition: str = "no-slip",
method: str = "lilly",
density: float = 1025.0,
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
) -> tuple[
float | _Floating | np.ndarray,
float | _Floating | np.ndarray,
float | _Floating | np.ndarray,
]:
"""
Compute the transfer function from wind stress to oceanic velocity based on the physically-based
models of Elipot and Gille (2009) and Lilly and Elipot (2021).
Expand Down Expand Up @@ -677,57 +682,61 @@
def _bessels_freeslip(
xiz: float | np.ndarray,
xih: float | np.ndarray,
xi0: float | np.ndarray | None | None = None,
) -> tuple[
float | np.ndarray,
float | np.ndarray,
float | np.ndarray,
float | np.ndarray,
float | np.ndarray,
float | np.ndarray,
]:
xi0: float | np.ndarray | None = None,
) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Compute the Bessel functions for the free-slip boundary condition for the xsi(z), xsi(h), and xsi(0) functions.
"""
k0z = kv(0, xiz)
i0z = iv(0, xiz)
k1h = kv(1, xih)
i1h = iv(0, xih)
# Convert inputs to numpy arrays
xiz = np.asarray(xiz)
xih = np.asarray(xih)

# Ensure all outputs are numpy arrays
k0z = np.asarray(kv(0, xiz))
i0z = np.asarray(iv(0, xiz))
k1h = np.asarray(kv(1, xih))
i1h = np.asarray(iv(0, xih))

if xi0 is not None:
k10 = kv(1, xi0)
i10 = iv(1, xi0)
return k0z, i0z, k1h, i1h, k10, i10
xi0 = np.asarray(xi0)
k10 = np.asarray(kv(1, xi0))
i10 = np.asarray(iv(1, xi0))
else:
return k0z, i0z, k1h, i1h, np.nan, np.nan
# Create nan values as numpy arrays with same shape as k0z
k10 = np.full_like(k0z, np.nan)
i10 = np.full_like(k0z, np.nan)

Check warning on line 707 in clouddrift/transfer.py

View check run for this annotation

Codecov / codecov/patch

clouddrift/transfer.py#L706-L707

Added lines #L706 - L707 were not covered by tests

return k0z, i0z, k1h, i1h, k10, i10


def _bessels_noslip(
xiz: float | np.ndarray,
xih: float | np.ndarray,
xi0: float | np.ndarray | None = None,
) -> tuple[
float | np.ndarray,
float | np.ndarray,
float | np.ndarray,
float | np.ndarray,
float | np.ndarray,
float | np.ndarray,
]:
) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Compute the Bessel functions for the no-slip boundary condition for the xsi(z), xsi(h), and xsi(0) functions.
"""
k0z = kv(0, xiz)
i0z = iv(0, xiz)
k0h = kv(0, xih)
i0h = iv(0, xih)
# Convert inputs to numpy arrays
xiz = np.asarray(xiz)
xih = np.asarray(xih)

# Ensure all outputs are numpy arrays
k0z = np.asarray(kv(0, xiz))
i0z = np.asarray(iv(0, xiz))
k0h = np.asarray(kv(0, xih))
i0h = np.asarray(iv(0, xih))

if xi0 is not None:
k10 = kv(1, xi0)
i10 = iv(1, xi0)
return k0z, i0z, k0h, i0h, k10, i10
xi0 = np.asarray(xi0)
k10 = np.asarray(kv(1, xi0))
i10 = np.asarray(iv(1, xi0))
else:
return k0z, i0z, k0h, i0h, np.nan, np.nan
# Create nan values as numpy arrays with same shape as k0z
k10 = np.full_like(k0z, np.nan)
i10 = np.full_like(k0z, np.nan)

return k0z, i0z, k0h, i0h, k10, i10


def _besseltildes_noslip(
Expand Down
Loading