
Commit dd77a2a

Update tensorflow requirement from !=2.6.0,!=2.6.1,<2.15.0,>=2.2.0 to >=2.2.0,!=2.6.0,!=2.6.1,<2.19.0 (#908)
* Update tensorflow requirement

  Updates the requirements on [tensorflow](https://github.com/tensorflow/tensorflow) to permit the latest version.
  - [Release notes](https://github.com/tensorflow/tensorflow/releases)
  - [Changelog](https://github.com/tensorflow/tensorflow/blob/master/RELEASE.md)
  - [Commits](tensorflow/tensorflow@v2.2.0...v2.18.0)

  ---
  updated-dependencies:
  - dependency-name: tensorflow
    dependency-type: direct:development
  ...

  Signed-off-by: dependabot[bot] <[email protected]>

* Updated upper and lower bounds on tensorflow
* Fixed input shape for test_misc_tf
* Fixed activation serialization issue for test_saving_legacy.py
* Fixed preprocessor
* Fixed admd
* Fixed model cloning
* Fixed TFDataset index error
* Fixed trainable vars for spot-the-diff detector
* Fixed infer_sigma flag in mmd
* Fixed classifier tf test
* Fixed kernels' trainable variables
* Fixed llr tests
* Fixed saving and optimizer saving
* Included test entry in Makefile and updated CI
* Test all notebooks - to be reverted
* Removed Python 3.8 from CI
* Improved test command
* Fixed saving test models
* Fixed non-tensor inputs as positional arguments
* Fixed env variable in Makefile
* Fixed optimizer tests, including legacy tests
* Fixed optional dependencies imports
* Fixed od_vae_adult.ipynb
* Fixed od_vae_cifar10.ipynb
* Fixed cd_model_unc_cifar10_wine.ipynb
* Fixed od_aegmm_kddcup.ipynb
* Fixed od_vae_kddcup.ipynb
* Fixed od_seq2seq_ecg.ipynb
* Fixed od_ae_cifar10.ipynb
* Fixed cd_distillation_cifar10.ipynb
* Fixed cd_ks_cifar10.ipynb
* Fixed cd_mmd_cifar10.ipynb
* Fixed od_llr_genome.ipynb
* Fixed od_llr_mnist.ipynb
* Fixed od_seq2seq_synth.ipynb
* Fixed cd_text_imdb.ipynb
* Fixed alibi_detect_deploy.ipynb
* Fixed ad_ae_cifar10.ipynb
* Reverted a few things in misc
* Fixed flake8 errors
* Reverted "test all notebooks" GitHub Actions change
* Addressed PR comments
* Fixed flake8 error

---------

Signed-off-by: dependabot[bot] <[email protected]>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Robert <[email protected]>
1 parent bd3cb4c commit dd77a2a


50 files changed: +503 −299 lines

.github/workflows/ci.yml

+2 −5

@@ -34,7 +34,7 @@ jobs:
     strategy:
       matrix:
         os: [ ubuntu-latest ]
-        python-version: [ '3.8', '3.9', '3.10', '3.11']
+        python-version: ['3.9', '3.10', '3.11']
         pydantic-version: [ '1.10.15', '2.7.1' ]
         include: # Run windows tests on only one python version
           - os: windows-latest
@@ -71,10 +71,7 @@ jobs:
          limit-access-to-actor: true

      - name: Test with pytest
-       run: |
-         pytest --randomly-seed=0 alibi_detect
-         # Note: The pytest-randomly seed is fixed at 0 for now. Once the legacy np.random.seed(0)'s
-         # are removed from tests, this can be removed, allowing all tests to use random seeds.
+       run: make test

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3

Makefile

+6 −2

@@ -6,9 +6,12 @@ install-dev:
 install:
 	pip install -e .[all]

+# Note: The pytest-randomly seed is fixed at 0 for now. Once the legacy np.random.seed(0)'s
+# are removed from tests, this can be removed, allowing all tests to use random seeds.
 .PHONY: test
-test: ## Run all tests
-	python setup.py test
+test:
+	TF_USE_LEGACY_KERAS=1 pytest --randomly-seed=0 alibi_detect/utils/tests/test_saving_legacy.py
+	pytest --randomly-seed=0 --ignore=alibi_detect/utils/tests/test_saving_legacy.py alibi_detect

 .PHONY: lint
 lint: ## Check linting according to the flake8 configuration in setup.cfg
@@ -68,3 +71,4 @@ check_licenses:
 	tox-env=default
 repl:
 	env COMMAND="python" tox -e $(tox-env)
+
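The split test target exists because the legacy-saving tests must run under Keras 2 semantics. A minimal sketch of what the TF_USE_LEGACY_KERAS switch does (assuming the tf-keras shim package is installed; the variable must be set before TensorFlow is first imported, which is why the Makefile puts it on the pytest command itself):

import os
os.environ["TF_USE_LEGACY_KERAS"] = "1"  # must be set before TensorFlow is imported

import tensorflow as tf  # with the flag set (and tf-keras installed),
                         # tf.keras resolves to the legacy Keras 2 implementation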

alibi_detect/ad/tests/test_admd.py

+1 −1

@@ -42,7 +42,7 @@ def test_adv_md(adv_md_params):
     threshold, loss_type, threshold_perc, return_instance_score = adv_md_params

     # define ancillary model
-    layers = [tf.keras.layers.InputLayer(input_shape=(input_dim)),
+    layers = [tf.keras.layers.InputLayer(input_shape=(input_dim, )),
               tf.keras.layers.Dense(y.shape[1], activation=tf.nn.softmax)]
     distilled_model = tf.keras.Sequential(layers)
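The bug here is pure Python rather than Keras: `(input_dim)` is just a parenthesised integer, while the trailing comma in `(input_dim, )` makes it a one-element tuple. Older Keras silently accepted a bare int for `input_shape`; newer versions validate the shape strictly. For illustration:

input_dim = 4
print(type((input_dim)))   # <class 'int'>: parentheses alone don't create a tuple
print(type((input_dim,)))  # <class 'tuple'>: the trailing comma does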

alibi_detect/cd/tensorflow/mmd.py

+1 −1

@@ -93,7 +93,7 @@ def __init__(

     def kernel_matrix(self, x: Union[np.ndarray, tf.Tensor], y: Union[np.ndarray, tf.Tensor]) -> tf.Tensor:
         """ Compute and return full kernel matrix between arrays x and y. """
-        k_xy = self.kernel(x, y, self.infer_sigma)
+        k_xy = self.kernel(x, y, infer_sigma=self.infer_sigma)
         k_xx = self.k_xx if self.k_xx is not None and self.update_x_ref is None else self.kernel(x, x)
         k_yy = self.kernel(y, y)
         kernel_mat = tf.concat([tf.concat([k_xx, k_xy], 1), tf.concat([tf.transpose(k_xy, (1, 0)), k_yy], 1)], 0)
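Keras 3 treats positional arguments to a layer call as inputs, so a boolean flag passed positionally can be misrouted; passing it by keyword is unambiguous. A usage sketch with the package's GaussianRBF kernel (assuming its call signature is `call(x, y, infer_sigma=False)`, as the diff above suggests):

import numpy as np
from alibi_detect.utils.tensorflow import GaussianRBF

kernel = GaussianRBF()
x = np.random.randn(5, 3).astype(np.float32)
y = np.random.randn(7, 3).astype(np.float32)
k_xy = kernel(x, y, infer_sigma=True)  # flag passed by keyword, not positionally
print(k_xy.shape)  # (5, 7)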

alibi_detect/cd/tensorflow/preprocess.py

+16 −6

@@ -2,9 +2,11 @@

 import numpy as np
 import tensorflow as tf
+
 from alibi_detect.utils.tensorflow.prediction import (
-    predict_batch, predict_batch_transformer)
-from tensorflow.keras.layers import Dense, Flatten, Input, InputLayer
+    predict_batch, predict_batch_transformer, get_call_arg_mapping
+)
+from tensorflow.keras.layers import Dense, Flatten, Input, Lambda
 from tensorflow.keras.models import Model


@@ -34,7 +36,11 @@ def __init__(
                              'tf.keras.Sequential or tf.keras.Model `mlp`')

     def call(self, x: Union[np.ndarray, tf.Tensor, Dict[str, tf.Tensor]]) -> tf.Tensor:
-        x = self.input_layer(x)
+        if not isinstance(x, (np.ndarray, tf.Tensor)):
+            x = get_call_arg_mapping(self.input_layer, x)
+            x = self.input_layer(**x)
+        else:
+            x = self.input_layer(x)
         return self.mlp(x)


@@ -52,7 +58,7 @@ def __init__(
         if is_enc:
             self.encoder = encoder_net
         elif not is_enc and is_enc_dim:  # set default encoder
-            input_layer = InputLayer(input_shape=shape) if input_layer is None else input_layer
+            input_layer = Lambda(lambda x: x) if input_layer is None else input_layer
             input_dim = np.prod(shape)
             step_dim = int((input_dim - enc_dim) / 3)
             self.encoder = _Encoder(input_layer, enc_dim=enc_dim, step_dim=step_dim)
@@ -61,7 +67,11 @@ def __init__(
                              ' or tf.keras.Model `encoder_net`.')

     def call(self, x: Union[np.ndarray, tf.Tensor, Dict[str, tf.Tensor]]) -> tf.Tensor:
-        return self.encoder(x)
+        if not isinstance(x, (np.ndarray, tf.Tensor)):
+            x = get_call_arg_mapping(self.encoder, x)
+            return self.encoder(**x)
+        else:
+            return self.encoder(x)


 class HiddenOutput(tf.keras.Model):
@@ -73,7 +83,7 @@ def __init__(
         flatten: bool = False
     ) -> None:
         super().__init__()
-        if input_shape and not model.inputs:
+        if input_shape and not (hasattr(model, 'inputs') and model.inputs):
             inputs = Input(shape=input_shape)
             model.call(inputs)
         else:
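The new branches handle dict inputs (e.g. tokenizer output) that Keras 3 no longer accepts as a single positional argument. `get_call_arg_mapping` itself is not shown in this commit; a hypothetical sketch of what such a helper could look like (the real implementation in `alibi_detect.utils.tensorflow.prediction` may differ):

import inspect

def get_call_arg_mapping(model, x):
    # Hypothetical: bind a dict-valued input to the name of the first
    # parameter of the model's `call`, so it can be passed by keyword.
    params = [p for p in inspect.signature(model.call).parameters if p != 'self']
    return {params[0]: x}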

alibi_detect/cd/tensorflow/spot_the_diff.py

+17 −3

@@ -170,9 +170,23 @@ def __init__(self, kernel: tf.keras.Model, x_ref: np.ndarray, initial_diffs: np.
         self.config = {'kernel': kernel, 'x_ref': x_ref, 'initial_diffs': initial_diffs}
         self.kernel = kernel
         self.mean = tf.convert_to_tensor(x_ref.mean(0))
-        self.diffs = tf.Variable(initial_diffs, dtype=np.float32)
-        self.bias = tf.Variable(tf.zeros((1,)))
-        self.coeffs = tf.Variable(tf.zeros((len(initial_diffs),)))
+
+        self.diffs = self.add_weight(
+            shape=initial_diffs.shape,
+            initializer=tf.keras.initializers.Constant(initial_diffs),
+            dtype=tf.float32,
+            trainable=True
+        )
+        self.bias = self.add_weight(
+            shape=(1,),
+            initializer="zeros",
+            trainable=True,
+        )
+        self.coeffs = self.add_weight(
+            shape=(len(initial_diffs),),
+            initializer="zeros",
+            trainable=True,
+        )

     def call(self, x: tf.Tensor) -> tf.Tensor:
         k_xtl = self.kernel(x, self.mean + self.diffs)
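Keras 3 does not reliably track bare `tf.Variable` attributes as weights of a model, which left the detector's `diffs`, `bias` and `coeffs` out of `trainable_variables`; `add_weight` registers them with the layer so they are tracked and saved. A minimal sketch of the pattern:

import tensorflow as tf

class Scaler(tf.keras.Model):
    def __init__(self):
        super().__init__()
        # Registered with the layer: tracked as trainable and serialized.
        self.scale = self.add_weight(shape=(1,), initializer="ones", trainable=True)

    def call(self, x):
        return self.scale * x

m = Scaler()
m(tf.zeros((1, 1)))                 # build the model
print(len(m.trainable_variables))   # 1: the registered weight is tracked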

alibi_detect/cd/tensorflow/tests/test_classifier_tf.py

+2 −2

@@ -2,7 +2,7 @@
 import numpy as np
 import pytest
 import tensorflow as tf
-from tensorflow.keras.layers import Dense, Input
+from tensorflow.keras.layers import Dense, Input, Softmax
 from typing import Union
 from alibi_detect.cd.tensorflow.classifier import ClassifierDriftTF

@@ -14,7 +14,7 @@ def mymodel(shape, softmax: bool = True):
     x = Dense(20, activation=tf.nn.relu)(x_in)
     x = Dense(2)(x)
     if softmax:
-        x = tf.nn.softmax(x)
+        x = Softmax()(x)
     return tf.keras.models.Model(inputs=x_in, outputs=x)
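In a Keras 3 functional graph the intermediate values are symbolic `KerasTensor`s, which raw TensorFlow ops cannot consume; wrapping the op in a layer keeps it inside the Keras graph. A minimal sketch:

import tensorflow as tf
from tensorflow.keras.layers import Input, Softmax

x_in = Input(shape=(3,))
x = Softmax()(x_in)        # a Keras layer: valid on symbolic inputs
# x = tf.nn.softmax(x_in)  # raw TF op on a KerasTensor: raises under Keras 3
model = tf.keras.Model(inputs=x_in, outputs=x)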

alibi_detect/od/tests/test_llr.py

+2 −2

@@ -2,7 +2,7 @@
 import numpy as np
 import pytest
 import tensorflow as tf
-from tensorflow.keras.layers import Dense, Input, LSTM
+from tensorflow.keras.layers import Dense, Input, LSTM, CategoryEncoding
 from alibi_detect.od import LLR
 from alibi_detect.version import __version__

@@ -48,7 +48,7 @@ def test_llr(llr_params):

     # define model and detector
     inputs = Input(shape=(shape[-1] - 1,), dtype=tf.int32)
-    x = tf.one_hot(tf.cast(inputs, tf.int32), input_dim)
+    x = CategoryEncoding(num_tokens=input_dim, output_mode="one_hot")(inputs)
     x = LSTM(hidden_dim, return_sequences=True)(x)
     logits = Dense(input_dim, activation=None)(x)
     model = tf.keras.Model(inputs=inputs, outputs=logits)
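`tf.one_hot` is a raw TensorFlow op and cannot be applied to a symbolic Keras 3 input, while the `CategoryEncoding` layer produces the same one-hot encoding inside the Keras graph. A small sketch (per the Keras docs, when the last input dimension is larger than 1 the layer appends a new axis for the encoding):

import tensorflow as tf
from tensorflow.keras.layers import CategoryEncoding

ids = tf.constant([[0, 2, 1]])                                  # shape (1, 3)
layer_oh = CategoryEncoding(num_tokens=3, output_mode="one_hot")(ids)  # (1, 3, 3)
op_oh = tf.one_hot(ids, depth=3)                                # same encoding
print(bool(tf.reduce_all(layer_oh == tf.cast(op_oh, layer_oh.dtype))))  # True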

alibi_detect/saving/_tensorflow/tests/test_saving_tf.py

+10 −18

@@ -15,26 +15,18 @@
 backend = param_fixture("backend", ['tensorflow'])


-# Note: The full save/load functionality of optimizers (inc. validation) is tested in test_save_classifierdrift.
-@pytest.mark.skipif(version.parse(tf.__version__) < version.parse('2.11.0'),
-                    reason="Skipping since tensorflow < 2.11.0")
-@parametrize('legacy', [True, False])
-def test_load_optimizer_object_tf2pt11(legacy, backend):
+def test_load_optimizer_object_tf2pt11(backend):
     """
-    Test the _load_optimizer_config with a tensorflow optimizer config. Only run if tensorflow>=2.11.
-
-    Here we test that "new" and legacy optimizers can be saved/laoded. We expect the returned optimizer to be an
-    instantiated `tf.keras.optimizers.Optimizer` object. Also test that the loaded optimizer can be saved.
+    Test the _load_optimizer_config with a tensorflow optimizer config. Only run if tensorflow>=2.16.
     """
     class_name = 'Adam'
-    class_str = class_name if legacy else 'Custom>' + class_name  # Note: see discussion in #739 re 'Custom>'
-    learning_rate = np.float32(0.01)  # Set as float32 since this is what _save_optimizer_config returns
-    epsilon = np.float32(1e-7)
+    learning_rate = 0.01
+    epsilon = 1e-7
     amsgrad = False

     # Load
     cfg_opt = {
-        'class_name': class_str,
+        'class_name': class_name,
         'config': {
             'name': class_name,
             'learning_rate': learning_rate,
@@ -45,10 +37,7 @@ def test_load_optimizer_object_tf2pt11(backend):
     optimizer = _load_optimizer_config(cfg_opt, backend=backend)
     # Check optimizer
     SupportedOptimizer.validate_optimizer(optimizer, {'backend': 'tensorflow'})
-    if legacy:
-        assert isinstance(optimizer, tf.keras.optimizers.legacy.Optimizer)
-    else:
-        assert isinstance(optimizer, tf.keras.optimizers.Optimizer)
+    assert isinstance(optimizer, tf.keras.optimizers.Optimizer)
     assert type(optimizer).__name__ == class_name
     assert optimizer.learning_rate == learning_rate
     assert optimizer.epsilon == epsilon
@@ -58,7 +47,10 @@ def test_load_optimizer_object_tf2pt11(backend):
     cfg_saved = _save_optimizer_config(optimizer)
     # Compare to original config
     for key, value in cfg_opt['config'].items():
-        assert value == cfg_saved['config'][key]
+        if isinstance(value, float):
+            assert np.isclose(value, cfg_saved['config'][key])
+        else:
+            assert value == cfg_saved['config'][key]


 @pytest.mark.skipif(version.parse(tf.__version__) >= version.parse('2.11.0'),
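The float comparison changed because Keras stores `learning_rate` in a float32 variable, so the value returned by the config roundtrip is not bit-identical to the Python float that went in. A sketch of the equivalent roundtrip with plain Keras APIs (an assumption: alibi-detect's `_save_optimizer_config`/`_load_optimizer_config` wrap similar machinery):

import numpy as np
import tensorflow as tf

opt = tf.keras.optimizers.deserialize(
    {'class_name': 'Adam', 'config': {'learning_rate': 0.01}}
)
cfg = tf.keras.optimizers.serialize(opt)
# 0.01 has no exact float32 representation, so compare with a tolerance:
assert np.isclose(cfg['config']['learning_rate'], 0.01)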

alibi_detect/saving/tests/models.py

+5 −4

@@ -3,6 +3,7 @@

 import numpy as np
 import tensorflow as tf
+from tensorflow.keras.activations import relu, softmax
 import torch
 import torch.nn as nn
 from sklearn.ensemble import RandomForestClassifier
@@ -46,7 +47,7 @@ def encoder_model(backend, current_cases):
         model = tf.keras.Sequential(
             [
                 tf.keras.layers.InputLayer(input_shape=(input_dim,)),
-                tf.keras.layers.Dense(5, activation=tf.nn.relu),
+                tf.keras.layers.Dense(5, activation=relu),
                 tf.keras.layers.Dense(LATENT_DIM, activation=None)
             ]
         )
@@ -73,7 +74,7 @@ def encoder_dropout_model(backend, current_cases):
         model = tf.keras.Sequential(
             [
                 tf.keras.layers.InputLayer(input_shape=(input_dim,)),
-                tf.keras.layers.Dense(5, activation=tf.nn.relu),
+                tf.keras.layers.Dense(5, activation=relu),
                 tf.keras.layers.Dropout(0.0),  # 0.0 to ensure determinism
                 tf.keras.layers.Dense(LATENT_DIM, activation=None)
             ]
@@ -191,7 +192,7 @@ def classifier_model(backend, current_cases):
         model = tf.keras.Sequential(
             [
                 tf.keras.layers.InputLayer(input_shape=(input_dim,)),
-                tf.keras.layers.Dense(2, activation=tf.nn.softmax),
+                tf.keras.layers.Dense(2, activation=softmax),
             ]
         )
     elif backend in ('pytorch', 'keops'):
@@ -240,7 +241,7 @@ def nlp_embedding_and_tokenizer(model_name, max_len, uae, backend):
        except (OSError, HTTPError):
            pytest.skip(f"Problem downloading {model_name} from huggingface.co")
        if uae:
-           x_emb = embedding(tokens)
+           x_emb = embedding(tokens=tokens)
            shape = (x_emb.shape[1],)
            embedding = UAE_tf(input_layer=embedding, shape=shape, enc_dim=enc_dim)
    elif backend == 'pt':
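Swapping `tf.nn.relu` for `keras.activations.relu` matters for saving: the Keras activation is registered by name and survives a `get_config()`/`from_config()` roundtrip, whereas the raw TF op has no Keras serialization entry. A small sketch:

from tensorflow.keras import activations

cfg = activations.serialize(activations.relu)  # registered, round-trippable
fn = activations.deserialize(cfg)              # resolves back to the activation
# serializing tf.nn.relu instead would produce an unregistered, unloadable config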

alibi_detect/utils/missing_optional_dependency.py

+1 −0

@@ -32,6 +32,7 @@
     "prophet": 'prophet',
     "tensorflow_probability": 'tensorflow',
     "tensorflow": 'tensorflow',
+    "keras": 'tensorflow',
     "torch": 'torch',
     "pytorch": 'torch',
     "keops": 'keops',

alibi_detect/utils/tensorflow/data.py

+3 −0

@@ -14,6 +14,9 @@ def __init__(
         self.shuffle = shuffle

     def __getitem__(self, idx: int) -> Union[Tuple[Indexable, ...], Indexable]:
+        if idx >= self.__len__():
+            raise IndexError("Index out of bounds.")
+
         istart, istop = idx * self.batch_size, (idx + 1) * self.batch_size
         output = tuple(indexable[istart:istop] for indexable in self.indexables)
         return output if len(output) > 1 else output[0]
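The explicit `IndexError` matters because Python's legacy iteration protocol calls `__getitem__` with 0, 1, 2, ... until that exception is raised; without it, out-of-range slices silently return empty batches and a `for` loop over the dataset never stops. For illustration:

class Batches:
    def __init__(self, n: int):
        self.n = n

    def __getitem__(self, idx: int) -> int:
        if idx >= self.n:
            raise IndexError  # tells `for` to stop iterating
        return idx

print(list(Batches(3)))  # [0, 1, 2]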

alibi_detect/utils/tensorflow/kernels.py

+10 −3

@@ -1,5 +1,4 @@
 import tensorflow as tf
-import numpy as np
 from . import distance
 from typing import Optional, Union, Callable
 from scipy.special import logit
@@ -59,11 +58,19 @@ def __init__(
         init_sigma_fn = sigma_median if init_sigma_fn is None else init_sigma_fn
         self.config = {'sigma': sigma, 'trainable': trainable, 'init_sigma_fn': init_sigma_fn}
         if sigma is None:
-            self.log_sigma = tf.Variable(np.empty(1), dtype=tf.keras.backend.floatx(), trainable=trainable)
+            self.log_sigma = self.add_weight(
+                shape=(1,),
+                initializer='zeros',
+                trainable=trainable
+            )
             self.init_required = True
         else:
             sigma = tf.cast(tf.reshape(sigma, (-1,)), dtype=tf.keras.backend.floatx())  # [Ns,]
-            self.log_sigma = tf.Variable(tf.math.log(sigma), trainable=trainable)
+            self.log_sigma = self.add_weight(
+                shape=(sigma.shape[0],),
+                initializer=tf.keras.initializers.Constant(tf.math.log(sigma)),
+                trainable=trainable
+            )
             self.init_required = False
         self.init_sigma_fn = init_sigma_fn
         self.trainable = trainable
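Two details worth noting: the bandwidth is kept as `log_sigma` so gradient updates stay unconstrained while `sigma = exp(log_sigma)` remains strictly positive, and the old `np.empty(1)` placeholder becomes a `'zeros'` initializer because the value is overwritten on first call anyway when `init_required` is set. A minimal sketch of the log-parameterisation:

import tensorflow as tf

log_sigma = tf.Variable([0.0])   # unconstrained: any real value is valid
sigma = tf.math.exp(log_sigma)   # always strictly positive
print(float(sigma[0]))           # 1.0 for the 'zeros' initialization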

alibi_detect/utils/tensorflow/misc.py

+11 −3

@@ -1,4 +1,6 @@
+import keras
 import tensorflow as tf
+from tensorflow.keras.models import Sequential, Model


 def zero_diag(mat: tf.Tensor) -> tf.Tensor:
@@ -85,13 +87,19 @@ def subset_matrix(mat: tf.Tensor, inds_0: tf.Tensor, inds_1: tf.Tensor) -> tf.Te
     return subbed_rows_cols


-def clone_model(model: tf.keras.Model) -> tf.keras.Model:
+def clone_model(model: Model) -> Model:
     """ Clone a sequential, functional or subclassed tf.keras.Model. """
-    try:  # sequential or functional model
+    conditions = [
+        isinstance(model, Sequential),
+        isinstance(model, keras.src.models.functional.Functional)
+    ]
+
+    if any(conditions):
         return tf.keras.models.clone_model(model)
-    except ValueError:  # subclassed model
+    else:
         try:
             config = model.get_config()
         except NotImplementedError:
             config = {}
+
         return model.__class__.from_config(config)
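`tf.keras.models.clone_model` only works for models with a static layer graph (Sequential or functional); a subclassed model has no such graph, so the fallback rebuilds it from its own config. Note that `keras.src.models.functional.Functional` is a private Keras 3 path, so this check is tied to the pinned Keras version. A usage sketch:

import tensorflow as tf
from alibi_detect.utils.tensorflow.misc import clone_model

seq = tf.keras.Sequential([tf.keras.layers.Dense(2)])
seq_clone = clone_model(seq)  # graph model: delegates to tf.keras.models.clone_model

class Identity(tf.keras.Model):
    def call(self, x):
        return x

sub_clone = clone_model(Identity())  # subclassed: rebuilt via from_config()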
