Merge branch 'failing_tests' into numpy2
naoise-h committed Apr 10, 2024
2 parents 9590b24 + d369f63 commit f73cbb7
Showing 6 changed files with 25 additions and 22 deletions.
2 changes: 1 addition & 1 deletion tests/mechanisms/test_Exponential.py
@@ -133,7 +133,7 @@ def test_zero_measure(self):
measure = [1, 1, 0]
utility = [1, 1, 1]
runs = 10000
-mech = self.mech(epsilon=1, utility=utility, measure=measure, sensitivity=1)
+mech = self.mech(epsilon=1, utility=utility, measure=measure, sensitivity=1, random_state=0)
count = [0] * 3

for i in range(runs):
13 changes: 6 additions & 7 deletions tests/models/test_LinearRegression.py
@@ -1,10 +1,8 @@
import numpy as np
from unittest import TestCase

-import pytest
-
from diffprivlib.models.linear_regression import LinearRegression
-from diffprivlib.utils import PrivacyLeakWarning, DiffprivlibCompatibilityWarning, BudgetError
+from diffprivlib.utils import PrivacyLeakWarning, DiffprivlibCompatibilityWarning, BudgetError, check_random_state


class TestLinearRegression(TestCase):
@@ -58,7 +56,6 @@ def test_large_data(self):

self.assertIsNotNone(clf.fit(X, y))

-@pytest.mark.filterwarnings('ignore: numpy.ufunc size changed')
def test_different_results(self):
from sklearn import datasets
from sklearn import linear_model
@@ -87,17 +84,19 @@ def test_different_results(self):
self.assertFalse(np.all(predict1 == predict2))
self.assertFalse(np.all(predict3 == predict1) and np.all(predict3 == predict2))

-@pytest.mark.filterwarnings('ignore: numpy.ufunc size changed')
def test_same_results(self):
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn import linear_model

+rng = check_random_state(42)
+
dataset = datasets.load_iris()
-X_train, X_test, y_train, y_test = train_test_split(dataset.data, dataset.target, test_size=0.2)
+X_train, X_test, y_train, y_test = train_test_split(dataset.data, dataset.target, test_size=0.2,
+                                                     random_state=rng)

clf = LinearRegression(epsilon=float("inf"), bounds_X=([4.3, 2.0, 1.0, 0.1], [7.9, 4.4, 6.9, 2.5]),
-                       bounds_y=(0, 2))
+                       bounds_y=(0, 2), random_state=rng)
clf.fit(X_train, y_train)

predict1 = clf.predict(X_test)
17 changes: 9 additions & 8 deletions tests/models/test_StandardScaler.py
@@ -4,7 +4,7 @@
import sklearn.preprocessing as sk_pp

from diffprivlib.models.standard_scaler import StandardScaler
-from diffprivlib.utils import PrivacyLeakWarning, DiffprivlibCompatibilityWarning, BudgetError
+from diffprivlib.utils import PrivacyLeakWarning, DiffprivlibCompatibilityWarning, BudgetError, check_random_state


class TestStandardScaler(TestCase):
@@ -65,12 +65,13 @@ def test_inf_epsilon(self):
self.assertTrue(np.all(dp_ss.n_samples_seen_ == sk_ss.n_samples_seen_))

def test_different_results(self):
-X = np.random.rand(10, 5)
+rng = check_random_state(1)
+X = rng.random((10, 5))

-ss1 = StandardScaler(bounds=(0, 1))
+ss1 = StandardScaler(bounds=(0, 1), random_state=rng)
ss1.fit(X)

-ss2 = StandardScaler(bounds=(0, 1))
+ss2 = StandardScaler(bounds=(0, 1), random_state=rng)
ss2.fit(X)

self.assertFalse(np.allclose(ss1.mean_, ss2.mean_), "Arrays %s and %s should be different" %
@@ -88,8 +89,8 @@ def test_functionality(self):
self.assertIsNotNone(ss.fit_transform(X))

def test_similar_results(self):
-rng = np.random.RandomState(0)
-X = rng.rand(100000, 5)
+rng = check_random_state(0)
+X = rng.random((100000, 5))

dp_ss = StandardScaler(bounds=(0, 1), epsilon=float("inf"), random_state=rng)
dp_ss.fit(X)
@@ -104,8 +105,8 @@ def test_similar_results(self):
self.assertTrue(np.all(dp_ss.n_samples_seen_ == sk_ss.n_samples_seen_))

def test_random_state(self):
-rng = np.random.RandomState(0)
-X = rng.rand(100000, 5)
+rng = check_random_state(0)
+X = rng.random((100000, 5))

ss0 = StandardScaler(bounds=(0, 1), epsilon=1, random_state=0)
ss1 = StandardScaler(bounds=(0, 1), epsilon=1, random_state=1)
6 changes: 4 additions & 2 deletions tests/tools/test_histogram2d.py
@@ -3,7 +3,7 @@

from diffprivlib.accountant import BudgetAccountant
from diffprivlib.tools.histograms import histogram2d
-from diffprivlib.utils import PrivacyLeakWarning, BudgetError
+from diffprivlib.utils import PrivacyLeakWarning, BudgetError, check_random_state


class TestHistogram2d(TestCase):
@@ -60,9 +60,11 @@ def test_different_result(self):
self.assertTrue((hist != dp_hist).any())

def test_density(self):
+rng = check_random_state(1)
+
x = np.array([1, 2, 3, 4, 5])
y = np.array([5, 7, 1, 5, 9])
-dp_hist, _, _ = histogram2d(x, y, epsilon=1, bins=3, range=[(0, 10), (0, 10)], density=True)
+dp_hist, _, _ = histogram2d(x, y, epsilon=1, bins=3, range=[(0, 10), (0, 10)], density=True, random_state=rng)

# print(dp_hist.sum())

2 changes: 1 addition & 1 deletion tests/tools/test_histogramdd.py
@@ -35,7 +35,7 @@ def test_same_edges(self):
def test_different_result(self):
a = np.array([1, 2, 3, 4, 5])
hist, _ = np.histogramdd(a, bins=3, range=[(0, 10)])
-dp_hist, _ = histogramdd(a, epsilon=0.1, bins=3, range=[(0, 10)])
+dp_hist, _ = histogramdd(a, epsilon=0.1, bins=3, range=[(0, 10)], random_state=0)

# print("Non-private histogram: %s" % hist)
# print("Private histogram: %s" % dp_hist)
7 changes: 4 additions & 3 deletions tests/tools/test_median.py
@@ -3,7 +3,7 @@
import numpy as np

from diffprivlib.tools.quantiles import median
-from diffprivlib.utils import PrivacyLeakWarning, BudgetError
+from diffprivlib.utils import PrivacyLeakWarning, BudgetError, check_random_state


class TestMedian(TestCase):
@@ -57,9 +57,10 @@ def test_output_type(self):
self.assertTrue(isinstance(res, float))

def test_simple(self):
-a = np.random.random(1000)
+rng = check_random_state(10)
+a = rng.random(1000)

-res = median(a, epsilon=5, bounds=(0, 1))
+res = median(a, epsilon=5, bounds=(0, 1), random_state=rng)
self.assertAlmostEqual(res, 0.5, delta=0.05)

def test_normal(self):
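
All of the hunks above apply one pattern: generate the test data from check_random_state and pass a random_state (either a reusable RNG object or a plain integer seed) to the diffprivlib tool or estimator under test, so the randomised, differentially private output is reproducible between runs. The sketch below is an illustration of that pattern only, not part of the commit; it reuses the median call from test_simple and assumes, as these tests do, that diffprivlib draws its noise deterministically from the supplied seed.

    from diffprivlib.tools.quantiles import median
    from diffprivlib.utils import check_random_state

    # Normalise an integer seed into a reusable RNG, as the updated tests do.
    rng = check_random_state(10)
    a = rng.random(1000)  # deterministic sample data in [0, 1)

    # With the same fixed seed the noisy median should be identical across calls,
    # which is what keeps assertions such as assertAlmostEqual from flaking.
    res1 = median(a, epsilon=5, bounds=(0, 1), random_state=0)
    res2 = median(a, epsilon=5, bounds=(0, 1), random_state=0)
    assert res1 == res2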
