Skip to content

Commit 13d3bc9

Browse files
committed
Added DeepDefend main files.
0 parents  commit 13d3bc9

6 files changed

Lines changed: 314 additions & 0 deletions

File tree

LICENSE

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
MIT License (Modified)
2+
3+
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to make derivative works based on the Software, provided that any substantial changes to the Software are clearly distinguished from the original work and are distributed under a different name.
4+
5+
The original copyright notice and disclaimer must be retained in all copies or substantial portions of the Software.
6+
7+
THE SOFTWARE IS PROVIDED "AS IS," WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

deepdefend/__init__.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
"""DeepDefend package: adversarial attacks and defenses for deep learning models."""

# NOTE(review): the original `import deepdefend` was a circular self-import of
# this very package (deepdefend/__init__.py importing deepdefend) and had no
# effect, so it was removed.
from .attacks import fgsm, pgd, bim
from .defenses import adversarial_training, feature_squeezing

deepdefend/attacks.py

Lines changed: 103 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,103 @@
1+
"""
2+
Functions to run adversarial attacks on deep learning models.
3+
4+
Available functions:
5+
- `fgsm(model, x, y, epsilon=0.01)`: Fast Gradient Sign Method (FGSM) attack.
6+
- `pgd(model, x, y, epsilon=0.01, alpha=0.01, num_steps=10)`: Projected Gradient Descent (PGD) attack.
7+
- `bim(model, x, y, epsilon=0.01, alpha=0.01, num_steps=10)`: Basic Iterative Method (BIM) attack.
8+
9+
"""
10+
11+
import numpy as np
12+
import tensorflow as tf
13+
14+
def fgsm(model, x, y, epsilon=0.01):
    """
    Fast Gradient Sign Method (FGSM) attack.

    Takes a single gradient step of magnitude ``epsilon`` in the direction of
    the sign of the loss gradient with respect to the input.

    Parameters:
        model (tensorflow.keras.Model): The target model to attack.
        x (numpy.ndarray): The input example to attack.
        y (numpy.ndarray): The true labels of the input example.
        epsilon (float): The magnitude of the perturbation (default: 0.01).

    Returns:
        adversarial_example (numpy.ndarray): The perturbed input example.
    """
    # tf.GradientTape.watch requires a tf.Tensor; the documented input type is
    # numpy.ndarray, which would raise, so convert up front (no-op for tensors).
    x = tf.convert_to_tensor(x)

    # Determine the loss function based on the label encoding: a single output
    # column (or 1-D labels) implies binary classification, otherwise one-hot
    # multi-class labels are assumed.
    if y.shape[-1] == 1 or len(y.shape) == 1:
        loss_object = tf.keras.losses.BinaryCrossentropy()
    else:
        loss_object = tf.keras.losses.CategoricalCrossentropy()

    with tf.GradientTape() as tape:
        tape.watch(x)
        prediction = model(x)
        loss = loss_object(y, prediction)

    gradient = tape.gradient(loss, x)

    # Generate the adversarial example: one signed step of size epsilon.
    perturbation = epsilon * tf.sign(gradient)
    adversarial_example = x + perturbation
    return adversarial_example.numpy()
44+
45+
def pgd(model, x, y, epsilon=0.01, alpha=0.01, num_steps=10):
    """
    Projected Gradient Descent (PGD) attack.

    Iteratively applies signed gradient steps of size ``alpha`` and projects
    the result back onto the epsilon-ball around ``x`` and the valid [0, 1]
    input range. NOTE(review): this implementation starts at ``x`` itself
    (no random initialization inside the ball).

    Parameters:
        model (tensorflow.keras.Model): The target model to attack.
        x (numpy.ndarray): The input example to attack.
        y (numpy.ndarray): The true labels of the input example.
        epsilon (float): The maximum magnitude of the perturbation (default: 0.01).
        alpha (float): The step size for each iteration (default: 0.01).
        num_steps (int): The number of PGD iterations (default: 10).

    Returns:
        adversarial_example (numpy.ndarray): The perturbed input example.
    """
    # Convert once so tape.watch and clip bounds work on tensors.
    x = tf.convert_to_tensor(x)

    # Select the loss to match the label encoding, consistent with fgsm;
    # hoisted out of the loop so it is built only once.
    if y.shape[-1] == 1 or len(y.shape) == 1:
        loss_object = tf.keras.losses.BinaryCrossentropy()
    else:
        loss_object = tf.keras.losses.CategoricalCrossentropy()

    adversarial_example = tf.identity(x)

    for _ in range(num_steps):
        with tf.GradientTape() as tape:
            tape.watch(adversarial_example)
            prediction = model(adversarial_example)
            loss = loss_object(y, prediction)

        gradient = tape.gradient(loss, adversarial_example)
        perturbation = alpha * tf.sign(gradient)
        adversarial_example = adversarial_example + perturbation

        # Project onto the epsilon-ball first, then clip to the valid input
        # range; the original order (range first, ball second) could return
        # values outside [0, 1] whenever x is near the range boundary.
        adversarial_example = tf.clip_by_value(adversarial_example, x - epsilon, x + epsilon)
        adversarial_example = tf.clip_by_value(adversarial_example, 0, 1)

    return adversarial_example.numpy()
74+
75+
def bim(model, x, y, epsilon=0.01, alpha=0.01, num_steps=10):
    """
    Basic Iterative Method (BIM) attack.

    Iterative FGSM: repeated signed gradient steps of size ``alpha`` starting
    from the clean input, constrained to the epsilon-ball around ``x`` and the
    valid [0, 1] input range.

    Parameters:
        model (tensorflow.keras.Model): The target model to attack.
        x (numpy.ndarray): The input example to attack.
        y (numpy.ndarray): The true labels of the input example.
        epsilon (float): The maximum magnitude of the perturbation (default: 0.01).
        alpha (float): The step size for each iteration (default: 0.01).
        num_steps (int): The number of BIM iterations (default: 10).

    Returns:
        adversarial_example (numpy.ndarray): The perturbed input example.
    """
    # Convert once so tape.watch and clip bounds work on tensors.
    x = tf.convert_to_tensor(x)

    # Select the loss to match the label encoding, consistent with fgsm;
    # hoisted out of the loop so it is built only once.
    if y.shape[-1] == 1 or len(y.shape) == 1:
        loss_object = tf.keras.losses.BinaryCrossentropy()
    else:
        loss_object = tf.keras.losses.CategoricalCrossentropy()

    adversarial_example = tf.identity(x)

    for _ in range(num_steps):
        with tf.GradientTape() as tape:
            tape.watch(adversarial_example)
            prediction = model(adversarial_example)
            loss = loss_object(y, prediction)

        gradient = tape.gradient(loss, adversarial_example)
        perturbation = alpha * tf.sign(gradient)
        adversarial_example = adversarial_example + perturbation

        # Project onto the epsilon-ball first, then clip to the valid input
        # range; the original order could leave values outside [0, 1].
        adversarial_example = tf.clip_by_value(adversarial_example, x - epsilon, x + epsilon)
        adversarial_example = tf.clip_by_value(adversarial_example, 0, 1)

    return adversarial_example.numpy()

deepdefend/defenses.py

Lines changed: 71 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,71 @@
1+
"""
2+
Functions to apply adversarial defense mechanisms to deep learning models.
3+
4+
Available functions:
5+
- `adversarial_training(model, x, y, epsilon=0.01)`: Adversarial Training defense.
6+
- `feature_squeezing(model, bit_depth=4)`: Feature Squeezing defense.
7+
8+
"""
9+
10+
import numpy as np
11+
import tensorflow as tf
12+
from deepdefend import attacks
13+
14+
def adversarial_training(model, x, y, epsilon=0.01, epochs=10, batch_size=32):
    """
    Adversarial Training defense.

    Adversarial training is a method where the model is trained on both the
    original and adversarial examples, aiming to make the model more robust
    to adversarial attacks.

    Parameters:
        model (tensorflow.keras.Model): The model to defend.
        x (numpy.ndarray): The input training examples.
        y (numpy.ndarray): The true labels of the training examples.
        epsilon (float): The magnitude of the perturbation (default: 0.01).
        epochs (int): Number of training epochs (default: 10, the previously
            hard-coded value).
        batch_size (int): Training batch size (default: 32, the previously
            hard-coded value).

    Returns:
        defended_model (tensorflow.keras.Model): The adversarially trained model.
    """
    defended_model = tf.keras.models.clone_model(model)
    defended_model.set_weights(model.get_weights())

    # fgsm handles batched input, so attack the whole training set in one call
    # instead of building one GradientTape per example. Under mean-reduced
    # losses the sign of each example's input gradient is unchanged, so the
    # resulting perturbations match the per-example loop.
    x_adversarial = attacks.fgsm(model, x, y, epsilon)

    # The attack does not change labels, so the adversarial half reuses y.
    x_train = np.concatenate([x, x_adversarial], axis=0)
    y_train = np.concatenate([y, y], axis=0)

    # Choose the training loss to match the label encoding, consistent with
    # the loss selection used by the attacks module.
    if y.shape[-1] == 1 or len(y.shape) == 1:
        loss_name = 'binary_crossentropy'
    else:
        loss_name = 'categorical_crossentropy'

    defended_model.compile(optimizer='adam', loss=loss_name, metrics=['accuracy'])
    defended_model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size)

    return defended_model
47+
48+
def feature_squeezing(model, bit_depth=4):
    """
    Feature Squeezing defense (applied to model weights).

    Reduces the number of bits used to represent the weights of Conv2D and
    Dense layers by quantizing each weight tensor to a signed
    ``bit_depth``-bit grid scaled to the tensor's maximum magnitude.
    NOTE(review): classical feature squeezing quantizes model *inputs*; this
    implementation quantizes weights instead — behavior kept as designed.

    Parameters:
        model (tensorflow.keras.Model): The model to defend.
        bit_depth (int): The number of bits per feature (default: 4).

    Returns:
        defended_model (tensorflow.keras.Model): The model with feature squeezing defense.
    """
    defended_model = tf.keras.models.clone_model(model)
    defended_model.set_weights(model.get_weights())

    levels = 2 ** bit_depth

    for layer in defended_model.layers:
        if isinstance(layer, (tf.keras.layers.Conv2D, tf.keras.layers.Dense)):
            squeezed_weights = []
            for w in layer.get_weights():
                w_max = np.max(np.abs(w))
                # Guard: zero-initialized tensors (e.g. fresh biases) would
                # otherwise cause a division by zero and fill the layer with
                # NaNs; an all-zero tensor is already exactly representable.
                if w_max == 0:
                    squeezed_weights.append(w)
                    continue
                # Same math as before, with the scale computed once per tensor:
                # round onto a signed bit_depth-bit grid, then rescale back.
                scale = levels / w_max
                quantized = np.clip(np.round(w * scale), -(levels // 2), levels // 2 - 1)
                squeezed_weights.append(quantized / scale)
            layer.set_weights(squeezed_weights)

    return defended_model

readme.md

Lines changed: 100 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,100 @@
1+
# DeepDefend 0.1.0
2+
![Python Version](https://img.shields.io/badge/python-3.11-blue.svg)
3+
![Code Size](https://img.shields.io/github/languages/code-size/infinitode/deepdefend)
4+
![Downloads](https://pepy.tech/badge/deepdefend)
5+
![License Compliance](https://img.shields.io/badge/license-compliance-brightgreen.svg)
6+
![PyPI Version](https://img.shields.io/pypi/v/deepdefend)
7+
8+
An open-source Python library for adversarial attacks and defenses in deep learning models, enhancing the security and robustness of AI systems.
9+
10+
## Notice
11+
12+
DeepDefend has not yet been fully tested. Please report any issues you may encounter when using DeepDefend.
13+
14+
## Installation
15+
16+
You can install DeepDefend using pip:
17+
18+
```bash
19+
pip install deepdefend
20+
```
21+
22+
## Supported Python Versions
23+
24+
DeepDefend supports the following Python versions:
25+
26+
- Python 3.6
27+
- Python 3.7
28+
- Python 3.8
29+
- Python 3.9
30+
- Python 3.10
31+
- Python 3.11
32+
33+
Please ensure that you have one of these Python versions installed before using DeepDefend. DeepDefend may not work as expected on Python versions lower than those supported.
34+
35+
## Features
36+
37+
- Adversarial Attacks: Generate adversarial examples to evaluate model vulnerabilities.
38+
- Adversarial Defenses: Employ various methods to protect models against adversarial attacks.
39+
40+
## Usage
41+
42+
### Adversarial Attacks
43+
44+
```python
45+
import tensorflow as tf
46+
from deepdefend.attacks import fgsm, pgd, bim
47+
48+
# Load a pre-trained TensorFlow model
49+
model = ...
50+
51+
# Load example input and label data (replace this with your own data loading code)
52+
x_example = ... # example input data
53+
y_example = ... # true label
54+
55+
# Perform FGSM attack on the example data
56+
adversarial_example_fgsm = fgsm(model, x_example, y_example, epsilon=0.01)
57+
58+
# Perform PGD attack on the example data
59+
adversarial_example_pgd = pgd(model, x_example, y_example, epsilon=0.01, alpha=0.01, num_steps=10)
60+
61+
# Perform BIM attack on the example data
62+
adversarial_example_bim = bim(model, x_example, y_example, epsilon=0.01, alpha=0.01, num_steps=10)
63+
```
64+
65+
### Adversarial Defenses
66+
67+
```python
68+
import tensorflow as tf
69+
from deepdefend.defenses import adversarial_training, feature_squeezing
70+
71+
# Load a pre-trained TensorFlow model
72+
model = ...
73+
74+
# Load training data
75+
x_train, y_train = ... # training data and labels
76+
77+
# Adversarial training to defend against attacks
78+
defended_model = adversarial_training(model, x_train, y_train, epsilon=0.01)
79+
80+
# Feature squeezing defense
81+
defended_model_squeezed = feature_squeezing(model, bit_depth=4)
82+
```
83+
84+
## Contributing
85+
86+
Contributions are welcome! If you encounter any issues, have suggestions, or want to contribute to DeepDefend, please open an issue or submit a pull request on [GitHub](https://github.com/infinitode/deepdefend).
87+
88+
## License
89+
90+
DeepDefend is released under the terms of the **MIT License (Modified)**. Please see the [LICENSE](https://github.com/infinitode/deepdefend/blob/main/LICENSE) file for the full text.
91+
92+
**Modified License Clause**
93+
94+
95+
96+
The modified license clause grants users the permission to make derivative works based on the DeepDefend software. However, it requires any substantial changes to the software to be clearly distinguished from the original work and distributed under a different name.
97+
98+
By enforcing this distinction, it aims to prevent direct publishing of the source code without changes while allowing users to create derivative works that incorporate the code but are not exactly the same.
99+
100+
Please read the full license terms in the [LICENSE](https://github.com/infinitode/deepdefend/blob/main/LICENSE) file for complete details.

setup.py

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
from setuptools import setup, find_packages

# Use the full README for the PyPI project page when it is available; the
# original passed a plain one-line string while declaring
# long_description_content_type='text/markdown'. Fall back to that same
# one-liner if the file is missing (e.g. in a stripped sdist).
try:
    with open('readme.md', encoding='utf-8') as readme_file:
        long_description = readme_file.read()
except OSError:
    long_description = (
        'An open-source Python library for adversarial attacks and defenses '
        'in deep learning models, enhancing the security and robustness of '
        'AI systems.'
    )

setup(
    name='deepdefend',
    version='0.1.0',
    author='Infinitode Pty Ltd',
    author_email='infinitode.ltd@gmail.com',
    description='An open-source Python library for adversarial attacks and defenses in deep learning models.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/infinitode/deepdefend',
    packages=find_packages(),
    install_requires=[
        'numpy',
        'tensorflow',
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: 3.11',
    ],
    python_requires='>=3.6',
)

0 commit comments

Comments
 (0)