forked from oneapi-src/oneAPI-samples
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsample.json
More file actions
36 lines (35 loc) · 1.22 KB
/
sample.json
File metadata and controls
36 lines (35 loc) · 1.22 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
{
"guid": "82e7612f-2810-4d12-9c75-c17fcbb946fa",
"name": "Intel® Neural Compressor Accelerate Inference with Intel® Optimization for TensorFlow*",
"categories": ["Toolkit/oneAPI AI And Analytics/Getting Started"],
"description": "This sample illustrates how to run Intel® Neural Compressor to quantize the FP32 model trained by Keras on TensorFlow to an INT8 model to speed up inference.",
"languages": [{"python":{}}],
"dependencies": ["tensorflow","neural-compressor"],
"os": ["linux"],
"builder": ["cli"],
"targetDevice": ["CPU"],
"ciTests": {
"linux": [
{
"env": ["source /intel/oneapi/intelpython/bin/activate",
"conda activate tensorflow",
"pip install uv",
"uv init",
"uv python pin $(which python)",
"uv venv --system-site-packages",
"uv add -r requirements.txt",
"uv add flatbuffers",
"uv add neural-compressor",
"pip install notebook",
"uv add --dev ipykernel",
"uv run ipython kernel install --user --name tensorflow"
],
"id": "neural-compressor tensorflow",
"steps": [
"jupyter nbconvert --ExecutePreprocessor.enabled=True --ExecutePreprocessor.kernel_name=tensorflow --to notebook inc_sample_tensorflow.ipynb"
]
}
]
},
"expertise": "Getting Started"
}