forked from oneapi-src/oneAPI-samples
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsample.json
More file actions
34 lines (33 loc) · 1.35 KB
/
sample.json
File metadata and controls
34 lines (33 loc) · 1.35 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
{
  "guid": "0A887217-5621-4C8D-9418-17558088698B",
  "name": "Intel® Extension for TensorFlow* BF16 Inference",
  "categories": ["Toolkit/oneAPI AI And Analytics/Features and Functionality"],
  "description": "This sample illustrates how to run inference on a TensorFlow model using Advanced Matrix Extensions Bfloat16",
  "builder": ["cli"],
  "languages": [{"python": {}}],
  "os": ["linux"],
  "targetDevice": ["CPU"],
  "cpuInstructionSets": ["AVX512", "AMX"],
  "ciTests": {
    "linux": [
      {
        "id": "intel amx bf16 inference",
        "steps": [
          "source /intel/oneapi/intelpython/bin/activate",
          "conda activate tensorflow",
          "pip install uv",
          "uv init",
          "uv python pin $(which python)",
          "uv venv --system-site-packages",
          "uv add -r requirements.txt",
          "uv add numpy==1.26.4",
          "uv add ipykernel jupyter notebook",
          "uv run python Intel_TensorFlow_AMX_BF16_Inference.py",
          "uv run python -m ipykernel install --user --name=tensorflow",
          "uv run jupyter nbconvert --ExecutePreprocessor.enabled=True --ExecutePreprocessor.kernel_name=tensorflow --to notebook IntelTensorFlow_AMX_BF16_Inference.ipynb"
        ]
      }
    ]
  },
  "expertise": "Code Optimization"
}