Skip to content

Commit 8126ddb

Browse files
committed
Add all pipelines to separate workflow
1 parent 126d30b commit 8126ddb

22 files changed

Lines changed: 790 additions & 69 deletions

File tree

.github/workflows/beam_Inference_Python_Benchmarks_Dataflow.yml

Lines changed: 11 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
# See the License for the specific language governing permissions and
1414
# limitations under the License.
1515

16-
name: Inference Python Benchmarks Dataflow
16+
name: Inference Python Benchmarks Dataflow (1 part)
1717

1818
on:
1919
schedule:
@@ -92,30 +92,9 @@ jobs:
9292
${{ github.workspace }}/.github/workflows/load-tests-pipeline-options/beam_Inference_Python_Benchmarks_Dataflow_Pytorch_Sentiment_Streaming_DistilBert_Base_Uncased.txt
9393
${{ github.workspace }}/.github/workflows/load-tests-pipeline-options/beam_Inference_Python_Benchmarks_Dataflow_Pytorch_Sentiment_Batch_DistilBert_Base_Uncased.txt
9494
${{ github.workspace }}/.github/workflows/load-tests-pipeline-options/beam_Inference_Python_Benchmarks_Dataflow_VLLM_Gemma_Batch.txt
95-
${{ github.workspace }}/.github/workflows/load-tests-pipeline-options/beam_Inference_Python_Benchmarks_Dataflow_Pytorch_Image_Classification_Rightfit.txt
96-
${{ github.workspace }}/.github/workflows/load-tests-pipeline-options/beam_Inference_Python_Benchmarks_Dataflow_Pytorch_Image_Object_Detection.txt
97-
${{ github.workspace }}/.github/workflows/load-tests-pipeline-options/beam_Inference_Python_Benchmarks_Dataflow_Pytorch_Image_Captioning.txt
9895
# The env variables are created and populated in the test-arguments-action as "<github.job>_test_arguments_<argument_file_paths_index>"
9996
- name: get current time
10097
run: echo "NOW_UTC=$(date '+%m%d%H%M%S' --utc)" >> $GITHUB_ENV
101-
- name: Build VLLM Development Image
102-
id: build_vllm_image
103-
uses: ./.github/actions/build-push-docker-action
104-
with:
105-
dockerfile_path: 'sdks/python/apache_beam/ml/inference/test_resources/vllm.dockerfile'
106-
image_name: 'us-docker.pkg.dev/apache-beam-testing/beam-temp/beam-vllm-gpu-base'
107-
image_tag: ${{ github.sha }}
108-
- name: Run VLLM Gemma Batch Test
109-
uses: ./.github/actions/gradle-command-self-hosted-action
110-
timeout-minutes: 180
111-
with:
112-
gradle-command: :sdks:python:apache_beam:testing:load_tests:run
113-
arguments: |
114-
-PloadTest.mainClass=apache_beam.testing.benchmarks.inference.vllm_gemma_benchmarks \
115-
-Prunner=DataflowRunner \
116-
-PsdkLocationOverride=false \
117-
-PpythonVersion=3.10 \
118-
-PloadTest.requirementsTxtFile=apache_beam/ml/inference/vllm_tests_requirements.txt '-PloadTest.args=${{ env.beam_Inference_Python_Benchmarks_Dataflow_test_arguments_8 }} --mode=batch --job_name=benchmark-tests-vllm-with-gemma-2b-it-batch-${{env.NOW_UTC}} --sdk_container_image=${{ steps.build_vllm_image.outputs.image_url }}'
11998
- name: run Pytorch Sentiment Streaming using Hugging Face distilbert-base-uncased model
12099
uses: ./.github/actions/gradle-command-self-hosted-action
121100
timeout-minutes: 180
@@ -193,36 +172,21 @@ jobs:
193172
-PpythonVersion=3.10 \
194173
-PloadTest.requirementsTxtFile=apache_beam/ml/inference/torch_tests_requirements.txt \
195174
'-PloadTest.args=${{ env.beam_Inference_Python_Benchmarks_Dataflow_test_arguments_5 }} --job_name=benchmark-tests-pytorch-imagenet-python-gpu-${{env.NOW_UTC}} --output=gs://temp-storage-for-end-to-end-tests/torch/result_resnet152_gpu-${{env.NOW_UTC}}.txt' \
196-
- name: run PyTorch Image Classification EfficientNet-B0 Streaming (Right-fitting) GPU
197-
uses: ./.github/actions/gradle-command-self-hosted-action
198-
timeout-minutes: 180
199-
with:
200-
gradle-command: :sdks:python:apache_beam:testing:load_tests:run
201-
arguments: |
202-
-PloadTest.mainClass=apache_beam.testing.benchmarks.inference.pytorch_imagenet_rightfit_benchmarks \
203-
-Prunner=DataflowRunner \
204-
-PpythonVersion=3.10 \
205-
-PloadTest.requirementsTxtFile=apache_beam/ml/inference/pytorch_rightfit_requirements.txt \
206-
'-PloadTest.args=${{ env.beam_Inference_Python_Benchmarks_Dataflow_test_arguments_9 }} --mode=streaming --job_name=benchmark-tests-pytorch-imagenet-rightfit-streaming-${{env.NOW_UTC}} --output_table=apache-beam-testing.beam_run_inference.result_torch_inference_imagenet_stream_rightfit' \
207-
- name: run PyTorch Image Object Detection Faster R-CNN ResNet-50 Batch GPU
208-
uses: ./.github/actions/gradle-command-self-hosted-action
209-
timeout-minutes: 180
175+
- name: Build VLLM Development Image
176+
id: build_vllm_image
177+
uses: ./.github/actions/build-push-docker-action
210178
with:
211-
gradle-command: :sdks:python:apache_beam:testing:load_tests:run
212-
arguments: |
213-
-PloadTest.mainClass=apache_beam.testing.benchmarks.inference.pytorch_image_object_detection_benchmarks \
214-
-Prunner=DataflowRunner \
215-
-PpythonVersion=3.10 \
216-
-PloadTest.requirementsTxtFile=apache_beam/ml/inference/pytorch_image_object_detection_requirements.txt \
217-
'-PloadTest.args=${{ env.beam_Inference_Python_Benchmarks_Dataflow_test_arguments_10 }} --mode=batch --job_name=benchmark-tests-pytorch-image-object-detection-batch-${{env.NOW_UTC}} --output_table=apache-beam-testing.beam_run_inference.result_torch_inference_image_object_detection_batch' \
218-
- name: run PyTorch Image Captioning BLIP + CLIP Batch GPU
179+
dockerfile_path: 'sdks/python/apache_beam/ml/inference/test_resources/vllm.dockerfile'
180+
image_name: 'us-docker.pkg.dev/apache-beam-testing/beam-temp/beam-vllm-gpu-base'
181+
image_tag: ${{ github.sha }}
182+
- name: Run VLLM Gemma Batch Test
219183
uses: ./.github/actions/gradle-command-self-hosted-action
220184
timeout-minutes: 180
221185
with:
222186
gradle-command: :sdks:python:apache_beam:testing:load_tests:run
223187
arguments: |
224-
-PloadTest.mainClass=apache_beam.testing.benchmarks.inference.pytorch_image_captioning_benchmarks \
188+
-PloadTest.mainClass=apache_beam.testing.benchmarks.inference.vllm_gemma_benchmarks \
225189
-Prunner=DataflowRunner \
190+
-PsdkLocationOverride=false \
226191
-PpythonVersion=3.10 \
227-
-PloadTest.requirementsTxtFile=apache_beam/ml/inference/pytorch_image_captioning_requirements.txt \
228-
'-PloadTest.args=${{ env.beam_Inference_Python_Benchmarks_Dataflow_test_arguments_11 }} --mode=batch --job_name=benchmark-tests-pytorch-image-captioning-batch-${{env.NOW_UTC}} --output_table=apache-beam-testing.beam_run_inference.result_torch_inference_image_captioning_batch'
192+
-PloadTest.requirementsTxtFile=apache_beam/ml/inference/vllm_tests_requirements.txt '-PloadTest.args=${{ env.beam_Inference_Python_Benchmarks_Dataflow_test_arguments_8 }} --mode=batch --job_name=benchmark-tests-vllm-with-gemma-2b-it-batch-${{env.NOW_UTC}} --sdk_container_image=${{ steps.build_vllm_image.outputs.image_url }}'

0 commit comments

Comments (0)