Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
27 commits
Select commit Hold shift + click to select a range
f8cb55d
start work on new bq ui
fhirschmann Sep 4, 2025
63bec74
bigquery ui changed once again
fhirschmann Sep 4, 2025
f7517c3
fix dataplex spotlights
fhirschmann Oct 9, 2025
82e50dc
Update bk
fhirschmann Oct 9, 2025
a91b873
Update bk
fhirschmann Oct 14, 2025
dea7363
Merge pull request #5 from fhirschmann/new_bq_ui
fhirschmann Oct 21, 2025
2f728c2
remove pip install
fhirschmann Oct 21, 2025
b5354a9
Add nbformat and nbconvert imports for rendering
fhirschmann Oct 21, 2025
97c5e83
Change name of external connection
clde22 Oct 21, 2025
99536b1
bq UI changes explorer
clde22 Oct 21, 2025
cfc06b7
bq UI changes explorer
clde22 Oct 21, 2025
5441ea0
bq UI changes explorer
clde22 Oct 21, 2025
9abc444
bq UI changes explorer
clde22 Oct 21, 2025
aa3e12f
label changes in bq Details and query
clde22 Oct 21, 2025
06627ff
bq UI changes explorer
clde22 Oct 21, 2025
0bf66d7
IAM link
clde22 Oct 21, 2025
18da58e
do not auto init bootkon
fhirschmann Oct 21, 2025
bef8312
do not auto init script and fix vertex ai perms
fhirschmann Oct 21, 2025
46c239c
enpoints label
clde22 Oct 21, 2025
3b7807c
remove superfluous roles
fhirschmann Oct 21, 2025
cf9a27f
Merge pull request #6 from fhirschmann/no_init
fhirschmann Oct 21, 2025
e4c7407
Comment out role assignment in bk-bootstrap script
fhirschmann Oct 21, 2025
1e0e308
Update tutorial instructions for bootkon initialization
fhirschmann Oct 22, 2025
6754ad7
Locator fix for layout changes in Vertex AI
mikrovvelle Oct 22, 2025
caf1154
Merge pull request #7 from mikrovvelle/patch-20251022
fhirschmann Oct 22, 2025
7aa3561
make explicit: deploy model after endpoint created
mikrovvelle Oct 22, 2025
a91d088
Merge pull request #8 from mikrovvelle/endpoint-ordering-ml-pipeline
fhirschmann Oct 22, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
53 changes: 22 additions & 31 deletions .scripts/bk
Original file line number Diff line number Diff line change
Expand Up @@ -65,13 +65,10 @@ export BK_REPO_URL="https://github.com/${BK_REPO}.git"
export BK_TUTORIAL="${BK_TUTORIAL:-docs/TUTORIAL.md}" # defaults to .TUTORIAL.md; can be overwritten
export BK_BRANCH="${BK_BRANCH:-main}" # defaults to main; can be overwritten
export BK_DIR=~/${BK_GITHUB_REPOSITORY}
export BK_INIT_SCRIPT=~/${BK_GITHUB_REPOSITORY}/bk
export BK_INITIALIZED=1

cd ~/

pip install --quiet jinja2 nbformat nbconvert

if ! command -v git &> /dev/null; then
sudo apt update
sudo apt install -y git
Expand All @@ -87,25 +84,37 @@ fi
cd $BK_GITHUB_REPOSITORY

NEW_PATH=~/${BK_GITHUB_REPOSITORY}/.scripts
PATH_EXPORT_LINE="export PATH=\${HOME}/${BK_GITHUB_REPOSITORY}/.scripts:\$PATH"

# Check if the new path is already in the PATH
# 1. Add to current session's PATH if missing
if [[ ":$PATH:" != *":$NEW_PATH:"* ]]; then
echo -e "${MAGENTA}Adding $NEW_PATH to your PATH${NC}"
echo -e "${MAGENTA}Adding $NEW_PATH to your current session's PATH${NC}"
export PATH=${NEW_PATH}:$PATH
else
echo -e "${GREEN}Your PATH already contains $NEW_PATH. Not adding it again.${NC}"
echo -e "${GREEN}Your current session's PATH already contains $NEW_PATH. Not adding it again.${NC}"
fi

# 2. Persist the PATH setting in ~/.bashrc if not already there
if ! grep -qF "$PATH_EXPORT_LINE" ~/.bashrc ; then
echo -e "${MAGENTA}Adding $NEW_PATH to ~/.bashrc for future sessions.${NC}"
# Use '>>' to append the line to the file
echo "$PATH_EXPORT_LINE" >> ~/.bashrc
else
echo -e "${GREEN}The permanent PATH export for $NEW_PATH is already in ~/.bashrc. Skipping.${NC}"
fi

unset NEW_PATH
unset PATH_EXPORT_LINE

echo -e "Sourcing $(readlink -f vars.sh)"
source vars.sh
echo -e "Sourcing $(readlink -f $BK_DIR/vars.sh)"
source $BK_DIR/vars.sh

if [ -f vars.local.sh ]; then
echo -e "Sourcing $(readlink -f vars.local.sh)"
source vars.local.sh
if [ -f $BK_DIR/vars.local.sh ]; then
echo -e "Sourcing $(readlink -f $BK_DIR/vars.local.sh)"
source $BK_DIR/vars.local.sh
fi

echo -e "Variables from vars.sh: PROJECT_ID=${YELLOW}$PROJECT_ID${NC} GCP_USERNAME=${YELLOW}$GCP_USERNAME${NC} REGION=${YELLOW}$REGION${NC}"
echo -e "Variables from $BK_DIR/vars.sh: PROJECT_ID=${YELLOW}$PROJECT_ID${NC} GCP_USERNAME=${YELLOW}$GCP_USERNAME${NC} REGION=${YELLOW}$REGION${NC}"


if [ -z $PROJECT_ID ]; then
Expand Down Expand Up @@ -138,24 +147,6 @@ else
echo "$line" >> ~/.bashrc
fi

## Set or update $BK_INIT_SCRIPT in ~/.bashrc
line="export BK_INIT_SCRIPT=~/${BK_GITHUB_REPOSITORY}/.scripts/bk"
if grep -q '^export BK_INIT_SCRIPT=' ~/.bashrc; then
# If the line exists but differs, update it
if ! grep -Fxq "$line" ~/.bashrc; then
sed -i "s|^export BK_INIT_SCRIPT=.*|$line|" ~/.bashrc
echo "Updated the existing BK_INIT_SCRIPT line in ~/.bashrc."
fi
else
echo "$line" >> ~/.bashrc
fi

## Load $BK_INIT_SCRIPT in ~/.bashrc
line='if [ -f ${BK_INIT_SCRIPT} ]; then source ${BK_INIT_SCRIPT}; fi'
grep -qxF "$line" ~/.bashrc || echo "$line" >> ~/.bashrc

unset line

echo
echo -e " __ --------------------------------------------------------"
echo -e " _(\ |${RED}@@${NC}| | |"
Expand All @@ -171,4 +162,4 @@ echo
if [ "$(basename ${BASH_SOURCE[0]})" != "bk" ]; then
# This script is run the first time from GitHub
bk-start
fi
fi
16 changes: 16 additions & 0 deletions .scripts/bk-bootstrap
Original file line number Diff line number Diff line change
Expand Up @@ -58,3 +58,19 @@ for role in "${service_account_roles[@]}"; do
gcloud projects add-iam-policy-binding "$PROJECT_ID" \
--member="serviceAccount:$COMPUTE_SERVICE_ACCOUNT" --role="$role" >>/dev/null
done

VERTEX_AI_CC_SERVICE_ACCOUNT="service-$PROJECT_NUMBER@gcp-sa-aiplatform-cc.iam.gserviceaccount.com"

# Array of roles to grant to the Vertex AI Custom Code Service Agent
declare -a vertex_cc_service_agent_roles=(
"roles/artifactregistry.reader" # AI Platform Artifact Registry Reader
"roles/artifactregistry.serviceAgent" # Custom Artifact Registry Service Agent
"roles/aiplatform.customCodeServiceAgent" # Vertex AI Custom Code Service Agent
)

# Assign roles to the Vertex AI Custom Code Service Account
#for role in "${vertex_cc_service_agent_roles[@]}"; do
# echo "Assigning role $role to $VERTEX_AI_CC_SERVICE_ACCOUNT in project $PROJECT_ID..."
# gcloud projects add-iam-policy-binding "$PROJECT_ID" \
# --member="serviceAccount:$VERTEX_AI_CC_SERVICE_ACCOUNT" --role="$role" >>/dev/null
#done
5 changes: 3 additions & 2 deletions .scripts/bk-render-jinja2
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,8 @@ import json
import sys
import os
import re
import nbformat
import base64
from functools import partial
from nbconvert import HTMLExporter, MarkdownExporter

import jinja2

Expand Down Expand Up @@ -42,6 +40,9 @@ def apply_to_content(data, func):


def render_jupyter(path):
import nbformat
from nbconvert import HTMLExporter, MarkdownExporter

with open(path) as f:
nb = nbformat.read(f, as_version=4)
exporter = HTMLExporter()
Expand Down
8 changes: 8 additions & 0 deletions .scripts/bk-start
Original file line number Diff line number Diff line change
@@ -1,5 +1,13 @@
#!/bin/sh

cd $BK_DIR

if [ -z "$BK_INITIALIZED" ]; then
echo "Bootkon has not been initialized."
echo "Please execute: "
echo " . bk (including the dot)"
exit 1
fi

bk-tutorial $BK_TUTORIAL
cloudshell open-workspace .
10 changes: 6 additions & 4 deletions docs/TUTORIAL.md
Original file line number Diff line number Diff line change
Expand Up @@ -38,19 +38,21 @@ and set `GCP_USERNAME`, `PROJECT_ID` according to the information you received.

❗ Please do not include any whitespace when setting these variables.

Please reload bootkon and make sure there are no errors printed:
Please initialize bootkon. The next command will set environment variables in your current terminal.

```bash
. bk
```


And restart the tutorial using the next command. You can also use the next command to continue bootkon in case you accidentally close the tutorial or the editor:
Reload the tutorial window on the right-hand side of your screen.

```bash
bk-start
```

In case you accidentally close the tutorial or the editor, you can execute `bk-start` to start it again. Please make sure that you execute `. bk` in every terminal
you open so that the environment variables are set.

Now, your

* `PROJECT_ID` is `{% if PROJECT_ID == "" %}None{% else %}{{ PROJECT_ID }}{% endif %}`
Expand Down Expand Up @@ -85,4 +87,4 @@ The authors of Data & AI Bootkon are:
Data & AI Bootkon received contributions from many people, including:
- [Christine Schulze](https://www.linkedin.com/in/christine-schulze-33822765/)
- [Daniel Quinlan](https://www.linkedin.com/in/%F0%9F%8C%8Ddaniel-quinlan-51126016/)
- [Dinesh Sandra](https://www.linkedin.com/in/sandradinesh/)
- [Dinesh Sandra](https://www.linkedin.com/in/sandradinesh/)
41 changes: 23 additions & 18 deletions docs/labs/2_data_ingestion.md
Original file line number Diff line number Diff line change
Expand Up @@ -43,11 +43,12 @@ echo $CONN_SERVICE_ACCOUNT
```

Let's double check the service account.

<walkthrough-spotlight-pointer locator="semantic({treeitem 'Toggle node astute-ace-336608'} {button 'Toggle node'})"></walkthrough-spotlight-pointer>
1. Go to the [BigQuery Console](https://console.cloud.google.com/bigquery).
2. Expand <walkthrough-spotlight-pointer locator="semantic({treeitem '{{ PROJECT_ID }}'} {button 'Toggle node'})">{{ PROJECT_ID }}</walkthrough-spotlight-pointer>
3. Expand <walkthrough-spotlight-pointer locator="semantic({treeitem 'External connections'} {button 'Toggle node'})">External connections</walkthrough-spotlight-pointer>
4. Click ``us.fraud-transactions-conn``.
2. Click <walkthrough-spotlight-pointer locator="semantic({tab 'Explorer'})">Explorer</walkthrough-spotlight-pointer>
3. Expand <walkthrough-spotlight-pointer locator="semantic({treeitem 'Toggle node {{ PROJECT_ID }}'} {button 'Toggle node'})">{{ PROJECT_ID }}</walkthrough-spotlight-pointer>
4. Click <walkthrough-spotlight-pointer locator="css(span[id$=ProjectTreeDatasource-{{ PROJECT_ID }}-bucket-connection])">Connections</walkthrough-spotlight-pointer>
5. Click <walkthrough-spotlight-pointer locator="semantic({button 'fraud-transactions-conn'})">fraud-transactions-conn</walkthrough-spotlight-pointer>

Is the service account equivalent to the one you got from the command line?

Expand All @@ -62,21 +63,23 @@ gcloud storage buckets add-iam-policy-binding gs://{{ PROJECT_ID }}-bucket \
Let's create a data set that contains the table and the external connection to Cloud Storage.

1. Go to the [BigQuery Console](https://console.cloud.google.com/bigquery)
2. Click the three <walkthrough-spotlight-pointer locator="semantic({treeitem '{{ PROJECT_ID }}'} {button})">vertical dots ⋮</walkthrough-spotlight-pointer> next to `{{ PROJECT_ID }}` in the navigation menu
3. Click <walkthrough-spotlight-pointer locator="semantic({menuitem 'Create dataset'})">Create dataset</walkthrough-spotlight-pointer>
4. Enter `ml_datasets` (plural) in the ID field. Region should be multi-region US.
5. Click <walkthrough-spotlight-pointer locator="semantic({button 'Create dataset'})">Create dataset</walkthrough-spotlight-pointer>
2. Choose <walkthrough-spotlight-pointer locator="semantic({tab 'Explorer'})">Explorer</walkthrough-spotlight-pointer>
3. Hover your mouse over <walkthrough-spotlight-pointer locator="semantic({treeitem 'Toggle node {{ PROJECT_ID }}'} {button 'Toggle node'})">{{ PROJECT_ID }}</walkthrough-spotlight-pointer>
4. Click the three vertical dots (⋮) and go to `Create dataset`
5. Enter `ml_datasets` (plural) in the ID field. Region should be multi-region US.
6. Click `Create dataset`

Alternatively, you can create the data set on the command line:
```bash
bq --location=us mk -d ml_datasets
```

Next, we connect the data in Cloud Storage to BigQuery:
1. Click <walkthrough-spotlight-pointer locator="spotlight(bigquery-add-data)">+ Add data</walkthrough-spotlight-pointer>
2. Click <walkthrough-spotlight-pointer locator="semantic({button 'Google Cloud Storage'})">Google Cloud Storage</walkthrough-spotlight-pointer>
3. Select `Load to BigQuery`
4. Enter the following details:
1. Choose <walkthrough-spotlight-pointer locator="semantic({tab 'Explorer'})">Explorer</walkthrough-spotlight-pointer>
2. Click <walkthrough-spotlight-pointer locator="spotlight(bigquery-add-data)">+ Add data</walkthrough-spotlight-pointer>
3. Click <walkthrough-spotlight-pointer locator="semantic({button 'Google Cloud Storage'})">Google Cloud Storage</walkthrough-spotlight-pointer>
4. Select `Load to BigQuery`
5. Enter the following details:
- Create table from: `Google Cloud Storage`
- Select file: `{{ PROJECT_ID }}-bucket/data/parquet/ulb_fraud_detection/*`
- File format: `Parquet`
Expand All @@ -87,7 +90,7 @@ Next, we connect the data in Cloud Storage to BigQuery:
- Check *Create a BigLake table using a Cloud Resource connection*
- Connection ID: Select `us.fraud-transactions-conn`
- Schema: `Auto detect`
5. Click on <walkthrough-spotlight-pointer locator="semantic({button 'Create table'})">Create table</walkthrough-spotlight-pointer>
6. Click on <walkthrough-spotlight-pointer locator="semantic({button 'Create table'})">Create table</walkthrough-spotlight-pointer>

Alternatively, you can also use the command line to create the table:

Expand All @@ -99,17 +102,19 @@ bq mk --table \

Let's have a look at the data set:
1. Go to the [BigQuery Console](https://console.cloud.google.com/bigquery)
2. Expand <walkthrough-spotlight-pointer locator="semantic({treeitem '{{ PROJECT_ID }}'} {button 'Toggle node'})">{{ PROJECT_ID }}</walkthrough-spotlight-pointer>
3. Expand <walkthrough-spotlight-pointer locator="semantic({treeitem 'ml_datasets'} {button 'Toggle node'})">ml_datasets</walkthrough-spotlight-pointer>
4. Click <walkthrough-spotlight-pointer locator="semantic({treeitem 'ulb_fraud_detection_biglake'})">``ulb_fraud_detection_biglake``</walkthrough-spotlight-pointer>
5. Click <walkthrough-spotlight-pointer locator="text('DETAILS')">DETAILS</walkthrough-spotlight-pointer>
2. Choose <walkthrough-spotlight-pointer locator="semantic({tab 'Explorer'})">Explorer</walkthrough-spotlight-pointer>
3. Expand <walkthrough-spotlight-pointer locator="semantic({treeitem 'Toggle node {{ PROJECT_ID }}'} {button 'Toggle node'})">{{ PROJECT_ID }}</walkthrough-spotlight-pointer>
4. Click <walkthrough-spotlight-pointer locator="css(span[id$=ProjectTreeDatasource-{{ PROJECT_ID }}-bucket-dataset])">Datasets</walkthrough-spotlight-pointer>
5. Click <walkthrough-spotlight-pointer locator="semantic({button 'ml_datasets'})">ml_datasets</walkthrough-spotlight-pointer>
6. Click <walkthrough-spotlight-pointer locator="semantic({gridcell 'ulb_fraud_detection_biglake'})">ulb_fraud_detection_biglake</walkthrough-spotlight-pointer>
7. Click <walkthrough-spotlight-pointer locator="text('DETAILS')">Details</walkthrough-spotlight-pointer>

Have a look at the external data configuration. You can see the Cloud Storage bucket (`gs://...`) your data
lives in.

Let's query it:

1. Click <walkthrough-spotlight-pointer locator="text('QUERY')">QUERY</walkthrough-spotlight-pointer>
1. Click <walkthrough-spotlight-pointer locator="text('QUERY')">Query</walkthrough-spotlight-pointer>
2. Insert the following SQL query.

```sql
Expand Down
4 changes: 2 additions & 2 deletions docs/labs/3_dataform.md
Original file line number Diff line number Diff line change
Expand Up @@ -209,11 +209,11 @@ Go to [Dataform](https://console.cloud.google.com/bigquery/dataform)\> <walkthro

For the sentiment inference step to succeed, you need to grant the external connection service account the Vertex AI user privilege. More details can be found in this [link](https://cloud.google.com/bigquery/docs/generate-text-tutorial#grant-permissions).

1. You can find the service account ID under [BigQuery Studio](https://console.cloud.google.com/bigquery) \> ``{{ PROJECT_ID }}`` \> <walkthrough-spotlight-pointer locator="semantic({treeitem 'External connections'})">External connections</walkthrough-spotlight-pointer> \> `fraud-transactions-conn`
1. You can find the service account ID under [BigQuery Studio](https://console.cloud.google.com/bigquery) \> <walkthrough-spotlight-pointer locator="semantic({tab 'Explorer'})">Explorer</walkthrough-spotlight-pointer> \> ``{{ PROJECT_ID }}`` \> <walkthrough-spotlight-pointer locator="semantic({treeitem 'Connections'})">Connections</walkthrough-spotlight-pointer> \> `fraud-transactions-conn`

<img src= "../img/lab3/serviceaccountconnection.png" alt="serviceaccountconnection" style="border: 1px solid grey;">

2. Take note of the service account and grant it the `Vertex AI User` role.
2. Take note of the service account and grant it the `Vertex AI User` role in [IAM](https://console.cloud.google.com/iam-admin).
<img src= "../img/lab3/vertexairole.png" alt="vertexairole" style="border: 1px solid grey;">

3. Back in your [Dataform](https://console.cloud.google.com/bigquery/dataform) workspace, click <walkthrough-spotlight-pointer locator="semantic({button 'Start execution'})"> Start execution</walkthrough-spotlight-pointer> from the top menu, then <walkthrough-spotlight-pointer locator="semantic({menuitem 'Execute actions'})">Execute Actions</walkthrough-spotlight-pointer>
Expand Down
6 changes: 3 additions & 3 deletions docs/labs/4_ml.md
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ Here you can see that a model in the Vertex AI Model Registry is made up fro

The endpoint is created in a parallel branch in the pipeline you just ran. You can deploy models to an endpoint through the model registry.

1. Click <walkthrough-spotlight-pointer locator="css(a[id$=cfctest-section-nav-item-ai-platform-online-prediction])">Online Prediction</walkthrough-spotlight-pointer> in the navigation menu
1. Click <walkthrough-spotlight-pointer locator="css(a[id$=cfctest-section-nav-item-ai-platform-online-prediction])">Endpoints</walkthrough-spotlight-pointer> in the navigation menu
2. Click <walkthrough-spotlight-pointer locator="semantic({link 'bootkon-endpoint'})">bootkon-endpoint</walkthrough-spotlight-pointer>

You can see that the endpoint has one model deployed currently, and all the traffic is routed to it (traffic split is 100%). When scrolling down, you get live graphs as soon as predictions are coming in.
Expand All @@ -91,8 +91,8 @@ You can also train and deploy models on Vertex in the UI only. Let's have a more

Let's have a look at the Pipeline as well.

1. Click <walkthrough-spotlight-pointer locator="css(a[id$=cfctest-section-nav-item-ai-platform-ml-pipelines])">Pipelines</walkthrough-spotlight-pointer> in the navigation menu
2. Click <walkthrough-spotlight-pointer locator="semantic({link 'bootkon-pipeline-'})">bootkon-pipeline-...</walkthrough-spotlight-pointer>
1. Click <walkthrough-spotlight-pointer locator="css(a[aria-label$=Pipelines])">Pipelines</walkthrough-spotlight-pointer> in the navigation menu
2. Click <walkthrough-spotlight-pointer locator='semantic({link "bootkon-pipeline-*"})'>bootkon-pipeline-...</walkthrough-spotlight-pointer>

You can now see the individual steps in the pipeline. Please click through the individual steps of the pipeline and have a look at the *Pipeline run analysis* on the right hand side as you cycle pipeline steps.

Expand Down
9 changes: 4 additions & 5 deletions docs/labs/5_dataplex.md
Original file line number Diff line number Diff line change
Expand Up @@ -148,9 +148,8 @@ You can filter the data to be scanned for profiling by using row filters and col
Dataplex lets you specify a percentage of records from your data to sample for running a data profiling scan. Creating data profiling scans on a smaller sample of data can reduce the execution time and cost of querying the entire dataset.

Let's get started:

1. Go to the <walkthrough-spotlight-pointer locator="semantic({link 'Profile, 1 of 2'})">Profile</walkthrough-spotlight-pointer> section in Dataplex.
2. Click <walkthrough-spotlight-pointer locator="semantic({button 'Create Data Profile scan'})">+ CREATE DATA PROFILE SCAN</walkthrough-spotlight-pointer>
1. Go to the <walkthrough-spotlight-pointer locator="semantic({link 'Data profiling &amp; quality, 1 of 1'})">Data profiling & quality</walkthrough-spotlight-pointer> section in Dataplex.
2. Click <walkthrough-spotlight-pointer locator="semantic({button 'Create data profile scan'})">Create data profile scan</walkthrough-spotlight-pointer>
3. Set Display Name to `bootkon-profile-fraud-prediction` for example
4. Optionally add a description. For example, "data profile scans for fraud detection predictions"
5. Leave the “Browse within Dataplex Lakes” option turned off
Expand Down Expand Up @@ -223,9 +222,9 @@ Creating and using a data quality scan consists of the following steps:

**Lab Instructions**

1. Go to the [Data Quality](https://console.cloud.google.com/dataplex/govern/quality) section in the left hand menu of Dataplex.
1. Go to the <walkthrough-spotlight-pointer locator="semantic({link 'Data profiling &amp; quality, 1 of 1'})">Data profiling & quality</walkthrough-spotlight-pointer> section in the left hand menu of Dataplex.

2. Click on <walkthrough-spotlight-pointer locator="semantic({button 'Create Data Quality scan'})"> + CREATE DATA QUALITY SCAN</walkthrough-spotlight-pointer>
2. Click on <walkthrough-spotlight-pointer locator="semantic({button 'Create data quality scan'})">Create data quality scan</walkthrough-spotlight-pointer>
3. Display Name: `bootkon-dquality-fraud-prediction` for example
4. Optionally add a description. For example, "data quality scans for fraud detection predictions"
5. Leave the "Browse within Dataplex Lakes" option turned off
Expand Down
3 changes: 2 additions & 1 deletion src/ml/pipeline.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,14 +77,15 @@ def pipeline(
display_name="bootkon-endpoint",
)

ModelDeployOp(
model_deploy_op = ModelDeployOp(
endpoint=endpoint_create_op.outputs["endpoint"],
model=model_upload_op.outputs["model"],
deployed_model_display_name="bootkon-endpoint",
dedicated_resources_machine_type="n1-standard-4",
dedicated_resources_min_replica_count=1,
dedicated_resources_max_replica_count=1
)
model_deploy_op.after(endpoint_create_op)


compiler.Compiler().compile(
Expand Down