Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions NEXT_CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@

### New Features and Improvements

* Add support for unified hosts behind an experimental flag.
* Increase async cache stale period from 3 to 5 minutes to cover the maximum monthly downtime of a 99.99% uptime SLA.

### Bug Fixes
Expand Down

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
package com.databricks.sdk.core;

import com.databricks.sdk.support.InternalApi;

/**
 * Represents the type of Databricks client being used for API operations.
 *
 * <p>This is determined by the combination of host type and workspace ID presence (see
 * {@code DatabricksConfig#getClientType()}):
 *
 * <ul>
 *   <li>WORKSPACE: Can call workspace-level APIs
 *   <li>ACCOUNT: Can call account-level APIs
 *   <li>WORKSPACE_ON_UNIFIED: Workspace operations on a unified host (requires workspace ID)
 *   <li>ACCOUNT_ON_UNIFIED: Account operations on a unified host
 * </ul>
 */
@InternalApi
public enum ClientType {
  /** Traditional workspace client; calls workspace-level APIs on a workspace host. */
  WORKSPACE,

  /** Traditional account client; calls account-level APIs on an accounts host. */
  ACCOUNT,

  /** Workspace-scoped client on a unified host; requests carry the X-Databricks-Org-Id header. */
  WORKSPACE_ON_UNIFIED,

  /** Account-scoped client on a unified host (no workspace ID configured). */
  ACCOUNT_ON_UNIFIED
}
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,8 @@ private CliTokenSource getDatabricksCliTokenSource(DatabricksConfig config) {
}
List<String> cmd =
new ArrayList<>(Arrays.asList(cliPath, "auth", "token", "--host", config.getHost()));
if (config.isAccountClient()) {
if (config.getClientType() == ClientType.ACCOUNT
|| config.getClientType() == ClientType.ACCOUNT_ON_UNIFIED) {
cmd.add("--account-id");
cmd.add(config.getAccountId());
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,23 @@
import java.util.*;
import org.apache.http.HttpMessage;

/**
* Configuration for Databricks SDK clients.
*
* <p>This class holds all configuration needed to authenticate and connect to Databricks services,
* including support for:
*
* <ul>
* <li>Traditional workspace and account hosts
* <li>Unified hosts (SPOG) that support both workspace and account operations
* <li>Multiple authentication methods (PAT, OAuth, Azure, etc.)
* </ul>
*
* <p><b>Unified Host Support:</b> When using a unified host, set {@code experimentalIsUnifiedHost}
* to {@code true} and optionally provide a {@code workspaceId} for workspace-scoped operations. Use
* {@link #getHostType()} and {@link #getClientType()} instead of the deprecated {@link
* #isAccountClient()} method.
*/
public class DatabricksConfig {
private CredentialsProvider credentialsProvider = new DefaultCredentialsProvider();

Expand All @@ -27,6 +44,27 @@ public class DatabricksConfig {
@ConfigAttribute(env = "DATABRICKS_ACCOUNT_ID")
private String accountId;

/**
* Workspace ID for unified host operations. When using a unified host that supports both
* workspace and account-level operations, this field specifies which workspace context to operate
* under for workspace-level API calls.
*
* <p><b>Note:</b> This API is experimental and may change or be removed in future releases
* without notice.
*/
@ConfigAttribute(env = "DATABRICKS_WORKSPACE_ID")
private String workspaceId;

/**
* Flag to explicitly mark a host as a unified host. When true, the host is treated as supporting
* both workspace and account-level operations through a single endpoint.
*
* <p><b>Note:</b> This API is experimental and may change or be removed in future releases
* without notice.
*/
@ConfigAttribute(env = "DATABRICKS_EXPERIMENTAL_IS_UNIFIED_HOST")
private Boolean experimentalIsUnifiedHost;

@ConfigAttribute(env = "DATABRICKS_TOKEN", auth = "pat", sensitive = true)
private String token;

Expand Down Expand Up @@ -233,8 +271,16 @@ public synchronized Map<String, String> authenticate() throws DatabricksExceptio
if (headerFactory == null) {
// Calling authenticate without resolve
ConfigLoader.fixHostIfNeeded(this);
headerFactory = credentialsProvider.configure(this);
HeaderFactory rawHeaderFactory = credentialsProvider.configure(this);
setAuthType(credentialsProvider.authType());

// For unified hosts with workspace operations, wrap the header factory
// to inject the X-Databricks-Org-Id header
if (getClientType() == ClientType.WORKSPACE_ON_UNIFIED) {
headerFactory = new UnifiedHostHeaderFactory(rawHeaderFactory, workspaceId);
} else {
headerFactory = rawHeaderFactory;
}
}
return headerFactory.headers();
} catch (DatabricksException e) {
Expand Down Expand Up @@ -298,6 +344,24 @@ public DatabricksConfig setAccountId(String accountId) {
return this;
}

/**
 * Returns the workspace ID used to scope workspace-level operations on a unified host, or
 * {@code null} if not configured. Part of the experimental unified-host API.
 */
public String getWorkspaceId() {
  return workspaceId;
}

/**
 * Sets the workspace ID used to scope workspace-level operations on a unified host. Part of the
 * experimental unified-host API.
 *
 * @param workspaceId the workspace ID to operate under
 * @return this config, for method chaining
 */
public DatabricksConfig setWorkspaceId(String workspaceId) {
  this.workspaceId = workspaceId;
  return this;
}

/**
 * Returns the experimental unified-host flag, or {@code null} if not set. When {@code true}, the
 * host is treated as supporting both workspace and account-level operations.
 */
public Boolean getExperimentalIsUnifiedHost() {
  return experimentalIsUnifiedHost;
}

/**
 * Sets the experimental flag that marks the configured host as a unified host. Part of the
 * experimental unified-host API and may change or be removed without notice.
 *
 * @param experimentalIsUnifiedHost {@code true} to treat the host as unified
 * @return this config, for method chaining
 */
public DatabricksConfig setExperimentalIsUnifiedHost(Boolean experimentalIsUnifiedHost) {
  this.experimentalIsUnifiedHost = experimentalIsUnifiedHost;
  return this;
}

/** Returns the configured path to the Databricks CLI executable, or {@code null} if unset. */
public String getDatabricksCliPath() {
  return this.databricksCliPath;
}
Expand Down Expand Up @@ -679,12 +743,63 @@ public boolean isAws() {
}

/**
 * Reports whether this configuration targets a traditional account host, detected by the host URL
 * prefix ({@code https://accounts.} or {@code https://accounts-dod.}).
 *
 * <p>Returns {@code false} when no host is configured.
 *
 * @return {@code true} if the host is a traditional account host
 * @throws DatabricksException if the host is a unified host, where the workspace/account
 *     distinction depends on whether a workspace ID is set
 * @deprecated Use {@link #getHostType()} and {@link #getClientType()} instead; they also handle
 *     unified hosts.
 */
@Deprecated
public boolean isAccountClient() {
  if (getHostType() == HostType.UNIFIED) {
    throw new DatabricksException(
        "Cannot determine account client status for unified hosts. "
            + "Use getHostType() or getClientType() instead. "
            + "For unified hosts, client type depends on whether workspaceId is set.");
  }
  if (host == null) {
    return false;
  }
  return host.startsWith("https://accounts.") || host.startsWith("https://accounts-dod.");
}

/**
 * Determines the type of host based on configuration settings and host URL.
 *
 * <p>Returns UNIFIED if experimentalIsUnifiedHost is true, ACCOUNTS if the host starts with
 * "accounts." or "accounts-dod.", and WORKSPACE otherwise (including when no host is set).
 *
 * @return The detected host type
 */
public HostType getHostType() {
  // The explicit experimental opt-in takes precedence over URL-based detection.
  if (experimentalIsUnifiedHost != null && experimentalIsUnifiedHost) {
    return HostType.UNIFIED;
  }
  // With no host configured we default to WORKSPACE rather than failing.
  if (host == null) {
    return HostType.WORKSPACE;
  }
  if (host.startsWith("https://accounts.") || host.startsWith("https://accounts-dod.")) {
    return HostType.ACCOUNTS;
  }
  return HostType.WORKSPACE;
}

/**
 * Resolves the client type from the host type and the configured workspace ID.
 *
 * <p>On a unified host, a non-empty workspace ID selects WORKSPACE_ON_UNIFIED, otherwise
 * ACCOUNT_ON_UNIFIED. Traditional hosts map directly: ACCOUNTS to ACCOUNT, anything else to
 * WORKSPACE.
 *
 * @return The determined client type
 */
public ClientType getClientType() {
  HostType type = getHostType();
  if (type == HostType.UNIFIED) {
    // The workspace ID decides whether unified-host calls are workspace- or account-scoped.
    boolean hasWorkspaceId = workspaceId != null && !workspaceId.isEmpty();
    return hasWorkspaceId ? ClientType.WORKSPACE_ON_UNIFIED : ClientType.ACCOUNT_ON_UNIFIED;
  }
  return type == HostType.ACCOUNTS ? ClientType.ACCOUNT : ClientType.WORKSPACE;
}

public OpenIDConnectEndpoints getOidcEndpoints() throws IOException {
if (discoveryUrl == null) {
return fetchDefaultOidcEndpoints();
Expand All @@ -705,10 +820,36 @@ private OpenIDConnectEndpoints fetchOidcEndpointsFromDiscovery() {
return null;
}

/**
 * Builds the OIDC endpoints for a unified host from the account ID.
 *
 * <p>Unified hosts expose their OIDC endpoints at
 * {host}/oidc/accounts/{accountId}/v1/{token|authorize}.
 *
 * @param accountId The account ID used to construct the endpoint URLs
 * @return OpenIDConnectEndpoints configured for the unified host
 * @throws DatabricksException if accountId is null or empty
 * @throws IOException if endpoint construction fails
 */
private OpenIDConnectEndpoints getUnifiedOidcEndpoints(String accountId) throws IOException {
  boolean missingAccountId = accountId == null || accountId.isEmpty();
  if (missingAccountId) {
    throw new DatabricksException(
        "account_id is required for unified host OIDC endpoint discovery");
  }
  String base = getHost() + "/oidc/accounts/" + accountId;
  return new OpenIDConnectEndpoints(base + "/v1/token", base + "/v1/authorize");
}

private OpenIDConnectEndpoints fetchDefaultOidcEndpoints() throws IOException {
if (getHost() == null) {
return null;
}

// For unified hosts, use account-based OIDC endpoints
if (getHostType() == HostType.UNIFIED) {
return getUnifiedOidcEndpoints(getAccountId());
}

if (isAzure() && getAzureClientId() != null) {
Request request = new Request("GET", getHost() + "/oidc/oauth2/v2.0/authorize");
request.setRedirectionBehavior(false);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,11 @@ private void addOIDCCredentialsProviders(DatabricksConfig config) {
namedIdTokenSource.idTokenSource,
config.getHttpClient())
.audience(config.getTokenAudience())
.accountId(config.isAccountClient() ? config.getAccountId() : null)
.accountId(
(config.getClientType() == ClientType.ACCOUNT
|| config.getClientType() == ClientType.ACCOUNT_ON_UNIFIED)
? config.getAccountId()
: null)
.scopes(config.getScopes())
.build();

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,8 @@ public HeaderFactory configure(DatabricksConfig config) {
Map<String, String> headers = new HashMap<>();
headers.put("Authorization", String.format("Bearer %s", idToken.getTokenValue()));

if (config.isAccountClient()) {
if (config.getClientType() == ClientType.ACCOUNT
|| config.getClientType() == ClientType.ACCOUNT_ON_UNIFIED) {
AccessToken token;
try {
token = finalServiceAccountCredentials.createScoped(GCP_SCOPES).refreshAccessToken();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,8 @@ public HeaderFactory configure(DatabricksConfig config) {
throw new DatabricksException(message, e);
}

if (config.isAccountClient()) {
if (config.getClientType() == ClientType.ACCOUNT
|| config.getClientType() == ClientType.ACCOUNT_ON_UNIFIED) {
try {
headers.put(
SA_ACCESS_TOKEN_HEADER, gcpScopedCredentials.refreshAccessToken().getTokenValue());
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
package com.databricks.sdk.core;

import com.databricks.sdk.support.InternalApi;

/**
 * Represents the type of Databricks host being used.
 *
 * <p>This determines which APIs are available and how authentication should be handled (see
 * {@code DatabricksConfig#getHostType()} for the detection rules):
 *
 * <ul>
 *   <li>WORKSPACE: Traditional workspace host (e.g., adb-*.azuredatabricks.net)
 *   <li>ACCOUNTS: Traditional account host (e.g., accounts.cloud.databricks.com)
 *   <li>UNIFIED: Unified host supporting both workspace and account operations
 * </ul>
 */
@InternalApi
public enum HostType {
  /** Traditional workspace host - supports workspace-level APIs only */
  WORKSPACE,

  /** Traditional accounts host - supports account-level APIs only */
  ACCOUNTS,

  /** Unified host - supports both workspace and account APIs based on context */
  UNIFIED
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
package com.databricks.sdk.core;

import java.util.HashMap;
import java.util.Map;

/**
 * HeaderFactory decorator that adds the X-Databricks-Org-Id header required for workspace-level
 * API calls against a unified host.
 *
 * <p>The header identifies the workspace context in which the operation should execute.
 */
class UnifiedHostHeaderFactory implements HeaderFactory {
  /** Name of the header that carries the workspace (org) ID on unified hosts. */
  private static final String ORG_ID_HEADER = "X-Databricks-Org-Id";

  private final HeaderFactory delegate;
  private final String workspaceId;

  /**
   * Creates a new unified host header factory.
   *
   * @param delegate The underlying header factory (e.g., OAuth, PAT)
   * @param workspaceId The workspace ID to inject in the X-Databricks-Org-Id header
   */
  public UnifiedHostHeaderFactory(HeaderFactory delegate, String workspaceId) {
    if (delegate == null) {
      throw new IllegalArgumentException("delegate cannot be null");
    }
    if (workspaceId == null || workspaceId.isEmpty()) {
      throw new IllegalArgumentException("workspaceId cannot be null or empty");
    }
    this.delegate = delegate;
    this.workspaceId = workspaceId;
  }

  @Override
  public Map<String, String> headers() {
    // Copy the delegate's headers so we never mutate a map the delegate may reuse.
    Map<String, String> merged = new HashMap<>(delegate.headers());
    merged.put(ORG_ID_HEADER, workspaceId);
    return merged;
  }
}
Loading
Loading