spark

package
v0.1.0-beta.5 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Nov 13, 2024 License: MIT Imports: 14 Imported by: 0

Documentation

Index

Examples

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type AutoScaleProperties

// AutoScaleProperties - Autoscale properties.
type AutoScaleProperties struct {
	// REQUIRED; The status of the auto scale. False - Disabled, true - Enabled.
	Enabled *bool

	// REQUIRED; The maximum node count.
	MaxNodeCount *int32

	// REQUIRED; The minimum node count.
	MinNodeCount *int32
}

AutoScaleProperties - Autoscale properties.

func (AutoScaleProperties) MarshalJSON

func (a AutoScaleProperties) MarshalJSON() ([]byte, error)

MarshalJSON implements the json.Marshaller interface for type AutoScaleProperties.

func (*AutoScaleProperties) UnmarshalJSON

func (a *AutoScaleProperties) UnmarshalJSON(data []byte) error

UnmarshalJSON implements the json.Unmarshaller interface for type AutoScaleProperties.

type AutomaticLogProperties

// AutomaticLogProperties - Automatic Log Properties.
type AutomaticLogProperties struct {
	// REQUIRED; The status of the automatic log. False - Disabled, true - Enabled.
	Enabled *bool
}

AutomaticLogProperties - Automatic Log Properties.

func (AutomaticLogProperties) MarshalJSON

func (a AutomaticLogProperties) MarshalJSON() ([]byte, error)

MarshalJSON implements the json.Marshaller interface for type AutomaticLogProperties.

func (*AutomaticLogProperties) UnmarshalJSON

func (a *AutomaticLogProperties) UnmarshalJSON(data []byte) error

UnmarshalJSON implements the json.Unmarshaller interface for type AutomaticLogProperties.

type ClientFactory

// ClientFactory is a client factory used to create any client in this module.
// Don't use this type directly, use NewClientFactory instead.
type ClientFactory struct {
	// contains filtered or unexported fields
}

ClientFactory is a client factory used to create any client in this module. Don't use this type directly, use NewClientFactory instead.

func NewClientFactory

func NewClientFactory(credential azcore.TokenCredential, endpoint *string, options *azcore.ClientOptions) (*ClientFactory, error)

NewClientFactory creates a new instance of ClientFactory with the specified values. The parameter values will be propagated to any client created from this factory.

  • credential - used to authorize requests. Usually a credential from azidentity.
  • endpoint - pass nil to accept the default values.
  • options - pass nil to accept the default values.

func NewClientFactoryWithClient

func NewClientFactoryWithClient(client fabric.Client) *ClientFactory

NewClientFactoryWithClient creates a new instance of ClientFactory with sharable Client. The Client will be propagated to any client created from this factory.

  • client - Client created in the containing module: github.com/microsoft/fabric-sdk-go/fabric

func (*ClientFactory) NewCustomPoolsClient

func (c *ClientFactory) NewCustomPoolsClient() *CustomPoolsClient

NewCustomPoolsClient creates a new instance of CustomPoolsClient.

func (*ClientFactory) NewWorkspaceSettingsClient

func (c *ClientFactory) NewWorkspaceSettingsClient() *WorkspaceSettingsClient

NewWorkspaceSettingsClient creates a new instance of WorkspaceSettingsClient.

type CreateCustomPoolRequest

// CreateCustomPoolRequest - Create custom pool request payload.
type CreateCustomPoolRequest struct {
	// REQUIRED; Autoscale.
	AutoScale *AutoScaleProperties

	// REQUIRED; Dynamic executor allocation.
	DynamicExecutorAllocation *DynamicExecutorAllocationProperties

	// REQUIRED; Custom pool name.
	// The name must be between 1 and 64 characters long and must contain only letters, numbers, dashes, underscores and spaces.
	// Custom pool names must be unique within the workspace.
	// "Starter Pool" is a reserved custom pool name.
	Name *string

	// REQUIRED; Node family.
	NodeFamily *NodeFamily

	// REQUIRED; Node size.
	NodeSize *NodeSize
}

CreateCustomPoolRequest - Create custom pool request payload.

func (CreateCustomPoolRequest) MarshalJSON

func (c CreateCustomPoolRequest) MarshalJSON() ([]byte, error)

MarshalJSON implements the json.Marshaller interface for type CreateCustomPoolRequest.

func (*CreateCustomPoolRequest) UnmarshalJSON

func (c *CreateCustomPoolRequest) UnmarshalJSON(data []byte) error

UnmarshalJSON implements the json.Unmarshaller interface for type CreateCustomPoolRequest.

type CustomPool

// CustomPool - Custom pool.
type CustomPool struct {
	// Autoscale.
	AutoScale *AutoScaleProperties

	// Dynamic executor allocation.
	DynamicExecutorAllocation *DynamicExecutorAllocationProperties

	// Custom pool ID.
	ID *string

	// Custom pool name.
	Name *string

	// Node family.
	NodeFamily *NodeFamily

	// Node size.
	NodeSize *NodeSize

	// Custom pool type.
	Type *CustomPoolType
}

CustomPool - Custom pool.

func (CustomPool) MarshalJSON

func (c CustomPool) MarshalJSON() ([]byte, error)

MarshalJSON implements the json.Marshaller interface for type CustomPool.

func (*CustomPool) UnmarshalJSON

func (c *CustomPool) UnmarshalJSON(data []byte) error

UnmarshalJSON implements the json.Unmarshaller interface for type CustomPool.

type CustomPoolType

// CustomPoolType - Custom pool type. Additional CustomPoolType types may be added over time.
type CustomPoolType string

CustomPoolType - Custom pool type. Additional CustomPoolType types may be added over time.

// Possible values for CustomPoolType.
const (
	// CustomPoolTypeCapacity - Capacity level custom pool
	CustomPoolTypeCapacity CustomPoolType = "Capacity"
	// CustomPoolTypeWorkspace - Workspace level custom pool
	CustomPoolTypeWorkspace CustomPoolType = "Workspace"
)

func PossibleCustomPoolTypeValues

func PossibleCustomPoolTypeValues() []CustomPoolType

PossibleCustomPoolTypeValues returns the possible values for the CustomPoolType const type.

type CustomPools

// CustomPools - A list of custom pools, with optional continuation fields for paging.
type CustomPools struct {
	// REQUIRED; A list of custom pools.
	Value []CustomPool

	// The token for the next result set batch. If there are no more records, it's removed from the response.
	ContinuationToken *string

	// The URI of the next result set batch. If there are no more records, it's removed from the response.
	ContinuationURI *string
}

func (CustomPools) MarshalJSON

func (c CustomPools) MarshalJSON() ([]byte, error)

MarshalJSON implements the json.Marshaller interface for type CustomPools.

func (*CustomPools) UnmarshalJSON

func (c *CustomPools) UnmarshalJSON(data []byte) error

UnmarshalJSON implements the json.Unmarshaller interface for type CustomPools.

type CustomPoolsClient

// CustomPoolsClient contains the methods for the CustomPools group.
// Don't use this type directly, use a constructor function instead.
type CustomPoolsClient struct {
	// contains filtered or unexported fields
}

CustomPoolsClient contains the methods for the CustomPools group. Don't use this type directly, use a constructor function instead.

func (*CustomPoolsClient) CreateWorkspaceCustomPool

CreateWorkspaceCustomPool - PERMISSIONS The caller must have admin workspace role. REQUIRED DELEGATED SCOPES Workspace.ReadWrite.All MICROSOFT ENTRA SUPPORTED IDENTITIES This API supports the Microsoft identities [/rest/api/fabric/articles/identity-support] listed in this section. | Identity | Support | |-|-| | User | Yes | | Service principal [/entra/identity-platform/app-objects-and-service-principals#service-principal-object] | No | | Managed identities [/entra/identity/managed-identities-azure-resources/overview] | No | INTERFACE If the operation fails it returns an *core.ResponseError type.

Generated from API version v1

  • workspaceID - The workspace ID.
  • createCustomPoolRequest - Create custom pool request payload.
  • options - CustomPoolsClientCreateWorkspaceCustomPoolOptions contains the optional parameters for the CustomPoolsClient.CreateWorkspaceCustomPool method.
Example

Generated from example definition

// Example: create a workspace custom pool. Generated from example definition.
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"

	"github.com/microsoft/fabric-sdk-go/fabric/spark"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	clientFactory, err := spark.NewClientFactory(cred, nil, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// The response is discarded in this example; only the error is checked.
	_, err = clientFactory.NewCustomPoolsClient().CreateWorkspaceCustomPool(ctx, "f089354e-8366-4e18-aea3-4cb4a3a50b48", spark.CreateCustomPoolRequest{
		Name: to.Ptr("pool1"),
		AutoScale: &spark.AutoScaleProperties{
			Enabled:      to.Ptr(true),
			MaxNodeCount: to.Ptr[int32](2),
			MinNodeCount: to.Ptr[int32](1),
		},
		DynamicExecutorAllocation: &spark.DynamicExecutorAllocationProperties{
			Enabled:      to.Ptr(true),
			MaxExecutors: to.Ptr[int32](1),
			MinExecutors: to.Ptr[int32](1),
		},
		NodeFamily: to.Ptr(spark.NodeFamilyMemoryOptimized),
		NodeSize:   to.Ptr(spark.NodeSizeSmall),
	}, nil)
	if err != nil {
		log.Fatalf("failed to finish the request: %v", err)
	}
}
Output:

func (*CustomPoolsClient) DeleteWorkspaceCustomPool

DeleteWorkspaceCustomPool - PERMISSIONS The caller must have admin workspace role. REQUIRED DELEGATED SCOPES Workspace.ReadWrite.All MICROSOFT ENTRA SUPPORTED IDENTITIES This API supports the Microsoft identities [/rest/api/fabric/articles/identity-support] listed in this section. | Identity | Support | |-|-| | User | Yes | | Service principal [/entra/identity-platform/app-objects-and-service-principals#service-principal-object] | No | | Managed identities [/entra/identity/managed-identities-azure-resources/overview] | No | INTERFACE If the operation fails it returns an *core.ResponseError type.

Generated from API version v1

  • workspaceID - The workspace ID.
  • poolID - The custom pool ID.
  • options - CustomPoolsClientDeleteWorkspaceCustomPoolOptions contains the optional parameters for the CustomPoolsClient.DeleteWorkspaceCustomPool method.
Example

Generated from example definition

// Example: delete a workspace custom pool by workspace ID and pool ID. Generated from example definition.
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"

	"github.com/microsoft/fabric-sdk-go/fabric/spark"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	clientFactory, err := spark.NewClientFactory(cred, nil, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	// The response is discarded in this example; only the error is checked.
	_, err = clientFactory.NewCustomPoolsClient().DeleteWorkspaceCustomPool(ctx, "f089354e-8366-4e18-aea3-4cb4a3a50b48", "2367293d-b70b-4b33-97f2-161b8d04a8d7", nil)
	if err != nil {
		log.Fatalf("failed to finish the request: %v", err)
	}
}
Output:

func (*CustomPoolsClient) GetWorkspaceCustomPool

GetWorkspaceCustomPool - PERMISSIONS The caller must have viewer or higher workspace role. REQUIRED DELEGATED SCOPES Workspace.Read.All or Workspace.ReadWrite.All MICROSOFT ENTRA SUPPORTED IDENTITIES This API supports the Microsoft identities [/rest/api/fabric/articles/identity-support] listed in this section. | Identity | Support | |-|-| | User | Yes | | Service principal [/entra/identity-platform/app-objects-and-service-principals#service-principal-object] | No | | Managed identities [/entra/identity/managed-identities-azure-resources/overview] | No | INTERFACE If the operation fails it returns an *core.ResponseError type.

Generated from API version v1

  • workspaceID - The workspace ID.
  • poolID - The custom pool ID.
  • options - CustomPoolsClientGetWorkspaceCustomPoolOptions contains the optional parameters for the CustomPoolsClient.GetWorkspaceCustomPool method.
Example

Generated from example definition

// Example: get a workspace custom pool by workspace ID and pool ID. Generated from example definition.
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"

	"github.com/microsoft/fabric-sdk-go/fabric/spark"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	clientFactory, err := spark.NewClientFactory(cred, nil, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	res, err := clientFactory.NewCustomPoolsClient().GetWorkspaceCustomPool(ctx, "f089354e-8366-4e18-aea3-4cb4a3a50b48", "2367293d-b70b-4b33-97f2-161b8d04a8d7", nil)
	if err != nil {
		log.Fatalf("failed to finish the request: %v", err)
	}
	// You could use response here. We use blank identifier for just demo purposes.
	_ = res
	// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.
	// res.CustomPool = spark.CustomPool{
	// 	Name: to.Ptr("pool1"),
	// 	Type: to.Ptr(spark.CustomPoolTypeWorkspace),
	// 	AutoScale: &spark.AutoScaleProperties{
	// 		Enabled: to.Ptr(true),
	// 		MaxNodeCount: to.Ptr[int32](4),
	// 		MinNodeCount: to.Ptr[int32](1),
	// 	},
	// 	DynamicExecutorAllocation: &spark.DynamicExecutorAllocationProperties{
	// 		Enabled: to.Ptr(true),
	// 		MaxExecutors: to.Ptr[int32](2),
	// 		MinExecutors: to.Ptr[int32](1),
	// 	},
	// 	ID: to.Ptr("2367293d-b70b-4b33-97f2-161b8d04a8d7"),
	// 	NodeFamily: to.Ptr(spark.NodeFamilyMemoryOptimized),
	// 	NodeSize: to.Ptr(spark.NodeSizeSmall),
	// }
}
Output:

func (*CustomPoolsClient) ListWorkspaceCustomPools

func (client *CustomPoolsClient) ListWorkspaceCustomPools(ctx context.Context, workspaceID string, options *CustomPoolsClientListWorkspaceCustomPoolsOptions) ([]CustomPool, error)

ListWorkspaceCustomPools - returns an array of CustomPool from all pages. PERMISSIONS The caller must have viewer or higher workspace role.

REQUIRED DELEGATED SCOPES Workspace.Read.All or Workspace.ReadWrite.All

MICROSOFT ENTRA SUPPORTED IDENTITIES This API supports the Microsoft identities [/rest/api/fabric/articles/identity-support] listed in this section.

| Identity | Support | |-|-| | User | Yes | | Service principal [/entra/identity-platform/app-objects-and-service-principals#service-principal-object] | No | | Managed identities [/entra/identity/managed-identities-azure-resources/overview] | No |

INTERFACE Generated from API version v1

  • workspaceID - The workspace ID.
  • options - CustomPoolsClientListWorkspaceCustomPoolsOptions contains the optional parameters for the CustomPoolsClient.NewListWorkspaceCustomPoolsPager method.

func (*CustomPoolsClient) NewListWorkspaceCustomPoolsPager

NewListWorkspaceCustomPoolsPager - PERMISSIONS The caller must have viewer or higher workspace role. REQUIRED DELEGATED SCOPES Workspace.Read.All or Workspace.ReadWrite.All MICROSOFT ENTRA SUPPORTED IDENTITIES This API supports the Microsoft identities [/rest/api/fabric/articles/identity-support] listed in this section. | Identity | Support | |-|-| | User | Yes | | Service principal [/entra/identity-platform/app-objects-and-service-principals#service-principal-object] | No | | Managed identities [/entra/identity/managed-identities-azure-resources/overview] | No | INTERFACE

Generated from API version v1

  • workspaceID - The workspace ID.
  • options - CustomPoolsClientListWorkspaceCustomPoolsOptions contains the optional parameters for the CustomPoolsClient.NewListWorkspaceCustomPoolsPager method.
Example (ListCustomPoolsExample)

Generated from example definition

// Example: list all workspace custom pools by iterating the pager. Generated from example definition.
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"

	"github.com/microsoft/fabric-sdk-go/fabric/spark"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	clientFactory, err := spark.NewClientFactory(cred, nil, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	pager := clientFactory.NewCustomPoolsClient().NewListWorkspaceCustomPoolsPager("f089354e-8366-4e18-aea3-4cb4a3a50b48", &spark.CustomPoolsClientListWorkspaceCustomPoolsOptions{ContinuationToken: nil})
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			log.Fatalf("failed to advance page: %v", err)
		}
		for _, v := range page.Value {
			// You could use page here. We use blank identifier for just demo purposes.
			_ = v
		}
		// If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.
		// page.CustomPools = spark.CustomPools{
		// 	Value: []spark.CustomPool{
		// 		{
		// 			Name: to.Ptr("pool1"),
		// 			Type: to.Ptr(spark.CustomPoolTypeWorkspace),
		// 			AutoScale: &spark.AutoScaleProperties{
		// 				Enabled: to.Ptr(true),
		// 				MaxNodeCount: to.Ptr[int32](4),
		// 				MinNodeCount: to.Ptr[int32](1),
		// 			},
		// 			DynamicExecutorAllocation: &spark.DynamicExecutorAllocationProperties{
		// 				Enabled: to.Ptr(true),
		// 				MaxExecutors: to.Ptr[int32](2),
		// 				MinExecutors: to.Ptr[int32](1),
		// 			},
		// 			ID: to.Ptr("2367293d-b70b-4b33-97f2-161b8d04a8d7"),
		// 			NodeFamily: to.Ptr(spark.NodeFamilyMemoryOptimized),
		// 			NodeSize: to.Ptr(spark.NodeSizeSmall),
		// 	}},
		// }
	}
}
Output:

Example (ListCustomPoolsWithContinuationExample)

Generated from example definition

// Example: list workspace custom pools where the response carries continuation fields. Generated from example definition.
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"

	"github.com/microsoft/fabric-sdk-go/fabric/spark"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	clientFactory, err := spark.NewClientFactory(cred, nil, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	pager := clientFactory.NewCustomPoolsClient().NewListWorkspaceCustomPoolsPager("f089354e-8366-4e18-aea3-4cb4a3a50b48", &spark.CustomPoolsClientListWorkspaceCustomPoolsOptions{ContinuationToken: nil})
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			log.Fatalf("failed to advance page: %v", err)
		}
		for _, v := range page.Value {
			// You could use page here. We use blank identifier for just demo purposes.
			_ = v
		}
		// If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.
		// page.CustomPools = spark.CustomPools{
		// 	ContinuationToken: to.Ptr("LDEsMTAwMDAwLDA%3D"),
		// 	ContinuationURI: to.Ptr("https://api.fabric.microsoft.com/v1/workspaces/f089354e-8366-4e18-aea3-4cb4a3a50b48/spark/pools?continuationToken=LDEsMTAwMDAwLDA%3D"),
		// 	Value: []spark.CustomPool{
		// 		{
		// 			Name: to.Ptr("pool1"),
		// 			Type: to.Ptr(spark.CustomPoolTypeWorkspace),
		// 			AutoScale: &spark.AutoScaleProperties{
		// 				Enabled: to.Ptr(true),
		// 				MaxNodeCount: to.Ptr[int32](4),
		// 				MinNodeCount: to.Ptr[int32](1),
		// 			},
		// 			DynamicExecutorAllocation: &spark.DynamicExecutorAllocationProperties{
		// 				Enabled: to.Ptr(true),
		// 				MaxExecutors: to.Ptr[int32](2),
		// 				MinExecutors: to.Ptr[int32](1),
		// 			},
		// 			ID: to.Ptr("2367293d-b70b-4b33-97f2-161b8d04a8d7"),
		// 			NodeFamily: to.Ptr(spark.NodeFamilyMemoryOptimized),
		// 			NodeSize: to.Ptr(spark.NodeSizeSmall),
		// 	}},
		// }
	}
}
Output:

func (*CustomPoolsClient) UpdateWorkspaceCustomPool

func (client *CustomPoolsClient) UpdateWorkspaceCustomPool(ctx context.Context, workspaceID string, poolID string, updateCustomPoolRequest UpdateCustomPoolRequest, options *CustomPoolsClientUpdateWorkspaceCustomPoolOptions) (CustomPoolsClientUpdateWorkspaceCustomPoolResponse, error)

UpdateWorkspaceCustomPool - PERMISSIONS The caller must have admin workspace role. REQUIRED DELEGATED SCOPES Workspace.ReadWrite.All MICROSOFT ENTRA SUPPORTED IDENTITIES This API supports the Microsoft identities [/rest/api/fabric/articles/identity-support] listed in this section. | Identity | Support | |-|-| | User | Yes | | Service principal [/entra/identity-platform/app-objects-and-service-principals#service-principal-object] | No | | Managed identities [/entra/identity/managed-identities-azure-resources/overview] | No | INTERFACE If the operation fails it returns an *core.ResponseError type.

Generated from API version v1

  • workspaceID - The workspace ID.
  • poolID - The custom pool ID.
  • updateCustomPoolRequest - Update custom pool request payload.
  • options - CustomPoolsClientUpdateWorkspaceCustomPoolOptions contains the optional parameters for the CustomPoolsClient.UpdateWorkspaceCustomPool method.
Example

Generated from example definition

// Example: update a workspace custom pool. Generated from example definition.
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"

	"github.com/microsoft/fabric-sdk-go/fabric/spark"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	clientFactory, err := spark.NewClientFactory(cred, nil, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	res, err := clientFactory.NewCustomPoolsClient().UpdateWorkspaceCustomPool(ctx, "f089354e-8366-4e18-aea3-4cb4a3a50b48", "2367293d-b70b-4b33-97f2-161b8d04a8d7", spark.UpdateCustomPoolRequest{
		Name: to.Ptr("pool1"),
		AutoScale: &spark.AutoScaleProperties{
			Enabled:      to.Ptr(true),
			MaxNodeCount: to.Ptr[int32](2),
			MinNodeCount: to.Ptr[int32](1),
		},
		DynamicExecutorAllocation: &spark.DynamicExecutorAllocationProperties{
			Enabled:      to.Ptr(true),
			MaxExecutors: to.Ptr[int32](1),
			MinExecutors: to.Ptr[int32](1),
		},
		NodeFamily: to.Ptr(spark.NodeFamilyMemoryOptimized),
		NodeSize:   to.Ptr(spark.NodeSizeSmall),
	}, nil)
	if err != nil {
		log.Fatalf("failed to finish the request: %v", err)
	}
	// You could use response here. We use blank identifier for just demo purposes.
	_ = res
	// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.
	// res.CustomPool = spark.CustomPool{
	// 	Name: to.Ptr("pool1"),
	// 	Type: to.Ptr(spark.CustomPoolTypeWorkspace),
	// 	AutoScale: &spark.AutoScaleProperties{
	// 		Enabled: to.Ptr(true),
	// 		MaxNodeCount: to.Ptr[int32](2),
	// 		MinNodeCount: to.Ptr[int32](1),
	// 	},
	// 	DynamicExecutorAllocation: &spark.DynamicExecutorAllocationProperties{
	// 		Enabled: to.Ptr(true),
	// 		MaxExecutors: to.Ptr[int32](1),
	// 		MinExecutors: to.Ptr[int32](1),
	// 	},
	// 	ID: to.Ptr("2367293d-b70b-4b33-97f2-161b8d04a8d7"),
	// 	NodeFamily: to.Ptr(spark.NodeFamilyMemoryOptimized),
	// 	NodeSize: to.Ptr(spark.NodeSizeSmall),
	// }
}
Output:

type CustomPoolsClientCreateWorkspaceCustomPoolOptions

// CustomPoolsClientCreateWorkspaceCustomPoolOptions contains the optional parameters
// for the CustomPoolsClient.CreateWorkspaceCustomPool method.
type CustomPoolsClientCreateWorkspaceCustomPoolOptions struct {
}

CustomPoolsClientCreateWorkspaceCustomPoolOptions contains the optional parameters for the CustomPoolsClient.CreateWorkspaceCustomPool method.

type CustomPoolsClientCreateWorkspaceCustomPoolResponse

// CustomPoolsClientCreateWorkspaceCustomPoolResponse contains the response
// from method CustomPoolsClient.CreateWorkspaceCustomPool.
type CustomPoolsClientCreateWorkspaceCustomPoolResponse struct {
	// Custom pool.
	CustomPool

	// Location contains the information returned from the Location header response.
	Location *string
}

CustomPoolsClientCreateWorkspaceCustomPoolResponse contains the response from method CustomPoolsClient.CreateWorkspaceCustomPool.

type CustomPoolsClientDeleteWorkspaceCustomPoolOptions

// CustomPoolsClientDeleteWorkspaceCustomPoolOptions contains the optional parameters
// for the CustomPoolsClient.DeleteWorkspaceCustomPool method.
type CustomPoolsClientDeleteWorkspaceCustomPoolOptions struct {
}

CustomPoolsClientDeleteWorkspaceCustomPoolOptions contains the optional parameters for the CustomPoolsClient.DeleteWorkspaceCustomPool method.

type CustomPoolsClientDeleteWorkspaceCustomPoolResponse

// CustomPoolsClientDeleteWorkspaceCustomPoolResponse contains the response
// from method CustomPoolsClient.DeleteWorkspaceCustomPool.
type CustomPoolsClientDeleteWorkspaceCustomPoolResponse struct {
}

CustomPoolsClientDeleteWorkspaceCustomPoolResponse contains the response from method CustomPoolsClient.DeleteWorkspaceCustomPool.

type CustomPoolsClientGetWorkspaceCustomPoolOptions

// CustomPoolsClientGetWorkspaceCustomPoolOptions contains the optional parameters
// for the CustomPoolsClient.GetWorkspaceCustomPool method.
type CustomPoolsClientGetWorkspaceCustomPoolOptions struct {
}

CustomPoolsClientGetWorkspaceCustomPoolOptions contains the optional parameters for the CustomPoolsClient.GetWorkspaceCustomPool method.

type CustomPoolsClientGetWorkspaceCustomPoolResponse

// CustomPoolsClientGetWorkspaceCustomPoolResponse contains the response
// from method CustomPoolsClient.GetWorkspaceCustomPool.
type CustomPoolsClientGetWorkspaceCustomPoolResponse struct {
	// Custom pool.
	CustomPool
}

CustomPoolsClientGetWorkspaceCustomPoolResponse contains the response from method CustomPoolsClient.GetWorkspaceCustomPool.

type CustomPoolsClientListWorkspaceCustomPoolsOptions

// CustomPoolsClientListWorkspaceCustomPoolsOptions contains the optional parameters
// for the CustomPoolsClient.NewListWorkspaceCustomPoolsPager method.
type CustomPoolsClientListWorkspaceCustomPoolsOptions struct {
	// Continuation token. Used to get the next items in the list.
	ContinuationToken *string
}

CustomPoolsClientListWorkspaceCustomPoolsOptions contains the optional parameters for the CustomPoolsClient.NewListWorkspaceCustomPoolsPager method.

type CustomPoolsClientListWorkspaceCustomPoolsResponse

// CustomPoolsClientListWorkspaceCustomPoolsResponse contains the response
// from method CustomPoolsClient.NewListWorkspaceCustomPoolsPager.
type CustomPoolsClientListWorkspaceCustomPoolsResponse struct {
	// A list of custom pools, with optional continuation fields for paging.
	CustomPools
}

CustomPoolsClientListWorkspaceCustomPoolsResponse contains the response from method CustomPoolsClient.NewListWorkspaceCustomPoolsPager.

type CustomPoolsClientUpdateWorkspaceCustomPoolOptions

// CustomPoolsClientUpdateWorkspaceCustomPoolOptions contains the optional parameters
// for the CustomPoolsClient.UpdateWorkspaceCustomPool method.
type CustomPoolsClientUpdateWorkspaceCustomPoolOptions struct {
}

CustomPoolsClientUpdateWorkspaceCustomPoolOptions contains the optional parameters for the CustomPoolsClient.UpdateWorkspaceCustomPool method.

type CustomPoolsClientUpdateWorkspaceCustomPoolResponse

// CustomPoolsClientUpdateWorkspaceCustomPoolResponse contains the response
// from method CustomPoolsClient.UpdateWorkspaceCustomPool.
type CustomPoolsClientUpdateWorkspaceCustomPoolResponse struct {
	// Custom pool.
	CustomPool
}

CustomPoolsClientUpdateWorkspaceCustomPoolResponse contains the response from method CustomPoolsClient.UpdateWorkspaceCustomPool.

type DynamicExecutorAllocationProperties

// DynamicExecutorAllocationProperties - Dynamic executor allocation properties.
type DynamicExecutorAllocationProperties struct {
	// REQUIRED; The status of the dynamic executor allocation. False - Disabled, true - Enabled.
	Enabled *bool

	// REQUIRED; The maximum executors.
	MaxExecutors *int32

	// REQUIRED; The minimum executors.
	MinExecutors *int32
}

DynamicExecutorAllocationProperties - Dynamic executor allocation properties.

func (DynamicExecutorAllocationProperties) MarshalJSON

func (d DynamicExecutorAllocationProperties) MarshalJSON() ([]byte, error)

MarshalJSON implements the json.Marshaller interface for type DynamicExecutorAllocationProperties.

func (*DynamicExecutorAllocationProperties) UnmarshalJSON

func (d *DynamicExecutorAllocationProperties) UnmarshalJSON(data []byte) error

UnmarshalJSON implements the json.Unmarshaller interface for type DynamicExecutorAllocationProperties.

type EnvironmentProperties

// EnvironmentProperties - Environment settings.
type EnvironmentProperties struct {
	// The name of the default environment. An empty string indicates there is no workspace default environment.
	Name *string

	// Runtime [/fabric/data-engineering/runtime] version.
	RuntimeVersion *string
}

func (EnvironmentProperties) MarshalJSON

func (e EnvironmentProperties) MarshalJSON() ([]byte, error)

MarshalJSON implements the json.Marshaller interface for type EnvironmentProperties.

func (*EnvironmentProperties) UnmarshalJSON

func (e *EnvironmentProperties) UnmarshalJSON(data []byte) error

UnmarshalJSON implements the json.Unmarshaller interface for type EnvironmentProperties.

type HighConcurrencyProperties

// HighConcurrencyProperties - High Concurrency Properties.
type HighConcurrencyProperties struct {
	// REQUIRED; The status of the high concurrency for notebook interactive run. False - Disabled, true - Enabled.
	NotebookInteractiveRunEnabled *bool
}

HighConcurrencyProperties - High Concurrency Properties.

func (HighConcurrencyProperties) MarshalJSON

func (h HighConcurrencyProperties) MarshalJSON() ([]byte, error)

MarshalJSON implements the json.Marshaller interface for type HighConcurrencyProperties.

func (*HighConcurrencyProperties) UnmarshalJSON

func (h *HighConcurrencyProperties) UnmarshalJSON(data []byte) error

UnmarshalJSON implements the json.Unmarshaller interface for type HighConcurrencyProperties.

type InstancePool

// InstancePool - Instance pool.
type InstancePool struct {
	// REQUIRED; Instance pool name.
	Name *string

	// REQUIRED; Instance pool type.
	Type *CustomPoolType

	// Instance pool ID.
	ID *string
}

func (InstancePool) MarshalJSON

func (i InstancePool) MarshalJSON() ([]byte, error)

MarshalJSON implements the json.Marshaller interface for type InstancePool.

func (*InstancePool) UnmarshalJSON

func (i *InstancePool) UnmarshalJSON(data []byte) error

UnmarshalJSON implements the json.Unmarshaller interface for type InstancePool.

type NodeFamily

// NodeFamily - Node family. Additional NodeFamily types may be added over time.
type NodeFamily string

NodeFamily - Node family. Additional NodeFamily types may be added over time.

// Possible values for NodeFamily.
const (
	// NodeFamilyMemoryOptimized - Memory optimized
	NodeFamilyMemoryOptimized NodeFamily = "MemoryOptimized"
)

func PossibleNodeFamilyValues

func PossibleNodeFamilyValues() []NodeFamily

PossibleNodeFamilyValues returns the possible values for the NodeFamily const type.

type NodeSize

// NodeSize - Node size [/fabric/data-engineering/spark-compute#node-sizes].
// Additional NodeSize types may be added over time.
type NodeSize string

NodeSize - Node size [/fabric/data-engineering/spark-compute#node-sizes]. Additional NodeSize types may be added over time.

// Possible values for NodeSize.
const (
	// NodeSizeLarge - Large node size
	NodeSizeLarge NodeSize = "Large"
	// NodeSizeMedium - Medium node size
	NodeSizeMedium NodeSize = "Medium"
	// NodeSizeSmall - Small node size
	NodeSizeSmall NodeSize = "Small"
	// NodeSizeXLarge - XLarge node size
	NodeSizeXLarge NodeSize = "XLarge"
	// NodeSizeXXLarge - XXLarge node size
	NodeSizeXXLarge NodeSize = "XXLarge"
)

func PossibleNodeSizeValues

func PossibleNodeSizeValues() []NodeSize

PossibleNodeSizeValues returns the possible values for the NodeSize const type.

type PoolProperties

// PoolProperties - Pool settings.
type PoolProperties struct {
	// Customize compute configurations for items. False - Disabled, true - Enabled.
	CustomizeComputeEnabled *bool

	// Default pool for workspace. It should be a valid custom pool name. "Starter Pool" means use starter pool.
	DefaultPool *InstancePool

	// Customize starter pool. For more information about configuring starter pool, see configuring starter pool [/fabric/data-engineering/configure-starter-pools].
	StarterPool *StarterPoolProperties
}

func (PoolProperties) MarshalJSON

func (p PoolProperties) MarshalJSON() ([]byte, error)

MarshalJSON implements the json.Marshaller interface for type PoolProperties.

func (*PoolProperties) UnmarshalJSON

func (p *PoolProperties) UnmarshalJSON(data []byte) error

UnmarshalJSON implements the json.Unmarshaller interface for type PoolProperties.

type StarterPoolProperties

// StarterPoolProperties - Custom starter pool.
type StarterPoolProperties struct {
	// The maximum executors count.
	MaxExecutors *int32

	// The maximum node count.
	MaxNodeCount *int32
}

StarterPoolProperties - Custom starter pool.

func (StarterPoolProperties) MarshalJSON

func (s StarterPoolProperties) MarshalJSON() ([]byte, error)

MarshalJSON implements the json.Marshaller interface for type StarterPoolProperties.

func (*StarterPoolProperties) UnmarshalJSON

func (s *StarterPoolProperties) UnmarshalJSON(data []byte) error

UnmarshalJSON implements the json.Unmarshaller interface for type StarterPoolProperties.

type UpdateCustomPoolRequest

type UpdateCustomPoolRequest struct {
	// Autoscale.
	AutoScale *AutoScaleProperties

	// Dynamic executor allocation.
	DynamicExecutorAllocation *DynamicExecutorAllocationProperties

	// Custom pool name.
	// The name must be between 1 and 64 characters long and must contain only letters, numbers, dashes, underscores and spaces.
	// Custom pool names must be unique within the workspace.
	// "Starter Pool" is a reserved custom pool name.
	Name *string

	// Node family.
	NodeFamily *NodeFamily

	// Node size.
	NodeSize *NodeSize
}

UpdateCustomPoolRequest - Update custom pool request payload.

func (UpdateCustomPoolRequest) MarshalJSON

func (u UpdateCustomPoolRequest) MarshalJSON() ([]byte, error)

MarshalJSON implements the json.Marshaller interface for type UpdateCustomPoolRequest.

func (*UpdateCustomPoolRequest) UnmarshalJSON

func (u *UpdateCustomPoolRequest) UnmarshalJSON(data []byte) error

UnmarshalJSON implements the json.Unmarshaller interface for type UpdateCustomPoolRequest.

type UpdateWorkspaceSparkSettingsRequest

// UpdateWorkspaceSparkSettingsRequest - Update workspace Spark settings request payload.
type UpdateWorkspaceSparkSettingsRequest struct {
	// Automatic log settings.
	AutomaticLog *AutomaticLogProperties

	// Environment settings.
	Environment *EnvironmentProperties

	// High concurrency settings.
	HighConcurrency *HighConcurrencyProperties

	// Pool settings.
	Pool *PoolProperties
}

UpdateWorkspaceSparkSettingsRequest - Update workspace Spark settings request payload.

func (UpdateWorkspaceSparkSettingsRequest) MarshalJSON

func (u UpdateWorkspaceSparkSettingsRequest) MarshalJSON() ([]byte, error)

MarshalJSON implements the json.Marshaler interface for type UpdateWorkspaceSparkSettingsRequest.

func (*UpdateWorkspaceSparkSettingsRequest) UnmarshalJSON

func (u *UpdateWorkspaceSparkSettingsRequest) UnmarshalJSON(data []byte) error

UnmarshalJSON implements the json.Unmarshaler interface for type UpdateWorkspaceSparkSettingsRequest.

type WorkspaceSettingsClient

// WorkspaceSettingsClient contains the methods for the WorkspaceSettings group.
// Don't use this type directly; obtain an instance from a constructor function
// such as ClientFactory.NewWorkspaceSettingsClient.
type WorkspaceSettingsClient struct {
	// contains filtered or unexported fields
}

WorkspaceSettingsClient contains the methods for the WorkspaceSettings group. Don't use this type directly, use a constructor function instead.

func (*WorkspaceSettingsClient) GetSparkSettings

GetSparkSettings - PERMISSIONS The caller must have viewer or higher workspace role. REQUIRED DELEGATED SCOPES Workspace.Read.All or Workspace.ReadWrite.All MICROSOFT ENTRA SUPPORTED IDENTITIES This API supports the Microsoft identities [/rest/api/fabric/articles/identity-support] listed in this section. | Identity | Support | |-|-| | User | Yes | | Service principal [/entra/identity-platform/app-objects-and-service-principals#service-principal-object] | No | | Managed identities [/entra/identity/managed-identities-azure-resources/overview] | No | INTERFACE If the operation fails it returns an *core.ResponseError type.

Generated from API version v1

  • workspaceID - The workspace ID.
  • options - WorkspaceSettingsClientGetSparkSettingsOptions contains the optional parameters for the WorkspaceSettingsClient.GetSparkSettings method.
Example

Generated from example definition

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"

	"github.com/microsoft/fabric-sdk-go/fabric/spark"
)

func main() {
	// Authenticate using the default Azure credential chain
	// (environment, managed identity, CLI, etc.).
	credential, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}

	factory, err := spark.NewClientFactory(credential, nil, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}

	// Fetch the Spark settings of the target workspace. A viewer or higher
	// workspace role is required for this call.
	const workspaceID = "f089354e-8366-4e18-aea3-4cb4a3a50b48"
	settings, err := factory.NewWorkspaceSettingsClient().GetSparkSettings(context.Background(), workspaceID, nil)
	if err != nil {
		log.Fatalf("failed to finish the request: %v", err)
	}

	// The result is discarded here for brevity; a real caller would inspect
	// settings.WorkspaceSparkSettings (automatic log, environment,
	// high-concurrency, and pool configuration).
	_ = settings
}
Output:

func (*WorkspaceSettingsClient) UpdateSparkSettings

UpdateSparkSettings - PERMISSIONS The caller must have admin workspace role. REQUIRED DELEGATED SCOPES Workspace.ReadWrite.All MICROSOFT ENTRA SUPPORTED IDENTITIES This API supports the Microsoft identities [/rest/api/fabric/articles/identity-support] listed in this section. | Identity | Support | |-|-| | User | Yes | | Service principal [/entra/identity-platform/app-objects-and-service-principals#service-principal-object] | No | | Managed identities [/entra/identity/managed-identities-azure-resources/overview] | No | INTERFACE If the operation fails it returns an *core.ResponseError type.

Generated from API version v1

  • workspaceID - The workspace ID.
  • updateWorkspaceSettingsRequest - Update workspace Spark settings request payload.
  • options - WorkspaceSettingsClientUpdateSparkSettingsOptions contains the optional parameters for the WorkspaceSettingsClient.UpdateSparkSettings method.
Example

Generated from example definition

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"

	"github.com/microsoft/fabric-sdk-go/fabric/spark"
)

func main() {
	// Authenticate using the default Azure credential chain.
	credential, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}

	factory, err := spark.NewClientFactory(credential, nil, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}

	// Build the settings update: disable automatic logging, high-concurrency
	// notebook runs and compute customization, pin the default environment,
	// and size the starter pool.
	request := spark.UpdateWorkspaceSparkSettingsRequest{
		AutomaticLog: &spark.AutomaticLogProperties{
			Enabled: to.Ptr(false),
		},
		Environment: &spark.EnvironmentProperties{
			Name:           to.Ptr("environment1"),
			RuntimeVersion: to.Ptr("1.2"),
		},
		HighConcurrency: &spark.HighConcurrencyProperties{
			NotebookInteractiveRunEnabled: to.Ptr(false),
		},
		Pool: &spark.PoolProperties{
			CustomizeComputeEnabled: to.Ptr(false),
			DefaultPool: &spark.InstancePool{
				Name: to.Ptr("Starter Pool"),
				Type: to.Ptr(spark.CustomPoolTypeWorkspace),
			},
			StarterPool: &spark.StarterPoolProperties{
				MaxExecutors: to.Ptr[int32](1),
				MaxNodeCount: to.Ptr[int32](3),
			},
		},
	}

	// Apply the update. The caller must hold the admin workspace role.
	const workspaceID = "f089354e-8366-4e18-aea3-4cb4a3a50b48"
	response, err := factory.NewWorkspaceSettingsClient().UpdateSparkSettings(context.Background(), workspaceID, request, nil)
	if err != nil {
		log.Fatalf("failed to finish the request: %v", err)
	}

	// On success the service echoes back the updated settings in
	// response.WorkspaceSparkSettings; discarded here for brevity.
	_ = response
}
Output:

type WorkspaceSettingsClientGetSparkSettingsOptions

// WorkspaceSettingsClientGetSparkSettingsOptions contains the optional
// parameters for the WorkspaceSettingsClient.GetSparkSettings method.
// It is currently empty and exists as a placeholder for future options.
type WorkspaceSettingsClientGetSparkSettingsOptions struct {
}

WorkspaceSettingsClientGetSparkSettingsOptions contains the optional parameters for the WorkspaceSettingsClient.GetSparkSettings method.

type WorkspaceSettingsClientGetSparkSettingsResponse

// WorkspaceSettingsClientGetSparkSettingsResponse contains the response from
// the WorkspaceSettingsClient.GetSparkSettings method.
type WorkspaceSettingsClientGetSparkSettingsResponse struct {
	// Workspace Spark settings, embedded so its fields are promoted onto the response.
	WorkspaceSparkSettings
}

WorkspaceSettingsClientGetSparkSettingsResponse contains the response from method WorkspaceSettingsClient.GetSparkSettings.

type WorkspaceSettingsClientUpdateSparkSettingsOptions

// WorkspaceSettingsClientUpdateSparkSettingsOptions contains the optional
// parameters for the WorkspaceSettingsClient.UpdateSparkSettings method.
// It is currently empty and exists as a placeholder for future options.
type WorkspaceSettingsClientUpdateSparkSettingsOptions struct {
}

WorkspaceSettingsClientUpdateSparkSettingsOptions contains the optional parameters for the WorkspaceSettingsClient.UpdateSparkSettings method.

type WorkspaceSettingsClientUpdateSparkSettingsResponse

// WorkspaceSettingsClientUpdateSparkSettingsResponse contains the response from
// the WorkspaceSettingsClient.UpdateSparkSettings method.
type WorkspaceSettingsClientUpdateSparkSettingsResponse struct {
	// Workspace Spark settings, embedded so its fields are promoted onto the response.
	WorkspaceSparkSettings
}

WorkspaceSettingsClientUpdateSparkSettingsResponse contains the response from method WorkspaceSettingsClient.UpdateSparkSettings.

type WorkspaceSparkSettings

// WorkspaceSparkSettings holds the Spark configuration of a workspace as
// returned by the service. Fields are pointers; a nil field means the
// setting was not present in the response.
type WorkspaceSparkSettings struct {
	// Automatic log settings.
	AutomaticLog *AutomaticLogProperties

	// Environment settings (default environment name and runtime version).
	Environment *EnvironmentProperties

	// High concurrency settings.
	HighConcurrency *HighConcurrencyProperties

	// Pool settings (default pool, starter pool, compute customization).
	Pool *PoolProperties
}

WorkspaceSparkSettings - Workspace Spark settings.

func (WorkspaceSparkSettings) MarshalJSON

func (w WorkspaceSparkSettings) MarshalJSON() ([]byte, error)

MarshalJSON implements the json.Marshaler interface for type WorkspaceSparkSettings.

func (*WorkspaceSparkSettings) UnmarshalJSON

func (w *WorkspaceSparkSettings) UnmarshalJSON(data []byte) error

UnmarshalJSON implements the json.Unmarshaler interface for type WorkspaceSparkSettings.

Directories

Path Synopsis

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL