flexibleengine 1.46.0 published on Monday, Apr 14, 2025 by flexibleenginecloud

flexibleengine.MrsClusterV1


Manages an MRS cluster resource within FlexibleEngine.

!> Warning: This resource has been deprecated. Please use flexibleengine.MrsClusterV2 instead.

Example Usage

Creating an MRS Cluster

import * as pulumi from "@pulumi/pulumi";
import * as flexibleengine from "@pulumi/flexibleengine";

const exampleVpc = new flexibleengine.VpcV1("exampleVpc", {cidr: "192.168.0.0/16"});
const exampleSubnet = new flexibleengine.VpcSubnetV1("exampleSubnet", {
    cidr: "192.168.0.0/24",
    gatewayIp: "192.168.0.1",
    vpcId: exampleVpc.vpcV1Id,
});
const cluster1 = new flexibleengine.MrsClusterV1("cluster1", {
    region: "eu-west-0",
    availableZoneId: "eu-west-0a",
    clusterName: "mrs-cluster-test",
    clusterType: 0,
    clusterVersion: "MRS 2.0.1",
    masterNodeNum: 2,
    coreNodeNum: 3,
    masterNodeSize: "s3.2xlarge.4.linux.mrs",
    coreNodeSize: "s3.xlarge.4.linux.mrs",
    volumeType: "SATA",
    volumeSize: 100,
    vpcId: exampleVpc.vpcV1Id,
    subnetId: exampleSubnet.vpcSubnetV1Id,
    safeMode: 0,
    clusterAdminSecret: "{{password_of_mrs_manager}}",
    nodePublicCertName: "KeyPair-ci",
    componentLists: [
        {
            componentName: "Hadoop",
        },
        {
            componentName: "Spark",
        },
        {
            componentName: "Hive",
        },
        {
            componentName: "Tez",
        },
    ],
});
import pulumi
import pulumi_flexibleengine as flexibleengine

example_vpc = flexibleengine.VpcV1("exampleVpc", cidr="192.168.0.0/16")
example_subnet = flexibleengine.VpcSubnetV1("exampleSubnet",
    cidr="192.168.0.0/24",
    gateway_ip="192.168.0.1",
    vpc_id=example_vpc.vpc_v1_id)
cluster1 = flexibleengine.MrsClusterV1("cluster1",
    region="eu-west-0",
    available_zone_id="eu-west-0a",
    cluster_name="mrs-cluster-test",
    cluster_type=0,
    cluster_version="MRS 2.0.1",
    master_node_num=2,
    core_node_num=3,
    master_node_size="s3.2xlarge.4.linux.mrs",
    core_node_size="s3.xlarge.4.linux.mrs",
    volume_type="SATA",
    volume_size=100,
    vpc_id=example_vpc.vpc_v1_id,
    subnet_id=example_subnet.vpc_subnet_v1_id,
    safe_mode=0,
    cluster_admin_secret="{{password_of_mrs_manager}}",
    node_public_cert_name="KeyPair-ci",
    component_lists=[
        {
            "component_name": "Hadoop",
        },
        {
            "component_name": "Spark",
        },
        {
            "component_name": "Hive",
        },
        {
            "component_name": "Tez",
        },
    ])
package main

import (
	"github.com/pulumi/pulumi-terraform-provider/sdks/go/flexibleengine/flexibleengine"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		exampleVpc, err := flexibleengine.NewVpcV1(ctx, "exampleVpc", &flexibleengine.VpcV1Args{
			Cidr: pulumi.String("192.168.0.0/16"),
		})
		if err != nil {
			return err
		}
		exampleSubnet, err := flexibleengine.NewVpcSubnetV1(ctx, "exampleSubnet", &flexibleengine.VpcSubnetV1Args{
			Cidr:      pulumi.String("192.168.0.0/24"),
			GatewayIp: pulumi.String("192.168.0.1"),
			VpcId:     exampleVpc.VpcV1Id,
		})
		if err != nil {
			return err
		}
		_, err = flexibleengine.NewMrsClusterV1(ctx, "cluster1", &flexibleengine.MrsClusterV1Args{
			Region:             pulumi.String("eu-west-0"),
			AvailableZoneId:    pulumi.String("eu-west-0a"),
			ClusterName:        pulumi.String("mrs-cluster-test"),
			ClusterType:        pulumi.Float64(0),
			ClusterVersion:     pulumi.String("MRS 2.0.1"),
			MasterNodeNum:      pulumi.Float64(2),
			CoreNodeNum:        pulumi.Float64(3),
			MasterNodeSize:     pulumi.String("s3.2xlarge.4.linux.mrs"),
			CoreNodeSize:       pulumi.String("s3.xlarge.4.linux.mrs"),
			VolumeType:         pulumi.String("SATA"),
			VolumeSize:         pulumi.Float64(100),
			VpcId:              exampleVpc.VpcV1Id,
			SubnetId:           exampleSubnet.VpcSubnetV1Id,
			SafeMode:           pulumi.Float64(0),
			ClusterAdminSecret: pulumi.String("{{password_of_mrs_manager}}"),
			NodePublicCertName: pulumi.String("KeyPair-ci"),
			ComponentLists: flexibleengine.MrsClusterV1ComponentListArray{
				&flexibleengine.MrsClusterV1ComponentListArgs{
					ComponentName: pulumi.String("Hadoop"),
				},
				&flexibleengine.MrsClusterV1ComponentListArgs{
					ComponentName: pulumi.String("Spark"),
				},
				&flexibleengine.MrsClusterV1ComponentListArgs{
					ComponentName: pulumi.String("Hive"),
				},
				&flexibleengine.MrsClusterV1ComponentListArgs{
					ComponentName: pulumi.String("Tez"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Flexibleengine = Pulumi.Flexibleengine;

return await Deployment.RunAsync(() => 
{
    var exampleVpc = new Flexibleengine.VpcV1("exampleVpc", new()
    {
        Cidr = "192.168.0.0/16",
    });

    var exampleSubnet = new Flexibleengine.VpcSubnetV1("exampleSubnet", new()
    {
        Cidr = "192.168.0.0/24",
        GatewayIp = "192.168.0.1",
        VpcId = exampleVpc.VpcV1Id,
    });

    var cluster1 = new Flexibleengine.MrsClusterV1("cluster1", new()
    {
        Region = "eu-west-0",
        AvailableZoneId = "eu-west-0a",
        ClusterName = "mrs-cluster-test",
        ClusterType = 0,
        ClusterVersion = "MRS 2.0.1",
        MasterNodeNum = 2,
        CoreNodeNum = 3,
        MasterNodeSize = "s3.2xlarge.4.linux.mrs",
        CoreNodeSize = "s3.xlarge.4.linux.mrs",
        VolumeType = "SATA",
        VolumeSize = 100,
        VpcId = exampleVpc.VpcV1Id,
        SubnetId = exampleSubnet.VpcSubnetV1Id,
        SafeMode = 0,
        ClusterAdminSecret = "{{password_of_mrs_manager}}",
        NodePublicCertName = "KeyPair-ci",
        ComponentLists = new[]
        {
            new Flexibleengine.Inputs.MrsClusterV1ComponentListArgs
            {
                ComponentName = "Hadoop",
            },
            new Flexibleengine.Inputs.MrsClusterV1ComponentListArgs
            {
                ComponentName = "Spark",
            },
            new Flexibleengine.Inputs.MrsClusterV1ComponentListArgs
            {
                ComponentName = "Hive",
            },
            new Flexibleengine.Inputs.MrsClusterV1ComponentListArgs
            {
                ComponentName = "Tez",
            },
        },
    });

});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.flexibleengine.VpcV1;
import com.pulumi.flexibleengine.VpcV1Args;
import com.pulumi.flexibleengine.VpcSubnetV1;
import com.pulumi.flexibleengine.VpcSubnetV1Args;
import com.pulumi.flexibleengine.MrsClusterV1;
import com.pulumi.flexibleengine.MrsClusterV1Args;
import com.pulumi.flexibleengine.inputs.MrsClusterV1ComponentListArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var exampleVpc = new VpcV1("exampleVpc", VpcV1Args.builder()
            .cidr("192.168.0.0/16")
            .build());

        var exampleSubnet = new VpcSubnetV1("exampleSubnet", VpcSubnetV1Args.builder()
            .cidr("192.168.0.0/24")
            .gatewayIp("192.168.0.1")
            .vpcId(exampleVpc.vpcV1Id())
            .build());

        var cluster1 = new MrsClusterV1("cluster1", MrsClusterV1Args.builder()
            .region("eu-west-0")
            .availableZoneId("eu-west-0a")
            .clusterName("mrs-cluster-test")
            .clusterType(0)
            .clusterVersion("MRS 2.0.1")
            .masterNodeNum(2)
            .coreNodeNum(3)
            .masterNodeSize("s3.2xlarge.4.linux.mrs")
            .coreNodeSize("s3.xlarge.4.linux.mrs")
            .volumeType("SATA")
            .volumeSize(100)
            .vpcId(exampleVpc.vpcV1Id())
            .subnetId(exampleSubnet.vpcSubnetV1Id())
            .safeMode(0)
            .clusterAdminSecret("{{password_of_mrs_manager}}")
            .nodePublicCertName("KeyPair-ci")
            .componentLists(            
                MrsClusterV1ComponentListArgs.builder()
                    .componentName("Hadoop")
                    .build(),
                MrsClusterV1ComponentListArgs.builder()
                    .componentName("Spark")
                    .build(),
                MrsClusterV1ComponentListArgs.builder()
                    .componentName("Hive")
                    .build(),
                MrsClusterV1ComponentListArgs.builder()
                    .componentName("Tez")
                    .build())
            .build());

    }
}
resources:
  exampleVpc:
    type: flexibleengine:VpcV1
    properties:
      cidr: 192.168.0.0/16
  exampleSubnet:
    type: flexibleengine:VpcSubnetV1
    properties:
      cidr: 192.168.0.0/24
      gatewayIp: 192.168.0.1
      vpcId: ${exampleVpc.vpcV1Id}
  cluster1:
    type: flexibleengine:MrsClusterV1
    properties:
      region: eu-west-0
      availableZoneId: eu-west-0a
      clusterName: mrs-cluster-test
      clusterType: 0
      clusterVersion: MRS 2.0.1
      masterNodeNum: 2
      coreNodeNum: 3
      masterNodeSize: s3.2xlarge.4.linux.mrs
      coreNodeSize: s3.xlarge.4.linux.mrs
      volumeType: SATA
      volumeSize: 100
      vpcId: ${exampleVpc.vpcV1Id}
      subnetId: ${exampleSubnet.vpcSubnetV1Id}
      safeMode: 0
      clusterAdminSecret: '{{password_of_mrs_manager}}'
      nodePublicCertName: KeyPair-ci
      componentLists:
        - componentName: Hadoop
        - componentName: Spark
        - componentName: Hive
        - componentName: Tez

Create MrsClusterV1 Resource

Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

Constructor syntax

new MrsClusterV1(name: string, args: MrsClusterV1Args, opts?: CustomResourceOptions);
@overload
def MrsClusterV1(resource_name: str,
                 args: MrsClusterV1Args,
                 opts: Optional[ResourceOptions] = None)

@overload
def MrsClusterV1(resource_name: str,
                 opts: Optional[ResourceOptions] = None,
                 subnet_id: Optional[str] = None,
                 available_zone_id: Optional[str] = None,
                 vpc_id: Optional[str] = None,
                 volume_type: Optional[str] = None,
                 cluster_name: Optional[str] = None,
                 volume_size: Optional[float] = None,
                 node_public_cert_name: Optional[str] = None,
                 component_lists: Optional[Sequence[MrsClusterV1ComponentListArgs]] = None,
                 core_node_num: Optional[float] = None,
                 core_node_size: Optional[str] = None,
                 safe_mode: Optional[float] = None,
                 master_node_num: Optional[float] = None,
                 master_node_size: Optional[str] = None,
                 log_collection: Optional[float] = None,
                 mrs_cluster_v1_id: Optional[str] = None,
                 region: Optional[str] = None,
                 add_jobs: Optional[Sequence[MrsClusterV1AddJobArgs]] = None,
                 cluster_version: Optional[str] = None,
                 timeouts: Optional[MrsClusterV1TimeoutsArgs] = None,
                 cluster_type: Optional[float] = None,
                 cluster_admin_secret: Optional[str] = None,
                 billing_type: Optional[float] = None)
func NewMrsClusterV1(ctx *Context, name string, args MrsClusterV1Args, opts ...ResourceOption) (*MrsClusterV1, error)
public MrsClusterV1(string name, MrsClusterV1Args args, CustomResourceOptions? opts = null)
public MrsClusterV1(String name, MrsClusterV1Args args)
public MrsClusterV1(String name, MrsClusterV1Args args, CustomResourceOptions options)
type: flexibleengine:MrsClusterV1
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

Parameters

name This property is required. string
The unique name of the resource.
args This property is required. MrsClusterV1Args
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name This property is required. str
The unique name of the resource.
args This property is required. MrsClusterV1Args
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name This property is required. string
The unique name of the resource.
args This property is required. MrsClusterV1Args
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name This property is required. string
The unique name of the resource.
args This property is required. MrsClusterV1Args
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name This property is required. String
The unique name of the resource.
args This property is required. MrsClusterV1Args
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.
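
The opts / options bag accepts the standard Pulumi resource options, which are not specific to this provider. As a minimal TypeScript sketch (all argument values below are placeholders, not working settings), a cluster can be shielded from accidental deletion with the protect option:

import * as flexibleengine from "@pulumi/flexibleengine";

// Placeholder arguments only; see Example Usage above for realistic values.
const protectedCluster = new flexibleengine.MrsClusterV1("protectedCluster", {
    availableZoneId: "eu-west-0a",
    clusterName: "mrs-protected-cluster",
    masterNodeNum: 2,
    coreNodeNum: 3,
    masterNodeSize: "s3.2xlarge.4.linux.mrs",
    coreNodeSize: "s3.xlarge.4.linux.mrs",
    volumeType: "SATA",
    volumeSize: 100,
    vpcId: "replace-with-vpc-id",
    subnetId: "replace-with-subnet-id",
    safeMode: 0,
    nodePublicCertName: "KeyPair-ci",
    componentLists: [{ componentName: "Hadoop" }],
}, {
    protect: true, // `pulumi destroy` will refuse to delete this cluster
});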

Constructor example

The following reference example uses placeholder values for all input properties.

var mrsClusterV1Resource = new Flexibleengine.MrsClusterV1("mrsClusterV1Resource", new()
{
    SubnetId = "string",
    AvailableZoneId = "string",
    VpcId = "string",
    VolumeType = "string",
    ClusterName = "string",
    VolumeSize = 0,
    NodePublicCertName = "string",
    ComponentLists = new[]
    {
        new Flexibleengine.Inputs.MrsClusterV1ComponentListArgs
        {
            ComponentName = "string",
            ComponentDesc = "string",
            ComponentId = "string",
            ComponentVersion = "string",
        },
    },
    CoreNodeNum = 0,
    CoreNodeSize = "string",
    SafeMode = 0,
    MasterNodeNum = 0,
    MasterNodeSize = "string",
    LogCollection = 0,
    MrsClusterV1Id = "string",
    Region = "string",
    AddJobs = new[]
    {
        new Flexibleengine.Inputs.MrsClusterV1AddJobArgs
        {
            JarPath = "string",
            JobName = "string",
            JobType = 0,
            SubmitJobOnceClusterRun = false,
            Arguments = "string",
            FileAction = "string",
            HiveScriptPath = "string",
            Hql = "string",
            Input = "string",
            JobLog = "string",
            Output = "string",
            ShutdownCluster = false,
        },
    },
    ClusterVersion = "string",
    Timeouts = new Flexibleengine.Inputs.MrsClusterV1TimeoutsArgs
    {
        Create = "string",
        Delete = "string",
    },
    ClusterType = 0,
    ClusterAdminSecret = "string",
    BillingType = 0,
});
example, err := flexibleengine.NewMrsClusterV1(ctx, "mrsClusterV1Resource", &flexibleengine.MrsClusterV1Args{
	SubnetId:           pulumi.String("string"),
	AvailableZoneId:    pulumi.String("string"),
	VpcId:              pulumi.String("string"),
	VolumeType:         pulumi.String("string"),
	ClusterName:        pulumi.String("string"),
	VolumeSize:         pulumi.Float64(0),
	NodePublicCertName: pulumi.String("string"),
	ComponentLists: flexibleengine.MrsClusterV1ComponentListArray{
		&flexibleengine.MrsClusterV1ComponentListArgs{
			ComponentName:    pulumi.String("string"),
			ComponentDesc:    pulumi.String("string"),
			ComponentId:      pulumi.String("string"),
			ComponentVersion: pulumi.String("string"),
		},
	},
	CoreNodeNum:    pulumi.Float64(0),
	CoreNodeSize:   pulumi.String("string"),
	SafeMode:       pulumi.Float64(0),
	MasterNodeNum:  pulumi.Float64(0),
	MasterNodeSize: pulumi.String("string"),
	LogCollection:  pulumi.Float64(0),
	MrsClusterV1Id: pulumi.String("string"),
	Region:         pulumi.String("string"),
	AddJobs: flexibleengine.MrsClusterV1AddJobArray{
		&flexibleengine.MrsClusterV1AddJobArgs{
			JarPath:                 pulumi.String("string"),
			JobName:                 pulumi.String("string"),
			JobType:                 pulumi.Float64(0),
			SubmitJobOnceClusterRun: pulumi.Bool(false),
			Arguments:               pulumi.String("string"),
			FileAction:              pulumi.String("string"),
			HiveScriptPath:          pulumi.String("string"),
			Hql:                     pulumi.String("string"),
			Input:                   pulumi.String("string"),
			JobLog:                  pulumi.String("string"),
			Output:                  pulumi.String("string"),
			ShutdownCluster:         pulumi.Bool(false),
		},
	},
	ClusterVersion: pulumi.String("string"),
	Timeouts: &flexibleengine.MrsClusterV1TimeoutsArgs{
		Create: pulumi.String("string"),
		Delete: pulumi.String("string"),
	},
	ClusterType:        pulumi.Float64(0),
	ClusterAdminSecret: pulumi.String("string"),
	BillingType:        pulumi.Float64(0),
})
var mrsClusterV1Resource = new MrsClusterV1("mrsClusterV1Resource", MrsClusterV1Args.builder()
    .subnetId("string")
    .availableZoneId("string")
    .vpcId("string")
    .volumeType("string")
    .clusterName("string")
    .volumeSize(0)
    .nodePublicCertName("string")
    .componentLists(MrsClusterV1ComponentListArgs.builder()
        .componentName("string")
        .componentDesc("string")
        .componentId("string")
        .componentVersion("string")
        .build())
    .coreNodeNum(0)
    .coreNodeSize("string")
    .safeMode(0)
    .masterNodeNum(0)
    .masterNodeSize("string")
    .logCollection(0)
    .mrsClusterV1Id("string")
    .region("string")
    .addJobs(MrsClusterV1AddJobArgs.builder()
        .jarPath("string")
        .jobName("string")
        .jobType(0)
        .submitJobOnceClusterRun(false)
        .arguments("string")
        .fileAction("string")
        .hiveScriptPath("string")
        .hql("string")
        .input("string")
        .jobLog("string")
        .output("string")
        .shutdownCluster(false)
        .build())
    .clusterVersion("string")
    .timeouts(MrsClusterV1TimeoutsArgs.builder()
        .create("string")
        .delete("string")
        .build())
    .clusterType(0)
    .clusterAdminSecret("string")
    .billingType(0)
    .build());
mrs_cluster_v1_resource = flexibleengine.MrsClusterV1("mrsClusterV1Resource",
    subnet_id="string",
    available_zone_id="string",
    vpc_id="string",
    volume_type="string",
    cluster_name="string",
    volume_size=0,
    node_public_cert_name="string",
    component_lists=[{
        "component_name": "string",
        "component_desc": "string",
        "component_id": "string",
        "component_version": "string",
    }],
    core_node_num=0,
    core_node_size="string",
    safe_mode=0,
    master_node_num=0,
    master_node_size="string",
    log_collection=0,
    mrs_cluster_v1_id="string",
    region="string",
    add_jobs=[{
        "jar_path": "string",
        "job_name": "string",
        "job_type": 0,
        "submit_job_once_cluster_run": False,
        "arguments": "string",
        "file_action": "string",
        "hive_script_path": "string",
        "hql": "string",
        "input": "string",
        "job_log": "string",
        "output": "string",
        "shutdown_cluster": False,
    }],
    cluster_version="string",
    timeouts={
        "create": "string",
        "delete": "string",
    },
    cluster_type=0,
    cluster_admin_secret="string",
    billing_type=0)
const mrsClusterV1Resource = new flexibleengine.MrsClusterV1("mrsClusterV1Resource", {
    subnetId: "string",
    availableZoneId: "string",
    vpcId: "string",
    volumeType: "string",
    clusterName: "string",
    volumeSize: 0,
    nodePublicCertName: "string",
    componentLists: [{
        componentName: "string",
        componentDesc: "string",
        componentId: "string",
        componentVersion: "string",
    }],
    coreNodeNum: 0,
    coreNodeSize: "string",
    safeMode: 0,
    masterNodeNum: 0,
    masterNodeSize: "string",
    logCollection: 0,
    mrsClusterV1Id: "string",
    region: "string",
    addJobs: [{
        jarPath: "string",
        jobName: "string",
        jobType: 0,
        submitJobOnceClusterRun: false,
        arguments: "string",
        fileAction: "string",
        hiveScriptPath: "string",
        hql: "string",
        input: "string",
        jobLog: "string",
        output: "string",
        shutdownCluster: false,
    }],
    clusterVersion: "string",
    timeouts: {
        create: "string",
        "delete": "string",
    },
    clusterType: 0,
    clusterAdminSecret: "string",
    billingType: 0,
});
type: flexibleengine:MrsClusterV1
properties:
    addJobs:
        - arguments: string
          fileAction: string
          hiveScriptPath: string
          hql: string
          input: string
          jarPath: string
          jobLog: string
          jobName: string
          jobType: 0
          output: string
          shutdownCluster: false
          submitJobOnceClusterRun: false
    availableZoneId: string
    billingType: 0
    clusterAdminSecret: string
    clusterName: string
    clusterType: 0
    clusterVersion: string
    componentLists:
        - componentDesc: string
          componentId: string
          componentName: string
          componentVersion: string
    coreNodeNum: 0
    coreNodeSize: string
    logCollection: 0
    masterNodeNum: 0
    masterNodeSize: string
    mrsClusterV1Id: string
    nodePublicCertName: string
    region: string
    safeMode: 0
    subnetId: string
    timeouts:
        create: string
        delete: string
    volumeSize: 0
    volumeType: string
    vpcId: string

MrsClusterV1 Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

The MrsClusterV1 resource accepts the following input properties:

AvailableZoneId This property is required. string
ID or name of an availability zone. Obtain the value from Regions and Endpoints.
ClusterName This property is required. string
Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
ComponentLists This property is required. List<MrsClusterV1ComponentList>
Service component list. The object structure is documented below.
CoreNodeNum This property is required. double
Number of Core nodes. Value range: 3 to 500. A maximum of 500 Core nodes are supported by default; if more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
CoreNodeSize This property is required. string
Instance specification of a Core node. The configuration method of this parameter is identical to that of master_node_size.
MasterNodeNum This property is required. double
Number of Master nodes. The value is 2.
MasterNodeSize This property is required. string

Instance specification of a Master node, best matched based on several years of commissioning experience. MRS supports a fixed set of host specifications, determined by CPU, memory, and disk space.

  • Master nodes support s1.4xlarge, s1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, and c3.15xlarge.4.
  • Core nodes of a streaming cluster support s1.xlarge, c2.2xlarge, s1.2xlarge, s1.4xlarge, s1.8xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, and c3.15xlarge.4.
  • Core nodes of an analysis cluster support c2.2xlarge, s1.xlarge, s1.4xlarge, s1.8xlarge, d1.xlarge, d1.2xlarge, d1.4xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4, d2.xlarge.8, d2.2xlarge.8, d2.4xlarge.8, and d2.8xlarge.8.

The following table provides specification details.

node_size | CPU (cores) | Memory (GB) | System Disk (GB) | Data Disk
--- | --- | --- | --- | ---
c2.2xlarge.linux.mrs | 8 | 16 | 40 | -
cc3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
cc3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
cc3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
cc3.8xlarge.4.linux.mrs | 32 | 128 | 40 | -
s1.xlarge.linux.mrs | 4 | 16 | 40 | -
s1.4xlarge.linux.mrs | 16 | 64 | 40 | -
s1.8xlarge.linux.mrs | 32 | 128 | 40 | -
s3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
s3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
s3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
d1.xlarge.linux.mrs | 6 | 55 | 40 | 1.8 TB x 3 HDDs
d1.2xlarge.linux.mrs | 12 | 110 | 40 | 1.8 TB x 6 HDDs
d1.4xlarge.linux.mrs | 24 | 220 | 40 | 1.8 TB x 12 HDDs
d1.8xlarge.linux.mrs | 48 | 440 | 40 | 1.8 TB x 24 HDDs
d2.xlarge.linux.mrs | 4 | 32 | 40 | -
d2.2xlarge.linux.mrs | 8 | 64 | 40 | -
d2.4xlarge.linux.mrs | 16 | 128 | 40 | 1.8 TB x 8 HDDs
d2.8xlarge.linux.mrs | 32 | 256 | 40 | 1.8 TB x 16 HDDs

NodePublicCertName This property is required. string
Name of a key pair. The key can be used to log in to the Master node of the cluster.
SafeMode This property is required. double
MRS cluster running mode.

  • 0: common mode. Kerberos authentication is disabled, and users can use all functions provided by the cluster.
  • 1: safe mode. Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster, nor view cluster resource usage or the job records of Hadoop and Spark; to use these functions, they must obtain the relevant permissions from the MRS Manager administrator.
SubnetId This property is required. string
Specifies the ID of the VPC subnet bound to the MRS cluster. Changing this creates a new MRS cluster resource.
VolumeSize This property is required. double
Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. Two scenarios are supported:

  • Separation of data storage and computing: data is stored in the OBS system. Cluster costs are relatively low but computing performance is poor, and the cluster can be deleted at any time. Recommended when data computing is infrequent.
  • Integration of data storage and computing: data is stored in the HDFS system. Cluster costs are relatively high but computing performance is good, and the cluster cannot be deleted in a short term. Recommended when data computing is frequent.

Value range: 100 GB to 32000 GB.
VolumeType This property is required. string
Type of disk. SATA and SSD are supported. SATA: common I/O; SSD: super high-speed I/O.
VpcId This property is required. string
ID of the VPC where the subnet is located. To obtain the VPC ID from the management console: register an account and log in, click Virtual Private Cloud and select Virtual Private Cloud from the left list, then obtain the VPC ID from the list on the Virtual Private Cloud page.
AddJobs List<MrsClusterV1AddJob>
You can submit a job when creating a cluster to save time and make MRS easier to use. Only one job can be added. The object structure is documented below.
BillingType double
ClusterAdminSecret string
Indicates the password of the MRS Manager administrator.

  • Must contain 8 to 32 characters.
  • Must contain at least three of the following character types: lowercase letters, uppercase letters, digits, spaces, and the special characters `~!@#$%^&*()-_=+|[{}];:'",<.>/?
  • Cannot be the username or the username spelled backwards.
ClusterType double
Type of clusters. 0: analysis cluster; 1: streaming cluster. The default value is 0.
ClusterVersion string
Version of the clusters. Possible values are MRS 1.8.9, MRS 2.0.1, MRS 2.1.0, and MRS 3.1.0-LTS.1. The latest version of MRS is used by default.
LogCollection double
Indicates whether logs are collected when cluster installation fails. 0: not collected; 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs; these buckets will be charged.
MrsClusterV1Id string
The resource ID in UUID format.
Region string
Cluster region information. Obtain the value from Regions and Endpoints.
Timeouts MrsClusterV1Timeouts
AvailableZoneId This property is required. string
ID or name of an availability zone. Obtain the value from Regions and Endpoints.
ClusterName This property is required. string
Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
ComponentLists This property is required. []MrsClusterV1ComponentListArgs
Service component list. The object structure is documented below.
CoreNodeNum This property is required. float64
Number of Core nodes. Value range: 3 to 500. A maximum of 500 Core nodes are supported by default; if more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
CoreNodeSize This property is required. string
Instance specification of a Core node. The configuration method of this parameter is identical to that of master_node_size.
MasterNodeNum This property is required. float64
Number of Master nodes. The value is 2.
MasterNodeSize This property is required. string

Instance specification of a Master node, best matched based on several years of commissioning experience. MRS supports a fixed set of host specifications, determined by CPU, memory, and disk space.

  • Master nodes support s1.4xlarge, s1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, and c3.15xlarge.4.
  • Core nodes of a streaming cluster support s1.xlarge, c2.2xlarge, s1.2xlarge, s1.4xlarge, s1.8xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, and c3.15xlarge.4.
  • Core nodes of an analysis cluster support c2.2xlarge, s1.xlarge, s1.4xlarge, s1.8xlarge, d1.xlarge, d1.2xlarge, d1.4xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4, d2.xlarge.8, d2.2xlarge.8, d2.4xlarge.8, and d2.8xlarge.8.

The following table provides specification details.

node_size | CPU (cores) | Memory (GB) | System Disk (GB) | Data Disk
--- | --- | --- | --- | ---
c2.2xlarge.linux.mrs | 8 | 16 | 40 | -
cc3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
cc3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
cc3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
cc3.8xlarge.4.linux.mrs | 32 | 128 | 40 | -
s1.xlarge.linux.mrs | 4 | 16 | 40 | -
s1.4xlarge.linux.mrs | 16 | 64 | 40 | -
s1.8xlarge.linux.mrs | 32 | 128 | 40 | -
s3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
s3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
s3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
d1.xlarge.linux.mrs | 6 | 55 | 40 | 1.8 TB x 3 HDDs
d1.2xlarge.linux.mrs | 12 | 110 | 40 | 1.8 TB x 6 HDDs
d1.4xlarge.linux.mrs | 24 | 220 | 40 | 1.8 TB x 12 HDDs
d1.8xlarge.linux.mrs | 48 | 440 | 40 | 1.8 TB x 24 HDDs
d2.xlarge.linux.mrs | 4 | 32 | 40 | -
d2.2xlarge.linux.mrs | 8 | 64 | 40 | -
d2.4xlarge.linux.mrs | 16 | 128 | 40 | 1.8 TB x 8 HDDs
d2.8xlarge.linux.mrs | 32 | 256 | 40 | 1.8 TB x 16 HDDs

NodePublicCertName This property is required. string
Name of a key pair. The key can be used to log in to the Master node of the cluster.
SafeMode This property is required. float64
MRS cluster running mode.

  • 0: common mode. Kerberos authentication is disabled, and users can use all functions provided by the cluster.
  • 1: safe mode. Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster, nor view cluster resource usage or the job records of Hadoop and Spark; to use these functions, they must obtain the relevant permissions from the MRS Manager administrator.
SubnetId This property is required. string
Specifies the ID of the VPC subnet bound to the MRS cluster. Changing this creates a new MRS cluster resource.
VolumeSize This property is required. float64
Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. Two scenarios are supported:

  • Separation of data storage and computing: data is stored in the OBS system. Cluster costs are relatively low but computing performance is poor, and the cluster can be deleted at any time. Recommended when data computing is infrequent.
  • Integration of data storage and computing: data is stored in the HDFS system. Cluster costs are relatively high but computing performance is good, and the cluster cannot be deleted in a short term. Recommended when data computing is frequent.

Value range: 100 GB to 32000 GB.
VolumeType This property is required. string
Type of disk. SATA and SSD are supported. SATA: common I/O; SSD: super high-speed I/O.
VpcId This property is required. string
ID of the VPC where the subnet is located. To obtain the VPC ID from the management console: register an account and log in, click Virtual Private Cloud and select Virtual Private Cloud from the left list, then obtain the VPC ID from the list on the Virtual Private Cloud page.
AddJobs []MrsClusterV1AddJobArgs
You can submit a job when creating a cluster to save time and make MRS easier to use. Only one job can be added. The object structure is documented below.
BillingType float64
ClusterAdminSecret string
Indicates the password of the MRS Manager administrator.

  • Must contain 8 to 32 characters.
  • Must contain at least three of the following character types: lowercase letters, uppercase letters, digits, spaces, and the special characters `~!@#$%^&*()-_=+|[{}];:'",<.>/?
  • Cannot be the username or the username spelled backwards.
ClusterType float64
Type of clusters. 0: analysis cluster; 1: streaming cluster. The default value is 0.
ClusterVersion string
Version of the clusters. Possible values are MRS 1.8.9, MRS 2.0.1, MRS 2.1.0, and MRS 3.1.0-LTS.1. The latest version of MRS is used by default.
LogCollection float64
Indicates whether logs are collected when cluster installation fails. 0: not collected; 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs; these buckets will be charged.
MrsClusterV1Id string
The resource ID in UUID format.
Region string
Cluster region information. Obtain the value from Regions and Endpoints.
Timeouts MrsClusterV1TimeoutsArgs
availableZoneId This property is required. String
ID or name of an availability zone. Obtain the value from Regions and Endpoints.
clusterName This property is required. String
Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
componentLists This property is required. List<MrsClusterV1ComponentList>
Service component list. The object structure is documented below.
coreNodeNum This property is required. Double
Number of Core nodes. Value range: 3 to 500. A maximum of 500 Core nodes are supported by default; if more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
coreNodeSize This property is required. String
Instance specification of a Core node. The configuration method of this parameter is identical to that of master_node_size.
masterNodeNum This property is required. Double
Number of Master nodes. The value is 2.
masterNodeSize This property is required. String

Instance specification of a Master node, best matched based on several years of commissioning experience. MRS supports a fixed set of host specifications, determined by CPU, memory, and disk space.

  • Master nodes support s1.4xlarge, s1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, and c3.15xlarge.4.
  • Core nodes of a streaming cluster support s1.xlarge, c2.2xlarge, s1.2xlarge, s1.4xlarge, s1.8xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, and c3.15xlarge.4.
  • Core nodes of an analysis cluster support c2.2xlarge, s1.xlarge, s1.4xlarge, s1.8xlarge, d1.xlarge, d1.2xlarge, d1.4xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4, d2.xlarge.8, d2.2xlarge.8, d2.4xlarge.8, and d2.8xlarge.8.

The following table provides specification details.

node_size | CPU (cores) | Memory (GB) | System Disk (GB) | Data Disk
--- | --- | --- | --- | ---
c2.2xlarge.linux.mrs | 8 | 16 | 40 | -
cc3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
cc3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
cc3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
cc3.8xlarge.4.linux.mrs | 32 | 128 | 40 | -
s1.xlarge.linux.mrs | 4 | 16 | 40 | -
s1.4xlarge.linux.mrs | 16 | 64 | 40 | -
s1.8xlarge.linux.mrs | 32 | 128 | 40 | -
s3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
s3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
s3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
d1.xlarge.linux.mrs | 6 | 55 | 40 | 1.8 TB x 3 HDDs
d1.2xlarge.linux.mrs | 12 | 110 | 40 | 1.8 TB x 6 HDDs
d1.4xlarge.linux.mrs | 24 | 220 | 40 | 1.8 TB x 12 HDDs
d1.8xlarge.linux.mrs | 48 | 440 | 40 | 1.8 TB x 24 HDDs
d2.xlarge.linux.mrs | 4 | 32 | 40 | -
d2.2xlarge.linux.mrs | 8 | 64 | 40 | -
d2.4xlarge.linux.mrs | 16 | 128 | 40 | 1.8 TB x 8 HDDs
d2.8xlarge.linux.mrs | 32 | 256 | 40 | 1.8 TB x 16 HDDs

nodePublicCertName This property is required. String
Name of a key pair. The key can be used to log in to the Master node of the cluster.
safeMode This property is required. Double
MRS cluster running mode.

  • 0: common mode. Kerberos authentication is disabled, and users can use all functions provided by the cluster.
  • 1: safe mode. Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster, nor view cluster resource usage or the job records of Hadoop and Spark; to use these functions, they must obtain the relevant permissions from the MRS Manager administrator.
subnetId This property is required. String
Specifies the ID of the VPC subnet bound to the MRS cluster. Changing this creates a new MRS cluster resource.
volumeSize This property is required. Double
Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. Two scenarios are supported:

  • Separation of data storage and computing: data is stored in the OBS system. Cluster costs are relatively low but computing performance is poor, and the cluster can be deleted at any time. Recommended when data computing is infrequent.
  • Integration of data storage and computing: data is stored in the HDFS system. Cluster costs are relatively high but computing performance is good, and the cluster cannot be deleted in a short term. Recommended when data computing is frequent.

Value range: 100 GB to 32000 GB.
volumeType This property is required. String
Type of disk. SATA and SSD are supported. SATA: common I/O; SSD: super high-speed I/O.
vpcId This property is required. String
ID of the VPC where the subnet is located. To obtain the VPC ID from the management console: register an account and log in, click Virtual Private Cloud and select Virtual Private Cloud from the left list, then obtain the VPC ID from the list on the Virtual Private Cloud page.
addJobs List<MrsClusterV1AddJob>
You can submit a job when creating a cluster to save time and make MRS easier to use. Only one job can be added. The object structure is documented below.
billingType Double
clusterAdminSecret String
Indicates the password of the MRS Manager administrator.

  • Must contain 8 to 32 characters.
  • Must contain at least three of the following character types: lowercase letters, uppercase letters, digits, spaces, and the special characters `~!@#$%^&*()-_=+|[{}];:'",<.>/?
  • Cannot be the username or the username spelled backwards.
clusterType Double
Type of clusters. 0: analysis cluster; 1: streaming cluster. The default value is 0.
clusterVersion String
Version of the clusters. Possible values are MRS 1.8.9, MRS 2.0.1, MRS 2.1.0, and MRS 3.1.0-LTS.1. The latest version of MRS is used by default.
logCollection Double
Indicates whether logs are collected when cluster installation fails. 0: not collected; 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs; these buckets will be charged.
mrsClusterV1Id String
The resource ID in UUID format.
region String
Cluster region information. Obtain the value from Regions and Endpoints.
timeouts MrsClusterV1Timeouts
availableZoneId This property is required. string
ID or name of an availability zone. Obtain the value from Regions and Endpoints.
clusterName This property is required. string
Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
componentLists This property is required. MrsClusterV1ComponentList[]
Service component list. The object structure is documented below.
coreNodeNum This property is required. number
Number of Core nodes. Value range: 3 to 500. A maximum of 500 Core nodes are supported by default; if more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
coreNodeSize This property is required. string
Instance specification of a Core node. The configuration method of this parameter is identical to that of master_node_size.
masterNodeNum This property is required. number
Number of Master nodes. The value is 2.
masterNodeSize This property is required. string

Instance specification of a Master node, best matched based on several years of commissioning experience. MRS supports a fixed set of host specifications, determined by CPU, memory, and disk space.

  • Master nodes support s1.4xlarge, s1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, and c3.15xlarge.4.
  • Core nodes of a streaming cluster support s1.xlarge, c2.2xlarge, s1.2xlarge, s1.4xlarge, s1.8xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, and c3.15xlarge.4.
  • Core nodes of an analysis cluster support c2.2xlarge, s1.xlarge, s1.4xlarge, s1.8xlarge, d1.xlarge, d1.2xlarge, d1.4xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4, d2.xlarge.8, d2.2xlarge.8, d2.4xlarge.8, and d2.8xlarge.8.

The following table provides specification details.

node_size | CPU (cores) | Memory (GB) | System Disk (GB) | Data Disk
--- | --- | --- | --- | ---
c2.2xlarge.linux.mrs | 8 | 16 | 40 | -
cc3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
cc3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
cc3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
cc3.8xlarge.4.linux.mrs | 32 | 128 | 40 | -
s1.xlarge.linux.mrs | 4 | 16 | 40 | -
s1.4xlarge.linux.mrs | 16 | 64 | 40 | -
s1.8xlarge.linux.mrs | 32 | 128 | 40 | -
s3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
s3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
s3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
d1.xlarge.linux.mrs | 6 | 55 | 40 | 1.8 TB x 3 HDDs
d1.2xlarge.linux.mrs | 12 | 110 | 40 | 1.8 TB x 6 HDDs
d1.4xlarge.linux.mrs | 24 | 220 | 40 | 1.8 TB x 12 HDDs
d1.8xlarge.linux.mrs | 48 | 440 | 40 | 1.8 TB x 24 HDDs
d2.xlarge.linux.mrs | 4 | 32 | 40 | -
d2.2xlarge.linux.mrs | 8 | 64 | 40 | -
d2.4xlarge.linux.mrs | 16 | 128 | 40 | 1.8 TB x 8 HDDs
d2.8xlarge.linux.mrs | 32 | 256 | 40 | 1.8 TB x 16 HDDs

nodePublicCertName This property is required. string
Name of a key pair. The key can be used to log in to the Master node of the cluster.
safeMode This property is required. number
MRS cluster running mode.

  • 0: common mode. Kerberos authentication is disabled, and users can use all functions provided by the cluster.
  • 1: safe mode. Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster, nor view cluster resource usage or the job records of Hadoop and Spark; to use these functions, they must obtain the relevant permissions from the MRS Manager administrator.
subnetId This property is required. string
Specifies the ID of the VPC subnet bound to the MRS cluster. Changing this creates a new MRS cluster resource.
volumeSize This property is required. number
Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. Two scenarios are supported:

  • Separation of data storage and computing: data is stored in the OBS system. Cluster costs are relatively low but computing performance is poor, and the cluster can be deleted at any time. Recommended when data computing is infrequent.
  • Integration of data storage and computing: data is stored in the HDFS system. Cluster costs are relatively high but computing performance is good, and the cluster cannot be deleted in a short term. Recommended when data computing is frequent.

Value range: 100 GB to 32000 GB.
volumeType This property is required. string
Type of disk. SATA and SSD are supported. SATA: common I/O; SSD: super high-speed I/O.
vpcId This property is required. string
ID of the VPC where the subnet is located. To obtain the VPC ID from the management console: register an account and log in, click Virtual Private Cloud and select Virtual Private Cloud from the left list, then obtain the VPC ID from the list on the Virtual Private Cloud page.
addJobs MrsClusterV1AddJob[]
You can submit a job when creating a cluster to save time and make MRS easier to use. Only one job can be added. The object structure is documented below.
billingType number
clusterAdminSecret string
Indicates the password of the MRS Manager administrator.

  • Must contain 8 to 32 characters.
  • Must contain at least three of the following character types: lowercase letters, uppercase letters, digits, spaces, and the special characters `~!@#$%^&*()-_=+|[{}];:'",<.>/?
  • Cannot be the username or the username spelled backwards.
clusterType number
Type of clusters. 0: analysis cluster; 1: streaming cluster. The default value is 0.
clusterVersion string
Version of the clusters. Possible values are MRS 1.8.9, MRS 2.0.1, MRS 2.1.0, and MRS 3.1.0-LTS.1. The latest version of MRS is used by default.
logCollection number
Indicates whether logs are collected when cluster installation fails. 0: not collected; 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs; these buckets will be charged.
mrsClusterV1Id string
The resource ID in UUID format.
region string
Cluster region information. Obtain the value from Regions and Endpoints.
timeouts MrsClusterV1Timeouts
available_zone_id This property is required. str
ID or name of an availability zone. Obtain the value from Regions and Endpoints.
cluster_name This property is required. str
Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
component_lists This property is required. Sequence[MrsClusterV1ComponentListArgs]
Service component list. The object structure is documented below.
core_node_num This property is required. float
Number of Core nodes. Value range: 3 to 500. A maximum of 500 Core nodes are supported by default; if more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
core_node_size This property is required. str
Instance specification of a Core node. The configuration method of this parameter is identical to that of master_node_size.
master_node_num This property is required. float
Number of Master nodes. The value is 2.
master_node_size This property is required. str

Instance specification of a Master node, best matched based on several years of commissioning experience. MRS supports a fixed set of host specifications, determined by CPU, memory, and disk space.

  • Master nodes support s1.4xlarge, s1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, and c3.15xlarge.4.
  • Core nodes of a streaming cluster support s1.xlarge, c2.2xlarge, s1.2xlarge, s1.4xlarge, s1.8xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, and c3.15xlarge.4.
  • Core nodes of an analysis cluster support c2.2xlarge, s1.xlarge, s1.4xlarge, s1.8xlarge, d1.xlarge, d1.2xlarge, d1.4xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4, d2.xlarge.8, d2.2xlarge.8, d2.4xlarge.8, and d2.8xlarge.8.

The following table provides specification details.

node_size | CPU (cores) | Memory (GB) | System Disk (GB) | Data Disk
--- | --- | --- | --- | ---
c2.2xlarge.linux.mrs | 8 | 16 | 40 | -
cc3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
cc3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
cc3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
cc3.8xlarge.4.linux.mrs | 32 | 128 | 40 | -
s1.xlarge.linux.mrs | 4 | 16 | 40 | -
s1.4xlarge.linux.mrs | 16 | 64 | 40 | -
s1.8xlarge.linux.mrs | 32 | 128 | 40 | -
s3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
s3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
s3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
d1.xlarge.linux.mrs | 6 | 55 | 40 | 1.8 TB x 3 HDDs
d1.2xlarge.linux.mrs | 12 | 110 | 40 | 1.8 TB x 6 HDDs
d1.4xlarge.linux.mrs | 24 | 220 | 40 | 1.8 TB x 12 HDDs
d1.8xlarge.linux.mrs | 48 | 440 | 40 | 1.8 TB x 24 HDDs
d2.xlarge.linux.mrs | 4 | 32 | 40 | -
d2.2xlarge.linux.mrs | 8 | 64 | 40 | -
d2.4xlarge.linux.mrs | 16 | 128 | 40 | 1.8 TB x 8 HDDs
d2.8xlarge.linux.mrs | 32 | 256 | 40 | 1.8 TB x 16 HDDs

node_public_cert_name This property is required. str
Name of a key pair. The key can be used to log in to the Master node of the cluster.
safe_mode This property is required. float
MRS cluster running mode.

  • 0: common mode. Kerberos authentication is disabled, and users can use all functions provided by the cluster.
  • 1: safe mode. Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster, nor view cluster resource usage or the job records of Hadoop and Spark; to use these functions, they must obtain the relevant permissions from the MRS Manager administrator.
subnet_id This property is required. str
Specifies the ID of the VPC subnet bound to the MRS cluster. Changing this creates a new MRS cluster resource.
volume_size This property is required. float
Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. Two scenarios are supported:

  • Separation of data storage and computing: data is stored in the OBS system. Cluster costs are relatively low but computing performance is poor, and the cluster can be deleted at any time. Recommended when data computing is infrequent.
  • Integration of data storage and computing: data is stored in the HDFS system. Cluster costs are relatively high but computing performance is good, and the cluster cannot be deleted in a short term. Recommended when data computing is frequent.

Value range: 100 GB to 32000 GB.
volume_type This property is required. str
Type of disk. SATA and SSD are supported. SATA: common I/O; SSD: super high-speed I/O.
vpc_id This property is required. str
ID of the VPC where the subnet is located. To obtain the VPC ID from the management console: register an account and log in, click Virtual Private Cloud and select Virtual Private Cloud from the left list, then obtain the VPC ID from the list on the Virtual Private Cloud page.
add_jobs Sequence[MrsClusterV1AddJobArgs]
You can submit a job when creating a cluster to save time and make MRS easier to use. Only one job can be added. The object structure is documented below.
billing_type float
cluster_admin_secret str
Indicates the password of the MRS Manager administrator.

  • Must contain 8 to 32 characters.
  • Must contain at least three of the following character types: lowercase letters, uppercase letters, digits, spaces, and the special characters `~!@#$%^&*()-_=+|[{}];:'",<.>/?
  • Cannot be the username or the username spelled backwards.
cluster_type float
Type of clusters. 0: analysis cluster; 1: streaming cluster. The default value is 0.
cluster_version str
Version of the clusters. Possible values are MRS 1.8.9, MRS 2.0.1, MRS 2.1.0, and MRS 3.1.0-LTS.1. The latest version of MRS is used by default.
log_collection float
Indicates whether logs are collected when cluster installation fails. 0: not collected; 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs; these buckets will be charged.
mrs_cluster_v1_id str
The resource ID in UUID format.
region str
Cluster region information. Obtain the value from Regions and Endpoints.
timeouts MrsClusterV1TimeoutsArgs
availableZoneId This property is required. String
ID or name of an availability zone. Obtain the value from Regions and Endpoints.
clusterName This property is required. String
Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
componentLists This property is required. List<Property Map>
Service component list. The object structure is documented below.
coreNodeNum This property is required. Number
Number of Core nodes Value range: 3 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
coreNodeSize This property is required. String
Instance specification of a Core node Configuration method of this parameter is identical to that of master_node_size.
masterNodeNum This property is required. Number
Number of Master nodes The value is 2.
masterNodeSize This property is required. String

Best match based on several years of commissioning experience. MRS supports specifications of hosts, and host specifications are determined by CPUs, memory, and disks space.

  • Master nodes support s1.4xlarge and s1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4.
  • Core nodes of a streaming cluster support s1.xlarge, c2.2xlarge, s1.2xlarge, s1.4xlarge, s1.8xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4.
  • Core nodes of an analysis cluster support all specifications c2.2xlarge, s1.xlarge, s1.4xlarge, s1.8xlarge, d1.xlarge, d1.2xlarge, d1.4xlarge, d1.8xlarge, , c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4, d2.xlarge.8, d2.2xlarge.8, d2.4xlarge.8, d2.8xlarge.8.

The following provides specification details.

node_size | CPU(core) | Memory(GB) | System Disk | Data Disk --- | --- | --- | --- | --- c2.2xlarge.linux.mrs | 8 | 16 | 40 | - cc3.xlarge.4.linux.mrs | 4 | 16 | 40 | - cc3.2xlarge.4.linux.mrs | 8 | 32 | 40 | - cc3.4xlarge.4.linux.mrs | 16 | 64 | 40 | - cc3.8xlarge.4.linux.mrs | 32 | 128 | 40 | - s1.xlarge.linux.mrs | 4 | 16 | 40 | - s1.4xlarge.linux.mrs | 16 | 64 | 40 | - s1.8xlarge.linux.mrs | 32 | 128 | 40 | - s3.xlarge.4.linux.mrs| 4 | 16 | 40 | - s3.2xlarge.4.linux.mrs| 8 | 32 | 40 | - s3.4xlarge.4.linux.mrs| 16 | 64 | 40 | - d1.xlarge.linux.mrs | 6 | 55 | 40 | 1.8 TB x 3 HDDs d1.2xlarge.linux.mrs | 12 | 110 | 40 | 1.8 TB x 6 HDDs d1.4xlarge.linux.mrs | 24 | 220 | 40 | 1.8 TB x 12 HDDs d1.8xlarge.linux.mrs | 48 | 440 | 40 | 1.8 TB x 24 HDDs d2.xlarge.linux.mrs | 4 | 32 | 40 | - d2.2xlarge.linux.mrs | 8 | 64 | 40 | - d2.4xlarge.linux.mrs | 16 | 128 | 40 | 1.8TB8HDDs d2.8xlarge.linux.mrs | 32 | 256 | 40 | 1.8TB16HDDs

nodePublicCertName This property is required. String
Name of a key pair You can use a key to log in to the Master node in the cluster.
safeMode This property is required. Number
MRS cluster running mode.

  • 0: common mode The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster.
  • 1: safe mode The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management and job management functions of an MRS cluster or view cluster resource usage and the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator.
subnetId This property is required. String
Specifies the ID of the VPC subnet bound to the MRS cluster. Changing this will create a new MRS cluster resource.
volumeSize This property is required. Number
Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. Two scenarios apply. Separation of data storage and computing: data is stored in OBS; cluster costs are relatively low but computing performance is poor, and the cluster can be deleted at any time; recommended when data computing is infrequent. Integration of data storage and computing: data is stored in HDFS; cluster costs are relatively high but computing performance is good, and the cluster cannot be deleted in the short term; recommended when data computing is frequent. Value range: 100 GB to 32000 GB.
volumeType This property is required. String
Type of disk. SATA and SSD are supported. SATA: common I/O. SSD: super high-speed I/O.
vpcId This property is required. String
ID of the VPC where the subnet is located. To obtain the VPC ID from the management console: register an account and log in to the management console; click Virtual Private Cloud and select Virtual Private Cloud from the left list; on the Virtual Private Cloud page, obtain the VPC ID from the list.
addJobs List<Property Map>
You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added. The object structure is documented below.
billingType Number
clusterAdminSecret String
Indicates the password of the MRS Manager administrator.

  • Must contain 8 to 32 characters.
  • Must contain at least three of the following: Lowercase letters, Uppercase letters, Digits and Special characters: `~!@#$%^&*()-_=+|[{}];:'",<.>/? and space
  • Cannot be the username or the username spelled backwards.
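Rather than embedding this password in code, it can be read from Pulumi's encrypted stack configuration; a brief sketch (the config key mrsAdminSecret is an arbitrary name chosen for illustration):

```typescript
import * as pulumi from "@pulumi/pulumi";

// Read the MRS Manager administrator password from encrypted stack config,
// set beforehand with: pulumi config set --secret mrsAdminSecret <password>
const config = new pulumi.Config();
const adminSecret = config.requireSecret("mrsAdminSecret");

// `adminSecret` is an Output<string> and can be passed directly as the
// clusterAdminSecret argument of a MrsClusterV1 resource.
```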
clusterType Number
Type of clusters. 0: analysis cluster; 1: streaming cluster. The default value is 0.
clusterVersion String
Version of the clusters. Possible values are as follows: MRS 1.8.9, MRS 2.0.1, MRS 2.1.0 and MRS 3.1.0-LTS.1. The latest version of MRS is used by default.
logCollection Number
Indicates whether logs are collected when cluster installation fails. 0: not collected; 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs; these buckets will be charged.
mrsClusterV1Id String
The resource ID in UUID format.
region String
Cluster region information. Obtain the value from Regions and Endpoints.
timeouts Property Map

Outputs

All input properties are implicitly available as output properties. Additionally, the MrsClusterV1 resource produces the following output properties:

AvailableZoneName string
Name of an availability zone.
ChargingStartTime string
Time when charging starts.
ClusterId string
Cluster ID.
ClusterState string
Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
CoreNodeProductId string
Product ID of a Core node.
CoreNodeSpecId string
Specification ID of a Core node.
CreateAt string
Cluster creation time.
DeploymentId string
Deployment ID of a cluster.
Duration string
Cluster subscription duration.
ErrorInfo string
Error information.
ExternalAlternateIp string
Backup external IP address.
ExternalIp string
External IP address.
Fee string
Cluster creation fee, which is automatically calculated.
HadoopVersion string
Hadoop version.
Id string
The provider-assigned unique ID for this managed resource.
InstanceId string
Instance ID.
InternalIp string
Internal IP address.
MasterNodeIp string
IP address of a Master node.
MasterNodeProductId string
Product ID of a Master node.
MasterNodeSpecId string
Specification ID of a Master node.
OrderId string
Order ID for creating clusters.
PrivateIpFirst string
Primary private IP address.
Remark string
Remarks of a cluster.
SecurityGroupsId string
Security group ID.
SlaveSecurityGroupsId string
Standby security group ID.
TenantId string
Project ID.
UpdateAt string
Cluster update time.
Vnc string
URI address for remote login of the elastic cloud server.
AvailableZoneName string
Name of an availability zone.
ChargingStartTime string
Time when charging starts.
ClusterId string
Cluster ID.
ClusterState string
Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
CoreNodeProductId string
Product ID of a Core node.
CoreNodeSpecId string
Specification ID of a Core node.
CreateAt string
Cluster creation time.
DeploymentId string
Deployment ID of a cluster.
Duration string
Cluster subscription duration.
ErrorInfo string
Error information.
ExternalAlternateIp string
Backup external IP address.
ExternalIp string
External IP address.
Fee string
Cluster creation fee, which is automatically calculated.
HadoopVersion string
Hadoop version.
Id string
The provider-assigned unique ID for this managed resource.
InstanceId string
Instance ID.
InternalIp string
Internal IP address.
MasterNodeIp string
IP address of a Master node.
MasterNodeProductId string
Product ID of a Master node.
MasterNodeSpecId string
Specification ID of a Master node.
OrderId string
Order ID for creating clusters.
PrivateIpFirst string
Primary private IP address.
Remark string
Remarks of a cluster.
SecurityGroupsId string
Security group ID.
SlaveSecurityGroupsId string
Standby security group ID.
TenantId string
Project ID.
UpdateAt string
Cluster update time.
Vnc string
URI address for remote login of the elastic cloud server.
availableZoneName String
Name of an availability zone.
chargingStartTime String
Time when charging starts.
clusterId String
Cluster ID.
clusterState String
Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
coreNodeProductId String
Product ID of a Core node.
coreNodeSpecId String
Specification ID of a Core node.
createAt String
Cluster creation time.
deploymentId String
Deployment ID of a cluster.
duration String
Cluster subscription duration.
errorInfo String
Error information.
externalAlternateIp String
Backup external IP address.
externalIp String
External IP address.
fee String
Cluster creation fee, which is automatically calculated.
hadoopVersion String
Hadoop version.
id String
The provider-assigned unique ID for this managed resource.
instanceId String
Instance ID.
internalIp String
Internal IP address.
masterNodeIp String
IP address of a Master node.
masterNodeProductId String
Product ID of a Master node.
masterNodeSpecId String
Specification ID of a Master node.
orderId String
Order ID for creating clusters.
privateIpFirst String
Primary private IP address.
remark String
Remarks of a cluster.
securityGroupsId String
Security group ID.
slaveSecurityGroupsId String
Standby security group ID.
tenantId String
Project ID.
updateAt String
Cluster update time.
vnc String
URI address for remote login of the elastic cloud server.
availableZoneName string
Name of an availability zone.
chargingStartTime string
Time when charging starts.
clusterId string
Cluster ID.
clusterState string
Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
coreNodeProductId string
Product ID of a Core node.
coreNodeSpecId string
Specification ID of a Core node.
createAt string
Cluster creation time.
deploymentId string
Deployment ID of a cluster.
duration string
Cluster subscription duration.
errorInfo string
Error information.
externalAlternateIp string
Backup external IP address.
externalIp string
External IP address.
fee string
Cluster creation fee, which is automatically calculated.
hadoopVersion string
Hadoop version.
id string
The provider-assigned unique ID for this managed resource.
instanceId string
Instance ID.
internalIp string
Internal IP address.
masterNodeIp string
IP address of a Master node.
masterNodeProductId string
Product ID of a Master node.
masterNodeSpecId string
Specification ID of a Master node.
orderId string
Order ID for creating clusters.
privateIpFirst string
Primary private IP address.
remark string
Remarks of a cluster.
securityGroupsId string
Security group ID.
slaveSecurityGroupsId string
Standby security group ID.
tenantId string
Project ID.
updateAt string
Cluster update time.
vnc string
URI address for remote login of the elastic cloud server.
available_zone_name str
Name of an availability zone.
charging_start_time str
Time when charging starts.
cluster_id str
Cluster ID.
cluster_state str
Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
core_node_product_id str
Product ID of a Core node.
core_node_spec_id str
Specification ID of a Core node.
create_at str
Cluster creation time.
deployment_id str
Deployment ID of a cluster.
duration str
Cluster subscription duration.
error_info str
Error information.
external_alternate_ip str
Backup external IP address.
external_ip str
External IP address.
fee str
Cluster creation fee, which is automatically calculated.
hadoop_version str
Hadoop version.
id str
The provider-assigned unique ID for this managed resource.
instance_id str
Instance ID.
internal_ip str
Internal IP address.
master_node_ip str
IP address of a Master node.
master_node_product_id str
Product ID of a Master node.
master_node_spec_id str
Specification ID of a Master node.
order_id str
Order ID for creating clusters.
private_ip_first str
Primary private IP address.
remark str
Remarks of a cluster.
security_groups_id str
Security group ID.
slave_security_groups_id str
Standby security group ID.
tenant_id str
Project ID.
update_at str
Cluster update time.
vnc str
URI address for remote login of the elastic cloud server.
availableZoneName String
Name of an availability zone.
chargingStartTime String
Time when charging starts.
clusterId String
Cluster ID.
clusterState String
Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
coreNodeProductId String
Product ID of a Core node.
coreNodeSpecId String
Specification ID of a Core node.
createAt String
Cluster creation time.
deploymentId String
Deployment ID of a cluster.
duration String
Cluster subscription duration.
errorInfo String
Error information.
externalAlternateIp String
Backup external IP address.
externalIp String
External IP address.
fee String
Cluster creation fee, which is automatically calculated.
hadoopVersion String
Hadoop version.
id String
The provider-assigned unique ID for this managed resource.
instanceId String
Instance ID.
internalIp String
Internal IP address.
masterNodeIp String
IP address of a Master node.
masterNodeProductId String
Product ID of a Master node.
masterNodeSpecId String
Specification ID of a Master node.
orderId String
Order ID for creating clusters.
privateIpFirst String
Primary private IP address.
remark String
Remarks of a cluster.
securityGroupsId String
Security group ID.
slaveSecurityGroupsId String
Standby security group ID.
tenantId String
Project ID.
updateAt String
Cluster update time.
vnc String
URI address for remote login of the elastic cloud server.

Look up Existing MrsClusterV1 Resource

Get an existing MrsClusterV1 resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: MrsClusterV1State, opts?: CustomResourceOptions): MrsClusterV1
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        add_jobs: Optional[Sequence[MrsClusterV1AddJobArgs]] = None,
        available_zone_id: Optional[str] = None,
        available_zone_name: Optional[str] = None,
        billing_type: Optional[float] = None,
        charging_start_time: Optional[str] = None,
        cluster_admin_secret: Optional[str] = None,
        cluster_id: Optional[str] = None,
        cluster_name: Optional[str] = None,
        cluster_state: Optional[str] = None,
        cluster_type: Optional[float] = None,
        cluster_version: Optional[str] = None,
        component_lists: Optional[Sequence[MrsClusterV1ComponentListArgs]] = None,
        core_node_num: Optional[float] = None,
        core_node_product_id: Optional[str] = None,
        core_node_size: Optional[str] = None,
        core_node_spec_id: Optional[str] = None,
        create_at: Optional[str] = None,
        deployment_id: Optional[str] = None,
        duration: Optional[str] = None,
        error_info: Optional[str] = None,
        external_alternate_ip: Optional[str] = None,
        external_ip: Optional[str] = None,
        fee: Optional[str] = None,
        hadoop_version: Optional[str] = None,
        instance_id: Optional[str] = None,
        internal_ip: Optional[str] = None,
        log_collection: Optional[float] = None,
        master_node_ip: Optional[str] = None,
        master_node_num: Optional[float] = None,
        master_node_product_id: Optional[str] = None,
        master_node_size: Optional[str] = None,
        master_node_spec_id: Optional[str] = None,
        mrs_cluster_v1_id: Optional[str] = None,
        node_public_cert_name: Optional[str] = None,
        order_id: Optional[str] = None,
        private_ip_first: Optional[str] = None,
        region: Optional[str] = None,
        remark: Optional[str] = None,
        safe_mode: Optional[float] = None,
        security_groups_id: Optional[str] = None,
        slave_security_groups_id: Optional[str] = None,
        subnet_id: Optional[str] = None,
        tenant_id: Optional[str] = None,
        timeouts: Optional[MrsClusterV1TimeoutsArgs] = None,
        update_at: Optional[str] = None,
        vnc: Optional[str] = None,
        volume_size: Optional[float] = None,
        volume_type: Optional[str] = None,
        vpc_id: Optional[str] = None) -> MrsClusterV1
func GetMrsClusterV1(ctx *Context, name string, id IDInput, state *MrsClusterV1State, opts ...ResourceOption) (*MrsClusterV1, error)
public static MrsClusterV1 Get(string name, Input<string> id, MrsClusterV1State? state, CustomResourceOptions? opts = null)
public static MrsClusterV1 get(String name, Output<String> id, MrsClusterV1State state, CustomResourceOptions options)
resources:
  _:
    type: flexibleengine:MrsClusterV1
    get:
      id: ${id}
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
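For example, a minimal TypeScript sketch of such a lookup; the cluster ID is a placeholder:

```typescript
import * as flexibleengine from "@pulumi/flexibleengine";

// Adopt the state of an existing MRS cluster by its resource ID (UUID).
const existing = flexibleengine.MrsClusterV1.get("existingCluster", "{{cluster_uuid}}");

// Looked-up state is exposed as outputs, e.g. the Master node IP address.
export const masterIp = existing.masterNodeIp;
```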
The following state arguments are supported:
AddJobs List<MrsClusterV1AddJob>
You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added. The object structure is documented below.
AvailableZoneId string
ID or name of an availability zone. Obtain the value from Regions and Endpoints.
AvailableZoneName string
Name of an availability zone.
BillingType double
ChargingStartTime string
Time when charging starts.
ClusterAdminSecret string
Indicates the password of the MRS Manager administrator.

  • Must contain 8 to 32 characters.
  • Must contain at least three of the following: Lowercase letters, Uppercase letters, Digits and Special characters: `~!@#$%^&*()-_=+|[{}];:'",<.>/? and space
  • Cannot be the username or the username spelled backwards.
ClusterId string
Cluster ID.
ClusterName string
Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
ClusterState string
Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
ClusterType double
Type of clusters. 0: analysis cluster; 1: streaming cluster. The default value is 0.
ClusterVersion string
Version of the clusters. Possible values are as follows: MRS 1.8.9, MRS 2.0.1, MRS 2.1.0 and MRS 3.1.0-LTS.1. The latest version of MRS is used by default.
ComponentLists List<MrsClusterV1ComponentList>
Service component list. The object structure is documented below.
CoreNodeNum double
Number of Core nodes. Value range: 3 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
CoreNodeProductId string
Product ID of a Core node.
CoreNodeSize string
Instance specification of a Core node. The configuration method of this parameter is identical to that of master_node_size.
CoreNodeSpecId string
Specification ID of a Core node.
CreateAt string
Cluster creation time.
DeploymentId string
Deployment ID of a cluster.
Duration string
Cluster subscription duration.
ErrorInfo string
Error information.
ExternalAlternateIp string
Backup external IP address.
ExternalIp string
External IP address.
Fee string
Cluster creation fee, which is automatically calculated.
HadoopVersion string
Hadoop version.
InstanceId string
Instance ID.
InternalIp string
Internal IP address.
LogCollection double
Indicates whether logs are collected when cluster installation fails. 0: not collected; 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs; these buckets will be charged.
MasterNodeIp string
IP address of a Master node.
MasterNodeNum double
Number of Master nodes. The value is 2.
MasterNodeProductId string
Product ID of a Master node.
MasterNodeSize string
Instance specification of a Master node. The recommended specifications are based on several years of commissioning experience; MRS host specifications are determined by CPU, memory, and disk space.

  • Master nodes support s1.4xlarge, s1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4 and c3.15xlarge.4.
  • Core nodes of a streaming cluster support s1.xlarge, c2.2xlarge, s1.2xlarge, s1.4xlarge, s1.8xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4 and c3.15xlarge.4.
  • Core nodes of an analysis cluster support c2.2xlarge, s1.xlarge, s1.4xlarge, s1.8xlarge, d1.xlarge, d1.2xlarge, d1.4xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4, d2.xlarge.8, d2.2xlarge.8, d2.4xlarge.8 and d2.8xlarge.8.

The following table provides specification details.

node_size | CPU (core) | Memory (GB) | System Disk (GB) | Data Disk
--- | --- | --- | --- | ---
c2.2xlarge.linux.mrs | 8 | 16 | 40 | -
cc3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
cc3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
cc3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
cc3.8xlarge.4.linux.mrs | 32 | 128 | 40 | -
s1.xlarge.linux.mrs | 4 | 16 | 40 | -
s1.4xlarge.linux.mrs | 16 | 64 | 40 | -
s1.8xlarge.linux.mrs | 32 | 128 | 40 | -
s3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
s3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
s3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
d1.xlarge.linux.mrs | 6 | 55 | 40 | 1.8 TB x 3 HDDs
d1.2xlarge.linux.mrs | 12 | 110 | 40 | 1.8 TB x 6 HDDs
d1.4xlarge.linux.mrs | 24 | 220 | 40 | 1.8 TB x 12 HDDs
d1.8xlarge.linux.mrs | 48 | 440 | 40 | 1.8 TB x 24 HDDs
d2.xlarge.linux.mrs | 4 | 32 | 40 | -
d2.2xlarge.linux.mrs | 8 | 64 | 40 | -
d2.4xlarge.linux.mrs | 16 | 128 | 40 | 1.8 TB x 8 HDDs
d2.8xlarge.linux.mrs | 32 | 256 | 40 | 1.8 TB x 16 HDDs

MasterNodeSpecId string
Specification ID of a Master node.
MrsClusterV1Id string
The resource ID in UUID format.
NodePublicCertName string
Name of a key pair. The key can be used to log in to the Master node of the cluster.
OrderId string
Order ID for creating clusters.
PrivateIpFirst string
Primary private IP address.
Region string
Cluster region information. Obtain the value from Regions and Endpoints.
Remark string
Remarks of a cluster.
SafeMode double
MRS cluster running mode.

  • 0: common mode. Kerberos authentication is disabled, and users can use all functions provided by the cluster.
  • 1: safe mode. Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster, nor view cluster resource usage or the job records of Hadoop and Spark; to use these functions, they must obtain the relevant permissions from the MRS Manager administrator.
SecurityGroupsId string
Security group ID.
SlaveSecurityGroupsId string
Standby security group ID.
SubnetId string
Specifies the ID of the VPC subnet bound to the MRS cluster. Changing this will create a new MRS cluster resource.
TenantId string
Project ID.
Timeouts MrsClusterV1Timeouts
UpdateAt string
Cluster update time.
Vnc string
URI address for remote login of the elastic cloud server.
VolumeSize double
Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. Two scenarios apply. Separation of data storage and computing: data is stored in OBS; cluster costs are relatively low but computing performance is poor, and the cluster can be deleted at any time; recommended when data computing is infrequent. Integration of data storage and computing: data is stored in HDFS; cluster costs are relatively high but computing performance is good, and the cluster cannot be deleted in the short term; recommended when data computing is frequent. Value range: 100 GB to 32000 GB.
VolumeType string
Type of disk. SATA and SSD are supported. SATA: common I/O. SSD: super high-speed I/O.
VpcId string
ID of the VPC where the subnet is located. To obtain the VPC ID from the management console: register an account and log in to the management console; click Virtual Private Cloud and select Virtual Private Cloud from the left list; on the Virtual Private Cloud page, obtain the VPC ID from the list.
AddJobs []MrsClusterV1AddJobArgs
You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added. The object structure is documented below.
AvailableZoneId string
ID or name of an availability zone. Obtain the value from Regions and Endpoints.
AvailableZoneName string
Name of an availability zone.
BillingType float64
ChargingStartTime string
Time when charging starts.
ClusterAdminSecret string
Indicates the password of the MRS Manager administrator.

  • Must contain 8 to 32 characters.
  • Must contain at least three of the following: Lowercase letters, Uppercase letters, Digits and Special characters: `~!@#$%^&*()-_=+|[{}];:'",<.>/? and space
  • Cannot be the username or the username spelled backwards.
ClusterId string
Cluster ID.
ClusterName string
Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
ClusterState string
Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
ClusterType float64
Type of clusters. 0: analysis cluster; 1: streaming cluster. The default value is 0.
ClusterVersion string
Version of the clusters. Possible values are as follows: MRS 1.8.9, MRS 2.0.1, MRS 2.1.0 and MRS 3.1.0-LTS.1. The latest version of MRS is used by default.
ComponentLists []MrsClusterV1ComponentListArgs
Service component list. The object structure is documented below.
CoreNodeNum float64
Number of Core nodes. Value range: 3 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
CoreNodeProductId string
Product ID of a Core node.
CoreNodeSize string
Instance specification of a Core node. The configuration method of this parameter is identical to that of master_node_size.
CoreNodeSpecId string
Specification ID of a Core node.
CreateAt string
Cluster creation time.
DeploymentId string
Deployment ID of a cluster.
Duration string
Cluster subscription duration.
ErrorInfo string
Error information.
ExternalAlternateIp string
Backup external IP address.
ExternalIp string
External IP address.
Fee string
Cluster creation fee, which is automatically calculated.
HadoopVersion string
Hadoop version.
InstanceId string
Instance ID.
InternalIp string
Internal IP address.
LogCollection float64
Indicates whether logs are collected when cluster installation fails. 0: not collected; 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs; these buckets will be charged.
MasterNodeIp string
IP address of a Master node.
MasterNodeNum float64
Number of Master nodes. The value is 2.
MasterNodeProductId string
Product ID of a Master node.
MasterNodeSize string
Instance specification of a Master node. The recommended specifications are based on several years of commissioning experience; MRS host specifications are determined by CPU, memory, and disk space.

  • Master nodes support s1.4xlarge, s1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4 and c3.15xlarge.4.
  • Core nodes of a streaming cluster support s1.xlarge, c2.2xlarge, s1.2xlarge, s1.4xlarge, s1.8xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4 and c3.15xlarge.4.
  • Core nodes of an analysis cluster support c2.2xlarge, s1.xlarge, s1.4xlarge, s1.8xlarge, d1.xlarge, d1.2xlarge, d1.4xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4, d2.xlarge.8, d2.2xlarge.8, d2.4xlarge.8 and d2.8xlarge.8.

The following table provides specification details.

node_size | CPU (core) | Memory (GB) | System Disk (GB) | Data Disk
--- | --- | --- | --- | ---
c2.2xlarge.linux.mrs | 8 | 16 | 40 | -
cc3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
cc3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
cc3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
cc3.8xlarge.4.linux.mrs | 32 | 128 | 40 | -
s1.xlarge.linux.mrs | 4 | 16 | 40 | -
s1.4xlarge.linux.mrs | 16 | 64 | 40 | -
s1.8xlarge.linux.mrs | 32 | 128 | 40 | -
s3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
s3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
s3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
d1.xlarge.linux.mrs | 6 | 55 | 40 | 1.8 TB x 3 HDDs
d1.2xlarge.linux.mrs | 12 | 110 | 40 | 1.8 TB x 6 HDDs
d1.4xlarge.linux.mrs | 24 | 220 | 40 | 1.8 TB x 12 HDDs
d1.8xlarge.linux.mrs | 48 | 440 | 40 | 1.8 TB x 24 HDDs
d2.xlarge.linux.mrs | 4 | 32 | 40 | -
d2.2xlarge.linux.mrs | 8 | 64 | 40 | -
d2.4xlarge.linux.mrs | 16 | 128 | 40 | 1.8 TB x 8 HDDs
d2.8xlarge.linux.mrs | 32 | 256 | 40 | 1.8 TB x 16 HDDs

MasterNodeSpecId string
Specification ID of a Master node.
MrsClusterV1Id string
The resource ID in UUID format.
NodePublicCertName string
Name of a key pair. The key can be used to log in to the Master node of the cluster.
OrderId string
Order ID for creating clusters.
PrivateIpFirst string
Primary private IP address.
Region string
Cluster region information. Obtain the value from Regions and Endpoints.
Remark string
Remarks of a cluster.
SafeMode float64
MRS cluster running mode.

  • 0: common mode. Kerberos authentication is disabled, and users can use all functions provided by the cluster.
  • 1: safe mode. Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster, nor view cluster resource usage or the job records of Hadoop and Spark; to use these functions, they must obtain the relevant permissions from the MRS Manager administrator.
SecurityGroupsId string
Security group ID.
SlaveSecurityGroupsId string
Standby security group ID.
SubnetId string
Specifies the ID of the VPC subnet bound to the MRS cluster. Changing this will create a new MRS cluster resource.
TenantId string
Project ID.
Timeouts MrsClusterV1TimeoutsArgs
UpdateAt string
Cluster update time.
Vnc string
URI address for remote login of the elastic cloud server.
VolumeSize float64
Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. Two scenarios apply. Separation of data storage and computing: data is stored in OBS; cluster costs are relatively low but computing performance is poor, and the cluster can be deleted at any time; recommended when data computing is infrequent. Integration of data storage and computing: data is stored in HDFS; cluster costs are relatively high but computing performance is good, and the cluster cannot be deleted in the short term; recommended when data computing is frequent. Value range: 100 GB to 32000 GB.
VolumeType string
Type of disk. SATA and SSD are supported. SATA: common I/O. SSD: super high-speed I/O.
VpcId string
ID of the VPC where the subnet is located. To obtain the VPC ID from the management console: register an account and log in to the management console; click Virtual Private Cloud and select Virtual Private Cloud from the left list; on the Virtual Private Cloud page, obtain the VPC ID from the list.
addJobs List<MrsClusterV1AddJob>
You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added. The object structure is documented below.
availableZoneId String
ID or name of an availability zone. Obtain the value from Regions and Endpoints.
availableZoneName String
Name of an availability zone.
billingType Double
chargingStartTime String
Time when charging starts.
clusterAdminSecret String
Indicates the password of the MRS Manager administrator.

  • Must contain 8 to 32 characters.
  • Must contain at least three of the following: Lowercase letters, Uppercase letters, Digits and Special characters: `~!@#$%^&*()-_=+|[{}];:'",<.>/? and space
  • Cannot be the username or the username spelled backwards.
clusterId String
Cluster ID.
clusterName String
Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
clusterState String
Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
clusterType Double
Type of clusters. 0: analysis cluster; 1: streaming cluster. The default value is 0.
clusterVersion String
Version of the clusters. Possible values are as follows: MRS 1.8.9, MRS 2.0.1, MRS 2.1.0 and MRS 3.1.0-LTS.1. The latest version of MRS is used by default.
componentLists List<MrsClusterV1ComponentList>
Service component list. The object structure is documented below.
coreNodeNum Double
Number of Core nodes. Value range: 3 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
coreNodeProductId String
Product ID of a Core node.
coreNodeSize String
Instance specification of a Core node. The configuration method of this parameter is identical to that of master_node_size.
coreNodeSpecId String
Specification ID of a Core node.
createAt String
Cluster creation time.
deploymentId String
Deployment ID of a cluster.
duration String
Cluster subscription duration.
errorInfo String
Error information.
externalAlternateIp String
Backup external IP address.
externalIp String
External IP address.
fee String
Cluster creation fee, which is automatically calculated.
hadoopVersion String
Hadoop version.
instanceId String
Instance ID.
internalIp String
Internal IP address.
logCollection Double
Indicates whether logs are collected when cluster installation fails. 0: not collected; 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs; these buckets will be charged.
masterNodeIp String
IP address of a Master node.
masterNodeNum Double
Number of Master nodes. The value is 2.
masterNodeProductId String
Product ID of a Master node.
masterNodeSize String
Instance specification of a Master node. The recommended specifications are based on several years of commissioning experience; MRS host specifications are determined by CPU, memory, and disk space.

  • Master nodes support s1.4xlarge, s1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4 and c3.15xlarge.4.
  • Core nodes of a streaming cluster support s1.xlarge, c2.2xlarge, s1.2xlarge, s1.4xlarge, s1.8xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4 and c3.15xlarge.4.
  • Core nodes of an analysis cluster support c2.2xlarge, s1.xlarge, s1.4xlarge, s1.8xlarge, d1.xlarge, d1.2xlarge, d1.4xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4, d2.xlarge.8, d2.2xlarge.8, d2.4xlarge.8 and d2.8xlarge.8.

The following table provides specification details.

node_size | CPU (core) | Memory (GB) | System Disk (GB) | Data Disk
--- | --- | --- | --- | ---
c2.2xlarge.linux.mrs | 8 | 16 | 40 | -
cc3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
cc3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
cc3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
cc3.8xlarge.4.linux.mrs | 32 | 128 | 40 | -
s1.xlarge.linux.mrs | 4 | 16 | 40 | -
s1.4xlarge.linux.mrs | 16 | 64 | 40 | -
s1.8xlarge.linux.mrs | 32 | 128 | 40 | -
s3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
s3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
s3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
d1.xlarge.linux.mrs | 6 | 55 | 40 | 1.8 TB x 3 HDDs
d1.2xlarge.linux.mrs | 12 | 110 | 40 | 1.8 TB x 6 HDDs
d1.4xlarge.linux.mrs | 24 | 220 | 40 | 1.8 TB x 12 HDDs
d1.8xlarge.linux.mrs | 48 | 440 | 40 | 1.8 TB x 24 HDDs
d2.xlarge.linux.mrs | 4 | 32 | 40 | -
d2.2xlarge.linux.mrs | 8 | 64 | 40 | -
d2.4xlarge.linux.mrs | 16 | 128 | 40 | 1.8 TB x 8 HDDs
d2.8xlarge.linux.mrs | 32 | 256 | 40 | 1.8 TB x 16 HDDs

masterNodeSpecId String
Specification ID of a Master node.
mrsClusterV1Id String
The resource ID in UUID format.
nodePublicCertName String
Name of a key pair. The key can be used to log in to the Master node of the cluster.
orderId String
Order ID for creating clusters.
privateIpFirst String
Primary private IP address.
region String
Cluster region information. Obtain the value from Regions and Endpoints.
remark String
Remarks of a cluster.
safeMode Double
MRS cluster running mode.

  • 0: common mode. Kerberos authentication is disabled, and users can use all functions provided by the cluster.
  • 1: safe mode. Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster, nor view cluster resource usage or the job records of Hadoop and Spark; to use these functions, they must obtain the relevant permissions from the MRS Manager administrator.
securityGroupsId String
Security group ID.
slaveSecurityGroupsId String
Standby security group ID.
subnetId String
Specifies the ID of the VPC subnet bound to the MRS cluster. Changing this will create a new MRS cluster resource.
tenantId String
Project ID.
timeouts MrsClusterV1Timeouts
updateAt String
Cluster update time.
vnc String
URI address for remote login of the elastic cloud server.
volumeSize Double
Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. Two scenarios apply. Separation of data storage and computing: data is stored in OBS; cluster costs are relatively low but computing performance is poor, and the cluster can be deleted at any time; recommended when data computing is infrequent. Integration of data storage and computing: data is stored in HDFS; cluster costs are relatively high but computing performance is good, and the cluster cannot be deleted in the short term; recommended when data computing is frequent. Value range: 100 GB to 32000 GB.
volumeType String
Type of disk. SATA and SSD are supported. SATA: common I/O. SSD: super high-speed I/O.
vpcId String
ID of the VPC where the subnet is located. To obtain the VPC ID from the management console: register an account and log in to the management console; click Virtual Private Cloud and select Virtual Private Cloud from the left list; on the Virtual Private Cloud page, obtain the VPC ID from the list.
addJobs MrsClusterV1AddJob[]
You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added. The object structure is documented below.
availableZoneId string
ID or name of an availability zone. Obtain the value from Regions and Endpoints.
availableZoneName string
Name of an availability zone.
billingType number
chargingStartTime string
Time when charging starts.
clusterAdminSecret string
Indicates the password of the MRS Manager administrator.

  • Must contain 8 to 32 characters.
  • Must contain at least three of the following: Lowercase letters, Uppercase letters, Digits and Special characters: `~!@#$%^&*()-_=+|[{}];:'",<.>/? and space
  • Cannot be the username or the username spelled backwards.
clusterId string
Cluster ID.
clusterName string
Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
clusterState string
Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
clusterType number
Type of clusters. 0: analysis cluster; 1: streaming cluster. The default value is 0.
clusterVersion string
Version of the clusters. Possible values are as follows: MRS 1.8.9, MRS 2.0.1, MRS 2.1.0 and MRS 3.1.0-LTS.1. The latest version of MRS is used by default.
componentLists MrsClusterV1ComponentList[]
Service component list. The object structure is documented below.
coreNodeNum number
Number of Core nodes. Value range: 3 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
coreNodeProductId string
Product ID of a Core node.
coreNodeSize string
Instance specification of a Core node. The configuration method of this parameter is identical to that of master_node_size.
coreNodeSpecId string
Specification ID of a Core node.
createAt string
Cluster creation time.
deploymentId string
Deployment ID of a cluster.
duration string
Cluster subscription duration.
errorInfo string
Error information.
externalAlternateIp string
Backup external IP address.
externalIp string
External IP address.
fee string
Cluster creation fee, which is automatically calculated.
hadoopVersion string
Hadoop version.
instanceId string
Instance ID.
internalIp string
Internal IP address.
logCollection number
Indicates whether logs are collected when cluster installation fails. 0: not collected; 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs; these buckets will be charged.
masterNodeIp string
IP address of a Master node.
masterNodeNum number
Number of Master nodes. The value is 2.
masterNodeProductId string
Product ID of a Master node.
masterNodeSize string
Instance specification of a Master node. The recommended specifications are based on several years of commissioning experience; MRS host specifications are determined by CPU, memory, and disk space.

  • Master nodes support s1.4xlarge, s1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4 and c3.15xlarge.4.
  • Core nodes of a streaming cluster support s1.xlarge, c2.2xlarge, s1.2xlarge, s1.4xlarge, s1.8xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4 and c3.15xlarge.4.
  • Core nodes of an analysis cluster support c2.2xlarge, s1.xlarge, s1.4xlarge, s1.8xlarge, d1.xlarge, d1.2xlarge, d1.4xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4, d2.xlarge.8, d2.2xlarge.8, d2.4xlarge.8 and d2.8xlarge.8.

The following table provides specification details.

node_size | CPU (core) | Memory (GB) | System Disk (GB) | Data Disk
--- | --- | --- | --- | ---
c2.2xlarge.linux.mrs | 8 | 16 | 40 | -
cc3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
cc3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
cc3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
cc3.8xlarge.4.linux.mrs | 32 | 128 | 40 | -
s1.xlarge.linux.mrs | 4 | 16 | 40 | -
s1.4xlarge.linux.mrs | 16 | 64 | 40 | -
s1.8xlarge.linux.mrs | 32 | 128 | 40 | -
s3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
s3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
s3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
d1.xlarge.linux.mrs | 6 | 55 | 40 | 1.8 TB x 3 HDDs
d1.2xlarge.linux.mrs | 12 | 110 | 40 | 1.8 TB x 6 HDDs
d1.4xlarge.linux.mrs | 24 | 220 | 40 | 1.8 TB x 12 HDDs
d1.8xlarge.linux.mrs | 48 | 440 | 40 | 1.8 TB x 24 HDDs
d2.xlarge.linux.mrs | 4 | 32 | 40 | -
d2.2xlarge.linux.mrs | 8 | 64 | 40 | -
d2.4xlarge.linux.mrs | 16 | 128 | 40 | 1.8 TB x 8 HDDs
d2.8xlarge.linux.mrs | 32 | 256 | 40 | 1.8 TB x 16 HDDs

masterNodeSpecId string
Specification ID of a Master node.
mrsClusterV1Id string
The resource ID in UUID format.
nodePublicCertName string
Name of a key pair. The key can be used to log in to the Master node of the cluster.
orderId string
Order ID for creating clusters.
privateIpFirst string
Primary private IP address.
region string
Cluster region information. Obtain the value from Regions and Endpoints.
remark string
Remarks of a cluster.
safeMode number
MRS cluster running mode.

  • 0: common mode. Kerberos authentication is disabled, and users can use all functions provided by the cluster.
  • 1: safe mode. Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster, nor view cluster resource usage or the job records of Hadoop and Spark; to use these functions, they must obtain the relevant permissions from the MRS Manager administrator.
securityGroupsId string
Security group ID.
slaveSecurityGroupsId string
Standby security group ID.
subnetId string
Specifies the ID of the VPC subnet bound to the MRS cluster. Changing this will create a new MRS cluster resource.
tenantId string
Project ID.
timeouts MrsClusterV1Timeouts
updateAt string
Cluster update time.
vnc string
URI address for remote login of the elastic cloud server.
volumeSize number
Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. Two scenarios apply. Separation of data storage and computing: data is stored in OBS; cluster costs are relatively low but computing performance is poor, and the cluster can be deleted at any time; recommended when data computing is infrequent. Integration of data storage and computing: data is stored in HDFS; cluster costs are relatively high but computing performance is good, and the cluster cannot be deleted in the short term; recommended when data computing is frequent. Value range: 100 GB to 32000 GB.
volumeType string
Type of disk. SATA and SSD are supported. SATA: common I/O. SSD: super high-speed I/O.
vpcId string
ID of the VPC where the subnet is located. To obtain the VPC ID from the management console: register an account and log in to the management console; click Virtual Private Cloud and select Virtual Private Cloud from the left list; on the Virtual Private Cloud page, obtain the VPC ID from the list.
add_jobs Sequence[MrsClusterV1AddJobArgs]
You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added. The object structure is documented below.
available_zone_id str
ID or name of an availability zone. Obtain the value from Regions and Endpoints.
available_zone_name str
Name of an availability zone.
billing_type float
charging_start_time str
Time when charging starts.
cluster_admin_secret str
Indicates the password of the MRS Manager administrator.

  • Must contain 8 to 32 characters.
  • Must contain at least three of the following: Lowercase letters, Uppercase letters, Digits and Special characters: `~!@#$%^&*()-_=+|[{}];:'",<.>/? and space
  • Cannot be the username or the username spelled backwards.
cluster_id str
Cluster ID.
cluster_name str
Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
cluster_state str
Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
cluster_type float
Type of clusters. 0: analysis cluster; 1: streaming cluster. The default value is 0.
cluster_version str
Version of the clusters. Possible values are as follows: MRS 1.8.9, MRS 2.0.1, MRS 2.1.0 and MRS 3.1.0-LTS.1. The latest version of MRS is used by default.
component_lists Sequence[MrsClusterV1ComponentListArgs]
Service component list. The object structure is documented below.
core_node_num float
Number of Core nodes. Value range: 3 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
core_node_product_id str
Product ID of a Core node.
core_node_size str
Instance specification of a Core node. The configuration method of this parameter is identical to that of master_node_size.
core_node_spec_id str
Specification ID of a Core node.
create_at str
Cluster creation time.
deployment_id str
Deployment ID of a cluster.
duration str
Cluster subscription duration.
error_info str
Error information.
external_alternate_ip str
Backup external IP address.
external_ip str
External IP address.
fee str
Cluster creation fee, which is automatically calculated.
hadoop_version str
Hadoop version.
instance_id str
Instance ID.
internal_ip str
Internal IP address.
log_collection float
Indicates whether logs are collected when cluster installation fails. 0: not collected; 1: collected. The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs; these buckets will be charged.
master_node_ip str
IP address of a Master node.
master_node_num float
Number of Master nodes. The value is 2.
master_node_product_id str
Product ID of a Master node.
master_node_size str
Instance specification of a Master node. The recommended specifications are based on several years of commissioning experience; MRS host specifications are determined by CPU, memory, and disk space.

  • Master nodes support s1.4xlarge, s1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4 and c3.15xlarge.4.
  • Core nodes of a streaming cluster support s1.xlarge, c2.2xlarge, s1.2xlarge, s1.4xlarge, s1.8xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4 and c3.15xlarge.4.
  • Core nodes of an analysis cluster support c2.2xlarge, s1.xlarge, s1.4xlarge, s1.8xlarge, d1.xlarge, d1.2xlarge, d1.4xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4, d2.xlarge.8, d2.2xlarge.8, d2.4xlarge.8 and d2.8xlarge.8.

The following table provides specification details.

node_size | CPU (core) | Memory (GB) | System Disk (GB) | Data Disk
--- | --- | --- | --- | ---
c2.2xlarge.linux.mrs | 8 | 16 | 40 | -
cc3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
cc3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
cc3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
cc3.8xlarge.4.linux.mrs | 32 | 128 | 40 | -
s1.xlarge.linux.mrs | 4 | 16 | 40 | -
s1.4xlarge.linux.mrs | 16 | 64 | 40 | -
s1.8xlarge.linux.mrs | 32 | 128 | 40 | -
s3.xlarge.4.linux.mrs | 4 | 16 | 40 | -
s3.2xlarge.4.linux.mrs | 8 | 32 | 40 | -
s3.4xlarge.4.linux.mrs | 16 | 64 | 40 | -
d1.xlarge.linux.mrs | 6 | 55 | 40 | 1.8 TB x 3 HDDs
d1.2xlarge.linux.mrs | 12 | 110 | 40 | 1.8 TB x 6 HDDs
d1.4xlarge.linux.mrs | 24 | 220 | 40 | 1.8 TB x 12 HDDs
d1.8xlarge.linux.mrs | 48 | 440 | 40 | 1.8 TB x 24 HDDs
d2.xlarge.linux.mrs | 4 | 32 | 40 | -
d2.2xlarge.linux.mrs | 8 | 64 | 40 | -
d2.4xlarge.linux.mrs | 16 | 128 | 40 | 1.8 TB x 8 HDDs
d2.8xlarge.linux.mrs | 32 | 256 | 40 | 1.8 TB x 16 HDDs

master_node_spec_id str
Specification ID of a Master node.
mrs_cluster_v1_id str
The resource ID in UUID format.
node_public_cert_name str
Name of a key pair. The key can be used to log in to the Master node of the cluster.
order_id str
Order ID for creating clusters.
private_ip_first str
Primary private IP address.
region str
Cluster region information. Obtain the value from Regions and Endpoints.
remark str
Remarks of a cluster.
safe_mode float
MRS cluster running mode.

  • 0: common mode. Kerberos authentication is disabled, and users can use all functions provided by the cluster.
  • 1: safe mode. Kerberos authentication is enabled. Common users cannot use the file management or job management functions of an MRS cluster, nor view cluster resource usage or the job records of Hadoop and Spark; to use these functions, they must obtain the relevant permissions from the MRS Manager administrator.
security_groups_id str
Security group ID.
slave_security_groups_id str
Standby security group ID.
subnet_id str
Specifies the ID of the VPC subnet bound to the MRS cluster. Changing this will create a new MRS cluster resource.
tenant_id str
Project ID.
timeouts MrsClusterV1TimeoutsArgs
update_at str
Cluster update time.
vnc str
URI address for remote login of the elastic cloud server.
volume_size float
Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. Two scenarios apply. Separation of data storage and computing: data is stored in OBS; cluster costs are relatively low but computing performance is poor, and the cluster can be deleted at any time; recommended when data computing is infrequent. Integration of data storage and computing: data is stored in HDFS; cluster costs are relatively high but computing performance is good, and the cluster cannot be deleted in the short term; recommended when data computing is frequent. Value range: 100 GB to 32000 GB.
volume_type str
Type of disk. SATA and SSD are supported. SATA: common I/O. SSD: super high-speed I/O.
vpc_id str
ID of the VPC where the subnet is located. To obtain the VPC ID from the management console: register an account and log in to the management console; click Virtual Private Cloud and select Virtual Private Cloud from the left list; on the Virtual Private Cloud page, obtain the VPC ID from the list.
addJobs List<Property Map>
You can submit a job when you create a cluster to save time and use MRS easily. Only one job can be added. The object structure is documented below.
availableZoneId String
ID or name of an availability zone. Obtain the value from Regions and Endpoints.
availableZoneName String
Name of an availability zone.
billingType Number
chargingStartTime String
Time when charging starts.
clusterAdminSecret String
Indicates the password of the MRS Manager administrator.

  • Must contain 8 to 32 characters.
  • Must contain at least three of the following: Lowercase letters, Uppercase letters, Digits and Special characters: `~!@#$%^&*()-_=+|[{}];:'",<.>/? and space
  • Cannot be the username or the username spelled backwards.
clusterId String
Cluster ID.
clusterName String
Cluster name, which is globally unique and contains only 1 to 64 letters, digits, hyphens (-), and underscores (_).
clusterState String
Cluster status. Valid values include: starting, running, terminated, failed, abnormal, terminating, frozen, scaling-out and scaling-in.
clusterType Number
Type of clusters. 0: analysis cluster; 1: streaming cluster. The default value is 0.
clusterVersion String
Version of the clusters. Possible values are as follows: MRS 1.8.9, MRS 2.0.1, MRS 2.1.0 and MRS 3.1.0-LTS.1. The latest version of MRS is used by default.
componentLists List<Property Map>
Service component list. The object structure is documented below.
coreNodeNum Number
Number of Core nodes. Value range: 3 to 500. A maximum of 500 Core nodes are supported by default. If more than 500 Core nodes are required, contact technical support engineers or invoke background APIs to modify the database.
coreNodeProductId String
Product ID of a Core node.
coreNodeSize String
Instance specification of a Core node. The configuration method of this parameter is identical to that of master_node_size.
coreNodeSpecId String
Specification ID of a Core node.
createAt String
Cluster creation time.
deploymentId String
Deployment ID of a cluster.
duration String
Cluster subscription duration.
errorInfo String
Error information.
externalAlternateIp String
Backup external IP address.
externalIp String
External IP address.
fee String
Cluster creation fee, which is automatically calculated.
hadoopVersion String
Hadoop version.
instanceId String
Instance ID.
internalIp String
logCollection Number
Indicates whether logs are collected when cluster installation fails. 0: not collected 1: collected The default value is 0. If log_collection is set to 1, OBS buckets will be created to collect the MRS logs. These buckets will be charged.
masterNodeIp String
IP address of a Master node.

  • externalIp - Internal IP address.
masterNodeNum Number
Number of Master nodes The value is 2.
masterNodeProductId String
Product ID of a Master node.
masterNodeSize String

Best match based on several years of commissioning experience. MRS supports specifications of hosts, and host specifications are determined by CPUs, memory, and disks space.

  • Master nodes support s1.4xlarge and s1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4.
  • Core nodes of a streaming cluster support s1.xlarge, c2.2xlarge, s1.2xlarge, s1.4xlarge, s1.8xlarge, d1.8xlarge, c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4.
  • Core nodes of an analysis cluster support all specifications c2.2xlarge, s1.xlarge, s1.4xlarge, s1.8xlarge, d1.xlarge, d1.2xlarge, d1.4xlarge, d1.8xlarge, , c3.2xlarge.2, c3.xlarge.4, c3.2xlarge.4, c3.4xlarge.2, c3.4xlarge.4, c3.8xlarge.4, c3.15xlarge.4, d2.xlarge.8, d2.2xlarge.8, d2.4xlarge.8, d2.8xlarge.8.

The following provides specification details.

node_size | CPU(core) | Memory(GB) | System Disk | Data Disk --- | --- | --- | --- | --- c2.2xlarge.linux.mrs | 8 | 16 | 40 | - cc3.xlarge.4.linux.mrs | 4 | 16 | 40 | - cc3.2xlarge.4.linux.mrs | 8 | 32 | 40 | - cc3.4xlarge.4.linux.mrs | 16 | 64 | 40 | - cc3.8xlarge.4.linux.mrs | 32 | 128 | 40 | - s1.xlarge.linux.mrs | 4 | 16 | 40 | - s1.4xlarge.linux.mrs | 16 | 64 | 40 | - s1.8xlarge.linux.mrs | 32 | 128 | 40 | - s3.xlarge.4.linux.mrs| 4 | 16 | 40 | - s3.2xlarge.4.linux.mrs| 8 | 32 | 40 | - s3.4xlarge.4.linux.mrs| 16 | 64 | 40 | - d1.xlarge.linux.mrs | 6 | 55 | 40 | 1.8 TB x 3 HDDs d1.2xlarge.linux.mrs | 12 | 110 | 40 | 1.8 TB x 6 HDDs d1.4xlarge.linux.mrs | 24 | 220 | 40 | 1.8 TB x 12 HDDs d1.8xlarge.linux.mrs | 48 | 440 | 40 | 1.8 TB x 24 HDDs d2.xlarge.linux.mrs | 4 | 32 | 40 | - d2.2xlarge.linux.mrs | 8 | 64 | 40 | - d2.4xlarge.linux.mrs | 16 | 128 | 40 | 1.8TB8HDDs d2.8xlarge.linux.mrs | 32 | 256 | 40 | 1.8TB16HDDs

masterNodeSpecId String
Specification ID of a Master node.
mrsClusterV1Id String
The resource ID in UUID format.
nodePublicCertName String
Name of a key pair You can use a key to log in to the Master node in the cluster.
orderId String
Order ID for creating clusters.
privateIpFirst String
Primary private IP address.
region String
Cluster region information. Obtain the value from Regions and Endpoints.
remark String
Remarks of a cluster.
safeMode Number
MRS cluster running mode.

  • 0: common mode The value indicates that the Kerberos authentication is disabled. Users can use all functions provided by the cluster.
  • 1: safe mode The value indicates that the Kerberos authentication is enabled. Common users cannot use the file management and job management functions of an MRS cluster or view cluster resource usage and the job records of Hadoop and Spark. To use these functions, the users must obtain the relevant permissions from the MRS Manager administrator.
securityGroupsId String
Security group ID.
slaveSecurityGroupsId String
Standby security group ID.
subnetId String
Specifies the ID of the VPC Subnet which bound to the MRS cluster. Changing this will create a new MRS cluster resource.
tenantId String
Project ID.
timeouts Property Map
updateAt String
Cluster update time.
vnc String
URI address for remote login of the elastic cloud server.
volumeSize Number
Data disk storage space of a Core node. Users can add disks to expand storage capacity when creating a cluster. There are two scenarios:

  • Separation of data storage and computing: data is stored in the OBS system. Cluster costs are relatively low but computing performance is poor; the clusters can be deleted at any time. Recommended when data computing is infrequent.
  • Integration of data storage and computing: data is stored in the HDFS system. Cluster costs are relatively high but computing performance is good; the clusters cannot be deleted in the short term. Recommended when data computing is frequent.

Value range: 100 GB to 32,000 GB.
volumeType String
Type of disk. SATA and SSD are supported. SATA: common I/O; SSD: super high-speed I/O.
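
A matching storage sketch (same Partial<> fragment style; the values are illustrative) for an HDFS-backed, compute-heavy cluster:

import * as flexibleengine from "@pulumi/flexibleengine";

// Fragment only: merge into a complete MrsClusterV1Args.
// "Integration of data storage and computing": data lives in HDFS on the
// Core nodes, so provision larger, faster data disks.
const storage: Partial<flexibleengine.MrsClusterV1Args> = {
    volumeType: "SSD",  // SATA: common I/O; SSD: super high-speed I/O
    volumeSize: 600,    // GB per Core node; must be within 100-32000
};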
vpcId String
ID of the VPC where the subnet is located. Obtain the VPC ID from the management console as follows: register an account and log in to the management console; click Virtual Private Cloud and select Virtual Private Cloud from the left list; on the Virtual Private Cloud page, obtain the VPC ID from the list.

Supporting Types

MrsClusterV1AddJob, MrsClusterV1AddJobArgs

JarPath This property is required. string
Path of the .jar or .sql file for program execution. The parameter must meet the following requirements: contains a maximum of 1023 characters, excluding special characters such as ;|&><'$; cannot be empty or full of spaces; starts with / or s3a://. A Spark Script file must end with .sql, while MapReduce and Spark Jar files must end with .jar. sql and jar are case-insensitive.
JobName This property is required. string
Job name. It contains only 1 to 64 letters, digits, hyphens (-), and underscores (_). NOTE: Identical job names are allowed but not recommended.
JobType This property is required. double
Job type. 1: MapReduce; 2: Spark; 3: Hive Script; 4: HiveQL (not supported currently); 5: DistCp, importing and exporting data (not supported in this API currently); 6: Spark Script; 7: Spark SQL, submitting Spark SQL statements (not supported in this API currently). NOTE: Spark and Hive jobs can be added only to clusters that include the Spark and Hive components.
SubmitJobOnceClusterRun This property is required. bool
true: the job is submitted when the cluster is created. false: the job is submitted separately.
Arguments string
Key parameter for program execution. The parameter is specified by the function of the user's program; MRS is only responsible for loading it. The parameter contains a maximum of 2047 characters, excluding special characters such as ;|&>'<$, and can be empty.
FileAction string
Data import and export. Valid values: import, export.
HiveScriptPath string
SQL program path. This parameter is needed by Spark Script and Hive Script jobs only and must meet the following requirements: contains a maximum of 1023 characters, excluding special characters such as ;|&><'$; cannot be empty or full of spaces; starts with / or s3a://; ends with .sql. sql is case-insensitive.
Hql string
HiveQL statement.
Input string
Path for inputting data, which must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
JobLog string
Path for storing job logs that record job running status. This path must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
Output string
Path for outputting data, which must start with / or s3a://. A correct OBS path is required. If the path does not exist, the system automatically creates it. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
ShutdownCluster bool
Whether to delete the cluster after the jobs are complete. true: yes; false: no.
JarPath This property is required. string
Path of the .jar or .sql file for program execution. The parameter must meet the following requirements: contains a maximum of 1023 characters, excluding special characters such as ;|&><'$; cannot be empty or full of spaces; starts with / or s3a://. A Spark Script file must end with .sql, while MapReduce and Spark Jar files must end with .jar. sql and jar are case-insensitive.
JobName This property is required. string
Job name. It contains only 1 to 64 letters, digits, hyphens (-), and underscores (_). NOTE: Identical job names are allowed but not recommended.
JobType This property is required. float64
Job type. 1: MapReduce; 2: Spark; 3: Hive Script; 4: HiveQL (not supported currently); 5: DistCp, importing and exporting data (not supported in this API currently); 6: Spark Script; 7: Spark SQL, submitting Spark SQL statements (not supported in this API currently). NOTE: Spark and Hive jobs can be added only to clusters that include the Spark and Hive components.
SubmitJobOnceClusterRun This property is required. bool
true: the job is submitted when the cluster is created. false: the job is submitted separately.
Arguments string
Key parameter for program execution. The parameter is specified by the function of the user's program; MRS is only responsible for loading it. The parameter contains a maximum of 2047 characters, excluding special characters such as ;|&>'<$, and can be empty.
FileAction string
Data import and export. Valid values: import, export.
HiveScriptPath string
SQL program path. This parameter is needed by Spark Script and Hive Script jobs only and must meet the following requirements: contains a maximum of 1023 characters, excluding special characters such as ;|&><'$; cannot be empty or full of spaces; starts with / or s3a://; ends with .sql. sql is case-insensitive.
Hql string
HiveQL statement.
Input string
Path for inputting data, which must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
JobLog string
Path for storing job logs that record job running status. This path must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
Output string
Path for outputting data, which must start with / or s3a://. A correct OBS path is required. If the path does not exist, the system automatically creates it. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
ShutdownCluster bool
Whether to delete the cluster after the jobs are complete. true: yes; false: no.
jarPath This property is required. String
Path of the .jar or .sql file for program execution. The parameter must meet the following requirements: contains a maximum of 1023 characters, excluding special characters such as ;|&><'$; cannot be empty or full of spaces; starts with / or s3a://. A Spark Script file must end with .sql, while MapReduce and Spark Jar files must end with .jar. sql and jar are case-insensitive.
jobName This property is required. String
Job name. It contains only 1 to 64 letters, digits, hyphens (-), and underscores (_). NOTE: Identical job names are allowed but not recommended.
jobType This property is required. Double
Job type. 1: MapReduce; 2: Spark; 3: Hive Script; 4: HiveQL (not supported currently); 5: DistCp, importing and exporting data (not supported in this API currently); 6: Spark Script; 7: Spark SQL, submitting Spark SQL statements (not supported in this API currently). NOTE: Spark and Hive jobs can be added only to clusters that include the Spark and Hive components.
submitJobOnceClusterRun This property is required. Boolean
true: the job is submitted when the cluster is created. false: the job is submitted separately.
arguments String
Key parameter for program execution. The parameter is specified by the function of the user's program; MRS is only responsible for loading it. The parameter contains a maximum of 2047 characters, excluding special characters such as ;|&>'<$, and can be empty.
fileAction String
Data import and export. Valid values: import, export.
hiveScriptPath String
SQL program path. This parameter is needed by Spark Script and Hive Script jobs only and must meet the following requirements: contains a maximum of 1023 characters, excluding special characters such as ;|&><'$; cannot be empty or full of spaces; starts with / or s3a://; ends with .sql. sql is case-insensitive.
hql String
HiveQL statement.
input String
Path for inputting data, which must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
jobLog String
Path for storing job logs that record job running status. This path must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
output String
Path for outputting data, which must start with / or s3a://. A correct OBS path is required. If the path does not exist, the system automatically creates it. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
shutdownCluster Boolean
Whether to delete the cluster after the jobs are complete. true: yes; false: no.
jarPath This property is required. string
Path of the .jar or .sql file for program execution. The parameter must meet the following requirements: contains a maximum of 1023 characters, excluding special characters such as ;|&><'$; cannot be empty or full of spaces; starts with / or s3a://. A Spark Script file must end with .sql, while MapReduce and Spark Jar files must end with .jar. sql and jar are case-insensitive.
jobName This property is required. string
Job name. It contains only 1 to 64 letters, digits, hyphens (-), and underscores (_). NOTE: Identical job names are allowed but not recommended.
jobType This property is required. number
Job type. 1: MapReduce; 2: Spark; 3: Hive Script; 4: HiveQL (not supported currently); 5: DistCp, importing and exporting data (not supported in this API currently); 6: Spark Script; 7: Spark SQL, submitting Spark SQL statements (not supported in this API currently). NOTE: Spark and Hive jobs can be added only to clusters that include the Spark and Hive components.
submitJobOnceClusterRun This property is required. boolean
true: the job is submitted when the cluster is created. false: the job is submitted separately.
arguments string
Key parameter for program execution. The parameter is specified by the function of the user's program; MRS is only responsible for loading it. The parameter contains a maximum of 2047 characters, excluding special characters such as ;|&>'<$, and can be empty.
fileAction string
Data import and export. Valid values: import, export.
hiveScriptPath string
SQL program path. This parameter is needed by Spark Script and Hive Script jobs only and must meet the following requirements: contains a maximum of 1023 characters, excluding special characters such as ;|&><'$; cannot be empty or full of spaces; starts with / or s3a://; ends with .sql. sql is case-insensitive.
hql string
HiveQL statement.
input string
Path for inputting data, which must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
jobLog string
Path for storing job logs that record job running status. This path must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
output string
Path for outputting data, which must start with / or s3a://. A correct OBS path is required. If the path does not exist, the system automatically creates it. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
shutdownCluster boolean
Whether to delete the cluster after the jobs are complete. true: yes; false: no.
jar_path This property is required. str
Path of the .jar or .sql file for program execution. The parameter must meet the following requirements: contains a maximum of 1023 characters, excluding special characters such as ;|&><'$; cannot be empty or full of spaces; starts with / or s3a://. A Spark Script file must end with .sql, while MapReduce and Spark Jar files must end with .jar. sql and jar are case-insensitive.
job_name This property is required. str
Job name. It contains only 1 to 64 letters, digits, hyphens (-), and underscores (_). NOTE: Identical job names are allowed but not recommended.
job_type This property is required. float
Job type. 1: MapReduce; 2: Spark; 3: Hive Script; 4: HiveQL (not supported currently); 5: DistCp, importing and exporting data (not supported in this API currently); 6: Spark Script; 7: Spark SQL, submitting Spark SQL statements (not supported in this API currently). NOTE: Spark and Hive jobs can be added only to clusters that include the Spark and Hive components.
submit_job_once_cluster_run This property is required. bool
true: the job is submitted when the cluster is created. false: the job is submitted separately.
arguments str
Key parameter for program execution. The parameter is specified by the function of the user's program; MRS is only responsible for loading it. The parameter contains a maximum of 2047 characters, excluding special characters such as ;|&>'<$, and can be empty.
file_action str
Data import and export. Valid values: import, export.
hive_script_path str
SQL program path. This parameter is needed by Spark Script and Hive Script jobs only and must meet the following requirements: contains a maximum of 1023 characters, excluding special characters such as ;|&><'$; cannot be empty or full of spaces; starts with / or s3a://; ends with .sql. sql is case-insensitive.
hql str
HiveQL statement.
input str
Path for inputting data, which must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
job_log str
Path for storing job logs that record job running status. This path must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
output str
Path for outputting data, which must start with / or s3a://. A correct OBS path is required. If the path does not exist, the system automatically creates it. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
shutdown_cluster bool
Whether to delete the cluster after the jobs are complete. true: yes; false: no.
jarPath This property is required. String
Path of the .jar or .sql file for program execution. The parameter must meet the following requirements: contains a maximum of 1023 characters, excluding special characters such as ;|&><'$; cannot be empty or full of spaces; starts with / or s3a://. A Spark Script file must end with .sql, while MapReduce and Spark Jar files must end with .jar. sql and jar are case-insensitive.
jobName This property is required. String
Job name. It contains only 1 to 64 letters, digits, hyphens (-), and underscores (_). NOTE: Identical job names are allowed but not recommended.
jobType This property is required. Number
Job type. 1: MapReduce; 2: Spark; 3: Hive Script; 4: HiveQL (not supported currently); 5: DistCp, importing and exporting data (not supported in this API currently); 6: Spark Script; 7: Spark SQL, submitting Spark SQL statements (not supported in this API currently). NOTE: Spark and Hive jobs can be added only to clusters that include the Spark and Hive components.
submitJobOnceClusterRun This property is required. Boolean
true: the job is submitted when the cluster is created. false: the job is submitted separately.
arguments String
Key parameter for program execution. The parameter is specified by the function of the user's program; MRS is only responsible for loading it. The parameter contains a maximum of 2047 characters, excluding special characters such as ;|&>'<$, and can be empty.
fileAction String
Data import and export. Valid values: import, export.
hiveScriptPath String
SQL program path. This parameter is needed by Spark Script and Hive Script jobs only and must meet the following requirements: contains a maximum of 1023 characters, excluding special characters such as ;|&><'$; cannot be empty or full of spaces; starts with / or s3a://; ends with .sql. sql is case-insensitive.
hql String
HiveQL statement.
input String
Path for inputting data, which must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
jobLog String
Path for storing job logs that record job running status. This path must start with / or s3a://. A correct OBS path is required. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
output String
Path for outputting data, which must start with / or s3a://. A correct OBS path is required. If the path does not exist, the system automatically creates it. The parameter contains a maximum of 1023 characters, excluding special characters such as ;|&>'<$, and can be empty.
shutdownCluster Boolean
Whether to delete the cluster after the jobs are complete. true: yes; false: no.
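
A hedged sketch of one add-job entry in TypeScript. The type path flexibleengine.types.input.MrsClusterV1AddJob and the cluster's addJobs input name are assumed from the usual @pulumi/flexibleengine SDK layout, and the bucket paths are placeholders:

import * as flexibleengine from "@pulumi/flexibleengine";

// One Spark job submitted when the cluster is created; pass [sparkJob]
// as the cluster's addJobs input (input name assumed).
const sparkJob: flexibleengine.types.input.MrsClusterV1AddJob = {
    jobType: 2,                                             // 2: Spark
    jobName: "spark-wordcount",                             // letters, digits, - and _ only
    jarPath: "s3a://{{bucket_name}}/program/wordcount.jar", // Spark Jar must end with .jar
    arguments: "wordcount",                                 // passed through to the program
    input: "s3a://{{bucket_name}}/input/",                  // existing OBS path
    output: "s3a://{{bucket_name}}/output/",                // auto-created if missing
    jobLog: "s3a://{{bucket_name}}/log/",
    submitJobOnceClusterRun: true,                          // submit at cluster creation
    shutdownCluster: false,                                 // keep the cluster afterwards
};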

MrsClusterV1ComponentList, MrsClusterV1ComponentListArgs

ComponentName This property is required. string
The component name.

  • MRS 3.1.0-LTS.1 supports the following components:
  • The analysis cluster contains the following components: Hadoop, Spark2x, HBase, Hive, Hue, HetuEngine, Loader, Flink, Oozie, ZooKeeper, Ranger, and Tez.
  • The streaming cluster contains the following components: Kafka, Flume, ZooKeeper, and Ranger.
  • MRS 2.0.1 supports the following components:
  • The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Hive, Hue, Loader, and Tez.
  • The streaming cluster contains the following components: Kafka, Storm, and Flume.
  • MRS 1.8.9 supports the following components:
  • The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Opentsdb, Hive, Hue, Loader, and Flink.
  • The streaming cluster contains the following components: Kafka, KafkaManager, Storm, and Flume.
ComponentDesc string
Component description.
ComponentId string
Component ID. For example, the component_id of Hadoop is MRS 3.1.0-LTS.1_001, MRS 2.1.0_001, MRS 2.0.1_001, or MRS 1.8.9_001, depending on the cluster version.
ComponentVersion string
Component version.
ComponentName This property is required. string
The component name.

  • MRS 3.1.0-LTS.1 supports the following components:
  • The analysis cluster contains the following components: Hadoop, Spark2x, HBase, Hive, Hue, HetuEngine, Loader, Flink, Oozie, ZooKeeper, Ranger, and Tez.
  • The streaming cluster contains the following components: Kafka, Flume, ZooKeeper, and Ranger.
  • MRS 2.0.1 supports the following components:
  • The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Hive, Hue, Loader, and Tez.
  • The streaming cluster contains the following components: Kafka, Storm, and Flume.
  • MRS 1.8.9 supports the following components:
  • The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Opentsdb, Hive, Hue, Loader, and Flink.
  • The streaming cluster contains the following components: Kafka, KafkaManager, Storm, and Flume.
ComponentDesc string
Component description.
ComponentId string
Component ID. For example, the component_id of Hadoop is MRS 3.1.0-LTS.1_001, MRS 2.1.0_001, MRS 2.0.1_001, or MRS 1.8.9_001, depending on the cluster version.
ComponentVersion string
Component version.
componentName This property is required. String
The component name.

  • MRS 3.1.0-LTS.1 supports the following components:
  • The analysis cluster contains the following components: Hadoop, Spark2x, HBase, Hive, Hue, HetuEngine, Loader, Flink, Oozie, ZooKeeper, Ranger, and Tez.
  • The streaming cluster contains the following components: Kafka, Flume, ZooKeeper, and Ranger.
  • MRS 2.0.1 supports the following components:
  • The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Hive, Hue, Loader, and Tez.
  • The streaming cluster contains the following components: Kafka, Storm, and Flume.
  • MRS 1.8.9 supports the following components:
  • The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Opentsdb, Hive, Hue, Loader, and Flink.
  • The streaming cluster contains the following components: Kafka, KafkaManager, Storm, and Flume.
componentDesc String
Component description.
componentId String
Component ID. For example, the component_id of Hadoop is MRS 3.1.0-LTS.1_001, MRS 2.1.0_001, MRS 2.0.1_001, or MRS 1.8.9_001, depending on the cluster version.
componentVersion String
Component version.
componentName This property is required. string
The component name.

  • MRS 3.1.0-LTS.1 supports the following components:
  • The analysis cluster contains the following components: Hadoop, Spark2x, HBase, Hive, Hue, HetuEngine, Loader, Flink, Oozie, ZooKeeper, Ranger, and Tez.
  • The streaming cluster contains the following components: Kafka, Flume, ZooKeeper, and Ranger.
  • MRS 2.0.1 supports the following components:
  • The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Hive, Hue, Loader, and Tez.
  • The streaming cluster contains the following components: Kafka, Storm, and Flume.
  • MRS 1.8.9 supports the following components:
  • The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Opentsdb, Hive, Hue, Loader, and Flink.
  • The streaming cluster contains the following components: Kafka, KafkaManager, Storm, and Flume.
componentDesc string
Component description.
componentId string
Component ID. For example, the component_id of Hadoop is MRS 3.1.0-LTS.1_001, MRS 2.1.0_001, MRS 2.0.1_001, or MRS 1.8.9_001, depending on the cluster version.
componentVersion string
Component version.
component_name This property is required. str
The component name.

  • MRS 3.1.0-LTS.1 supports the following components:
  • The analysis cluster contains the following components: Hadoop, Spark2x, HBase, Hive, Hue, HetuEngine, Loader, Flink, Oozie, ZooKeeper, Ranger, and Tez.
  • The streaming cluster contains the following components: Kafka, Flume, ZooKeeper, and Ranger.
  • MRS 2.0.1 supports the following components:
  • The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Hive, Hue, Loader, and Tez.
  • The streaming cluster contains the following components: Kafka, Storm, and Flume.
  • MRS 1.8.9 supports the following components:
  • The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Opentsdb, Hive, Hue, Loader, and Flink.
  • The streaming cluster contains the following components: Kafka, KafkaManager, Storm, and Flume.
component_desc str
Component description.
component_id str
Component ID. For example, the component_id of Hadoop is MRS 3.1.0-LTS.1_001, MRS 2.1.0_001, MRS 2.0.1_001, or MRS 1.8.9_001, depending on the cluster version.
component_version str
Component version.
componentName This property is required. String
The component name.

  • MRS 3.1.0-LTS.1 supports the following components:
  • The analysis cluster contains the following components: Hadoop, Spark2x, HBase, Hive, Hue, HetuEngine, Loader, Flink, Oozie, ZooKeeper, Ranger, and Tez.
  • The streaming cluster contains the following components: Kafka, Flume, ZooKeeper, and Ranger.
  • MRS 2.0.1 supports the following components:
  • The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Hive, Hue, Loader, and Tez.
  • The streaming cluster contains the following components: Kafka, Storm, and Flume.
  • MRS 1.8.9 supports the following components:
  • The analysis cluster contains the following components: Presto, Hadoop, Spark, HBase, Opentsdb, Hive, Hue, Loader, and Flink.
  • The streaming cluster contains the following components: Kafka, KafkaManager, Storm, and Flume.
componentDesc String
Component description.
componentId String
Component ID. For example, the component_id of Hadoop is MRS 3.1.0-LTS.1_001, MRS 2.1.0_001, MRS 2.0.1_001, or MRS 1.8.9_001, depending on the cluster version.
componentVersion String
Component version.
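
For instance, a valid component selection for an MRS 2.0.1 streaming cluster per the lists above (a sketch; the types.input path is assumed from the usual @pulumi/flexibleengine SDK layout):

import * as flexibleengine from "@pulumi/flexibleengine";

// Streaming-cluster components supported by MRS 2.0.1;
// pass this array as the cluster's componentLists input.
const streamingComponents: flexibleengine.types.input.MrsClusterV1ComponentList[] = [
    { componentName: "Kafka" },
    { componentName: "Storm" },
    { componentName: "Flume" },
];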

MrsClusterV1Timeouts, MrsClusterV1TimeoutsArgs

Create string
Delete string
Create string
Delete string
create String
delete String
create string
delete string
create str
delete str
create String
delete String
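
A minimal sketch of custom create/delete timeouts (assumptions: duration strings follow the usual Terraform-style format such as "60m", and the types.input path matches the usual @pulumi/flexibleengine SDK layout):

import * as flexibleengine from "@pulumi/flexibleengine";

// Custom operation timeouts; pass as the cluster's timeouts input.
const clusterTimeouts: flexibleengine.types.input.MrsClusterV1Timeouts = {
    create: "60m",  // wait up to 60 minutes for cluster creation
    delete: "30m",  // wait up to 30 minutes for cluster deletion
};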

Package Details

Repository
flexibleengine flexibleenginecloud/terraform-provider-flexibleengine
License
Notes
This Pulumi package is based on the flexibleengine Terraform Provider.