This article collects typical usage examples of the Java class org.apache.kafka.connect.data.Schema. If you are wondering what the Schema class is for and how to use it in practice, the curated examples below should help.
The Schema class belongs to the org.apache.kafka.connect.data package. Twenty code examples are presented below, ordered by popularity.
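Before the examples, here is a minimal, self-contained sketch of the core API: building a struct Schema with SchemaBuilder and populating a matching Struct. The class and field names are illustrative and not taken from any of the projects below.

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

public class SchemaQuickStart {
  public static void main(String[] args) {
    // Define a struct schema with one required and one optional field.
    Schema userSchema = SchemaBuilder.struct()
        .name("com.example.User")
        .field("name", Schema.STRING_SCHEMA)
        .field("age", Schema.OPTIONAL_INT32_SCHEMA)
        .build();
    // Populate a Struct that conforms to the schema.
    Struct user = new Struct(userSchema)
        .put("name", "alice")
        .put("age", 30);
    // validate() throws DataException if a required field is missing.
    user.validate();
    System.out.println(user);
  }
}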
Example 1: build
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
public void build(String tableName, Schema keySchema, Schema valueSchema) {
  log.trace("build() - tableName = '{}'", tableName);
  final CassandraSchemaKey key = CassandraSchemaKey.of(this.config.keyspace, tableName);
  // Nothing to do if this table's schema has already been processed.
  if (null != this.schemaLookup.getIfPresent(key)) {
    return;
  }
  if (null == keySchema || null == valueSchema) {
    log.warn("build() - Schemaless mode detected. Cannot generate DDL so assuming table is correct.");
    this.schemaLookup.put(key, DEFAULT);
    return; // without this return, the null schemas would fall through to alter()/create()
  }
  final CassandraTableMetadata tableMetadata = this.session.tableMetadata(tableName);
  if (null != tableMetadata) {
    alter(key, tableName, keySchema, valueSchema, tableMetadata);
  } else {
    create(key, tableName, keySchema, valueSchema);
  }
}
Author: jcustenborder; Project: kafka-connect-cassandra; Source: ConnectSchemaBuilder.java
Example 2: createComplexPrimaryKey
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
@Test
public void createComplexPrimaryKey() {
  final Schema keySchema = SchemaBuilder.struct()
      .field("username", Schema.STRING_SCHEMA)
      .field("companyID", Schema.INT64_SCHEMA)
      .build();
  final Schema valueSchema = SchemaBuilder.struct()
      .field("username", Schema.STRING_SCHEMA)
      .field("companyID", Schema.INT64_SCHEMA)
      .field("firstName", Schema.STRING_SCHEMA)
      .field("lastName", Schema.STRING_SCHEMA)
      .field("created", Timestamp.SCHEMA)
      .field("updated", Timestamp.SCHEMA)
      .build();
  this.builder.build("foo", keySchema, valueSchema);
  // A CREATE TABLE statement should be issued exactly once for the new table.
  verify(this.session, times(1)).executeStatement(any(Create.class));
}
Author: jcustenborder; Project: kafka-connect-cassandra; Source: ConnectSchemaBuilderTest.java
Example 3: configure
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
@Override
protected void configure(Map<String, Object> config) {
  // Fall back to the default field name when none is configured.
  String valueFieldName;
  if (config.get(FILE_READER_TEXT_FIELD_NAME_VALUE) == null ||
      config.get(FILE_READER_TEXT_FIELD_NAME_VALUE).toString().equals("")) {
    valueFieldName = FIELD_NAME_VALUE_DEFAULT;
  } else {
    valueFieldName = config.get(FILE_READER_TEXT_FIELD_NAME_VALUE).toString();
  }
  this.schema = SchemaBuilder.struct()
      .field(valueFieldName, Schema.STRING_SCHEMA)
      .build();
  // Likewise fall back to the platform default charset.
  if (config.get(FILE_READER_TEXT_ENCODING) == null ||
      config.get(FILE_READER_TEXT_ENCODING).toString().equals("")) {
    this.charset = Charset.defaultCharset();
  } else {
    this.charset = Charset.forName(config.get(FILE_READER_TEXT_ENCODING).toString());
  }
}
Author: mmolimar; Project: kafka-connect-fs; Source: TextFileReader.java
Example 4: updateSchemaOfStruct
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
@Test
public void updateSchemaOfStruct() {
  final String fieldName1 = "f1";
  final String fieldName2 = "f2";
  final String fieldValue1 = "value1";
  final int fieldValue2 = 1;
  final Schema schema = SchemaBuilder.struct()
      .name("my.orig.SchemaDefn")
      .field(fieldName1, Schema.STRING_SCHEMA)
      .field(fieldName2, Schema.INT32_SCHEMA)
      .build();
  final Struct value = new Struct(schema).put(fieldName1, fieldValue1).put(fieldName2, fieldValue2);
  // Same fields as before, but under a different schema name.
  final Schema newSchema = SchemaBuilder.struct()
      .name("my.updated.SchemaDefn")
      .field(fieldName1, Schema.STRING_SCHEMA)
      .field(fieldName2, Schema.INT32_SCHEMA)
      .build();
  Struct newValue = (Struct) SetSchemaMetadata.updateSchemaIn(value, newSchema);
  assertMatchingSchema(newValue, newSchema);
}
Author: YMCoding; Project: kafka-0.11.0.0-src-with-comment; Source: SetSchemaMetadataTest.java
Example 5: constructAvroTable
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
private Table constructAvroTable(String database, String tableName, Schema schema, Partitioner partitioner)
    throws HiveMetaStoreException {
  Table table = newTable(database, tableName);
  table.setTableType(TableType.EXTERNAL_TABLE);
  table.getParameters().put("EXTERNAL", "TRUE");
  String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName);
  table.setDataLocation(new Path(tablePath));
  table.setSerializationLib(avroSerde);
  try {
    table.setInputFormatClass(avroInputFormat);
    table.setOutputFormatClass(avroOutputFormat);
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  // Map the Connect schema onto Hive columns and embed the Avro schema literal.
  List<FieldSchema> columns = HiveSchemaConverter.convertSchema(schema);
  table.setFields(columns);
  table.setPartCols(partitioner.partitionFields());
  table.getParameters().put(AVRO_SCHEMA_LITERAL, avroData.fromConnectSchema(schema).toString());
  return table;
}
Author: jiangxiluning; Project: kafka-connect-hdfs; Source: AvroHiveUtil.java
Example 6: convertRecord
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
public static DeletableRecord convertRecord(SinkRecord record, boolean ignoreSchema, String versionType) {
  final Schema schema;
  final Object value;
  if (!ignoreSchema) {
    schema = preProcessSchema(record.valueSchema());
    value = preProcessValue(record.value(), record.valueSchema(), schema);
  } else {
    schema = record.valueSchema();
    value = record.value();
  }
  final String payload = new String(JSON_CONVERTER.fromConnectData(record.topic(), schema, value), StandardCharsets.UTF_8);
  if (StringUtils.isNotBlank(payload)) {
    DeleteEvent deleteEvent = GSON.fromJson(payload, DeleteEvent.class);
    return new DeletableRecord(new Key(deleteEvent.getIndex(), deleteEvent.getType(), deleteEvent.getId()), deleteEvent.getVersion(), versionType);
  } else {
    return null;
  }
}
Author: chaokunyang; Project: jkes; Source: DataConverter.java
Example 7: makeUpdatedSchema
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
private Schema makeUpdatedSchema(Schema schema) {
  final SchemaBuilder builder = SchemaUtil.copySchemaBasics(schema, SchemaBuilder.struct());
  // Copy the existing fields, then append whichever metadata fields are configured.
  for (Field field : schema.fields()) {
    builder.field(field.name(), field.schema());
  }
  if (topicField != null) {
    builder.field(topicField.name, topicField.optional ? Schema.OPTIONAL_STRING_SCHEMA : Schema.STRING_SCHEMA);
  }
  if (partitionField != null) {
    builder.field(partitionField.name, partitionField.optional ? Schema.OPTIONAL_INT32_SCHEMA : Schema.INT32_SCHEMA);
  }
  if (offsetField != null) {
    builder.field(offsetField.name, offsetField.optional ? Schema.OPTIONAL_INT64_SCHEMA : Schema.INT64_SCHEMA);
  }
  if (timestampField != null) {
    builder.field(timestampField.name, timestampField.optional ? OPTIONAL_TIMESTAMP_SCHEMA : Timestamp.SCHEMA);
  }
  if (staticField != null) {
    builder.field(staticField.name, staticField.optional ? Schema.OPTIONAL_STRING_SCHEMA : Schema.STRING_SCHEMA);
  }
  return builder.build();
}
Author: YMCoding; Project: kafka-0.11.0.0-src-with-comment; Source: InsertField.java
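For context, makeUpdatedSchema belongs to Kafka's InsertField transform. Below is a minimal usage sketch, assuming the standard InsertField configuration keys (topic.field, offset.field, with a "!" suffix marking a field required); the field names themselves are illustrative.

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.transforms.InsertField;

final InsertField<SourceRecord> xform = new InsertField.Value<>();
final Map<String, String> props = new HashMap<>();
props.put("topic.field", "kafka_topic!");  // '!' suffix makes the inserted field required
props.put("offset.field", "kafka_offset"); // optional by default ('?' suffix is equivalent)
xform.configure(props);
// xform.apply(record) then returns a copy of the record whose value schema
// includes kafka_topic and kafka_offset alongside the original fields.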
Example 8: shouldChangeSchema
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
public static boolean shouldChangeSchema(Schema valueSchema, Schema currentSchema, Compatibility compatibility) {
  if (currentSchema == null) {
    return true;
  }
  if ((valueSchema.version() == null || currentSchema.version() == null) && compatibility != Compatibility.NONE) {
    throw new SchemaProjectorException("Schema version required for " + compatibility.toString() + " compatibility");
  }
  switch (compatibility) {
    case BACKWARD:
    case FULL:
      // Only switch to strictly newer schema versions.
      return (valueSchema.version()).compareTo(currentSchema.version()) > 0;
    case FORWARD:
      return (valueSchema.version()).compareTo(currentSchema.version()) < 0;
    default:
      return !valueSchema.equals(currentSchema);
  }
}
Author: jiangxiluning; Project: kafka-connect-hdfs; Source: SchemaUtils.java
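The version() values the comparison relies on are set when the schema is built, via SchemaBuilder.version(). A minimal sketch, assuming the SchemaUtils and Compatibility types from this example are importable; the schemas are illustrative.

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;

Schema v1 = SchemaBuilder.struct().name("com.example.User").version(1)
    .field("name", Schema.STRING_SCHEMA)
    .build();
Schema v2 = SchemaBuilder.struct().name("com.example.User").version(2)
    .field("name", Schema.STRING_SCHEMA)
    .field("age", Schema.OPTIONAL_INT32_SCHEMA)
    .build();
// Under BACKWARD (or FULL) compatibility, only a newer version replaces the current one.
boolean change = SchemaUtils.shouldChangeSchema(v2, v1, Compatibility.BACKWARD); // true
boolean keep = SchemaUtils.shouldChangeSchema(v1, v2, Compatibility.BACKWARD);   // false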
Example 9: applyWithSchema
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
private R applyWithSchema(R record) {
  Schema valueSchema = operatingSchema(record);
  Schema updatedSchema = getOrBuildSchema(valueSchema);

  // Whole-record casting
  if (wholeValueCastType != null)
    return newRecord(record, updatedSchema, castValueToType(operatingValue(record), wholeValueCastType));

  // Casting within a struct
  final Struct value = requireStruct(operatingValue(record), PURPOSE);
  final Struct updatedValue = new Struct(updatedSchema);
  for (Field field : value.schema().fields()) {
    final Object origFieldValue = value.get(field);
    final Schema.Type targetType = casts.get(field.name());
    final Object newFieldValue = targetType != null ? castValueToType(origFieldValue, targetType) : origFieldValue;
    updatedValue.put(updatedSchema.field(field.name()), newFieldValue);
  }
  return newRecord(record, updatedSchema, updatedValue);
}
Author: YMCoding; Project: kafka-0.11.0.0-src-with-comment; Source: Cast.java
Example 10: testCacheSchemaToConnectConversion
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
@Test
public void testCacheSchemaToConnectConversion() {
  Cache<JsonNode, Schema> cache = Whitebox.getInternalState(converter, "toConnectSchemaCache");
  assertEquals(0, cache.size());

  converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\" }, \"payload\": true }".getBytes());
  assertEquals(1, cache.size());

  // A repeated, byte-identical schema hits the existing cache entry.
  converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\" }, \"payload\": true }".getBytes());
  assertEquals(1, cache.size());

  // Different schema should also get cached
  converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\", \"optional\": true }, \"payload\": true }".getBytes());
  assertEquals(2, cache.size());

  // Even equivalent, but different JSON encoding of schema, should get different cache entry
  converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\", \"optional\": false }, \"payload\": true }".getBytes());
  assertEquals(3, cache.size());
}
Author: YMCoding; Project: kafka-0.11.0.0-src-with-comment; Source: JsonConverterTest.java
Example 11: apply
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
@Override
public R apply(R record) {
  final Schema schema = operatingSchema(record);
  requireSchema(schema, "updating schema metadata");
  final boolean isArray = schema.type() == Schema.Type.ARRAY;
  final boolean isMap = schema.type() == Schema.Type.MAP;
  // Rebuild the schema, overriding only the name and/or version when configured.
  final Schema updatedSchema = new ConnectSchema(
      schema.type(),
      schema.isOptional(),
      schema.defaultValue(),
      schemaName != null ? schemaName : schema.name(),
      schemaVersion != null ? schemaVersion : schema.version(),
      schema.doc(),
      schema.parameters(),
      schema.fields(),
      isMap ? schema.keySchema() : null,
      isMap || isArray ? schema.valueSchema() : null
  );
  return newRecord(record, updatedSchema);
}
Author: YMCoding; Project: kafka-0.11.0.0-src-with-comment; Source: SetSchemaMetadata.java
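A minimal configuration sketch for this transform, assuming the standard SetSchemaMetadata keys schema.name and schema.version; the values are illustrative.

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.transforms.SetSchemaMetadata;

final SetSchemaMetadata<SourceRecord> xform = new SetSchemaMetadata.Value<>();
final Map<String, String> props = new HashMap<>();
props.put("schema.name", "my.updated.SchemaDefn");
props.put("schema.version", "2");
xform.configure(props);
// xform.apply(record) returns the same value wrapped in a schema
// carrying the new name and version.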
Example 12: putConnectorStateNonRetriableFailure
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
@Test
public void putConnectorStateNonRetriableFailure() {
  KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
  Converter converter = mock(Converter.class);
  KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);

  byte[] value = new byte[0];
  expect(converter.fromConnectData(eq(STATUS_TOPIC), anyObject(Schema.class), anyObject(Struct.class)))
      .andStubReturn(value);

  final Capture<Callback> callbackCapture = newCapture();
  kafkaBasedLog.send(eq("status-connector-conn"), eq(value), capture(callbackCapture));
  expectLastCall()
      .andAnswer(new IAnswer<Void>() {
        @Override
        public Void answer() throws Throwable {
          // Simulate the send failing with a non-retriable server error.
          callbackCapture.getValue().onCompletion(null, new UnknownServerException());
          return null;
        }
      });
  replayAll();

  // the error is logged and ignored
  ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.RUNNING, WORKER_ID, 0);
  store.put(status);

  // state is not visible until read back from the log
  assertEquals(null, store.get(CONNECTOR));

  verifyAll();
}
Author: YMCoding; Project: kafka-0.11.0.0-src-with-comment; Source: KafkaStatusBackingStoreTest.java
Example 13: castWholeRecordKeyWithSchema
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
@Test
public void castWholeRecordKeyWithSchema() {
  final Cast<SourceRecord> xform = new Cast.Key<>();
  xform.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int8"));

  SourceRecord transformed = xform.apply(new SourceRecord(null, null, "topic", 0,
      Schema.INT32_SCHEMA, 42, Schema.STRING_SCHEMA, "bogus"));

  assertEquals(Schema.Type.INT8, transformed.keySchema().type());
  assertEquals((byte) 42, transformed.key());
}
Author: YMCoding; Project: kafka-0.11.0.0-src-with-comment; Source: CastTest.java
Example 14: convert
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
public SourceRecord convert(String topic, String tag, Long timestamp, EventEntry entry) {
  if (config.isFluentdSchemasEnable()) {
    SchemaAndValue schemaAndValue = convert(topic, entry);
    return new SourceRecord(
        null,
        null,
        topic,
        null,
        Schema.STRING_SCHEMA,
        tag,
        schemaAndValue.schema(),
        schemaAndValue.value(),
        timestamp
    );
  } else {
    // Schemaless mode: pass the record through as a plain map, or raw JSON on parse failure.
    Object record;
    try {
      record = new ObjectMapper().readValue(entry.getRecord().toJson(), LinkedHashMap.class);
    } catch (IOException e) {
      record = entry.getRecord().toJson();
    }
    return new SourceRecord(
        null,
        null,
        topic,
        null,
        null,
        null,
        null,
        record,
        timestamp
    );
  }
}
Author: fluent; Project: kafka-connect-fluentd; Source: MessagePackConverver.java
Example 15: putConnectorState
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
@Test
public void putConnectorState() {
  KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
  Converter converter = mock(Converter.class);
  KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);

  byte[] value = new byte[0];
  expect(converter.fromConnectData(eq(STATUS_TOPIC), anyObject(Schema.class), anyObject(Struct.class)))
      .andStubReturn(value);

  final Capture<Callback> callbackCapture = newCapture();
  kafkaBasedLog.send(eq("status-connector-conn"), eq(value), capture(callbackCapture));
  expectLastCall()
      .andAnswer(new IAnswer<Void>() {
        @Override
        public Void answer() throws Throwable {
          // Simulate a successful send.
          callbackCapture.getValue().onCompletion(null, null);
          return null;
        }
      });
  replayAll();

  ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.RUNNING, WORKER_ID, 0);
  store.put(status);

  // state is not visible until read back from the log
  assertEquals(null, store.get(CONNECTOR));

  verifyAll();
}
Author: YMCoding; Project: kafka-0.11.0.0-src-with-comment; Source: KafkaStatusBackingStoreTest.java
Example 16: castWholeRecordValueWithSchemaInt8
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
@Test
public void castWholeRecordValueWithSchemaInt8() {
  final Cast<SourceRecord> xform = new Cast.Value<>();
  xform.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int8"));

  SourceRecord transformed = xform.apply(new SourceRecord(null, null, "topic", 0,
      Schema.INT32_SCHEMA, 42));

  assertEquals(Schema.Type.INT8, transformed.valueSchema().type());
  assertEquals((byte) 42, transformed.value());
}
Author: YMCoding; Project: kafka-0.11.0.0-src-with-comment; Source: CastTest.java
Example 17: stringToJson
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
@Test
public void stringToJson() {
  JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.STRING_SCHEMA, "test-string"));
  validateEnvelope(converted);
  assertEquals(parse("{ \"type\": \"string\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
  assertEquals("test-string", converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).textValue());
}
Author: YMCoding; Project: kafka-0.11.0.0-src-with-comment; Source: JsonConverterTest.java
Example 18: testOptionalFieldStruct
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
@Test
public void testOptionalFieldStruct() {
  final Flatten<SourceRecord> xform = new Flatten.Value<>();
  xform.configure(Collections.<String, String>emptyMap());

  SchemaBuilder builder = SchemaBuilder.struct();
  builder.field("opt_int32", Schema.OPTIONAL_INT32_SCHEMA);
  Schema supportedTypesSchema = builder.build();

  builder = SchemaBuilder.struct();
  builder.field("B", supportedTypesSchema);
  Schema oneLevelNestedSchema = builder.build();

  Struct supportedTypes = new Struct(supportedTypesSchema);
  supportedTypes.put("opt_int32", null);

  Struct oneLevelNestedStruct = new Struct(oneLevelNestedSchema);
  oneLevelNestedStruct.put("B", supportedTypes);

  SourceRecord transformed = xform.apply(new SourceRecord(null, null,
      "topic", 0,
      oneLevelNestedSchema, oneLevelNestedStruct));

  assertEquals(Schema.Type.STRUCT, transformed.valueSchema().type());
  Struct transformedStruct = (Struct) transformed.value();
  // The nested optional field is flattened to "B.opt_int32" and keeps its null value.
  assertNull(transformedStruct.get("B.opt_int32"));
}
Author: YMCoding; Project: kafka-0.11.0.0-src-with-comment; Source: FlattenTest.java
Example 19: arrayToJson
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
@Test
public void arrayToJson() {
  Schema int32Array = SchemaBuilder.array(Schema.INT32_SCHEMA).build();
  JsonNode converted = parse(converter.fromConnectData(TOPIC, int32Array, Arrays.asList(1, 2, 3)));
  validateEnvelope(converted);
  assertEquals(parse("{ \"type\": \"array\", \"items\": { \"type\": \"int32\", \"optional\": false }, \"optional\": false }"),
      converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
  assertEquals(JsonNodeFactory.instance.arrayNode().add(1).add(2).add(3),
      converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME));
}
Author: YMCoding; Project: kafka-0.11.0.0-src-with-comment; Source: JsonConverterTest.java
Example 20: fromConnectData
import org.apache.kafka.connect.data.Schema; // import the dependent package/class
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
  if (schema != null && schema.type() != Schema.Type.BYTES)
    throw new DataException("Invalid schema type for ByteArrayConverter: " + schema.type().toString());

  if (value != null && !(value instanceof byte[]))
    throw new DataException("ByteArrayConverter is not compatible with objects of type " + value.getClass());

  return (byte[]) value;
}
Author: YMCoding; Project: kafka-0.11.0.0-src-with-comment; Source: ByteArrayConverter.java
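A minimal usage sketch for this converter, assuming it lives at org.apache.kafka.connect.converters.ByteArrayConverter (as in recent Kafka releases) and uses the standard Converter.configure(configs, isKey) contract; the topic name and payload are illustrative.

import java.util.Collections;
import org.apache.kafka.connect.converters.ByteArrayConverter;
import org.apache.kafka.connect.data.Schema;

ByteArrayConverter converter = new ByteArrayConverter();
converter.configure(Collections.emptyMap(), false); // false = configure as a value converter
// Values pass through untouched as long as the schema (if any) is BYTES.
byte[] out = converter.fromConnectData("my-topic", Schema.BYTES_SCHEMA, new byte[]{1, 2, 3});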
Note: The org.apache.kafka.connect.data.Schema examples in this article are collected from open-source projects hosted on platforms such as GitHub. Copyright of the source code remains with the original authors; consult each project's license before reusing or redistributing the code.