This article collects typical usage examples of the Java class org.apache.hadoop.mapred.MiniMRClientClusterFactory. If you are unsure how to use MiniMRClientClusterFactory, or are looking for working examples of it, the curated snippets below may help.
The MiniMRClientClusterFactory class belongs to the org.apache.hadoop.mapred package. A total of 14 code examples for the class are shown below, sorted by popularity by default.
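Before the examples, here is a minimal sketch of the factory's typical lifecycle in a test. Only the Hadoop API calls are taken from the examples below; the class name MyMiniClusterTest and the two helper method names are illustrative assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.MiniMRClientCluster;
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;

public class MyMiniClusterTest {
  private static MiniMRClientCluster mrCluster;

  public static void startMiniCluster() throws Exception {
    Configuration conf = new Configuration();
    // create(caller, numNodeManagers, conf) builds a small MapReduce cluster for tests
    mrCluster = MiniMRClientClusterFactory.create(MyMiniClusterTest.class, 1, conf);
    // getConfig() exposes the cluster's effective configuration for job submission
    Configuration clusterConf = mrCluster.getConfig();
    System.out.println("ResourceManager at " + clusterConf.get("yarn.resourcemanager.address"));
  }

  public static void stopMiniCluster() throws Exception {
    if (mrCluster != null) {
      mrCluster.stop();
    }
  }
}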
Example 1: setUp
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import the required package/class
@SuppressWarnings("deprecation")
@BeforeClass
public static void setUp() throws Exception {
  System.setProperty("hadoop.log.dir", "logs");
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(numSlaves)
      .build();
  jConf = new JobConf(conf);
  FileSystem.setDefaultUri(conf, dfsCluster.getFileSystem().getUri().toString());
  mrCluster = MiniMRClientClusterFactory.create(TestMRCredentials.class, 1, jConf);
  createKeysAsJson("keys.json");
}
Developer ID: naver, Project: hadoop, Lines: 13, Source: TestMRCredentials.java
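A hedged addition, not part of the original example: the matching @AfterClass teardown for this kind of setUp would typically stop both mini clusters.
@AfterClass
public static void tearDown() throws Exception {
  if (mrCluster != null) {
    mrCluster.stop();      // stop the MapReduce mini cluster started in setUp
  }
  if (dfsCluster != null) {
    dfsCluster.shutdown(); // shut down the HDFS mini cluster
  }
}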
Example 2: startCluster
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import the required package/class
private void startCluster(Configuration conf) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "target/test-dir");
  }
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");
  String cp = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      StringUtils.join(",",
          YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH))
      + File.pathSeparator + classpathDir;
  conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp);
  dfsCluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(
      new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
      new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
      new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  FileSystem.setDefaultUri(conf, fileSystem.getUri());
  mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);
  // so the minicluster conf is available to the containers
  Writer writer = new FileWriter(classpathDir + "/core-site.xml");
  mrCluster.getConfig().writeXml(writer);
  writer.close();
}
Developer ID: naver, Project: hadoop, Lines: 32, Source: TestEncryptedShuffle.java
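With the cluster running, a test would normally submit a job against the configuration returned by mrCluster.getConfig(). The helper below is a hedged sketch rather than part of the original test; the method name and the /tmp/in and /tmp/out paths are illustrative assumptions.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

private void runSmokeJob() throws Exception {
  // base the job on the mini cluster's effective configuration
  JobConf jobConf = new JobConf(mrCluster.getConfig());
  jobConf.setJobName("mini-cluster-smoke-test");
  FileInputFormat.setInputPaths(jobConf, new Path("/tmp/in"));
  FileOutputFormat.setOutputPath(jobConf, new Path("/tmp/out"));
  // with no mapper or reducer set, the old mapred API falls back to the identity classes
  JobClient.runJob(jobConf);
}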
Example 3: initCluster
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import the required package/class
public static void initCluster(Class<?> caller) throws IOException {
  Configuration conf = new Configuration();
  // conf.set("mapred.queue.names", "default,q1,q2");
  conf.set("mapred.queue.names", "default");
  conf.set("yarn.scheduler.capacity.root.queues", "default");
  conf.set("yarn.scheduler.capacity.root.default.capacity", "100.0");
  conf.setBoolean(GRIDMIX_USE_QUEUE_IN_TRACE, false);
  conf.set(GRIDMIX_DEFAULT_QUEUE, "default");
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true)
      .build(); // MiniDFSCluster(conf, 3, true, null);
  dfs = dfsCluster.getFileSystem();
  conf.set(JTConfig.JT_RETIREJOBS, "false");
  mrvl = MiniMRClientClusterFactory.create(caller, 2, conf);
  conf = mrvl.getConfig();
  String[] files = conf.getStrings(MRJobConfig.CACHE_FILES);
  if (files != null) {
    String[] timestamps = new String[files.length];
    for (int i = 0; i < files.length; i++) {
      timestamps[i] = Long.toString(System.currentTimeMillis());
    }
    conf.setStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS, timestamps);
  }
}
Developer ID: naver, Project: hadoop, Lines: 30, Source: GridmixTestUtils.java
Example 4: setUp
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import the required package/class
@SuppressWarnings("deprecation")
@BeforeClass
public static void setUp() throws Exception {
  System.setProperty("hadoop.log.dir", "logs");
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  jConf = new JobConf(conf);
  FileSystem.setDefaultUri(conf, dfsCluster.getFileSystem().getUri().toString());
  mrCluster = MiniMRClientClusterFactory.create(TestMRCredentials.class, 1, jConf);
  createKeysAsJson("keys.json");
}
Developer ID: ict-carch, Project: hadoop-plus, Lines: 12, Source: TestMRCredentials.java
Example 5: startCluster
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import the required package/class
private void startCluster(Configuration conf) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "target/test-dir");
  }
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");
  String cp = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH) +
      File.pathSeparator + classpathDir;
  conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp);
  dfsCluster = new MiniDFSCluster(conf, 1, true, null);
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(
      new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
      new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
      new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  FileSystem.setDefaultUri(conf, fileSystem.getUri());
  mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);
  // so the minicluster conf is available to the containers
  Writer writer = new FileWriter(classpathDir + "/core-site.xml");
  mrCluster.getConfig().writeXml(writer);
  writer.close();
}
Developer ID: ict-carch, Project: hadoop-plus, Lines: 30, Source: TestEncryptedShuffle.java
Example 6: setUp
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import the required package/class
@SuppressWarnings("deprecation")
@BeforeClass
public static void setUp() throws Exception {
  System.setProperty("hadoop.log.dir", "logs");
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(numSlaves)
      .build();
  jConf = new JobConf(conf);
  FileSystem.setDefaultUri(conf, dfsCluster.getFileSystem().getUri().toString());
  mrCluster = MiniMRClientClusterFactory.create(TestMRCredentials.class, 1, jConf, false);
  createKeysAsJson("keys.json");
}
Developer ID: hopshadoop, Project: hops, Lines: 13, Source: TestMRCredentials.java
Example 7: startCluster
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import the required package/class
private void startCluster(Configuration conf) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "target/test-dir");
  }
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");
  String cp = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      StringUtils.join(",",
          YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH))
      + File.pathSeparator + classpathDir;
  conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp);
  dfsCluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(
      new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
      new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
      new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  FileSystem.setDefaultUri(conf, fileSystem.getUri());
  mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf, false);
  // so the minicluster conf is available to the containers
  Writer writer = new FileWriter(classpathDir + "/core-site.xml");
  mrCluster.getConfig().writeXml(writer);
  writer.close();
}
Developer ID: hopshadoop, Project: hops, Lines: 32, Source: TestEncryptedShuffle.java
Example 8: initCluster
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import the required package/class
public static void initCluster(Class<?> caller) throws IOException {
  Configuration conf = new Configuration();
  // conf.set("mapred.queue.names", "default,q1,q2");
  conf.set("mapred.queue.names", "default");
  conf.set("yarn.scheduler.capacity.root.queues", "default");
  conf.set("yarn.scheduler.capacity.root.default.capacity", "100.0");
  conf.setBoolean(GRIDMIX_USE_QUEUE_IN_TRACE, false);
  conf.set(GRIDMIX_DEFAULT_QUEUE, "default");
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true)
      .build(); // MiniDFSCluster(conf, 3, true, null);
  dfs = dfsCluster.getFileSystem();
  conf.set(JTConfig.JT_RETIREJOBS, "false");
  mrvl = MiniMRClientClusterFactory.create(caller, 2, conf, false);
  conf = mrvl.getConfig();
  String[] files = conf.getStrings(MRJobConfig.CACHE_FILES);
  if (files != null) {
    String[] timestamps = new String[files.length];
    for (int i = 0; i < files.length; i++) {
      timestamps[i] = Long.toString(System.currentTimeMillis());
    }
    conf.setStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS, timestamps);
  }
}
Developer ID: hopshadoop, Project: hops, Lines: 30, Source: GridmixTestUtils.java
Example 9: startMrCluster
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import the required package/class
private void startMrCluster() throws IOException {
  Configuration conf = new JobConf();
  FileSystem.setDefaultUri(conf, HadoopTestUtils.getTestDFS().getUri());
  conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
  conf.setBoolean(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, true);
  String addr = MiniYARNCluster.getHostname() + ":0";
  conf.set(YarnConfiguration.RM_ADDRESS, addr);
  conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, addr);
  m_mrCluster = MiniMRClientClusterFactory.create(
      HadoopTestUtils.class,
      "MR4CTests",
      1, // num node managers
      conf
  );
  // make sure startup is finished
  for (int i = 0; i < 60; i++) {
    String newAddr = m_mrCluster.getConfig().get(YarnConfiguration.RM_ADDRESS);
    if (newAddr.equals(addr)) {
      s_log.warn("MiniYARNCluster startup not complete");
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
        throw new IOException(ie);
      }
    } else {
      s_log.info("MiniYARNCluster now available at {}", newAddr);
      return;
    }
  }
  throw new IOException("MiniYARNCluster taking too long to startup");
}
Developer ID: google, Project: mr4c, Lines: 34, Source: YarnTestBinding.java
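A hedged counterpart, not shown in the source: shutting this cluster down again only requires stopping the MiniMRClientCluster handle.
private void stopMrCluster() throws IOException {
  if (m_mrCluster != null) {
    m_mrCluster.stop(); // stop() shuts the mini cluster down
    m_mrCluster = null;
  }
}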
Example 10: setup
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import the required package/class
@BeforeClass
public static void setup() throws IOException {
  Properties props = new Properties();
  InputStream is = ClassLoader.getSystemResourceAsStream("hdfs-conf.properties");
  props.load(is);
  for (Entry<Object, Object> entry : props.entrySet()) {
    System.setProperty((String) entry.getKey(), (String) entry.getValue());
  }
  Map<String, String> envMap = new HashMap<String, String>();
  envMap.put("JAVA_HOME", System.getProperty("java.home"));
  setEnv(envMap);
  final Configuration conf = new Configuration();
  final Path TEST_ROOT_DIR = new Path(System.getProperty("test.build.data", "/tmp"));
  testdir = new Path(TEST_ROOT_DIR, "TestMiniMRClientCluster");
  inDir = new Path(testdir, "in");
  outDir = new Path(testdir, "out");
  FileSystem fs = FileSystem.getLocal(conf);
  if (fs.exists(testdir) && !fs.delete(testdir, true)) {
    throw new IOException("Could not delete " + testdir);
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir);
  }
  for (int i = 0; i < inFiles.length; i++) {
    inFiles[i] = new Path(inDir, "part_" + i);
    createFile(inFiles[i], conf);
  }
  // create the mini cluster to be used for the tests
  mrCluster = MiniMRClientClusterFactory.create(WordCountToolTest.class, 1, new Configuration());
}
Developer ID: xuzhikethinker, Project: t4f-data, Lines: 38, Source: WordCountToolTest.java
Example 11: setup
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import the required package/class
@BeforeClass
public static void setup() throws IOException {
  Properties props = new Properties();
  InputStream is = ClassLoader.getSystemResourceAsStream("hdfs-conf.properties");
  props.load(is);
  for (Entry<Object, Object> entry : props.entrySet()) {
    System.setProperty((String) entry.getKey(), (String) entry.getValue());
  }
  Map<String, String> envMap = new HashMap<String, String>();
  envMap.put("JAVA_HOME", System.getProperty("java.home"));
  setEnv(envMap);
  final Configuration conf = new Configuration();
  final Path TEST_ROOT_DIR = new Path(System.getProperty("test.build.data", "/tmp"));
  testdir = new Path(TEST_ROOT_DIR, "TestMiniMRClientCluster");
  inDir = new Path(testdir, "in");
  outDir = new Path(testdir, "out");
  FileSystem fs = FileSystem.getLocal(conf);
  if (fs.exists(testdir) && !fs.delete(testdir, true)) {
    throw new IOException("Could not delete " + testdir);
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir);
  }
  for (int i = 0; i < inFiles.length; i++) {
    inFiles[i] = new Path(inDir, "part_" + i);
    createFile(inFiles[i], conf);
  }
  // create the mini cluster to be used for the tests
  mrCluster = MiniMRClientClusterFactory.create(GrepToolTest.class, 1, new Configuration());
}
Developer ID: xuzhikethinker, Project: t4f-data, Lines: 38, Source: GrepToolTest.java
Example 12: setup
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import the required package/class
@Before
public void setup() throws IOException {
  Configuration conf = new YarnConfiguration();
  cluster = MiniMRClientClusterFactory.create(this.getClass(), 2, conf);
  cluster.start();
}
Developer ID: naver, Project: hadoop, Lines: 7, Source: TestLargeSort.java
Example 13: start
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import the required package/class
/**
 * Starts DFS and MR clusters, as specified in member-variable options. Also
 * writes out configuration and details, if requested.
 *
 * @throws IOException
 * @throws FileNotFoundException
 * @throws URISyntaxException
 */
public void start() throws IOException, FileNotFoundException,
    URISyntaxException {
  if (!noDFS) {
    dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort)
        .numDataNodes(numDataNodes).startupOption(dfsOpts).build();
    LOG.info("Started MiniDFSCluster -- namenode on port "
        + dfs.getNameNodePort());
  }
  if (!noMR) {
    if (fs == null && dfs != null) {
      fs = dfs.getFileSystem().getUri().toString();
    } else if (fs == null) {
      fs = "file:///tmp/minimr-" + System.nanoTime();
    }
    FileSystem.setDefaultUri(conf, new URI(fs));
    // Instruct the minicluster to use fixed ports, so the user will know which
    // ports to use when communicating with the cluster.
    conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
    conf.setBoolean(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, true);
    conf.set(YarnConfiguration.RM_ADDRESS, MiniYARNCluster.getHostname()
        + ":" + this.rmPort);
    conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, MiniYARNCluster.getHostname()
        + ":" + this.jhsPort);
    mr = MiniMRClientClusterFactory.create(this.getClass(), numNodeManagers,
        conf);
    LOG.info("Started MiniMRCluster");
  }
  if (writeConfig != null) {
    FileOutputStream fos = new FileOutputStream(new File(writeConfig));
    conf.writeXml(fos);
    fos.close();
  }
  if (writeDetails != null) {
    Map<String, Object> map = new TreeMap<String, Object>();
    if (dfs != null) {
      map.put("namenode_port", dfs.getNameNodePort());
    }
    if (mr != null) {
      map.put("resourcemanager_port", mr.getConfig().get(
          YarnConfiguration.RM_ADDRESS).split(":")[1]);
    }
    FileWriter fw = new FileWriter(new File(writeDetails));
    fw.write(new JSON().toJSON(map));
    fw.close();
  }
}
Developer ID: naver, Project: hadoop, Lines: 57, Source: MiniHadoopClusterManager.java
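The matching shutdown path is not shown here; a hedged sketch of what it could look like, assuming the same dfs and mr fields, is:
public void stop() throws IOException {
  if (mr != null) {
    mr.stop();      // stop the MapReduce mini cluster
    LOG.info("Stopped MiniMRCluster");
  }
  if (dfs != null) {
    dfs.shutdown(); // shut down the HDFS mini cluster
    LOG.info("Stopped MiniDFSCluster");
  }
}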
Example 14: testDistCh
import org.apache.hadoop.mapred.MiniMRClientClusterFactory; // import the required package/class
public void testDistCh() throws Exception {
  final Configuration conf = new Configuration();
  conf.set(CapacitySchedulerConfiguration.PREFIX + CapacitySchedulerConfiguration.ROOT
      + "." + CapacitySchedulerConfiguration.QUEUES, "default");
  conf.set(CapacitySchedulerConfiguration.PREFIX + CapacitySchedulerConfiguration.ROOT
      + ".default." + CapacitySchedulerConfiguration.CAPACITY, "100");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
  final FileSystem fs = cluster.getFileSystem();
  final FsShell shell = new FsShell(conf);
  try {
    final FileTree tree = new FileTree(fs, "testDistCh");
    final FileStatus rootstatus = fs.getFileStatus(tree.rootdir);
    runLsr(shell, tree.root, 0);
    final String[] args = new String[NUN_SUBS];
    final ChPermissionStatus[] newstatus = new ChPermissionStatus[NUN_SUBS];
    args[0] = "/test/testDistCh/sub0:sub1::";
    newstatus[0] = new ChPermissionStatus(rootstatus, "sub1", "", "");
    args[1] = "/test/testDistCh/sub1::sub2:";
    newstatus[1] = new ChPermissionStatus(rootstatus, "", "sub2", "");
    args[2] = "/test/testDistCh/sub2:::437";
    newstatus[2] = new ChPermissionStatus(rootstatus, "", "", "437");
    args[3] = "/test/testDistCh/sub3:sub1:sub2:447";
    newstatus[3] = new ChPermissionStatus(rootstatus, "sub1", "sub2", "447");
    args[4] = "/test/testDistCh/sub4::sub5:437";
    newstatus[4] = new ChPermissionStatus(rootstatus, "", "sub5", "437");
    args[5] = "/test/testDistCh/sub5:sub1:sub5:";
    newstatus[5] = new ChPermissionStatus(rootstatus, "sub1", "sub5", "");
    args[6] = "/test/testDistCh/sub6:sub3::437";
    newstatus[6] = new ChPermissionStatus(rootstatus, "sub3", "", "437");
    System.out.println("args=" + Arrays.asList(args).toString().replace(",", ",\n "));
    System.out.println("newstatus=" + Arrays.asList(newstatus).toString().replace(",", ",\n "));
    // run DistCh
    new DistCh(MiniMRClientClusterFactory.create(this.getClass(), 2, conf).getConfig()).run(args);
    runLsr(shell, tree.root, 0);
    // check results
    for (int i = 0; i < NUN_SUBS; i++) {
      Path sub = new Path(tree.root + "/sub" + i);
      checkFileStatus(newstatus[i], fs.getFileStatus(sub));
      for (FileStatus status : fs.listStatus(sub)) {
        checkFileStatus(newstatus[i], status);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
Developer ID: naver, Project: hadoop, Lines: 61, Source: TestDistCh.java
Note: the org.apache.hadoop.mapred.MiniMRClientClusterFactory examples in this article were compiled from source-code and documentation hosting platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and redistribution or use should follow the corresponding project's License. Do not reproduce without permission.