junit.framework.TestListener#com.bigdata.journal.Journal Source Code Examples

Listed below are example usages of junit.framework.TestListener#com.bigdata.journal.Journal. Follow each project link to view the full source on GitHub.

Example 1, Project: database, File: TestConcurrentKBCreate.java
/**
  * Verify that the named kb exists.
  * 
  * @throws SailException
  */
private void assertKBExists(final Journal jnl, final String namespace)
      throws RepositoryException, SailException {
   // Attempt to discover the KB instance.
   BigdataSailRepositoryConnection conn = null;
   try {
      // Request query connection.
      conn = getQueryConnection(jnl, namespace, ITx.READ_COMMITTED);
      // Verify KB exists.
      assertNotNull(namespace, conn);
   } finally {
      // Ensure connection is closed.
      if (conn != null)
         conn.close();
   }
}
 
Example 2, Project: database, File: TestMemStore.java
public void test_allocCommitFree() {
    final Journal store = getJournal(getProperties());
    try {

        final MemStrategy bs = (MemStrategy) store.getBufferStrategy();

        final long addr = bs.write(randomData(78));

        store.commit();

        bs.delete(addr);

        assertTrue(bs.isCommitted(addr));
    } finally {
        store.destroy();
    }
}
 
Example 3, Project: database, File: TestPipelineJoin.java
/**
 * Create and populate relation in the {@link #namespace}.
 */
private void loadData(final Journal store) {

    // create the relation.
    final R rel = new R(store, namespace, ITx.UNISOLATED, new Properties());
    rel.create();

    // data to insert.
    final E[] a = {//
            new E("John", "Mary"),// 
            new E("Mary", "Paul"),// 
            new E("Paul", "Leon"),// 
            new E("Leon", "Paul"),// 
            new E("Mary", "John"),// 
    };

    // insert data (the records are not pre-sorted).
    rel.insert(new ChunkedArrayIterator<E>(a.length, a, null/* keyOrder */));

    // Do commit since not scale-out.
    store.commit();

}
 
Example 4, Project: database, File: TestRWJournal.java
public void test_multiVoidCommit() {

    final Journal store = (Journal) getStore();
    try {

        final RWStrategy bs = (RWStrategy) store.getBufferStrategy();

        final RWStore rw = bs.getStore();

        final boolean initRequire = bs.requiresCommit(store.getRootBlockView());
        assertTrue(initRequire);

        for (int n = 0; n < 20; n++) {
            store.commit();
            final IRootBlockView rbv = store.getRootBlockView();
            assertTrue(1 == rbv.getCommitCounter());
            assertFalse(bs.requiresCommit(store.getRootBlockView()));
            assertFalse(rw.requiresCommit());
        }
    } finally {
        store.destroy();
    }
}
 
Example 5, Project: database, File: TestRWJournal.java
public void test_snapshotData() throws IOException {
	final Journal journal = (Journal) getStore(0); // remember no history!

	try {
		for (int i = 0; i < 100; i++)
			commitSomeData(journal);

		final AtomicReference<IRootBlockView> rbv = new AtomicReference<IRootBlockView>();
		final Iterator<ISnapshotEntry> data = journal.snapshotAllocationData(rbv).entries();

		while (data.hasNext()) {
			final ISnapshotEntry e = data.next();
			
			log.info("Position: " + e.getAddress() + ", data size: "
					+ e.getData().length);
		}
	} finally {
		journal.destroy();
	}
}
 
Example 6, Project: database, File: TestRWJournal.java
/**
 * Test low level RWStore add/removeAddress methods as used in HA
 * WriteCache replication to ensure Allocation consistency
 * 
 * @throws IOException
 */
public void testStressReplication() throws IOException {

	// Create a couple of stores with temp files
	final Journal store1 = (Journal) getStore();
	final Journal store2 = (Journal) getStore();
	try {		
		final RWStore rw1 = ((RWStrategy) store1.getBufferStrategy()).getStore();
		final RWStore rw2 = ((RWStrategy) store2.getBufferStrategy()).getStore();
	
		assertTrue(rw1 != rw2);
		
		final Random r = new Random();
		
		for (int i = 0; i < 100000; i++) {
			final int sze = 1 + r.nextInt(2000);
			final int addr = rw1.alloc(sze, null);
			rw2.addAddress(addr, sze);
		}
	} finally {
		store1.destroy();
		store2.destroy();
	}
}
 
Example 7, Project: database, File: TestRWJournal.java
/**
 * State1
 * 
 * Allocate - Commit - Free
 * 
 * assert that allocation remains committed
 */
public void test_allocCommitFree() {
    final Journal store = (Journal) getStore();
    try {

        final RWStrategy bs = (RWStrategy) store.getBufferStrategy();

        final long addr = bs.write(randomData(78));

        store.commit();

        bs.delete(addr);

        assertTrue(bs.isCommitted(addr));
    } finally {
        store.destroy();
    }
}
 
Example 8, Project: database, File: AbstractBigdataGraphTestCase.java
protected Properties getProperties() {
        
        final Properties p = new Properties();

        p.setProperty(Journal.Options.BUFFER_MODE,
                BufferMode.MemStore.toString());
        
        /*
         * TODO Test both triples and quads.
         * 
         * Note: We need to use different data files for quads (trig). If we use
         * trig for a triples mode kb then we get errors (context bound, but not
         * quads mode).
         */
        p.setProperty(BigdataSail.Options.TRIPLES_MODE, "true");
//        p.setProperty(BigdataSail.Options.QUADS_MODE, "true");
        p.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "false");

        return p;
        
    }
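For context, a hypothetical caller of this getProperties() method would hand the returned properties to a Journal, the same way the other examples on this page do. The sketch below is illustrative rather than part of the original test class.

final Properties p = getProperties();
final Journal jnl = new Journal(p);   // in-memory journal (BufferMode.MemStore)
try {
    // ... open a BigdataSail / graph against jnl here ...
} finally {
    jnl.destroy();                    // releases the in-memory store
}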
 
Example 9, Project: database, File: BigdataSail.java
/**
 * Create or open a database instance configured using the specified
 * properties.
 * 
 * @see Options
 */
public BigdataSail(final Properties properties) {
    
    this(properties.getProperty(Options.NAMESPACE,Options.DEFAULT_NAMESPACE), new Journal(properties));

    closeOnShutdown = true;
    
    if (!exists()) {
        // Create iff it does not exist (backward compatibility, simple constructor mode).
        try {
            create(properties);
        } catch (InterruptedException | ExecutionException e) {
            throw new RuntimeException(e);
        }
    }

}
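As a hedged illustration of how this constructor is typically used (mirroring BasicRepositoryProvider.open() in Example 11; none of the lines below are quoted from BigdataSail itself):

final Properties props = new Properties();
props.setProperty(Journal.Options.BUFFER_MODE, BufferMode.MemStore.name());

final BigdataSail sail = new BigdataSail(props);            // creates the namespace iff it does not exist
final BigdataSailRepository repo = new BigdataSailRepository(sail);
repo.initialize();                                          // throws RepositoryException; handle or declare it
try {
    // ... obtain read/write connections from repo ...
} finally {
    repo.shutDown();                                        // also shuts down the underlying sail
}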
 
Example 10, Project: database, File: TestDistinctTermScanOp.java
/**
 * Create and populate relation in the {@link #namespace}.
 * 
 * FIXME The {@link DistinctTermAdvancer} is IV specific code. We need to
 * use a R(elation) and E(lement) type that use IVs to write this test.
 */
  private void loadData(final Journal store) {

      // create the relation.
      final R rel = new R(store, namespace, ITx.UNISOLATED, new Properties());
      rel.create();

      // data to insert.
      final E[] a = {//
              new E("John", "Mary"),// 
              new E("Mary", "Paul"),// 
              new E("Paul", "Leon"),// 
              new E("Leon", "Paul"),// 
              new E("Mary", "John"),// 
      };

      // insert data (the records are not pre-sorted).
      rel.insert(new ChunkedArrayIterator<E>(a.length, a, null/* keyOrder */));

      // Do commit since not scale-out.
      store.commit();

  }
 
Example 11, Project: tinkerpop3, File: BasicRepositoryProvider.java
/**
 * Open and initialize a BigdataSailRepository using the supplied config
 * properties.  You must specify a journal file in the properties.
 * 
 * @param props
 *          config properties
 * @return
 *          an open and initialized repository
 */
public static BigdataSailRepository open(final Properties props) {
    if (props.getProperty(Journal.Options.FILE) == null) {
        throw new IllegalArgumentException();
    }
    
    final BigdataSail sail = new BigdataSail(props);
    final BigdataSailRepository repo = new BigdataSailRepository(sail);
    Code.wrapThrow(() -> repo.initialize());
    return repo;
}
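Because open() requires Journal.Options.FILE to be set, a hypothetical caller usually starts from getDefaultProperties() (Example 20 below) and adds the journal file location. The path in this sketch is illustrative only.

final Properties props = BasicRepositoryProvider.getDefaultProperties();
props.setProperty(Journal.Options.FILE, "/var/data/blazegraph/tinkerpop3.jnl"); // illustrative path

final BigdataSailRepository repo = BasicRepositoryProvider.open(props);
try {
    // ... use the repository ...
} finally {
    repo.shutDown(); // throws RepositoryException; handle or declare it
}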
 
Example 12, Project: database, File: TestMemStore.java
/**
 * Can be tested by removing RWStore call to journal.removeCommitRecordEntries
 * in freeDeferrals.
 * 
 * final int commitPointsRemoved = journal.removeCommitRecordEntries(fromKey, toKey);
 * 
 * replaced with
 * 
 * final int commitPointsRemoved = commitPointsRecycled;
 * 
 */
public void testVerifyCommitRecordIndex() {

    final Properties properties = new Properties(getProperties());

    properties.setProperty(
            AbstractTransactionService.Options.MIN_RELEASE_AGE, "400");

    final Journal store = getJournal(properties);
    try {

        final MemStrategy bs = (MemStrategy) store.getBufferStrategy();

        for (int r = 0; r < 10; r++) {
            final ArrayList<Long> addrs = new ArrayList<Long>();
            for (int i = 0; i < 100; i++) {
                addrs.add(bs.write(randomData(45)));
            }
            store.commit();

            for (long addr : addrs) {
                bs.delete(addr);
            }

            store.commit();
        }

        // Age the history (of the deletes!)
        Thread.sleep(400);

        verifyCommitIndex(store, 20);

        store.close();

    } catch (InterruptedException e) {
    } finally {
        store.destroy();
    }
}
 
Example 13, Project: database, File: TestServiceWhiteList.java
protected Properties getTripleStoreProperties() {

    final Properties tripleStoreProperties = new Properties();
    {

        tripleStoreProperties.setProperty(BigdataSail.Options.TRIPLES_MODE,
                "true");

        tripleStoreProperties.setProperty(Journal.Options.BUFFER_MODE,
                BufferMode.MemStore.name());

    }

    return tripleStoreProperties;
}
 
Example 14, Project: database, File: TestRWJournal.java
/**
 * Test of blob allocation, does not check on read back, just the
 * allocation
 */
public void test_blob_allocs() {
//  if (false) {
//      return;
//  }

    final Journal store = (Journal) getStore();

    try {

        final RWStrategy bufferStrategy = (RWStrategy) store.getBufferStrategy();

        final RWStore rw = bufferStrategy.getStore();
        final long numAllocs = rw.getTotalAllocations();
        final long startAllocations = rw.getTotalAllocationsSize();
        final int startBlob = 1024 * 256;
        final int endBlob = 1024 * 1256;
        final int[] faddrs = allocBatchBuffer(rw, 100, startBlob, endBlob);

        if (log.isInfoEnabled()) {
            final StringBuilder str = new StringBuilder();
            rw.showAllocators(str);
            log.info(str);
        }
    } finally {

        store.destroy();

    }

}
 
Example 15, Project: database, File: TestRWJournal.java
/**
 * Test low level RWStore add/removeAddress methods as used in HA
 * WriteCache replication to ensure Allocation consistency
 * 
 * @throws IOException
 */
public void testSimpleReplication() throws IOException {

	// Create a couple of stores with temp files
	Journal store1 = (Journal) getStore();
	Journal store2 = (Journal) getStore();
	
	final RWStore rw1 = ((RWStrategy) store1.getBufferStrategy()).getStore();
	final RWStore rw2 = ((RWStrategy) store2.getBufferStrategy()).getStore();

	assertTrue(rw1 != rw2);
	
	final int addr1 = rw1.alloc(123, null);
	rw2.addAddress(addr1, 123);
	assertTrue(rw1.physicalAddress(addr1) == rw2.physicalAddress(addr1));
	
	rw1.free(addr1, 123);
	rw2.removeAddress(addr1);
	
	// address will still be valid
	assertTrue(rw1.physicalAddress(addr1) == rw2.physicalAddress(addr1));
	
	// confirm address re-cycled
	assert(addr1 == rw1.alloc(123, null));
	
	// and can be "re-added"
	rw2.addAddress(addr1, 123);
}
 
Example 16, Project: database, File: BigdataConnectionTest.java
    /**
     * Overridden to destroy the backend database and its files on the disk.
     */
    @Override
    public void tearDown() throws Exception
    {

        final IIndexManager backend = testRepository == null ? null
                : ((BigdataSailRepository) testRepository).getSail()
                        .getIndexManager();

        /*
         * Note: The code in the block below was taken verbatim from
         * super.testDown() in order to explore a tear down issue in testOpen().
         */
        super.tearDown();
//        {
//            
//            testCon2.close();
//            testCon2 = null;
//
//            testCon.close();
//            testCon = null;
//
//            testRepository.shutDown();
//            testRepository = null;
//
//            vf = null;
//
//        }

        if (backend != null) {
            if(log.isInfoEnabled() && backend instanceof Journal)
                log.info(QueryEngineFactory.getInstance().getExistingQueryController((Journal)backend).getCounters());
            backend.destroy();
        }

    }
 
Example 17, Project: database, File: TestRWJournal.java
/**
 * Can be tested by removing RWStore call to journal.removeCommitRecordEntries
 * in freeDeferrals.
 * 
 * final int commitPointsRemoved = journal.removeCommitRecordEntries(fromKey, toKey);
 * 
 * replaced with
 * 
 * final int commitPointsRemoved = commitPointsRecycled;
 * 
 */
public void testVerifyCommitRecordIndex() {

    final Properties properties = new Properties(getProperties());

    properties.setProperty(
            AbstractTransactionService.Options.MIN_RELEASE_AGE, "100");

    final Journal store = (Journal) getStore(properties);
    try {

        final RWStrategy bs = (RWStrategy) store.getBufferStrategy();

        for (int r = 0; r < 10; r++) {
            final ArrayList<Long> addrs = new ArrayList<Long>();
            for (int i = 0; i < 100; i++) {
                addrs.add(bs.write(randomData(45)));
            }
            store.commit();

            for (long addr : addrs) {
                bs.delete(addr);
            }

            store.commit();

            // Age the history (of the deletes!)
            Thread.sleep(200);
        }

        final String fname = bs.getStore().getStoreFile().getAbsolutePath();

        store.close();

        VerifyCommitRecordIndex.main(new String[] { fname });

    } catch (InterruptedException e) {
    } finally {
        store.destroy();
    }
}
 
Example 18, Project: database, File: TestRWJournal.java
public void testResetHARootBlock() {

    final Properties properties = new Properties(getProperties());

    final Journal store = (Journal) getStore(properties);
    try {

        final RWStrategy bs = (RWStrategy) store.getBufferStrategy();
        final RWStore rw = bs.getStore();

        for (int r = 0; r < 10; r++) {
            final ArrayList<Long> addrs = new ArrayList<Long>();
            for (int i = 0; i < 1000; i++) {
                addrs.add(bs.write(randomData(2048)));
            }
            store.commit();

            final StorageStats stats1 = rw.getStorageStats();

            rw.resetFromHARootBlock(store.getRootBlockView());

            final StorageStats stats2 = rw.getStorageStats();

            // Now check that we can read all allocations after reset
            for (long addr : addrs) {
                store.read(addr);
            }

        }

        final String fname = bs.getStore().getStoreFile().getAbsolutePath();

        store.close();

    } finally {
        store.destroy();
    }
}
 
Example 19, Project: database, File: TestRWJournal.java
/**
 * State3
 * 
 * Allocate - Commit - Free - Commit
 * 
 * Tracks writeCache state through allocation
 */
public void test_allocCommitFreeCommitWriteCache() {
    final Journal store = (Journal) getStore();
    try {
        final RWStrategy bs = (RWStrategy) store.getBufferStrategy();

        final long addr = bs.write(randomData(78));

        // Has just been written so must be in cache
        assertTrue(bs.inWriteCache(addr));

        store.commit();

        bs.delete(addr);

        // since data is committed, should be accessible from any new
        // readCommitted transaction
        assertTrue(bs.inWriteCache(addr));

        store.commit();

        // If no transactions are around (which they are not) then must
        // be released as may be re-allocated
        assertFalse(bs.inWriteCache(addr));

    } finally {
        store.destroy();
    }
}
 
Example 20, Project: tinkerpop3, File: BasicRepositoryProvider.java
/**
 * Some reasonable defaults to get us up and running. Visit our online 
 * <a href="http://wiki.blazegraph.com">user guide</a> for more information
 * on configuration and performance optimization, or contact us for more direct
 * developer support.
 * 
 * @return
 *      config properties
 */
public static Properties getDefaultProperties() {
    
    final Properties props = new Properties();

    /*
     * Use the RW store for persistence.
     */
    props.setProperty(Journal.Options.BUFFER_MODE, BufferMode.DiskRW.toString());

    /*
     * Turn off all RDFS/OWL inference.
     */
    props.setProperty(BigdataSail.Options.AXIOMS_CLASS, NoAxioms.class.getName());
    props.setProperty(BigdataSail.Options.TRUTH_MAINTENANCE, "false");
    props.setProperty(BigdataSail.Options.JUSTIFY, "false");

    /*
     * Turn on the text index.
     */
    props.setProperty(BigdataSail.Options.TEXT_INDEX, "true");

    /*
     * Turn off quads and turn on statement identifiers.
     */
    props.setProperty(BigdataSail.Options.QUADS, "false");
    props.setProperty(BigdataSail.Options.STATEMENT_IDENTIFIERS, "true");
    
    /*
     * We will manage the grounding of sids manually.
     */
    props.setProperty(AbstractTripleStore.Options.COMPUTE_CLOSURE_FOR_SIDS, "false");

    /*
     * Inline string literals up to 10 characters (avoids dictionary indices
     * for short strings).
     */
    props.setProperty(AbstractTripleStore.Options.INLINE_TEXT_LITERALS, "true");
    props.setProperty(AbstractTripleStore.Options.MAX_INLINE_TEXT_LENGTH, "10");
    
    /*
     * Custom core Tinkerpop3 vocabulary.  Applications will probably want
     * to extend this.
     */
    props.setProperty(AbstractTripleStore.Options.VOCABULARY_CLASS, 
            Tinkerpop3CoreVocab_v10.class.getName());
    
    /*
     * Use a multi-purpose inline URI factory that will auto-inline URIs
     * in the <blaze:> namespace.
     */
    props.setProperty(AbstractTripleStore.Options.INLINE_URI_FACTORY_CLASS,
            Tinkerpop3InlineURIFactory.class.getName());
    
    /*
     * Custom Tinkerpop3 extension factory for the ListIndexExtension IV,
     * used for Cardinality.list.
     */
    props.setProperty(AbstractTripleStore.Options.EXTENSION_FACTORY_CLASS, 
            Tinkerpop3ExtensionFactory.class.getName());
    
    /*
     * Turn on history.  You can turn history off by not setting this
     * property.
     */
    props.setProperty(AbstractTripleStore.Options.RDR_HISTORY_CLASS, 
            RDRHistory.class.getName());

    return props;
    
}
 
Example 21
public static void main(String[] args) throws Exception {

    final String namespace = "test";

    final Properties journalProperties = new Properties();
    {
        journalProperties.setProperty(Journal.Options.BUFFER_MODE,
                BufferMode.MemStore.name());
    }

    final RemoteRepositoryManager repo = new RemoteRepositoryManager(
            serviceURL, false /* useLBS */);

    repo.createRepository(namespace, journalProperties);

    try {

        /*
         * Load data from file located in the resource folder
         * src/main/resources/data.n3
         */
        final String resource = "/data.n3";
        loadDataFromResource(repo, namespace, resource);

        // execute query
        final TupleQueryResult result = repo.getRepositoryForNamespace(namespace)
                .prepareTupleQuery(QUERY)
                .evaluate();

        // result processing
        try {
            while (result.hasNext()) {
                final BindingSet bs = result.next();
                log.info(bs);
            }
        } finally {
            result.close();
        }

    } finally {
        repo.close();
    }

}
 
Example 22, Project: blazegraph-samples, File: BlazegraphSamplesTest.java
@Before
public void startNss() throws Exception {

    final Properties journalProperties = new Properties();
    {
        journalProperties.setProperty(Journal.Options.BUFFER_MODE,
                BufferMode.MemStore.name());
    }

    m_indexManager = new Journal(journalProperties);

    nss = NanoSparqlServer.newInstance(9999, "jettyMavenTest.xml", m_indexManager, null);

    nss.start();

}
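The excerpt above only shows the setup. A minimal teardown sketch for the same fixture might look like the following, assuming nss is the server instance returned by NanoSparqlServer.newInstance and m_indexManager is the Journal created above; the @After hook itself is not part of the quoted source.

@After
public void stopNss() throws Exception {
    if (nss != null) {
        nss.stop();               // stop the embedded NanoSparqlServer instance
    }
    if (m_indexManager != null) {
        m_indexManager.destroy(); // release the MemStore-backed Journal
    }
}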
 
Example 23, Project: database, File: TestNativeDistinctFilter.java
public JoinSetup(final String kbNamespace) {

            if (kbNamespace == null)
                throw new IllegalArgumentException();
            
            final Properties properties = new Properties();

            properties.setProperty(Journal.Options.BUFFER_MODE,
                    BufferMode.Transient.toString());
            
            jnl = new Journal(properties);

            // create the kb.
            final AbstractTripleStore kb = new LocalTripleStore(jnl,
                    kbNamespace, ITx.UNISOLATED, properties);

            kb.create();

            this.spoNamespace = kb.getSPORelation().getNamespace();

            // Setup the vocabulary.
            {
                final BigdataValueFactory vf = kb.getValueFactory();
                final String uriString = "http://bigdata.com/";
                final BigdataURI _knows = vf.asValue(FOAFVocabularyDecl.knows);
                final BigdataURI _brad = vf.createURI(uriString+"brad");
                final BigdataURI _john = vf.createURI(uriString+"john");
                final BigdataURI _fred = vf.createURI(uriString+"fred");
                final BigdataURI _mary = vf.createURI(uriString+"mary");
                final BigdataURI _paul = vf.createURI(uriString+"paul");
                final BigdataURI _leon = vf.createURI(uriString+"leon");
                final BigdataURI _luke = vf.createURI(uriString+"luke");

                final BigdataValue[] a = new BigdataValue[] {
                      _knows,//
                      _brad,
                      _john,
                      _fred,
                      _mary,
                      _paul,
                      _leon,
                      _luke
                };

                kb.getLexiconRelation()
                        .addTerms(a, a.length, false/* readOnly */);

                knows = _knows.getIV();
                brad = _brad.getIV();
                john = _john.getIV();
                fred = _fred.getIV();
                mary = _mary.getIV();
                paul = _paul.getIV();
                leon = _leon.getIV();
                luke = _luke.getIV();

            }

//            // data to insert (in key order for convenience).
//            final SPO[] a = {//
//                    new SPO(paul, knows, mary, StatementEnum.Explicit),// [0]
//                    new SPO(paul, knows, brad, StatementEnum.Explicit),// [1]
//                    
//                    new SPO(john, knows, mary, StatementEnum.Explicit),// [2]
//                    new SPO(john, knows, brad, StatementEnum.Explicit),// [3]
//                    
//                    new SPO(mary, knows, brad, StatementEnum.Explicit),// [4]
//                    
//                    new SPO(brad, knows, fred, StatementEnum.Explicit),// [5]
//                    new SPO(brad, knows, leon, StatementEnum.Explicit),// [6]
//            };
//
//            // insert data (the records are not pre-sorted).
//            kb.addStatements(a, a.length);
//
//            // Do commit since not scale-out.
//            jnl.commit();

        }
 
Example 24, Project: database, File: TestMemStore.java
public void test_stressSessionProtection() {
	// Sequential logic

	final Properties p = getProperties();

	// Note: No longer the default. Must be explicitly set.
	p.setProperty(AbstractTransactionService.Options.MIN_RELEASE_AGE, "0");

	final Journal store = getJournal(p);
	try {
		final IRWStrategy bs = (IRWStrategy) store.getBufferStrategy();

		byte[] buf = new byte[300]; // Just some data
		r.nextBytes(buf);

		ByteBuffer bb = ByteBuffer.wrap(buf);

		IRawTx tx = bs.newTx();
		ArrayList<Long> addrs = new ArrayList<Long>();

		// We just want to stress a single allocator, so do not
		//	make more allocations than a single allocator can
		//	handle since this would prevent predictable
		//	recycling

		for (int i = 0; i < 1000; i++) {
			addrs.add(store.write(bb));
			bb.flip();
		}

		for (int i = 0; i < 1000; i += 2) {
			store.delete(addrs.get(i));
		}

		// now release session to make addrs reavailable
		tx.close();

		for (int i = 0; i < 1000; i += 2) {
			long addr = store.write(bb);
			assertTrue(addr == addrs.get(i));
			bb.flip();
		}

		store.commit();

		bb.position(0);

		for (int i = 0; i < 500; i++) {
			bb.position(0);

			ByteBuffer rdBuf = store.read(addrs.get(i));

			// should be able to
			assertEquals(bb, rdBuf);
		}

		store.commit();
	} finally {
		store.destroy();
	}
}
 
Example 25, Project: database, File: TestMemStore.java
public void test_allocCommitFreeWithHistory() {
	final Properties properties = new Properties(getProperties());

	properties.setProperty(
			AbstractTransactionService.Options.MIN_RELEASE_AGE, "100");

	final Journal store = getJournal(properties);
	try {

		MemStrategy bs = (MemStrategy) store.getBufferStrategy();

		final long addr = bs.write(randomData(78));

		store.commit();

		bs.delete(addr);

		assertTrue(bs.isCommitted(addr));
	} finally {
		store.destroy();
	}
}
 
Example 26, Project: database, File: TestMemStore.java
public void test_blobDeferredFrees() {

    final Properties properties = new Properties(getProperties());

    properties.setProperty(
            AbstractTransactionService.Options.MIN_RELEASE_AGE, "100");

    final Journal store = getJournal(properties);
    try {

        final MemStrategy bs = (MemStrategy) store.getBufferStrategy();

        final ArrayList<Long> addrs = new ArrayList<Long>();
        for (int i = 0; i < 4000; i++) {
            addrs.add(bs.write(randomData(45)));
        }
        store.commit();

        for (long addr : addrs) {
            bs.delete(addr);
        }
        for (int i = 0; i < 4000; i++) {
            if (!bs.isCommitted(addrs.get(i))) {
                fail("i=" + i + ", addr=" + addrs.get(i));
            }
        }

        store.commit();

        // Age the history (of the deletes!)
        Thread.sleep(6000);

        // modify the store, but do not allocate a similar size block,
        // since we want to see that the old allocations have been removed
        final long addr2 = bs.write(randomData(220)); // modify store

        store.commit();
        bs.delete(addr2); // modify store
        store.commit();

        // delete is actioned
        for (int i = 0; i < 4000; i++) {
            assertFalse(bs.isCommitted(addrs.get(i)));
        }
    } catch (InterruptedException e) {
    } finally {
        store.destroy();
    }
}
 
Example 27, Project: database, File: AbstractRawStoreTestCase.java
IRawStore ensureStreamStore(final IRawStore store) {
	return store instanceof Journal ? ((Journal) store).getBufferStrategy() : store;
}
 
Example 28, Project: database, File: DumpLexicon.java
/**
   * Open the {@link IIndexManager} identified by the property file.
   * 
   * @param propertyFile
   *            The property file (for a standalone bigdata instance) or the
   *            jini configuration file (for a bigdata federation). The file
   *            must end with either ".properties" or ".config".
   *            
   *            Starting with 1.5.2 the remote dump lexicon capability
   *            was moved into the bigdata-jini artifact.  See BLZG-1370.
   *            
   * @return The {@link IIndexManager}.
   */
protected static IIndexManager openIndexManager(final String propertyFile) {

    final File file = new File(propertyFile);

    if (!file.exists()) {

        throw new RuntimeException("Could not find file: " + file);

    }

    if (propertyFile.endsWith(CONFIG_EXT)) {
        // scale-out.
        throw new RuntimeException(REMOTE_ERR_MSG);
    } else if (propertyFile.endsWith(PROPERTY_EXT)) {
        // local journal.
    } else {
        /*
         * Note: This is a hack, but we are recognizing the jini
         * configuration file with a .config extension and the journal
         * properties file with a .properties extension.
         */
        throw new RuntimeException(
                "File must have '.config' or '.properties' extension: "
                        + file);
    }

    final IIndexManager indexManager;
    try {

        /*
         * Note: we only need to specify the FILE when re-opening a journal
         * containing a pre-existing KB.
         */
        final Properties properties = new Properties();
        {
            // Read the properties from the file.
            final InputStream is = new BufferedInputStream(
                    new FileInputStream(propertyFile));
            try {
                properties.load(is);
            } finally {
                is.close();
            }
            if (System.getProperty(BigdataSail.Options.FILE) != null) {
                // Override/set from the environment.
                properties.setProperty(BigdataSail.Options.FILE,
                        System.getProperty(BigdataSail.Options.FILE));
            }
        }

        final Journal jnl = new Journal(properties);

        indexManager = jnl;

    } catch (Exception ex) {

        throw new RuntimeException(ex);

    }

    return indexManager;

}
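A hypothetical caller of openIndexManager() passes the path of a standalone .properties file and closes the journal when finished; the path below is illustrative only.

final IIndexManager indexManager = openIndexManager("/opt/blazegraph/standalone.properties");
try {
    // ... dump the lexicon against indexManager ...
} finally {
    if (indexManager instanceof Journal) {
        ((Journal) indexManager).close(); // local journal: release the backing store
    }
}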
 
Example 29, Project: database, File: TestRWJournal.java
public void testSimpleUnisolatedAllocationContextRecycling() {
	
	final Journal store = (Journal) getStore(0);

	try {

		final RWStrategy bufferStrategy = (RWStrategy) store.getBufferStrategy();

		final RWStore rw = bufferStrategy.getStore();
		
		final IAllocationContext cntxt = rw.newAllocationContext(false /*Isolated*/); // Unisolated context
		
		// allocate three global addresses of different sizes
		final int sze1 = 48; // 64 bytes allocator
		final int sze2 = 72; // 128	
		final int sze3 = 135; // 256
		
		final int addr1 = rw.alloc(sze1, cntxt);
		final int addr2 = rw.alloc(sze2, cntxt);
		final int addr3 = rw.alloc(sze3, cntxt);
		
		// Commit the addresses
		store.commit();
		
		showAddress(rw, addr1);
		showAddress(rw, addr2);
		showAddress(rw, addr3);
		
		// Now create some allocation contexts
		final IAllocationContext iso_cntxt = rw.newAllocationContext(true /*Isolated*/); // Isolated context
		
		// now allocate a new unisolated address
		rw.alloc(sze1, cntxt);
		// free the original
		rw.free(addr1,  sze1, cntxt);
		// and ensure that allocator is 'owned' by an iso
		rw.alloc(sze1, iso_cntxt);
		
		// now grab a pristine allocator
		rw.alloc(sze2,  iso_cntxt);
		// and free the original address (with the unisolated context)
		rw.free(addr2, sze2, cntxt);				
		
		if (log.isInfoEnabled()) {
			final StringBuilder str = new StringBuilder();
			rw.showAllocators(str);
			
			log.info(str);
		}	
		
		store.abort();
		
		log.info("ABORT");
		showAddress(rw, addr1);
		showAddress(rw, addr2);
		showAddress(rw, addr3);
		
		// This is the point that the AllocationContext ends up setting the free bits, iff we have committed after the abort.				
		rw.detachContext(iso_cntxt);
		
		log.info("DETACH");
		showAddress(rw, addr1);
		showAddress(rw, addr2);
		showAddress(rw, addr3);
		
		store.commit();
		
		log.info("COMMIT");
		showAddress(rw, addr1);
		showAddress(rw, addr2);
		showAddress(rw, addr3);
		
	} finally {
		store.destroy();
	}
}
 
Example 30, Project: database, File: Node.java
    /**
     * If the caller's <i>toKey</i> is GT the separator keys for the children of
     * this node then the iterator will need to visit the rightSibling of the
     * node and this method will schedule the memoization of the node's
     * rightSibling in order to reduce the IO latency when the iterator visits
     * that rightSibling (non-blocking).
     * 
     * @param node
     *            A node.
     * @param toKey
     *            The exclusive upper bound of some iterator.
     */
    protected void prefetchRightSibling(final Node node, final byte[] toKey) {
        
        final int nkeys = node.getKeyCount();

        final byte[] lastSeparatorKey = node.getKeys().get(nkeys - 1);

        if (BytesUtil.compareBytes(toKey, lastSeparatorKey) <= 0) {

            /*
             * Since the toKey is LTE to the lastSeparatorKey on this node the
             * last tuple to be visited by the iterator is spanned by this node
             * and we will not visit the node's rightSibling.
             * 
             * @todo This test could be optimized if IRaba exposed a method for
             * unsigned byte[] comparisons against the coded representation of
             * the keys.
             */

            return;

        }

        // The parent of this node.
        final Node p = node.parent.get();

//        /*
//         * Test to see if the rightSibling is already materialized.
//         */
//        
//        Note: Don't bother testing as we will test in the task below anyway.
//        
//        Node rightSibling = (Node) p.getRightSibling(node,
//                false/* materialize */);
//
//        if (rightSibling != null) {
//
//            /*
//             * The rightSibling is already materialized and getRightSibling()
//             * touches the rightSibling as a side-effect so it will be retained
//             * longer. Return now as there is nothing to do.
//             */
//
//            return;
//            
//        }

        /*
         * Submit a task which will materialize that right sibling.
         * 
         * Note: This task will only materialize a rightSibling of a common
         * parent. If [node] is the last child of the parent [p] then you would
         * need to ascend to the parent of [p] and then descend again, which is
         * not the specified behavior for getRightSibling(). Since this is just
         * an optimization for IO scheduling, I think that it is fine as it is.
         * 
         * Note: We do not track the future of this task. The task will have a
         * side effect on the parent/child weak references among the nodes in
         * the B+Tree, on the backing hard reference ring buffer, and on the
         * cache of materialized disk records. That side effect is all that we
         * are seeking.
         * 
         * Note: If the B+Tree is concurrently closed, then this task will error
         * out.  That is fine.
         */
        final Executor s = ((Journal) btree.store).getReadExecutor();

        s.execute(new Runnable() {

            public void run() {

                if (!p.btree.isOpen()) {

                    // No longer open.
                    return;
                    
                }

                // Materialize the right sibling.
                p.getRightSibling(node, true/* materialize */);

            }

        });

    }