* added an object allocation monitor to the PerformanceMemory page

* added some final modifiers
* changed shutdown sequence order

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@1211 6c8d7289-2bf4-0310-a012-ef5d649a1542
pull/1/head
orbiter 20 years ago
parent 4ff3d219e8
commit 83a34b838d

@ -312,6 +312,30 @@ Increasing this cache may speed up crawling, but not much space is needed, so th
</table> </table>
</p> </p>
<p>
<div class=small><b>Write Cache Object Allocation:</b></div>
<table border="0" cellpadding="2" cellspacing="1">
<tr class="TableHeader" valign="bottom">
<td class="small">&nbsp;<br>Chunk Sizes</td>
#{sizes}#
<td class="small">#[chunk]#</td>
#{/sizes}#
</tr>
<tr class="TableCellDark">
<td class="TableHeader" align="left">now alive in write cache</td>
#{alive}#
<td class="small">#[count]#</td>
#{/alive}#
</tr>
<tr class="TableCellDark">
<td class="TableHeader" align="left">currently held in write buffer heap</td>
#{heap}#
<td class="small">#[count]#</td>
#{/heap}#
</tr>
</table>
</p>
#%env/templates/footer.template%# #%env/templates/footer.template%#
</body> </body>
</html> </html>

@ -43,6 +43,7 @@
//javac -classpath .:../classes PerformanceMemory_p.java //javac -classpath .:../classes PerformanceMemory_p.java
//if the shell's current path is HTROOT //if the shell's current path is HTROOT
import java.util.Iterator;
import java.util.Map; import java.util.Map;
import java.io.File; import java.io.File;
@ -52,6 +53,7 @@ import de.anomic.server.serverObjects;
import de.anomic.server.serverSwitch; import de.anomic.server.serverSwitch;
import de.anomic.server.serverFileUtils; import de.anomic.server.serverFileUtils;
import de.anomic.yacy.yacyCore; import de.anomic.yacy.yacyCore;
import de.anomic.kelondro.kelondroObjectSpace;
public class PerformanceMemory_p { public class PerformanceMemory_p {
@ -196,12 +198,12 @@ public class PerformanceMemory_p {
req = plasmaSwitchboard.robots.size(); req = plasmaSwitchboard.robots.size();
chk = plasmaSwitchboard.robots.dbCacheChunkSize(); chk = plasmaSwitchboard.robots.dbCacheChunkSize();
slt = plasmaSwitchboard.robots.dbCacheFillStatus(); slt = plasmaSwitchboard.robots.dbCacheFillStatus();
putprop(prop, env, "Robots", set); putprop(prop, env, "Robots", set);
req = sb.profiles.size(); req = sb.profiles.size();
chk = sb.profiles.dbCacheChunkSize(); chk = sb.profiles.dbCacheChunkSize();
slt = sb.profiles.dbCacheFillStatus(); slt = sb.profiles.dbCacheFillStatus();
putprop(prop, env, "Profiles", set); putprop(prop, env, "Profiles", set);
prop.put("usedTotal", usedTotal / MB); prop.put("usedTotal", usedTotal / MB);
prop.put("currTotal", currTotal / MB); prop.put("currTotal", currTotal / MB);
@ -214,6 +216,38 @@ public class PerformanceMemory_p {
prop.put("Xmx", Xmx.substring(0, Xmx.length() - 1)); prop.put("Xmx", Xmx.substring(0, Xmx.length() - 1));
String Xms = env.getConfig("javastart_Xms", "Xms10m").substring(3); String Xms = env.getConfig("javastart_Xms", "Xms10m").substring(3);
prop.put("Xms", Xms.substring(0, Xms.length() - 1)); prop.put("Xms", Xms.substring(0, Xms.length() - 1));
// create statistics about write cache object space
int chunksizes = Math.max(
((Integer) kelondroObjectSpace.statAlive().lastKey()).intValue(),
((Integer) kelondroObjectSpace.statHeap().lastKey()).intValue()
);
int[] statAlive = new int[chunksizes];
int[] statHeap = new int[chunksizes];
for (int i = 0; i < chunksizes; i++) { statAlive[i] = 0; statHeap[i] = 0; }
Map.Entry entry;
Iterator i = kelondroObjectSpace.statAlive().entrySet().iterator();
while (i.hasNext()) {
entry = (Map.Entry) i.next();
statAlive[((Integer) entry.getKey()).intValue() - 1] = ((Integer) entry.getValue()).intValue();
}
i = kelondroObjectSpace.statHeap().entrySet().iterator();
while (i.hasNext()) {
entry = (Map.Entry) i.next();
statHeap[((Integer) entry.getKey()).intValue() - 1] = ((Integer) entry.getValue()).intValue();
}
int c = 0;
for (int j = 0; j < chunksizes; j++) {
if ((statAlive[j] > 0) || (statHeap[j] > 0)) {
prop.put("sizes_" + c + "_chunk", Integer.toString(j + 1));
prop.put("alive_" + c + "_count", Integer.toString(statAlive[j]));
prop.put("heap_" + c + "_count", Integer.toString(statHeap[j]));
c++;
}
}
prop.put("sizes", Integer.toString(c));
prop.put("alive", Integer.toString(c));
prop.put("heap" , Integer.toString(c));
// return rewrite values for templates // return rewrite values for templates
return prop; return prop;

@ -94,8 +94,8 @@ public class messageBoard {
return database.cacheFillStatus(); return database.cacheFillStatus();
} }
public void close() throws IOException { public void close() {
database.close(); try {database.close();} catch (IOException e) {}
} }
private static String dateString() { private static String dateString() {

@ -116,9 +116,9 @@ public class wikiBoard {
return new int[]{a[0] + b[0], a[1] + b[1], a[2] + b[2], a[3] + b[3]}; return new int[]{a[0] + b[0], a[1] + b[1], a[2] + b[2], a[3] + b[3]};
} }
public void close() throws IOException { public void close() {
datbase.close(); try {datbase.close();} catch (IOException e) {}
bkpbase.close(); try {bkpbase.close();} catch (IOException e) {}
} }
private static String dateString() { private static String dateString() {

@ -107,6 +107,8 @@ public final class kelondroBufferedIOChunks extends kelondroAbstractIOChunks imp
public void write(long pos, byte[] b, int off, int len) throws IOException { public void write(long pos, byte[] b, int off, int len) throws IOException {
assert (b.length >= off + len): "write pos=" + pos + ", b.length=" + b.length + ", b='" + new String(b) + "', off=" + off + ", len=" + len; assert (b.length >= off + len): "write pos=" + pos + ", b.length=" + b.length + ", b='" + new String(b) + "', off=" + off + ", len=" + len;
//if (len > 10) System.out.println("WRITE(" + name + ", " + pos + ", " + b.length + ", " + off + ", " + len + ")");
// do the write into buffer // do the write into buffer
byte[] bb = kelondroObjectSpace.alloc(len); byte[] bb = kelondroObjectSpace.alloc(len);
System.arraycopy(b, off, bb, 0, len); System.arraycopy(b, off, bb, 0, len);

@ -43,18 +43,40 @@ package de.anomic.kelondro;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.HashMap; import java.util.HashMap;
import java.util.Iterator;
import java.util.TreeMap;
import java.util.Map;
public class kelondroObjectSpace { public class kelondroObjectSpace {
private static final int minSize = 10; private static final int minSize = 10;
private static final int maxSize = 4096; private static final int maxSize = 256;
private static HashMap objects = new HashMap(); private static HashMap objHeap = new HashMap();
private static TreeMap aliveNow = new TreeMap();
//private static TreeMap aliveMax = new TreeMap();
private static void incAlive(int size) {
    // Records that one more chunk of the given size is alive
    // (i.e. handed out by alloc() and not yet recycled).
    final Integer key = new Integer(size);
    synchronized (aliveNow) {
        final Integer current = (Integer) aliveNow.get(key);
        final int next = (current == null) ? 1 : current.intValue() + 1;
        aliveNow.put(key, new Integer(next));
    }
}
private static void decAlive(int size) {
    // Records that one chunk of the given size was recycled.
    // A missing entry starts at -1, mirroring incAlive's bookkeeping;
    // the count may therefore go negative if recycles outnumber allocs.
    final Integer key = new Integer(size);
    synchronized (aliveNow) {
        final Integer current = (Integer) aliveNow.get(key);
        final int next = (current == null) ? -1 : current.intValue() - 1;
        aliveNow.put(key, new Integer(next));
    }
}
public static byte[] alloc(int len) { public static byte[] alloc(int len) {
if ((len < minSize) || (len > maxSize)) return new byte[len]; if ((len < minSize) || (len > maxSize)) return new byte[len];
synchronized (objects) { incAlive(len);
ArrayList buf = (ArrayList) objects.get(new Integer(len)); synchronized (objHeap) {
ArrayList buf = (ArrayList) objHeap.get(new Integer(len));
if ((buf == null) || (buf.size() == 0)) return new byte[len]; if ((buf == null) || (buf.size() == 0)) return new byte[len];
return (byte[]) buf.remove(buf.size() - 1); return (byte[]) buf.remove(buf.size() - 1);
} }
@ -65,13 +87,14 @@ public class kelondroObjectSpace {
b = null; b = null;
return; return;
} }
synchronized (objects) { decAlive(b.length);
synchronized (objHeap) {
final Integer i = new Integer(b.length); final Integer i = new Integer(b.length);
ArrayList buf = (ArrayList) objects.get(i); ArrayList buf = (ArrayList) objHeap.get(i);
if (buf == null) { if (buf == null) {
buf = new ArrayList(); buf = new ArrayList();
buf.add(b); buf.add(b);
objects.put(i, buf); objHeap.put(i, buf);
} else { } else {
buf.add(b); buf.add(b);
} }
@ -79,4 +102,24 @@ public class kelondroObjectSpace {
b = null; b = null;
} }
public static TreeMap statAlive() {
    // Returns a snapshot of the alive-chunk statistic: a mapping from
    // Integer (chunk size) to Integer (number of chunks currently alive).
    // A copy is made under the aliveNow lock — consistent with statHeap(),
    // which also returns a snapshot — so callers can iterate the result
    // without risking a ConcurrentModificationException while incAlive/
    // decAlive mutate the live map concurrently.
    synchronized (aliveNow) {
        return new TreeMap(aliveNow);
    }
}
public static TreeMap statHeap() {
    // Creates a statistic of this object space: a mapping from
    // Integer (chunk size) to Integer (number of buffers currently
    // pooled and available for re-use at that size).
    final TreeMap stat = new TreeMap();
    synchronized (objHeap) {
        final Iterator it = objHeap.entrySet().iterator();
        while (it.hasNext()) {
            final Map.Entry e = (Map.Entry) it.next();
            final ArrayList pool = (ArrayList) e.getValue();
            stat.put(e.getKey(), new Integer(pool.size()));
        }
    }
    return stat;
}
} }

@ -419,7 +419,7 @@ public class kelondroRecords {
return new File(filename); return new File(filename);
} }
protected int cacheChunkSize(boolean cacheControl) { protected final int cacheChunkSize(boolean cacheControl) {
return this.headchunksize + element_in_cache + ((cacheControl) ? cache_control_entry : 0); return this.headchunksize + element_in_cache + ((cacheControl) ? cache_control_entry : 0);
} }
@ -475,7 +475,7 @@ public class kelondroRecords {
dispose(handle); dispose(handle);
} }
public class Node { public final class Node {
// an Node holds all information of one row of data. This includes the key to the entry // an Node holds all information of one row of data. This includes the key to the entry
// which is stored as entry element at position 0 // which is stored as entry element at position 0
// an Node object can be created in two ways: // an Node object can be created in two ways:
@ -732,12 +732,14 @@ public class kelondroRecords {
// save head // save head
if (this.headChanged) { if (this.headChanged) {
//System.out.println("WRITEH(" + filename + ", " + seekpos(this.handle) + ", " + this.headChunk.length + ")");
entryFile.write(seekpos(this.handle), this.headChunk); entryFile.write(seekpos(this.handle), this.headChunk);
update2Cache(cachePriority); update2Cache(cachePriority);
} }
// save tail // save tail
if ((this.tailChunk != null) && (this.tailChanged)) { if ((this.tailChunk != null) && (this.tailChanged)) {
//System.out.println("WRITET(" + filename + ", " + (seekpos(this.handle) + headchunksize) + ", " + this.tailChunk.length + ")");
entryFile.write(seekpos(this.handle) + headchunksize, this.tailChunk); entryFile.write(seekpos(this.handle) + headchunksize, this.tailChunk);
} }
} }
@ -954,7 +956,7 @@ public class kelondroRecords {
return this.COLWIDTHS[column]; return this.COLWIDTHS[column];
} }
private long seekpos(Handle handle) { private final long seekpos(Handle handle) {
assert (handle.index >= 0): "handle index too low: " + handle.index; assert (handle.index >= 0): "handle index too low: " + handle.index;
assert (handle.index < USAGE.allCount()): "handle index too high:" + handle.index; assert (handle.index < USAGE.allCount()): "handle index too high:" + handle.index;
return POS_NODES + ((long) recordsize * handle.index); return POS_NODES + ((long) recordsize * handle.index);
@ -1069,21 +1071,21 @@ public class kelondroRecords {
return x; return x;
} }
public static void NUL2bytes(byte[] b, int offset) { public final static void NUL2bytes(byte[] b, int offset) {
b[offset ] = (byte) (0XFF & (NUL >> 24)); b[offset ] = (byte) (0XFF & (NUL >> 24));
b[offset + 1] = (byte) (0XFF & (NUL >> 16)); b[offset + 1] = (byte) (0XFF & (NUL >> 16));
b[offset + 2] = (byte) (0XFF & (NUL >> 8)); b[offset + 2] = (byte) (0XFF & (NUL >> 8));
b[offset + 3] = (byte) (0XFF & NUL); b[offset + 3] = (byte) (0XFF & NUL);
} }
public static void int2bytes(long i, byte[] b, int offset) { public final static void int2bytes(long i, byte[] b, int offset) {
b[offset ] = (byte) (0XFF & (i >> 24)); b[offset ] = (byte) (0XFF & (i >> 24));
b[offset + 1] = (byte) (0XFF & (i >> 16)); b[offset + 1] = (byte) (0XFF & (i >> 16));
b[offset + 2] = (byte) (0XFF & (i >> 8)); b[offset + 2] = (byte) (0XFF & (i >> 8));
b[offset + 3] = (byte) (0XFF & i); b[offset + 3] = (byte) (0XFF & i);
} }
public static int bytes2int(byte[] b, int offset) { public final static int bytes2int(byte[] b, int offset) {
return ( return (
((b[offset ] & 0xff) << 24) | ((b[offset ] & 0xff) << 24) |
((b[offset + 1] & 0xff) << 16) | ((b[offset + 1] & 0xff) << 16) |

@ -174,14 +174,12 @@ public class plasmaCrawlNURL extends plasmaURL {
public void close() { public void close() {
coreStack.close(); coreStack.close();
try { limitStack.close();
limitStack.close(); overhangStack.close();
overhangStack.close(); remoteStack.close();
remoteStack.close(); try {imageStack.close();} catch (IOException e) {}
imageStack.close(); try {movieStack.close();} catch (IOException e) {}
movieStack.close(); try {musicStack.close();} catch (IOException e) {}
musicStack.close();
} catch (IOException e) {}
try { super.close(); } catch (IOException e) {} try { super.close(); } catch (IOException e) {}
} }

@ -291,8 +291,8 @@ public final class plasmaHTCache {
} }
} }
public void close() throws IOException { public void close() {
this.responseHeaderDB.close(); try {this.responseHeaderDB.close();} catch (IOException e) {}
} }
private String ageString(long date, File f) { private String ageString(long date, File f) {

@ -737,30 +737,26 @@ public final class plasmaSwitchboard extends serverAbstractSwitch implements ser
public void close() { public void close() {
log.logConfig("SWITCHBOARD SHUTDOWN STEP 1: sending termination signal to managed threads:"); log.logConfig("SWITCHBOARD SHUTDOWN STEP 1: sending termination signal to managed threads:");
terminateAllThreads(true); terminateAllThreads(true);
log.logConfig("SWITCHBOARD SHUTDOWN STEP 2: sending termination signal to threaded indexing (stand by...)"); log.logConfig("SWITCHBOARD SHUTDOWN STEP 2: sending termination signal to database manager");
// closing all still running db importer jobs
plasmaDbImporter.close();
indexDistribution.close();
cacheLoader.close();
wikiDB.close();
userDB.close();
messageDB.close();
if (facilityDB != null) try {facilityDB.close();} catch (IOException e) {}
sbStackCrawlThread.close();
urlPool.close();
profiles.close();
robots.close();
parser.close();
cacheManager.close();
sbQueue.close();
flushCitationReference(crg, "crg");
log.logConfig("SWITCHBOARD SHUTDOWN STEP 3: sending termination signal to threaded indexing (stand by...)");
int waitingBoundSeconds = Integer.parseInt(getConfig("maxWaitingWordFlush", "120")); int waitingBoundSeconds = Integer.parseInt(getConfig("maxWaitingWordFlush", "120"));
wordIndex.close(waitingBoundSeconds); wordIndex.close(waitingBoundSeconds);
log.logConfig("SWITCHBOARD SHUTDOWN STEP 3: sending termination signal to database manager");
try {
// closing all still running db importer jobs
plasmaDbImporter.close();
indexDistribution.close();
cacheLoader.close();
wikiDB.close();
userDB.close();
messageDB.close();
if (facilityDB != null) facilityDB.close();
sbStackCrawlThread.close();
urlPool.close();
profiles.close();
robots.close();
parser.close();
cacheManager.close();
sbQueue.close();
//flushCitationReference(crl, "crl");
flushCitationReference(crg, "crg");
} catch (IOException e) {}
log.logConfig("SWITCHBOARD SHUTDOWN TERMINATED"); log.logConfig("SWITCHBOARD SHUTDOWN TERMINATED");
} }

@ -82,9 +82,9 @@ public class plasmaURLPool {
return null; return null;
} }
public void close() throws IOException { public void close() {
loadedURL.close(); try {loadedURL.close();} catch (IOException e) {}
noticeURL.close(); noticeURL.close();
errorURL.close(); try {errorURL.close();} catch (IOException e) {}
} }
} }

@ -167,17 +167,16 @@ public final class plasmaWordIndexAssortmentCluster {
int [] spaces = new int[testsize]; int [] spaces = new int[testsize];
for (int i = testsize - 1; i >= 0; i--) spaces[i] = 0; for (int i = testsize - 1; i >= 0; i--) spaces[i] = 0;
int need = newContainer.size(); int need = newContainer.size();
int s = testsize - 1; int selectedAssortment = testsize - 1;
while (s >= 0) { while (selectedAssortment >= 0) {
spaces[s] = (assortments[s].get(wordHash) == null) ? (s + 1) : 0; spaces[selectedAssortment] = (assortments[selectedAssortment].get(wordHash) == null) ? (selectedAssortment + 1) : 0;
need -= spaces[s]; need -= spaces[selectedAssortment];
assert (need >= 0); assert (need >= 0);
if (need == 0) break; if (need == 0) break;
s = (need < s) ? need : s - 1; selectedAssortment = (need < selectedAssortment) ? need : selectedAssortment - 1;
} }
if (need == 0) { if (need == 0) {
// we found spaces so that we can put in the newContainer into these spaces // we found spaces so that we can put in the newContainer into these spaces
plasmaWordIndexEntryContainer c; plasmaWordIndexEntryContainer c;
Iterator i = newContainer.entries(); Iterator i = newContainer.entries();
for (int j = testsize - 1; j >= 0; j--) { for (int j = testsize - 1; j >= 0; j--) {
@ -189,7 +188,6 @@ public final class plasmaWordIndexAssortmentCluster {
} }
storeForced(wordHash, c); storeForced(wordHash, c);
} }
return null; return null;
} }

@ -464,7 +464,7 @@ public final class plasmaWordIndexCache implements plasmaWordIndexInterface {
entries = null; entries = null;
// force flush (sometimes) // force flush (sometimes)
if (System.currentTimeMillis() % 5 == 0) flushFromMem(); if (System.currentTimeMillis() % 7 == 4) flushFromMem();
} }
return added; return added;
} }

Loading…
Cancel
Save