adding the cleanup routines
[nodejs-repoproxy.git] / lib / cache.js
index a649991..b7f111d 100644 (file)
@@ -3,19 +3,6 @@ var http = require("http");
 var url = require("url");
 var path = require("path");
 
-function maintainCache() {
-       // TODO i should check that im already running here and exit if i am
-       console.log("Cache maintainence routine starting...");
-       console.log("Cache maintainence routine ended...");
-}
-
-exports.startTimer = function() {
-       // our once-a-day cache maintainer
-       var cacheTimer = global.repoproxy.scancache*3600*1000;
-       //var cacheTimer = global.repoproxy.scancache*100;
-       setInterval(maintainCache, cacheTimer);
-}
-
 function upstreamRequest(unify) {
        // first do a head request
        console.log("upsteram as ", unify.requestFor);
@@ -151,6 +138,7 @@ function inlineService(unify) {
        var metafilename = unify.fullPathDirName + "/.meta."+ path.basename(unify.requestFor) +".filesize";
        var fsizef = fs.createReadStream(metafilename);
        var fsize = "";
+       var lastChunk = 0;
        fsizef.on("data", function(data) {
                fsize += data;
        });
@@ -168,6 +156,9 @@ function inlineService(unify) {
                        fs.stat(unify.fullFilePath, function(err, stats) {
                                if(err == null) {
                                        if(stats["size"] > sentSoFar) {
+                                               // if file size changed between last chunk and this chunk, send the chunks
+                                               
+                                               lastChunk = 0;
                                                // open the file, send the data
                                                var rs = fs.createReadStream(unify.fullFilePath, {start: sentSoFar, end: stats["size"]});
                                                
@@ -186,6 +177,18 @@ function inlineService(unify) {
                                                                unify.b.end();
                                                        }
                                                });
+                                       } else {
+                                               // if file size did not change between last timeout and this one, increment the chunk counter
+                                               // if we reach 60, we had a problem, and so we bomb out
+                                               
+                                               lastChunk++;
+                                               
+                                               // we bombed out somehow
+                                               if(lastChunk > 60) {
+                                                       unify.b.end();
+                                               } else {
+                                                       setTimeout(sendPieces, 1000);
+                                               }
                                        }
                                } else {
                                        console.log("inline service - we're in a very bad place");