var fs = require("fs");
var http = require("http");
var url = require("url");
+var path = require("path");
+var crypto = require("crypto");
+var log = require("./log.js");
-function maintainCache() {
- // TODO i should check that im already running here and exit if i am
- console.log("Cache maintainence routine starting...");
- console.log("Cache maintainence routine ended...");
-}
-
-exports.startTimer = function() {
- // our once-a-day cache maintainer
- var cacheTimer = global.repoproxy.scancache*3600*1000;
- //var cacheTimer = global.repoproxy.scancache*100;
- setInterval(maintainCache, cacheTimer);
-}
-
-function upstreamRequest(unify, callback) {
+function upstreamRequest(unify) {
// first do a head request
- console.log("upsteram as ", unify.requestFor);
+ log.debug("upsteram as ", unify.requestFor);
var endData = false;
var xpath = "";
//unify.b.write("would send to '" + xpath + "'");
//unify.b.end();
- console.log("sending off to '%s'", xpath);
+ // not doing this properly yet...
+ if(typeof global.repoproxy.downloads[unify.fullFilePath] != undefined && global.repoproxy.downloads[unify.fullFilePath] == 1) {
+ log.debug("request for file thats being downloaded already, doing inline request");
+ inlineService(unify);
+ return;
+ }
+
+ log.debug("sending off to '%s'", xpath);
var headReq = url.parse(xpath);
headReq["method"] = "HEAD";
- getup = http.request(xpath, function(res) {
- res.setEncoding("utf8");
+ getup = http.request(headReq, function(res) {
+ //res.setEncoding("utf8");
if(!endData) {
- console.log("status code is ", typeof res.statusCode);
+ log.debug("status code is ", typeof res.statusCode);
switch(res.statusCode) {
// TODO: this 301 directory redirect thing needs to work better
case 301:
var against = against_t.substr(against_t.length-4);
if(loc == against) {
- console.log("got a redirect, upstream for loc => loc/ assuming its a directory");
+ log.debug("got a redirect, upstream for loc => loc/ assuming its a directory");
makeCacheDir(unify);
unify.b.writeHead(302, { "Location": unify.originalReq + "/" });
} else {
- console.log("checked '%s' against '%s', was false, sending 404", loc, against);
+ log.debug("checked '%s' against '%s', was false, sending 404", loc, against);
unify.b.writeHead(404, {"Content-Type": "text/plain"});
unify.b.write("404 Not Found\n");
}
endData = true;
} else {
// this is where it gets ugly
- console.log("do ugly write: ", unify);
+ var filesize = res.headers["content-length"];
+ log.debug("do ugly write: ", unify);
//unify.b.write(data);
- getAndService(unify, xpath);
+ var metafilename = unify.fullPathDirName + "/.meta."+ path.basename(unify.requestFor) +".filesize";
+ var metafile = fs.createWriteStream(metafilename);
+ metafile.write(filesize);
+ metafile.end();
+ getAndService(unify, xpath, filesize);
}
break;
default:
- console.log(".... data");
+ log.debug(".... data");
//unify.b.write(data);
}
}
- //console.log("res is now ", res);
+ //log.debug("res is now ", res);
});
getup.end();
- //console.log("getup: ", getup);
+ //log.debug("getup: ", getup);
}
exports.upstreamRequest = upstreamRequest;
// Fetch `xpath` from the upstream server and stream the body simultaneously to
// the on-disk cache file (unify.fullFilePath) and to the waiting client
// (unify.b). While the transfer runs, the file is flagged in
// global.repoproxy.downloads so concurrent requests are serviced by
// inlineService() instead of starting a second download.
//
// unify    - per-request state object (response stream, resolved paths)
// xpath    - upstream URL to fetch
// filesize - Content-Length reported by the upstream HEAD request
function getAndService(unify, xpath, filesize) {
    log.debug("calling in here with filesize, ", filesize);

    unify.b.writeHead(200, {'Content-Length' : filesize});

    // mark this file as in-flight for inlineService()
    global.repoproxy.downloads[unify.fullFilePath] = 1;

    http.get(xpath, function(res) {

        var file = fs.createWriteStream(unify.fullFilePath);

        res.on("data", function(data) {
            file.write(data);
            unify.b.write(data);
        });

        res.on("end", function() {
            log.debug("end...");
            unify.b.end();
            file.end();
            // transfer finished: clear the in-flight flag
            global.repoproxy.downloads[unify.fullFilePath] = 0;
        });

        res.on("error", function(err) {
            log.debug("res threw error... ", err);
            // BUGFIX: previously nothing was cleaned up here, so the in-flight
            // flag stayed at 1 forever and every later request for this file
            // went to inlineService() and stalled. Close both ends and clear it.
            file.end();
            unify.b.end();
            global.repoproxy.downloads[unify.fullFilePath] = 0;
        });
    });
}
+
// this is a nasty thing that can go horribly wrong in some ways, but currently works...
// Service a request for a file that another request is currently downloading:
// read the expected final size from the ".meta.<name>.filesize" record written
// by the downloader, then poll the growing cache file once a second and stream
// any newly-arrived bytes to the client until the whole file has been sent --
// or until it stops growing for 60 consecutive polls, in which case we give up.
function inlineService(unify) {
    var metafilename = unify.fullPathDirName + "/.meta." + path.basename(unify.requestFor) + ".filesize";
    var fsizef = fs.createReadStream(metafilename);
    var fsize = "";
    // consecutive polls with no file growth.
    // BUGFIX: this was declared as "lastchunk" but read/written as "lastChunk",
    // which made the counter an implicit global shared by every concurrent
    // inline request.
    var lastChunk = 0;

    fsizef.on("data", function(data) {
        fsize += data;
    });

    fsizef.on("end", function() {
        var sentSoFar = 0;
        unify.b.writeHead(200, {"Content-Length" : fsize });
        // now we go into the file reading loop.
        log.debug("start of inline services");
        // we poll every second and do our thing

        function sendPieces() {
            // this is going to be so fun i want to play real life frogger in real life traffic...
            fs.stat(unify.fullFilePath, function(err, stats) {
                if(err == null) {
                    if(stats["size"] > sentSoFar) {
                        // the file grew since the last poll: send the new bytes

                        lastChunk = 0;
                        // open the file at the last sent offset, send the data
                        var rs = fs.createReadStream(unify.fullFilePath, {start: sentSoFar, end: stats["size"]});

                        rs.on("data", function(thisdata) {
                            unify.b.write(thisdata);
                        });

                        rs.on("end", function() {
                            sentSoFar = stats["size"];
                            // NOTE(review): deliberate loose != -- fsize is a string
                            // read from the meta file, sentSoFar is a number
                            if(sentSoFar != fsize) {
                                setTimeout(sendPieces, 1000);
                            } else {
                                // we're done!
                                unify.b.end();
                            }
                        });
                    } else {
                        // file size did not change between polls: increment the
                        // stall counter; at 60 we assume the download died and bail

                        lastChunk++;

                        if(lastChunk > 60) {
                            unify.b.end();
                        } else {
                            setTimeout(sendPieces, 1000);
                        }
                    }
                } else {
                    log.error("inline service - we're in a very bad place, how we ended up here we dont know, but we need to crash");
                    process.exit(10);
                }
            });

        }

        setTimeout(sendPieces, 100);
    });
}
// the service file routine .... PLEASE KILL ME!
// Serve a file that should already be present in the cache. checkFile()
// first validates the on-disk copy against its ".meta.<name>.filesize"
// record (re-fetching from upstream on mismatch); once it calls us back,
// the cached bytes are streamed straight out to the client.
function serviceFile(unify) {
    // for now, range requests are still ignored.
    checkFile(unify, function() {

        // the cached copy checked out, so stream it to the response
        var reader = fs.createReadStream(unify.fullFilePath);

        reader.on("data", function(chunk) {
            unify.b.write(chunk);
        });

        reader.on("end", function() {
            unify.b.end();
        });
    });
}
exports.serviceFile = serviceFile;
+
// Validate a cached file against its ".meta.<name>.filesize" record before
// serving it. If the recorded size and the on-disk size disagree, the cached
// copy is assumed to be a broken/partial download: both files are removed and
// the request is pushed back through upstreamRequest(). Otherwise `callback`
// is invoked to stream the file out.
// NOTE(review): fs.exists is deprecated upstream; kept for consistency with
// the rest of this module.
function checkFile(unify, callback) {
    // in here we do the metadata checks
    var metafilename = unify.fullPathDirName + "/.meta." + path.basename(unify.requestFor) + ".filesize";

    fs.exists(metafilename, function(existence) {
        if(existence) {
            var fsizef = fs.createReadStream(metafilename);
            var fsize = "";
            fsizef.on("data", function(data) {
                fsize += data;
            });

            fsizef.on("end", function() {
                fs.stat(unify.fullFilePath, function(err, stats) {
                    // BUGFIX: a stat failure used to crash here (stats is
                    // undefined on error); treat it like a size mismatch and
                    // start the fetch over instead.
                    var rfsize = (err == null) ? stats["size"] : -1;
                    if(rfsize != fsize.trim()) {
                        // remove the file and start again
                        // BUGFIX: the log used to print stats["size"] twice;
                        // show the actual size vs the recorded size
                        log.debug("reported filesizes dont match, '%s', '%s', removing file and starting again", rfsize, fsize.trim());
                        try {
                            fs.unlink(metafilename, function(){
                                fs.unlink(unify.fullFilePath, function(){
                                    upstreamRequest(unify);
                                });
                            });
                        } catch(e) {
                            upstreamRequest(unify);
                        }
                    } else {
                        // we're good
                        // NOTE(review): Content-Length comes from unify.fileSize,
                        // not the meta record -- presumably set by the caller; confirm.
                        unify.b.writeHead(200, {"Content-Length" : unify.fileSize});
                        callback();
                    }
                });
            });
        } else {
            log.debug("file, '%s' exists but has no filesize meta data, assuming it was put here manually and servicing", unify.fullFilePath);
            unify.b.writeHead(200, {"Content-Length" : unify.fileSize});
            callback();
        }
    });
}
+
function makeCacheDir(path) {
- console.log("attempting to create... '%s' as '%s'", path.fullPathDirName, path.subPathDirName);
+ log.debug("attempting to create... '%s' as '%s'", path.fullPathDirName, path.subPathDirName);
var startAt = path.topFullPath;
var nextbits = path.subPathDirName.split("/");
for(var i=0; i < nextbits.length; i++) {
startAt += "/" + nextbits[i];
- console.log("attempt mkdir on '%s'", startAt);
+ log.debug("attempt mkdir on '%s'", startAt);
try {
fs.mkdirSync(startAt);
} catch(e) {
- //console.log("e in mkdir, ", e);
+ //log.debug("e in mkdir, ", e);
}
}
//process.exit(0);
var res = unify.b;
res.write("<html><h1>Directory listing for " + unify.originalReq + "</h1><hr><pre>");
- if(unify.fullFilePath != "/") res.write("<a href=\"..\">Parent</a>\n\n");
+ if(unify.originalReq != "/") res.write("<a href=\"..\">Parent</a>\n\n");
fs.readdir(unify.fullFilePath, function(err, files) {
- console.log("doing directory listing on: ", unify.fullFilePath);
+ log.debug("doing directory listing on: ", unify.fullFilePath);
if(err == null) {
// TODO: make this work asynchronously...
nfiles++;
}
} else {
- console.log("ignoring file, ", files[i]);
+ log.debug("ignoring file, ", files[i]);
}
}
});
}
-exports.serviceDirectory = serviceDirectory;
\ No newline at end of file
// Relocate a file or directory into the cache's ".cleanup" holding area so a
// later cleanup pass can remove it. The destination name is built from the
// current timestamp plus a per-process counter (global.repoproxy.fileid) so
// successive moves never collide.
function moveToCleanup(file_or_dir) {
    // err..?
    var holdingArea = global.repoproxy.cacheDir + "/.cleanup";
    var stamp = new Date().getTime().toString();
    var unique = (++global.repoproxy.fileid).toString();
    var destination = holdingArea + "/" + stamp + "." + unique;

    // strip any trailing slash before handing the path to rename
    fs.renameSync(file_or_dir.replace(/\/$/, ""), destination);
}
+
// Stub -- presumably intended to sweep the ".cleanup" holding area that
// moveToCleanup() populates. Not implemented yet; TODO confirm intent.
function cleanupRoutine() {

}
+
+
+exports.serviceDirectory = serviceDirectory;
+exports.moveToCleanup = moveToCleanup;