var fs = require("fs");
var http = require("http");
var url = require("url");
+var path = require("path");
// Schedule the periodic cache-maintenance tick exactly once.
// Previously every invocation registered ANOTHER interval whose callback is
// this very function, so the number of live timers doubled on every tick
// (1, 2, 4, ...). This is the guard the old TODO asked for: keep the interval
// handle on the function object and bail out if it is already set.
// NOTE(review): `cacheTimer` is a free variable defined elsewhere in the
// project (presumably the configured maintenance period in ms) — confirm.
function maintainCache() {
	if (maintainCache.timer) return;
	maintainCache.timer = setInterval(maintainCache, cacheTimer);
}
-function upstreamRequest(unify, callback) {
+function upstreamRequest(unify) {
	// first do a head request
	console.log("upsteram as ", unify.requestFor);
	//unify.b.write("would send to '" + xpath + "'");
	//unify.b.end();
+	// If another request is already streaming this file into the cache, hand
+	// this client to inlineService(), which tails the partial file on disk
+	// instead of starting a second upstream download.
+	// typeof always yields a *string*, so it must be compared against the
+	// string "undefined"; comparing against the undefined value was always
+	// true and made this guard rely on the == 1 clause alone.
+	if(typeof global.repoproxy.downloads[unify.fullFilePath] != "undefined" && global.repoproxy.downloads[unify.fullFilePath] == 1) {
+		console.log("request for file thats being downloaded already, doing inline request");
+		inlineService(unify);
+		return;
+	}
+
	console.log("sending off to '%s'", xpath);
	var headReq = url.parse(xpath);
	headReq["method"] = "HEAD";
-	getup = http.request(xpath, function(res) {
-		res.setEncoding("utf8");
+	getup = http.request(headReq, function(res) {
+		//res.setEncoding("utf8");
		if(!endData) {
			console.log("status code is ", typeof res.statusCode);
			endData = true;
		} else {
			// this is where it gets ugly
+			var filesize = res.headers["content-length"];
			console.log("do ugly write: ", unify);
			//unify.b.write(data);
+			// Persist the upstream Content-Length next to the cached file so
+			// inlineService()/checkFile() can tell when the download is done.
+			var metafilename = unify.fullPathDirName + "/.meta."+ path.basename(unify.requestFor) +".filesize";
+			var metafile = fs.createWriteStream(metafilename);
+			metafile.write(filesize);
+			metafile.end();
+			getAndService(unify, xpath, filesize);
		}
		break;
exports.upstreamRequest = upstreamRequest;
-function getAndService(unify, xpath) {
+// Stream the upstream file to the client while simultaneously writing it
+// into the on-disk cache, and mark it "in flight" in
+// global.repoproxy.downloads so concurrent requests are serviced from the
+// growing cache file by inlineService().
+function getAndService(unify, xpath, filesize) {
-	if(global.repoproxy.downloads[unify.fullFilePath] == 1) {
-	
-		unify.b.write("trying to service inline");
-		unify.b.end();
-	} else {
-		global.repoproxy.downloads[unify.fullFilePath] = 1;
+	console.log("calling in here with filesize, ", filesize)
+	unify.b.writeHead(200, {'Content-Length' : filesize});
+
-		http.get(xpath, function(res) {
+	global.repoproxy.downloads[unify.fullFilePath] = 1;
-			var file = fs.createWriteStream(unify.fullFilePath);
-			
-			console.log("res: ", res);
+
+	http.get(xpath, function(res) {
+
+		var file = fs.createWriteStream(unify.fullFilePath);
+
+		//console.log("res: ", res);
+
+		//res.setEncoding("utf8");
+
+		// Tee every chunk: on-disk cache file + client response.
+		res.on("data", function(data) {
+			//console.log("chunk");
+			file.write(data);
+			unify.b.write(data);
+		});
+
+		res.on("end", function() {
+			console.log("end...");
+			unify.b.end();
+			file.end();
+			global.repoproxy.downloads[unify.fullFilePath] = 0;
+		});
+
+		res.on("error", function(err) {
+			console.log("res threw error... ", err);
+			// Without this cleanup the download flag stayed stuck at 1,
+			// sending every later request for this file into inlineService()
+			// until its 60-second bail-out. Close both sinks and clear it.
+			file.end();
+			unify.b.end();
+			global.repoproxy.downloads[unify.fullFilePath] = 0;
+		});
+	});
+}
+
+// this is nasty nasty thing that can go horribly wrong in some ways, but currently works...
+// Strategy: read the expected total size from the .meta.<name>.filesize
+// record, then poll the partially-downloaded cache file every second and
+// stream any newly-appended bytes to the client until the full size is sent.
+function inlineService(unify) {
+	// this method is called when we need to service a file thats being downloaded by something else
+	var metafilename = unify.fullPathDirName + "/.meta."+ path.basename(unify.requestFor) +".filesize";
+	var fsizef = fs.createReadStream(metafilename);
+	var fsize = "";
+	// Stall counter read/written as `lastChunk` by the polling loop below;
+	// the old declaration `var lastchunk = 0;` (lower-case c) was dead and
+	// left lastChunk an implicit global.
+	var lastChunk = 0;
+	fsizef.on("data", function(data) {
+		fsize += data;
+	});
+
+	fsizef.on("end", function() {
+		var sentSoFar = 0;
+		unify.b.writeHead(200, {"Content-Length" : fsize });
-		//res.setEncoding("utf8");
+		// now we go into the file reading loop.
+		console.log("start of inline services");
+		// we loop every 0.5s and do our thing
-		res.on("data", function(data) {
-			//console.log("chunk");
-			file.write(data);
-			unify.b.write(data);
-		});
+		function sendPieces() {
+			// this is going to be so fun i want to play real life frogger in real life traffic...
+			fs.stat(unify.fullFilePath, function(err, stats) {
+				if(err == null) {
+					if(stats["size"] > sentSoFar) {
+						// if file size changed between last chunk and this chunk, send the chunks
+
+						lastChunk = 0;
+						// open the file, send the data
+						var rs = fs.createReadStream(unify.fullFilePath, {start: sentSoFar, end: stats["size"]});
+
+						rs.on("data", function(thisdata) {
+							//console.log("inline chunk: ", thisdata.length);
+							unify.b.write(thisdata);
+						});
+
+						rs.on("end", function() {
+							sentSoFar = stats["size"];
+							// every second, we start again
+							// (fsize is a string read from the meta file; the
+							// loose != compares it numerically on purpose)
+							if(sentSoFar != fsize) {
+								setTimeout(sendPieces, 1000);
+							} else {
+								// we're done!
+								unify.b.end();
+							}
+						});
+					} else {
+						// if file size did not change between last timeout and this one, incremement the chunk counter
+						// if we reach 60, we had a problem, and so we bomb out
+
+						lastChunk++;
+
+						// we bombed out somehow
+						if(lastChunk > 60) {
+							unify.b.end();
+						} else {
+							setTimeout(sendPieces, 1000);
+						}
+					}
+				} else {
+					console.log("inline service - we're in a very bad place");
+				}
+			});
+
+		}
-		res.on("end", function() {
-			console.log("end...");
-			unify.b.end();
-			file.end();
-			global.repoproxy.downloads[unify.fullFilePath] = 0;
-		});
-	});
-	}
+		setTimeout(sendPieces, 100);
+	});
}
// the service file routine .... PLEASE KILL ME!
function serviceFile(unify) {
	// for now, ignore range.
+	// however we need to check if a metadata file exists describing the filesize, check if its all correct
+	// and if not, erase the file (and metafile) and forward the request back to upstream request
-	// file should already exist, so we just poop it out
-	var inp = fs.createReadStream(unify.fullFilePath);
-	inp.setEncoding("utf8");
-	inp.on("data", function(data) {
-		unify.b.write(data);
-	});
-	inp.on("end", function(closed) {
-		unify.b.end();
+	checkFile(unify, function() {
+
+		// file should already exist, so we just poop it out
+		var inp = fs.createReadStream(unify.fullFilePath);
+		//inp.setEncoding("utf8");
+		inp.on("data", function(data) {
+			unify.b.write(data);
+		});
+
+		inp.on("end", function(closed) {
+			unify.b.end();
+		});
+
+		// An unhandled "error" event on a Node stream crashes the process —
+		// e.g. if the cached file disappears between checkFile() and here.
+		// Log it and close the response instead.
+		inp.on("error", function(err) {
+			console.log("serviceFile read error: ", err);
+			unify.b.end();
+		});
	});
}
exports.serviceFile = serviceFile;
+
+// Validate a cached file against its .meta.<name>.filesize record before
+// serving it. If the sizes disagree, the cache entry is assumed truncated or
+// stale: both files are removed and the request is forwarded back upstream.
+// callback() is invoked only when the cached copy may be served as-is (the
+// response header has already been written at that point).
+function checkFile(unify, callback) {
+	// in here we do the metadata checks
+	var metafilename = unify.fullPathDirName + "/.meta."+ path.basename(unify.requestFor) +".filesize";
+
+	// NOTE(review): fs.exists is deprecated in modern Node (fs.access/fs.stat
+	// preferred); kept here to match the style of the surrounding code.
+	fs.exists(metafilename, function(existence) {
+		if(existence) {
+			var fsizef = fs.createReadStream(metafilename);
+			var fsize = "";
+			fsizef.on("data", function(data) {
+				fsize += data;
+			});
+
+			fsizef.on("end", function() {
+				fs.stat(unify.fullFilePath, function(err, stats) {
+					// A stat error (e.g. the cached file vanished while its
+					// meta file survived) used to fall through and throw on
+					// stats["size"] — the old try/catch wrapped async
+					// callbacks and could never catch it. Treat it like a
+					// mismatch: drop the meta file and rebuild from upstream.
+					if(err != null) {
+						fs.unlink(metafilename, function(){
+							upstreamRequest(unify);
+						});
+						return;
+					}
+					var rfsize = stats["size"];
+					if(rfsize != fsize.trim()) {
+						// remove the file and start again
+						// (log the metadata size — the old message printed the
+						// on-disk size twice)
+						console.log("reported filesizes dont match, '%s', '%s', removing file and starting again", rfsize, fsize.trim());
+						fs.unlink(metafilename, function(){
+							fs.unlink(unify.fullFilePath, function(){
+								upstreamRequest(unify);
+							})
+						});
+					} else {
+						// we're good
+						unify.b.writeHead(200, {"Content-Length" : unify.fileSize})
+						callback();
+					}
+				});
+			});
+		} else {
+			console.log("file, '%s' exists but has no filesize meta data, assuming it was put here manually and servicing", unify.fullFilePath);
+			unify.b.writeHead(200, {"Content-Length" : unify.fileSize})
+			callback();
+		}
+	});
+}
+
// NOTE(review): this function is incomplete in the visible chunk — its body
// continues past the end of this diff. The parameter is named `path`, which
// shadows the `path` module required at the top of the file, and the body
// also references `unify`, which is not in scope here — presumably the
// parameter should be `unify`, or context lines are missing from this hunk.
// Confirm against the full file before relying on this documentation.
function makeCacheDir(path) {
	console.log("attempting to create... '%s' as '%s'", path.fullPathDirName, path.subPathDirName);
	var res = unify.b;
	res.write("<html><h1>Directory listing for " + unify.originalReq + "</h1><hr><pre>");
-	if(unify.fullFilePath != "/") res.write("<a href=\"..\">Parent</a>\n\n");
+	// compare the request path, not the filesystem path, when deciding
+	// whether to show the "Parent" link
+	if(unify.originalReq != "/") res.write("<a href=\"..\">Parent</a>\n\n");
	fs.readdir(unify.fullFilePath, function(err, files) {
		console.log("doing directory listing on: ", unify.fullFilePath);
		if(err == null) {