// roost/node/cellularChunking.js
(function() {
// chunk_record: in-flight reassembly state, keyed by message_id.
// With version 1 the chunk length field isn't strictly needed, but it is kept because
// a future version may want to minimize the number of messages sent by concatenating
// multiple short chunks into one message, or use the length for error checking on the
// receiving end.
const chunk_record = {}
// payload bytes per chunk: 1400 rather than the 1500 MTU to leave room for the
// 9-byte header and some safety margin
const max_chunk_length = 1400
// Remove bookkeeping for messages we no longer need so chunk_record doesn't grow forever.
// Two expiry rules:
//   - completed messages (ctime set): keep for 30 seconds, long enough that we aren't
//     worried about unneeded retransmissions
//   - incomplete messages (rtime only): keep for 10 minutes, then give up.
//     In the future we should ask for a resend, but that isn't set up yet.
function prune_chunk_record() {
    const now = Date.now()
    Object.keys(chunk_record).forEach(function(thisMessageID) {
        const record = chunk_record[thisMessageID]
        if (!record) {
            // slot was previously blanked out (old code assigned undefined instead of
            // deleting); remove the stale key so Object.keys stops iterating it
            delete chunk_record[thisMessageID]
            return
        }
        if (record.ctime && (now - record.ctime > 30000)) {
            // fully received and old enough that retransmissions are no longer a concern.
            // BUG FIX: use delete rather than assigning undefined — assigning undefined
            // left the key in the object forever, so chunk_record's key set (and each
            // prune pass) grew without bound.
            delete chunk_record[thisMessageID]
        } else if (record.rtime && !record.ctime && (now - record.rtime > 600000)) {
            // first chunk arrived over 10 minutes ago and the message never completed
            delete chunk_record[thisMessageID]
        }
    })
}
// Receive one datagram's payload, file its chunk(s) under the message id in
// chunk_record, and call message_handler(message, rinfo) once every chunk of a
// message has arrived. Duplicate chunks and already-completed messages are dropped.
// Tail-recurses if the buffer holds more than one chunk (not expected over the
// cellular network, but handled so we can support it in the future).
//   chunk           - Buffer holding a 9-byte header plus payload
//   message_handler - function(Buffer, rinfo); replaced with a no-op if not a function
//   rinfo           - opaque sender info, passed through to message_handler untouched
function receive_chunk(chunk, message_handler, rinfo) {
if(typeof message_handler !== 'function') {
message_handler = () => {}
}
console.log('received chunk: ', chunk)
const the_header = parse_chunk_header(chunk)
console.log('header: ', the_header)
// a header without message_id means the chunk was too short or an unknown version
if(typeof the_header.message_id !== 'undefined') {
// if this is a new message start a record for it
if(!chunk_record[the_header.message_id]) {
console.log('new message thing')
chunk_record[the_header.message_id] = {
chunks: {}, // payload Buffers keyed by chunk_index
rtime: new Date(), // received time, when the first chunk came in
ctime: undefined, // completed time, when the full message was reconstructed
num_chunks: the_header.num_chunks, // total expected, taken from the first chunk seen
r_chunks: 0 // how many distinct chunks have arrived so far
}
}
if(chunk_record[the_header.message_id].ctime) {
// if the message has already been fully received we just drop it
return
}
// if this is a new chunk save it in chunk_record (duplicates are not re-counted)
if(!chunk_record[the_header.message_id].chunks[the_header.chunk_index]) {
console.log('new chunk for existing message')
// NOTE(review): chunk_message puts payload+header in the chunk_length field, so this
// slice end overshoots by 9; Buffer#slice clamps to the end of the buffer, which is
// harmless while each datagram carries exactly one chunk — confirm before changing
// either side of the wire format
chunk_record[the_header.message_id].chunks[the_header.chunk_index] = chunk.slice(the_header.header_length, the_header.chunk_length+the_header.header_length)
chunk_record[the_header.message_id].r_chunks = chunk_record[the_header.message_id].r_chunks + 1
}
// if we have all the chunks reconstruct the message in index order and pass it on for processing
if(chunk_record[the_header.message_id].r_chunks === chunk_record[the_header.message_id].num_chunks) {
console.log('have all chunks')
let reconstructed_message = Buffer.from([])
// numeric comparator: the chunk indices are object keys (strings), so the default
// lexicographic sort would order 10 before 2
Object.keys(chunk_record[the_header.message_id].chunks).sort((a,b)=>a-b).forEach(function(thisChunkIndex) {
reconstructed_message = Buffer.concat([reconstructed_message, chunk_record[the_header.message_id].chunks[thisChunkIndex]])
})
if(reconstructed_message) { // NOTE(review): Buffers are always truthy, so this guard never fails
chunk_record[the_header.message_id].ctime = new Date() // completed time
message_handler(reconstructed_message, rinfo)
}
}
// opportunistic cleanup of stale records on every valid chunk
prune_chunk_record()
}
// if there is more data than has been processed, cut off the bit we have already done and process the rest.
// NOTE(review): for unknown header versions header_length is undefined, so this sum is
// NaN and the comparison is false — any trailing data after such a chunk is silently
// dropped rather than skipped as the parse_chunk_header comment intends; confirm
if(the_header.chunk_length + the_header.header_length < chunk.length) {
receive_chunk(chunk.slice(the_header.chunk_length + the_header.header_length), message_handler, rinfo)
}
}
// Parse the chunk header off the front of `chunk`.
// Wire layout (big-endian): [0..1] message_id, [2..3] chunk_length, [4] version,
// then (version 1 only) [5..6] chunk_index, [7..8] num_chunks — 9 bytes total.
// Returns {} when the input is too short to hold a header. For unknown versions only
// chunk_length is returned so the caller can attempt to skip the chunk.
// We should never receive more than one chunk at a time over the cellular network,
// but the header carries enough to handle that in the future.
//   chunk - Buffer (copied as-is) or hex string (decoded) holding at least one chunk
function parse_chunk_header(chunk) {
    if (chunk.length < 9) {
        return {}
    }
    // BUG FIX: these four were undeclared implicit globals, which leaks parse state
    // across calls and throws ReferenceError under strict mode (e.g. in an ES module).
    // Buffer.from(buffer) copies; the 'hex' encoding only applies when chunk is a string.
    const chunkBuf = Buffer.from(chunk, 'hex')
    const message_id = chunkBuf.readUInt16BE(0)
    const chunk_length = chunkBuf.readUInt16BE(2)
    const chunk_version = chunkBuf.readUInt8(4)
    if (chunk_version === 0x01) {
        return {
            "message_id": message_id,
            "chunk_length": chunk_length,
            "chunk_version": chunk_version,
            "chunk_index": chunkBuf.readUInt16BE(5),
            "num_chunks": chunkBuf.readUInt16BE(7),
            "header_length": 9
        }
    }
    // We don't know how to handle this chunk version, so return only the chunk length
    // so it can be skipped. NOTE(review): receive_chunk adds header_length (undefined
    // here) when computing the skip offset, so unknown-version trailing data is
    // currently dropped rather than skipped — confirm intent before changing.
    return {
        "chunk_length": chunk_length
    }
}
// Take a message and split it into chunks suitable for sending to the base station
// (the MTU is 1500, so we have to chunk longer messages; we use 1400-byte payloads
// plus a 9-byte header to leave headroom).
// Returns an array of strings, each a hex-encoded header followed by a payload slice.
// Chunk indices are 1-based.
//   message    - the message to split; sliced with .slice by character/byte position.
//                NOTE(review): the header goes out hex-encoded but the payload is
//                appended as-is, so this only round-trips through parse_chunk_header's
//                hex decode if `message` is itself a hex string — confirm with callers
//   message_id - 16-bit id stamped on every chunk so the receiver can group them
function chunk_message(message, message_id) {
    const messageChunks = []
    const messageLen = message.length
    const numFullChunks = Math.floor(messageLen / max_chunk_length)
    const remainder = messageLen % max_chunk_length
    // BUG FIX: the advertised num_chunks must only include a trailing partial chunk
    // when one actually exists. Previously it was always numFullChunks+1, so a message
    // whose length is an exact multiple of max_chunk_length claimed one more chunk
    // than was ever sent and the receiver could never complete reassembly.
    const totalChunks = remainder > 0 ? numFullChunks + 1 : numFullChunks
    for(let i = 0; i < numFullChunks; i++) {
        // 1409 is the chunk length here: a 1400 byte payload plus the 9 byte header.
        // For now the version is always 1.
        const thisHeader = make_chunk_header(message_id, i + 1, totalChunks, max_chunk_length + 9, 1)
        messageChunks.push(thisHeader.toString('hex') + message.slice(max_chunk_length * i, max_chunk_length * (i + 1)))
    }
    // This catches a message shorter than one full chunk, and any leftover after
    // pulling off all the full-size chunks. The remainder check also avoids the edge
    // case where the message is an exact multiple of the chunk length and there is
    // nothing left to send.
    if(remainder > 0) {
        const thisHeader = make_chunk_header(message_id, totalChunks, totalChunks, remainder + 9, 1)
        messageChunks.push(thisHeader.toString('hex') + message.slice(numFullChunks * max_chunk_length))
    }
    return messageChunks
}
// Build the 9-byte version-1 chunk header as a Buffer (callers hex-encode it).
// Wire layout (big-endian): message_id(2) chunk_length(2) version(1) chunk_index(2) num_chunks(2).
// Each field may be passed as a number (encoded here) or as a pre-built Buffer,
// which is used verbatim — that calling flexibility is preserved from the original.
// Only version 1 exists so far, so the version is not validated here.
function make_chunk_header(message_id, chunk_index, num_chunks, chunk_length, chunk_version=1) {
    // coerce a numeric field to a big-endian Buffer of the given width; pass Buffers through
    function toField(value, byteLength) {
        if (Buffer.isBuffer(value)) {
            return value
        }
        const buf = Buffer.allocUnsafe(byteLength)
        if (byteLength === 1) {
            buf.writeUInt8(value)
        } else {
            buf.writeUInt16BE(value)
        }
        return buf
    }
    // BUG FIX: chunk_header was an undeclared implicit global (ReferenceError under
    // strict mode). Note the wire order differs from the parameter order: the length
    // field precedes version/index/count.
    const chunk_header = Buffer.concat([
        toField(message_id, 2),
        toField(chunk_length, 2),
        toField(chunk_version, 1),
        toField(chunk_index, 2),
        toField(num_chunks, 2)
    ])
    return chunk_header
}
// Public API: chunk_message for the sending side, receive_chunk for the receiving
// side; parse_chunk_header is exported so callers/tests can inspect raw headers.
module.exports = {
chunk_message,
receive_chunk,
parse_chunk_header
}
})()