Lucas Teske a révisé ce gist 9 years ago. Aller à la révision
1 file changed, 44 insertions, 10 deletions
channeldecoder.py renommé en channelmuxer.py
| @@ -6,6 +6,9 @@ FRAMESIZE = 892 | |||
| 6 | 6 | M_PDUSIZE = FRAMESIZE - 6 | |
| 7 | 7 | EXPORTCORRUPT = False | |
| 8 | 8 | ||
| 9 | + | tsize = 0 | |
| 10 | + | isCompressed = True | |
| 11 | + | ||
| 9 | 12 | SEQUENCE_FLAG_MAP = { | |
| 10 | 13 | 0: "Continued Segment", | |
| 11 | 14 | 1: "First Segment", | |
| @@ -46,6 +49,10 @@ def CheckCRC(data, crc): | |||
| 46 | 49 | def SavePacket(channelid, packet): | |
| 47 | 50 | global totalCRCErrors | |
| 48 | 51 | global totalSavedPackets | |
| 52 | + | global tsize | |
| 53 | + | global isCompressed | |
| 54 | + | ||
| 55 | + | #packet["sequenceflag_int"] = 3 # TEST | |
| 49 | 56 | try: | |
| 50 | 57 | os.mkdir("channels/%s" %channelid) | |
| 51 | 58 | except: | |
| @@ -55,12 +62,6 @@ def SavePacket(channelid, packet): | |||
| 55 | 62 | print " Fill Packet. Skipping" | |
| 56 | 63 | return | |
| 57 | 64 | ||
| 58 | - | filename = "channels/%s/%s_%s.lrit" % (channelid, packet["apid"], packet["version"]) | |
| 59 | - | print "- Saving packet to %s" %filename | |
| 60 | - | ||
| 61 | - | if packet["framesdropped"]: | |
| 62 | - | print " WARNING: Some frames has been droped for this packet." | |
| 63 | - | ||
| 64 | 65 | datasize = len(packet["data"]) | |
| 65 | 66 | ||
| 66 | 67 | if not datasize - 2 == packet["size"]: # CRC is the latest 2 bytes of the payload | |
| @@ -71,6 +72,25 @@ def SavePacket(channelid, packet): | |||
| 71 | 72 | ||
| 72 | 73 | data = packet["data"][:datasize-2] | |
| 73 | 74 | ||
| 75 | + | if packet["sequenceflag_int"] == 1: | |
| 76 | + | print "Starting packet %s_%s_%s.lrit" % (packet["apid"], packet["version"], packet["packetnumber"]) | |
| 77 | + | p = packetmanager.getHeaderData(data[10:]) | |
| 78 | + | for i in p: | |
| 79 | + | if i["type"] == 1 or i["type"] == 129: | |
| 80 | + | isCompressed = not i["compression"] == 0 | |
| 81 | + | elif packet["sequenceflag_int"] == 2: | |
| 82 | + | print "Ending packet %s_%s_%s.lrit" % (packet["apid"], packet["version"], packet["packetnumber"]) | |
| 83 | + | if packet["framesdropped"]: | |
| 84 | + | print " WARNING: Some frames has been droped for this packet." | |
| 85 | + | ||
| 86 | + | ||
| 87 | + | if isCompressed: | |
| 88 | + | filename = "channels/%s/%s_%s_%s.lrit" % (channelid, packet["apid"], packet["version"], packet["packetnumber"]) | |
| 89 | + | else: | |
| 90 | + | filename = "channels/%s/%s_%s.lrit" % (channelid, packet["apid"], packet["version"]) | |
| 91 | + | #print "- Saving packet to %s" %filename | |
| 92 | + | ||
| 93 | + | ||
| 74 | 94 | crc = packet["data"][datasize-2:datasize] | |
| 75 | 95 | crc = struct.unpack(">H", crc)[0] | |
| 76 | 96 | crc = CheckCRC(data, crc) | |
| @@ -80,11 +100,25 @@ def SavePacket(channelid, packet): | |||
| 80 | 100 | ||
| 81 | 101 | if crc or (EXPORTCORRUPT and not crc): | |
| 82 | 102 | firstorsinglepacket = packet["sequenceflag_int"] == 1 or packet["sequenceflag_int"] == 3 | |
| 83 | - | f = open(filename, "w" if firstorsinglepacket else "a") | |
| 103 | + | if not isCompressed: | |
| 104 | + | f = open(filename, "wb" if firstorsinglepacket else "ab") | |
| 105 | + | else: | |
| 106 | + | f = open(filename, "wb") | |
| 107 | + | ||
| 84 | 108 | f.write(data[10:] if firstorsinglepacket else data) # Remove transport layer size | |
| 85 | 109 | f.close() | |
| 86 | - | if packet["sequenceflag_int"] == 2 or packet["sequenceflag_int"] == 3: | |
| 110 | + | ||
| 111 | + | if (packet["sequenceflag_int"] == 2 or packet["sequenceflag_int"] == 3) and not isCompressed: | |
| 112 | + | print "File is not compressed. Checking headers." | |
| 87 | 113 | packetmanager.manageFile(filename) | |
| 114 | + | ||
| 115 | + | if firstorsinglepacket: | |
| 116 | + | tsize = packet["size"] | |
| 117 | + | else: | |
| 118 | + | tsize += packet["size"] | |
| 119 | + | ||
| 120 | + | if packet["sequenceflag_int"] == 2: | |
| 121 | + | print " Total Size: %s" %tsize | |
| 88 | 122 | totalSavedPackets += 1 | |
| 89 | 123 | else: | |
| 90 | 124 | print " Corrupted frame, skipping..." | |
| @@ -108,7 +142,7 @@ def CreatePacket(data): | |||
| 108 | 142 | "size": packetlength | |
| 109 | 143 | } | |
| 110 | 144 | ||
| 111 | - | print "- Creating packet %s Size: %s - %s" % (apid, packetlength, SEQUENCE_FLAG_MAP[sequenceflag]) | |
| 145 | + | #print "- Creating packet %s Size: %s - %s" % (apid, packetlength, SEQUENCE_FLAG_MAP[sequenceflag]) | |
| 112 | 146 | else: | |
| 113 | 147 | apid = -1 | |
| 114 | 148 | ||
| @@ -162,7 +196,7 @@ while readbytes < fsize: | |||
| 162 | 196 | print " Frames dropped: %s" % (counter-lastFrameNumber-1); | |
| 163 | 197 | totalFrameDrops += counter-lastFrameNumber-1; | |
| 164 | 198 | if not lastAPID == -1: # Fill | |
| 165 | - | pendingpackets[lastAPID]["data"] += "\x00" * 878 | |
| 199 | + | #pendingpackets[lastAPID]["data"] += "\x00" * 878 | |
| 166 | 200 | pendingpackets[lastAPID]["framesdropped"] = True | |
| 167 | 201 | ||
| 168 | 202 | ||
Lucas Teske a révisé ce gist 9 years ago. Aller à la révision
3 files changed, 202 insertions, 48 deletions
packetassemble.py renommé en channeldecoder.py
Fichier renommé sans modifications
packetinfo.py(fichier créé)
| @@ -0,0 +1,35 @@ | |||
| 1 | + | #!/usr/bin/env python | |
| 2 | + | ||
| 3 | + | import sys, struct, os | |
| 4 | + | ||
| 5 | + | from packetmanager import * | |
| 6 | + | ||
| 7 | + | if len(sys.argv) < 2: | |
| 8 | + | print "Usage: packetinfo.py filename.lrit" | |
| 9 | + | exit(1) | |
| 10 | + | ||
| 11 | + | filename = sys.argv[1] | |
| 12 | + | ||
| 13 | + | f = open(filename, "r") | |
| 14 | + | fsize = os.path.getsize(filename) | |
| 15 | + | readbytes = 0 | |
| 16 | + | t = 0 | |
| 17 | + | ||
| 18 | + | data = f.read() | |
| 19 | + | ||
| 20 | + | headers = getHeaderData(data) | |
| 21 | + | hsize = 0 | |
| 22 | + | ftype = 0 | |
| 23 | + | for i in headers: | |
| 24 | + | if i["type"] == 0: | |
| 25 | + | hsize = i["headerlength"] | |
| 26 | + | ftype = i["filetypecode"] | |
| 27 | + | ||
| 28 | + | printHeaders(headers) | |
| 29 | + | ||
| 30 | + | data = data[hsize:] | |
| 31 | + | ||
| 32 | + | if ftype == 2: | |
| 33 | + | print data | |
| 34 | + | ||
| 35 | + | f.close() | |
packetmanager.py
| @@ -1,11 +1,20 @@ | |||
| 1 | 1 | #!/usr/bin/env python | |
| 2 | 2 | import os, struct | |
| 3 | 3 | ||
| 4 | + | ''' | |
| 5 | + | File Type Codes: | |
| 6 | + | 0 - Image | |
| 7 | + | 2 - Text | |
| 8 | + | 130 - DCS Data | |
| 9 | + | ''' | |
| 10 | + | ||
| 4 | 11 | def manageFile(filename): | |
| 5 | 12 | f = open(filename, "r") | |
| 6 | 13 | ||
| 7 | 14 | try: | |
| 8 | - | type, filetypecode, headerlength, datalength = readHeader(f) | |
| 15 | + | k = readHeader(f) | |
| 16 | + | print k | |
| 17 | + | type, filetypecode, headerlength, datalength = k | |
| 9 | 18 | except: | |
| 10 | 19 | print " Header 0 is corrupted for file %s" %filename | |
| 11 | 20 | return | |
| @@ -21,9 +30,72 @@ def manageFile(filename): | |||
| 21 | 30 | if filename != newfilename: | |
| 22 | 31 | print " Renaming %s to %s/%s" %(filename, os.path.dirname(filename), newfilename) | |
| 23 | 32 | os.rename(filename, "%s/%s" %(os.path.dirname(filename), newfilename)) | |
| 33 | + | #os.unlink(filename) | |
| 24 | 34 | else: | |
| 25 | 35 | print " Couldn't find name in %s" %filename | |
| 26 | 36 | ||
| 37 | + | def getHeaderData(data): | |
| 38 | + | headers = [] | |
| 39 | + | while len(data) > 0: | |
| 40 | + | type = ord(data[0]) | |
| 41 | + | size = struct.unpack(">H", data[1:3])[0] | |
| 42 | + | o = data[3:size] | |
| 43 | + | data = data[size:] | |
| 44 | + | td = parseHeader(type, o) | |
| 45 | + | headers.append(td) | |
| 46 | + | if td["type"] == 0: | |
| 47 | + | print "Header Size: %s" % td["headerlength"] | |
| 48 | + | data = data[:td["headerlength"]-size] | |
| 49 | + | return headers | |
| 50 | + | ||
| 51 | + | def parseHeader(type, data): | |
| 52 | + | if type == 0: | |
| 53 | + | filetypecode, headerlength, datalength = struct.unpack(">BIQ", data) | |
| 54 | + | return {"type":type, "filetypecode":filetypecode, "headerlength":headerlength, "datalength":datalength} | |
| 55 | + | elif type == 1: | |
| 56 | + | bitsperpixel, columns, lines, compression = struct.unpack(">BHHB", data) | |
| 57 | + | return {"type":type, "bitsperpixel":bitsperpixel, "columns":columns, "lines":lines, "compression":compression} | |
| 58 | + | ||
| 59 | + | elif type == 2: | |
| 60 | + | projname, cfac, lfac, coff, loff = struct.unpack(">32sIIII", data) | |
| 61 | + | return {"type":type, "projname":projname, "cfac":cfac, "lfac":lfac, "coff":coff, "loff":loff} | |
| 62 | + | ||
| 63 | + | elif type == 3: | |
| 64 | + | return {"type":type, "data":data} | |
| 65 | + | ||
| 66 | + | elif type == 4: | |
| 67 | + | return {"type":type, "filename":data} | |
| 68 | + | ||
| 69 | + | elif type == 5: | |
| 70 | + | days, ms = struct.unpack(">HI", data[1:]) | |
| 71 | + | return {"type":type, "days":days, "ms":ms} | |
| 72 | + | ||
| 73 | + | elif type == 6: | |
| 74 | + | return {"type":type, "data":data} | |
| 75 | + | ||
| 76 | + | elif type == 7: | |
| 77 | + | return {"type":type, "data":data} | |
| 78 | + | ||
| 79 | + | elif type == 128: | |
| 80 | + | imageid, sequence, startcol, startline, maxseg, maxcol, maxrow = struct.unpack(">7H", data) | |
| 81 | + | return {"type":type, "imageid":imageid, "sequence":sequence, "startcol":startcol, "startline":startline, "maxseg":maxseg, "maxcol":maxcol, "maxrow":maxrow} | |
| 82 | + | ||
| 83 | + | elif type == 129: | |
| 84 | + | signature, productId, productSubId, parameter, compression = struct.unpack(">4sHHHB", data) | |
| 85 | + | return {"type":type, "signature":signature, "productId":productId, "productSubId":productSubId, "parameter":parameter, "compression":compression} | |
| 86 | + | ||
| 87 | + | elif type == 130: | |
| 88 | + | return {"type":type, "data":data} | |
| 89 | + | ||
| 90 | + | elif type == 131: | |
| 91 | + | flags, pixel, line = struct.unpack(">HBB", data) | |
| 92 | + | return {"type":type, "flags":flags, "pixel":pixel, "line":line} | |
| 93 | + | ||
| 94 | + | elif type == 132: | |
| 95 | + | return {"type":type, "data": data} | |
| 96 | + | else: | |
| 97 | + | return {"type":type} | |
| 98 | + | ||
| 27 | 99 | def readHeader(f): | |
| 28 | 100 | global t | |
| 29 | 101 | type = ord(f.read(1)) | |
| @@ -34,90 +106,137 @@ def readHeader(f): | |||
| 34 | 106 | if type == 0: | |
| 35 | 107 | filetypecode, headerlength, datalength = struct.unpack(">BIQ", data) | |
| 36 | 108 | return type, filetypecode, headerlength, datalength | |
| 37 | - | #print "Header type: %s (%s) File Type Code: %s Header Length %s Data Field Length: %s" %(type, HEADERTYPE_MAP[type], filetypecode, headerlength, datalength) | |
| 38 | 109 | elif type == 1: | |
| 39 | 110 | bitsperpixel, columns, lines, compression = struct.unpack(">BHHB", data) | |
| 40 | - | #print "Image Structure Header: " | |
| 41 | - | #print " Bits Per Pixel: %s" %bitsperpixel | |
| 42 | - | #print " Columns: %s" %columns | |
| 43 | - | #print " Lines: %s" %lines | |
| 44 | - | #print " Compression: %s" %compression | |
| 45 | 111 | return type, bitsperpixel, columns, lines, compression | |
| 46 | 112 | ||
| 47 | 113 | elif type == 2: | |
| 48 | 114 | projname, cfac, lfac, coff, loff = struct.unpack(">32sIIII", data) | |
| 49 | - | #print "Image Navigation Record" | |
| 50 | - | #print " Projection Name: %s" %projname | |
| 51 | - | #print " Column Scaling Factor: %s" %cfac | |
| 52 | - | #print " Line Scaling Factor: %s" %lfac | |
| 53 | - | #print " Column Offset: %s" %coff | |
| 54 | - | #print " Line Offset: %s" %loff | |
| 55 | 115 | return type, projname, cfac, lfac, coff, loff | |
| 56 | 116 | ||
| 57 | 117 | elif type == 3: | |
| 58 | - | #print "Image Data Function Record" | |
| 59 | - | #print " Data: {HIDDEN}" | |
| 60 | - | ##print " Data: \n%s" %data | |
| 61 | 118 | return type, data | |
| 62 | 119 | ||
| 63 | 120 | elif type == 4: | |
| 64 | - | #print "Annotation Record" | |
| 65 | - | #print " Data: %s" %data | |
| 66 | 121 | return type, data | |
| 67 | 122 | ||
| 68 | 123 | elif type == 5: | |
| 69 | - | #print "Timestamp Record" | |
| 70 | 124 | days, ms = struct.unpack(">HI", data[1:]) | |
| 71 | - | #print " Delta from 1 January 1958" | |
| 72 | - | #print " Days: %s" %days | |
| 73 | - | #print " Miliseconds: %s" %ms | |
| 74 | 125 | return type, days, ms | |
| 75 | 126 | ||
| 76 | 127 | elif type == 6: | |
| 77 | - | #print "Ancillary Text" | |
| 78 | - | #print " Data: %s" %data | |
| 79 | 128 | return type, data | |
| 80 | 129 | ||
| 81 | 130 | elif type == 7: | |
| 82 | - | #print "Key Header" | |
| 83 | - | #print " Data: %s" %data | |
| 84 | 131 | return type, data | |
| 85 | 132 | ||
| 86 | 133 | elif type == 128: | |
| 87 | 134 | imageid, sequence, startcol, startline, maxseg, maxcol, maxrow = struct.unpack(">7H", data) | |
| 88 | - | #print "Segment Identification Header" | |
| 89 | - | #print " Image Id: %s" %imageid | |
| 90 | - | #print " Sequence: %s" %sequence | |
| 91 | - | #print " Start Column: %s" %startcol | |
| 92 | - | #print " Start Line: %s" %startline | |
| 93 | - | #print " Number of Segments: %s" %maxseg | |
| 94 | - | #print " Width: %s" %maxcol | |
| 95 | - | #print " Height: %s" %maxrow | |
| 96 | 135 | return type, imageid, sequence, startcol, startline, maxseg, maxcol, maxrow | |
| 97 | 136 | ||
| 98 | 137 | elif type == 129: | |
| 99 | - | #print "NOAA Specific Header" | |
| 100 | 138 | signature, productId, productSubId, parameter, compression = struct.unpack(">4sHHHB", data) | |
| 101 | - | #print " Signature: %s" %signature | |
| 102 | - | #print " Product ID: %s" %productId | |
| 103 | - | #print " Product SubId: %s" %productSubId | |
| 104 | - | #print " Parameter: %s" %parameter | |
| 105 | - | #print " Compression: %s" %compression | |
| 106 | 139 | return type, signature, productId, productSubId, parameter, compression | |
| 107 | 140 | ||
| 108 | 141 | elif type == 130: | |
| 109 | - | #print "Header Structured Record" | |
| 110 | - | #print " Data: %s" % data | |
| 111 | 142 | return type, data | |
| 112 | 143 | ||
| 113 | 144 | elif type == 131: | |
| 114 | - | #print "Rice Compression Record" | |
| 115 | 145 | flags, pixel, line = struct.unpack(">HBB", data) | |
| 116 | - | #print " Flags: %s" %flags | |
| 117 | - | #print " Pixel: %s" %pixel | |
| 118 | - | #print " Line: %s" %line | |
| 119 | 146 | return type, flags, pixel, line | |
| 120 | 147 | ||
| 148 | + | elif type == 132: | |
| 149 | + | return type, data | |
| 150 | + | ||
| 121 | 151 | else: | |
| 122 | - | #print "Type not mapped: %s" % type | |
| 123 | - | return type | |
| 152 | + | return type | |
| 153 | + | ||
| 154 | + | def printHeaders(headers, showStructuredHeader=False, showImageDataRecord=False): | |
| 155 | + | for head in headers: | |
| 156 | + | type = head["type"] | |
| 157 | + | if type == 0: | |
| 158 | + | print "Header type: %s File Type Code: %s Header Length %s Data Field Length: %s" %(type, head["filetypecode"], head["headerlength"], head["datalength"]) | |
| 159 | + | elif type == 1: | |
| 160 | + | print "Image Structure Header: " | |
| 161 | + | print " Bits Per Pixel: %s" %head["bitsperpixel"] | |
| 162 | + | print " Columns: %s" %head["columns"] | |
| 163 | + | print " Lines: %s" %head["lines"] | |
| 164 | + | print " Compression: %s" %head["compression"] | |
| 165 | + | ||
| 166 | + | elif type == 2: | |
| 167 | + | print "Image Navigation Record" | |
| 168 | + | print " Projection Name: %s" %head["projname"] | |
| 169 | + | print " Column Scaling Factor: %s" %head["cfac"] | |
| 170 | + | print " Line Scaling Factor: %s" %head["lfac"] | |
| 171 | + | print " Column Offset: %s" %head["coff"] | |
| 172 | + | print " Line Offset: %s" %head["loff"] | |
| 173 | + | ||
| 174 | + | elif type == 3: | |
| 175 | + | print "Image Data Function Record" | |
| 176 | + | if showImageDataRecord: | |
| 177 | + | print " Data: %s" %head["data"] | |
| 178 | + | else: | |
| 179 | + | print " Data: {HIDDEN}" | |
| 180 | + | ||
| 181 | + | elif type == 4: | |
| 182 | + | print "Annotation Record" | |
| 183 | + | print " Filename: %s" %head["filename"] | |
| 184 | + | ||
| 185 | + | elif type == 5: | |
| 186 | + | print "Timestamp Record" | |
| 187 | + | print " Delta from 1 January 1958" | |
| 188 | + | print " Days: %s" %head["days"] | |
| 189 | + | print " Miliseconds: %s" %head["ms"] | |
| 190 | + | ||
| 191 | + | elif type == 6: | |
| 192 | + | print "Ancillary Text" | |
| 193 | + | print " Data: " | |
| 194 | + | t = head["data"].split(";") | |
| 195 | + | for i in t: | |
| 196 | + | print " %s" %i | |
| 197 | + | ||
| 198 | + | elif type == 7: | |
| 199 | + | print "Key Header" | |
| 200 | + | print " Data: %s" %head["data"] | |
| 201 | + | ||
| 202 | + | elif type == 128: | |
| 203 | + | print "Segment Identification Header" | |
| 204 | + | print " Image Id: %s" %head["imageid"] | |
| 205 | + | print " Sequence: %s" %head["sequence"] | |
| 206 | + | print " Start Column: %s" %head["startcol"] | |
| 207 | + | print " Start Line: %s" %head["startline"] | |
| 208 | + | print " Number of Segments: %s" %head["maxseg"] | |
| 209 | + | print " Width: %s" %head["maxcol"] | |
| 210 | + | print " Height: %s" %head["maxrow"] | |
| 211 | + | ||
| 212 | + | elif type == 129: | |
| 213 | + | print "NOAA Specific Header" | |
| 214 | + | print " Signature: %s" %head["signature"] | |
| 215 | + | print " Product ID: %s" %head["productId"] | |
| 216 | + | print " Product SubId: %s" %head["productSubId"] | |
| 217 | + | print " Parameter: %s" %head["parameter"] | |
| 218 | + | print " Compression: %s" %head["compression"] | |
| 219 | + | ||
| 220 | + | elif type == 130: | |
| 221 | + | print "Header Structured Record" | |
| 222 | + | if showImageDataRecord: | |
| 223 | + | t = head["data"].split("UI") | |
| 224 | + | print " Data: " | |
| 225 | + | for i in t: | |
| 226 | + | print " %s" %i | |
| 227 | + | else: | |
| 228 | + | print " Data: {HIDDEN}" | |
| 229 | + | ||
| 230 | + | elif type == 131: | |
| 231 | + | print "Rice Compression Record" | |
| 232 | + | print " Flags: %s" %head["flags"] | |
| 233 | + | print " Pixel: %s" %head["pixel"] | |
| 234 | + | print " Line: %s" %head["line"] | |
| 235 | + | ||
| 236 | + | elif type == 132: # Got in DCS Data | |
| 237 | + | print "DCS Data: " | |
| 238 | + | print " Data: %s" %head["data"] | |
| 239 | + | ||
| 240 | + | else: | |
| 241 | + | print "Type not mapped: %s" % type | |
| 242 | + | print "" | |
Lucas Teske a révisé ce gist 9 years ago. Aller à la révision
2 files changed, 348 insertions, 118 deletions
packetassemble.py
| @@ -1,131 +1,238 @@ | |||
| 1 | 1 | #!/usr/bin/env python | |
| 2 | 2 | ||
| 3 | - | import sys, struct, os | |
| 4 | - | ||
| 5 | - | HEADERTYPE_MAP = { | |
| 6 | - | 0: "Primary Header", | |
| 7 | - | 1: "Image Structure", | |
| 8 | - | 2: "Image Navigation", | |
| 9 | - | 3: "Image Data Function", | |
| 10 | - | 4: "Annotation", | |
| 11 | - | 5: "Timestamp", | |
| 12 | - | 6: "Acililary Text", | |
| 13 | - | 7: "Key Header", | |
| 14 | - | 128: "Segment Identification", | |
| 15 | - | 129: "NOAA Specific", | |
| 16 | - | 130: "Header Structured Record" | |
| 17 | - | } | |
| 3 | + | import sys, struct, os, packetmanager | |
| 18 | 4 | ||
| 19 | - | for i in range(8,128): | |
| 20 | - | HEADERTYPE_MAP[i] = "Reserved" | |
| 5 | + | FRAMESIZE = 892 | |
| 6 | + | M_PDUSIZE = FRAMESIZE - 6 | |
| 7 | + | EXPORTCORRUPT = False | |
| 21 | 8 | ||
| 22 | - | for i in range(131,256): | |
| 23 | - | HEADERTYPE_MAP[i] = "Reserved" | |
| 9 | + | SEQUENCE_FLAG_MAP = { | |
| 10 | + | 0: "Continued Segment", | |
| 11 | + | 1: "First Segment", | |
| 12 | + | 2: "Last Segment", | |
| 13 | + | 3: "Single Data" | |
| 14 | + | } | |
| 24 | 15 | ||
| 25 | - | filename = "channels/53/1702_0_1311_1.lrit" | |
| 16 | + | def ParseMSDU(data): | |
| 17 | + | o = struct.unpack(">H", data[:2])[0] | |
| 18 | + | version = (o & 0xE000) >> 13 | |
| 19 | + | type = (o & 0x1000) >> 12 | |
| 20 | + | shf = (o & 0x800) >> 11 | |
| 21 | + | apid = (o & 0x7FF) | |
| 22 | + | ||
| 23 | + | o = struct.unpack(">H", data[2:4])[0] | |
| 24 | + | sequenceflag = (o & 0xC000) >> 14 | |
| 25 | + | packetnumber = (o & 0x3FFF) | |
| 26 | + | packetlength = struct.unpack(">H", data[4:6])[0] -1 | |
| 27 | + | data = data[6:] | |
| 28 | + | return version, type, shf, apid, sequenceflag, packetnumber, packetlength, data | |
| 29 | + | ||
| 30 | + | def CalcCRC(data): | |
| 31 | + | lsb = 0xFF | |
| 32 | + | msb = 0xFF | |
| 33 | + | for c in data: | |
| 34 | + | x = ord(c) ^ msb | |
| 35 | + | x ^= (x >> 4) | |
| 36 | + | msb = (lsb ^ (x >> 3) ^ (x << 4)) & 255 | |
| 37 | + | lsb = (x ^ (x << 5)) & 255 | |
| 38 | + | return (msb << 8) + lsb | |
| 39 | + | ||
| 40 | + | def CheckCRC(data, crc): | |
| 41 | + | c = CalcCRC(data) | |
| 42 | + | if not c == crc: | |
| 43 | + | print " Expected: %s Found %s" %(hex(crc), hex(c)) | |
| 44 | + | return c == crc | |
| 45 | + | ||
| 46 | + | def SavePacket(channelid, packet): | |
| 47 | + | global totalCRCErrors | |
| 48 | + | global totalSavedPackets | |
| 49 | + | try: | |
| 50 | + | os.mkdir("channels/%s" %channelid) | |
| 51 | + | except: | |
| 52 | + | pass | |
| 53 | + | ||
| 54 | + | if packet["apid"] == 2047: | |
| 55 | + | print " Fill Packet. Skipping" | |
| 56 | + | return | |
| 57 | + | ||
| 58 | + | filename = "channels/%s/%s_%s.lrit" % (channelid, packet["apid"], packet["version"]) | |
| 59 | + | print "- Saving packet to %s" %filename | |
| 60 | + | ||
| 61 | + | if packet["framesdropped"]: | |
| 62 | + | print " WARNING: Some frames has been droped for this packet." | |
| 63 | + | ||
| 64 | + | datasize = len(packet["data"]) | |
| 65 | + | ||
| 66 | + | if not datasize - 2 == packet["size"]: # CRC is the latest 2 bytes of the payload | |
| 67 | + | print " WARNING: Packet Size does not match! Expected %s Found: %s" %(packet["size"], len(packet["data"])) | |
| 68 | + | if datasize - 2 > packet["size"]: | |
| 69 | + | datasize = packet["size"] + 2 | |
| 70 | + | print " WARNING: Trimming data to %s" % datasize | |
| 71 | + | ||
| 72 | + | data = packet["data"][:datasize-2] | |
| 73 | + | ||
| 74 | + | crc = packet["data"][datasize-2:datasize] | |
| 75 | + | crc = struct.unpack(">H", crc)[0] | |
| 76 | + | crc = CheckCRC(data, crc) | |
| 77 | + | if not crc: | |
| 78 | + | print " WARNING: CRC does not match!" | |
| 79 | + | totalCRCErrors += 1 | |
| 80 | + | ||
| 81 | + | if crc or (EXPORTCORRUPT and not crc): | |
| 82 | + | firstorsinglepacket = packet["sequenceflag_int"] == 1 or packet["sequenceflag_int"] == 3 | |
| 83 | + | f = open(filename, "w" if firstorsinglepacket else "a") | |
| 84 | + | f.write(data[10:] if firstorsinglepacket else data) # Remove transport layer size | |
| 85 | + | f.close() | |
| 86 | + | if packet["sequenceflag_int"] == 2 or packet["sequenceflag_int"] == 3: | |
| 87 | + | packetmanager.manageFile(filename) | |
| 88 | + | totalSavedPackets += 1 | |
| 89 | + | else: | |
| 90 | + | print " Corrupted frame, skipping..." | |
| 91 | + | ||
| 92 | + | def CreatePacket(data): | |
| 93 | + | while True: | |
| 94 | + | if len(data) < 6: | |
| 95 | + | return -1, data | |
| 96 | + | version, type, shf, apid, sequenceflag, packetnumber, packetlength, data = ParseMSDU(data) | |
| 97 | + | pdata = data[:packetlength+2] | |
| 98 | + | if apid != 2047: | |
| 99 | + | pendingpackets[apid] = { | |
| 100 | + | "data": pdata, | |
| 101 | + | "version": version, | |
| 102 | + | "type": type, | |
| 103 | + | "apid": apid, | |
| 104 | + | "sequenceflag": SEQUENCE_FLAG_MAP[sequenceflag], | |
| 105 | + | "sequenceflag_int": sequenceflag, | |
| 106 | + | "packetnumber": packetnumber, | |
| 107 | + | "framesdropped": False, | |
| 108 | + | "size": packetlength | |
| 109 | + | } | |
| 110 | + | ||
| 111 | + | print "- Creating packet %s Size: %s - %s" % (apid, packetlength, SEQUENCE_FLAG_MAP[sequenceflag]) | |
| 112 | + | else: | |
| 113 | + | apid = -1 | |
| 114 | + | ||
| 115 | + | if not packetlength+2 == len(data) and packetlength+2 < len(data): # Multiple packets in buffer | |
| 116 | + | SavePacket(sys.argv[1], pendingpackets[apid]) | |
| 117 | + | del pendingpackets[apid] | |
| 118 | + | data = data[packetlength+2:] | |
| 119 | + | #print " Multiple packets in same buffer. Repeating." | |
| 120 | + | else: | |
| 121 | + | break | |
| 122 | + | return apid, "" | |
| 123 | + | ||
| 124 | + | ||
| 125 | + | if len(sys.argv) < 2: | |
| 126 | + | print "Usage: ./channeldecode.py CHANNELID" | |
| 127 | + | print "This will open channels/channel_CHANNELID.bin" | |
| 128 | + | exit() | |
| 129 | + | ||
| 130 | + | filename = "channels/channel_%s.bin" % sys.argv[1] | |
| 26 | 131 | ||
| 27 | 132 | f = open(filename, "r") | |
| 28 | 133 | fsize = os.path.getsize(filename) | |
| 29 | 134 | readbytes = 0 | |
| 30 | - | t = 0 | |
| 31 | - | ||
| 32 | - | def readHeader(f): | |
| 33 | - | global t | |
| 34 | - | type = ord(f.read(1)) | |
| 35 | - | size = f.read(2) | |
| 36 | - | size = struct.unpack(">H", size)[0] | |
| 37 | - | data = f.read(size-3) | |
| 38 | - | t += size | |
| 39 | - | if type == 0: | |
| 40 | - | filetypecode, headerlength, datalength = struct.unpack(">BIQ", data) | |
| 41 | - | print "Header type: %s (%s) File Type Code: %s Header Length %s Data Field Length: %s" %(type, HEADERTYPE_MAP[type], filetypecode, headerlength, datalength) | |
| 42 | - | elif type == 1: | |
| 43 | - | bitsperpixel, columns, lines, compression = struct.unpack(">BHHB", data) | |
| 44 | - | print "Image Structure Header: " | |
| 45 | - | print " Bits Per Pixel: %s" %bitsperpixel | |
| 46 | - | print " Columns: %s" %columns | |
| 47 | - | print " Lines: %s" %lines | |
| 48 | - | print " Compression: %s" %compression | |
| 49 | - | ||
| 50 | - | elif type == 2: | |
| 51 | - | projname, cfac, lfac, coff, loff = struct.unpack(">32sIIII", data) | |
| 52 | - | print "Image Navigation Record" | |
| 53 | - | print " Projection Name: %s" %projname | |
| 54 | - | print " Column Scaling Factor: %s" %cfac | |
| 55 | - | print " Line Scaling Factor: %s" %lfac | |
| 56 | - | print " Column Offset: %s" %coff | |
| 57 | - | print " Line Offset: %s" %loff | |
| 58 | - | ||
| 59 | - | elif type == 3: | |
| 60 | - | print "Image Data Function Record" | |
| 61 | - | print " Data: {HIDDEN}" | |
| 62 | - | #print " Data: \n%s" %data | |
| 63 | - | ||
| 64 | - | elif type == 4: | |
| 65 | - | print "Annotation Record" | |
| 66 | - | print " Data: %s" %data | |
| 67 | - | ||
| 68 | - | elif type == 5: | |
| 69 | - | print "Timestamp Record" | |
| 70 | - | days, ms = struct.unpack(">HI", data[1:]) | |
| 71 | - | print " Delta from 1 January 1958" | |
| 72 | - | print " Days: %s" %days | |
| 73 | - | print " Miliseconds: %s" %ms | |
| 74 | - | ||
| 75 | - | elif type == 6: | |
| 76 | - | print "Ancillary Text" | |
| 77 | - | print " Data: %s" %data | |
| 78 | - | ||
| 79 | - | elif type == 7: | |
| 80 | - | print "Key Header" | |
| 81 | - | print " Data: %s" %data | |
| 82 | - | ||
| 83 | - | elif type == 128: | |
| 84 | - | print size | |
| 85 | - | imageid, sequence, startcol, startline, maxseg, maxcol, maxrow = struct.unpack(">7H", data) | |
| 86 | - | print "Segment Identification Header" | |
| 87 | - | print " Image Id: %s" %imageid | |
| 88 | - | print " Sequence: %s" %sequence | |
| 89 | - | print " Start Column: %s" %startcol | |
| 90 | - | print " Start Line: %s" %startline | |
| 91 | - | print " Number of Segments: %s" %maxseg | |
| 92 | - | print " Width: %s" %maxcol | |
| 93 | - | print " Height: %s" %maxrow | |
| 94 | - | ||
| 95 | - | elif type == 129: | |
| 96 | - | print "NOAA Specific Header" | |
| 97 | - | signature, productId, productSubId, parameter, compression = struct.unpack(">4sHHHB", data) | |
| 98 | - | print " Signature: %s" %signature | |
| 99 | - | print " Product ID: %s" %productId | |
| 100 | - | print " Product SubId: %s" %productSubId | |
| 101 | - | print " Parameter: %s" %parameter | |
| 102 | - | print " Compression: %s" %compression | |
| 103 | - | ||
| 104 | - | elif type == 130: | |
| 105 | - | print "Header Structured Record" | |
| 106 | - | print " Data: %s" % data | |
| 107 | 135 | ||
| 136 | + | pendingpackets = {} | |
| 137 | + | ||
| 138 | + | lastFrameNumber = -1 | |
| 139 | + | totalFrameDrops = 0 | |
| 140 | + | totalCRCErrors = 0 | |
| 141 | + | totalSavedPackets = 0 | |
| 142 | + | lastAPID = -1 | |
| 143 | + | buff = "" | |
| 144 | + | ||
| 145 | + | while readbytes < fsize: | |
| 146 | + | if fsize - readbytes < FRAMESIZE: | |
| 147 | + | print " Some bytes at end of file was not enough for filling a frame. Remaining Bytes: %s - Frame Size: %s" % (fsize-readsize, FRAMESIZE) | |
| 148 | + | break | |
| 149 | + | ||
| 150 | + | # Read Data | |
| 151 | + | data = f.read(FRAMESIZE) | |
| 152 | + | versionNumber = (ord(data[0]) & 0xC0) >> 6 | |
| 153 | + | scid = (ord(data[0]) & 0x3F) << 2 | (ord(data[1]) & 0xC0) >> 6 | |
| 154 | + | vcid = (ord(data[1]) & 0x3F) | |
| 155 | + | ||
| 156 | + | counter = struct.unpack(">I", data[2:6])[0] | |
| 157 | + | counter &= 0xFFFFFF00 | |
| 158 | + | counter >>= 8 | |
| 159 | + | ||
| 160 | + | # Check for dropped Frames | |
| 161 | + | if not lastFrameNumber == -1 and not lastFrameNumber+1 == counter: | |
| 162 | + | print " Frames dropped: %s" % (counter-lastFrameNumber-1); | |
| 163 | + | totalFrameDrops += counter-lastFrameNumber-1; | |
| 164 | + | if not lastAPID == -1: # Fill | |
| 165 | + | pendingpackets[lastAPID]["data"] += "\x00" * 878 | |
| 166 | + | pendingpackets[lastAPID]["framesdropped"] = True | |
| 167 | + | ||
| 168 | + | ||
| 169 | + | #print "SC: %s ID: %s Frame Number: %s" % (scid, vcid, counter) | |
| 170 | + | ||
| 171 | + | # Demux M_PDU | |
| 172 | + | data = data[6:] # Strip channel header | |
| 173 | + | fhp = struct.unpack(">H", data[:2])[0] & 0x7FF | |
| 174 | + | data = data[2:] # Strip M_PDU Header | |
| 175 | + | #print " First Packet Header: %s" %fhp | |
| 176 | + | #data is now TP_PDU | |
| 177 | + | if not fhp == 2047: # Frame Contains a new Packet | |
| 178 | + | # Data was incomplete on last FHP and another packet starts here. | |
| 179 | + | if lastAPID == -1 and len(buff) > 0: | |
| 180 | + | #print " Data was incomplete from last FHP. Parsing packet now" | |
| 181 | + | if fhp > 0: | |
| 182 | + | buff += data[:fhp] | |
| 183 | + | lastAPID, data = CreatePacket(buff) | |
| 184 | + | if lastAPID == -1: | |
| 185 | + | buff = data | |
| 186 | + | else: | |
| 187 | + | buff = "" | |
| 188 | + | ||
| 189 | + | if not lastAPID == -1: # We are finishing another packet | |
| 190 | + | if fhp > 0: | |
| 191 | + | pendingpackets[lastAPID]["data"] += data[:fhp] | |
| 192 | + | SavePacket(sys.argv[1], pendingpackets[lastAPID]) | |
| 193 | + | del pendingpackets[lastAPID] | |
| 194 | + | lastAPID = -1 | |
| 195 | + | ||
| 196 | + | # Try to create a new packet | |
| 197 | + | buff += data[fhp:] | |
| 198 | + | lastAPID, data = CreatePacket(buff) | |
| 199 | + | if lastAPID == -1: | |
| 200 | + | buff = data | |
| 201 | + | else: | |
| 202 | + | buff = "" | |
| 108 | 203 | else: | |
| 109 | - | print "Type not mapped: %s" % type | |
| 110 | - | print "" | |
| 111 | - | ||
| 112 | - | data = f.read(10) | |
| 113 | - | ||
| 114 | - | filecounter, Transportlength = struct.unpack(">HQ", data) | |
| 115 | - | ||
| 116 | - | print "File Counter: %s Transport Size: %s" %(filecounter, Transportlength) | |
| 117 | - | ||
| 118 | - | readHeader(f) | |
| 119 | - | readHeader(f) | |
| 120 | - | readHeader(f) | |
| 121 | - | readHeader(f) | |
| 122 | - | readHeader(f) | |
| 123 | - | readHeader(f) | |
| 124 | - | readHeader(f) | |
| 125 | - | readHeader(f) | |
| 126 | - | readHeader(f) | |
| 127 | - | readHeader(f) | |
| 128 | - | ||
| 129 | - | print t | |
| 204 | + | if len(buff) > 0 and lastAPID == -1: | |
| 205 | + | #print " Data was incomplete from last FHP. Parsing packet now" | |
| 206 | + | buff += data | |
| 207 | + | lastAPID, data = CreatePacket(buff) | |
| 208 | + | if lastAPID == -1: | |
| 209 | + | buff = data | |
| 210 | + | else: | |
| 211 | + | buff = "" | |
| 212 | + | elif len(buff) > 0: | |
| 213 | + | print " PROBLEM!" | |
| 214 | + | elif lastAPID == -1: | |
| 215 | + | buff += data | |
| 216 | + | lastAPID, data = CreatePacket(buff) | |
| 217 | + | if lastAPID == -1: | |
| 218 | + | buff = data | |
| 219 | + | else: | |
| 220 | + | buff = "" | |
| 221 | + | else: | |
| 222 | + | #print " Appending %s bytes to %s" % (lastAPID, len(data)) | |
| 223 | + | pendingpackets[lastAPID]["data"] += data | |
| 224 | + | ||
| 225 | + | ||
| 226 | + | lastFrameNumber = counter | |
| 227 | + | readbytes += FRAMESIZE | |
| 228 | + | ||
| 229 | + | # One packet can be still in pending packets | |
| 230 | + | for i in pendingpackets.keys(): | |
| 231 | + | SavePacket(sys.argv[1], pendingpackets[lastAPID]) | |
| 232 | + | ||
| 233 | + | print "\n\nReport:" | |
| 234 | + | print "\tTotal Frames Dropped: %s" %totalFrameDrops | |
| 235 | + | print "\tTotal Saved Packets: %s" %totalSavedPackets | |
| 236 | + | print "\tTotal Packet CRC Fails: %s" %totalCRCErrors | |
| 130 | 237 | ||
| 131 | 238 | f.close() | |
packetmanager.py(fichier créé)
| @@ -0,0 +1,123 @@ | |||
| 1 | + | #!/usr/bin/env python | |
| 2 | + | import os, struct | |
| 3 | + | ||
| 4 | + | def manageFile(filename): | |
| 5 | + | f = open(filename, "r") | |
| 6 | + | ||
| 7 | + | try: | |
| 8 | + | type, filetypecode, headerlength, datalength = readHeader(f) | |
| 9 | + | except: | |
| 10 | + | print " Header 0 is corrupted for file %s" %filename | |
| 11 | + | return | |
| 12 | + | ||
| 13 | + | newfilename = filename | |
| 14 | + | while f.tell() < headerlength: | |
| 15 | + | data = readHeader(f) | |
| 16 | + | if data[0] == 4: | |
| 17 | + | print " Filename is %s" % data[1] | |
| 18 | + | newfilename = data[1] | |
| 19 | + | break | |
| 20 | + | f.close() | |
| 21 | + | if filename != newfilename: | |
| 22 | + | print " Renaming %s to %s/%s" %(filename, os.path.dirname(filename), newfilename) | |
| 23 | + | os.rename(filename, "%s/%s" %(os.path.dirname(filename), newfilename)) | |
| 24 | + | else: | |
| 25 | + | print " Couldn't find name in %s" %filename | |
| 26 | + | ||
def readHeader(f):
    """Read and decode one LRIT header record from file object *f*.

    A record is a 1-byte type, a 2-byte big-endian total size (including the
    3-byte prologue), and ``size - 3`` bytes of payload.  Always returns a
    tuple whose first element is the record type, followed by the decoded
    fields -- or the raw payload for free-form / unmapped records.

    Fixes over the original: the unmapped-type branch used to return a bare
    int, which broke callers that index ``result[0]``; a stray ``global t``
    (``t`` is never defined in this module) was removed; the builtin ``type``
    is no longer shadowed.
    """
    rectype = ord(f.read(1))
    size = struct.unpack(">H", f.read(2))[0]
    data = f.read(size - 3)  # payload excludes the 1-byte type + 2-byte size

    if rectype == 0:
        # Primary Header
        filetypecode, headerlength, datalength = struct.unpack(">BIQ", data)
        return rectype, filetypecode, headerlength, datalength

    elif rectype == 1:
        # Image Structure
        bitsperpixel, columns, lines, compression = struct.unpack(">BHHB", data)
        return rectype, bitsperpixel, columns, lines, compression

    elif rectype == 2:
        # Image Navigation: projection name plus column/line scaling and offsets
        projname, cfac, lfac, coff, loff = struct.unpack(">32sIIII", data)
        return rectype, projname, cfac, lfac, coff, loff

    elif rectype == 3:
        # Image Data Function: free-form payload
        return rectype, data

    elif rectype == 4:
        # Annotation: payload is the intended file name
        return rectype, data

    elif rectype == 5:
        # Timestamp: days/milliseconds since 1 January 1958.
        # First payload byte is skipped -- presumably the CDS time-code ID
        # octet; TODO confirm against the LRIT specification.
        days, ms = struct.unpack(">HI", data[1:])
        return rectype, days, ms

    elif rectype == 6:
        # Ancillary Text
        return rectype, data

    elif rectype == 7:
        # Key Header
        return rectype, data

    elif rectype == 128:
        # Segment Identification: position of this segment in the full image
        imageid, sequence, startcol, startline, maxseg, maxcol, maxrow = struct.unpack(">7H", data)
        return rectype, imageid, sequence, startcol, startline, maxseg, maxcol, maxrow

    elif rectype == 129:
        # NOAA Specific header
        signature, productId, productSubId, parameter, compression = struct.unpack(">4sHHHB", data)
        return rectype, signature, productId, productSubId, parameter, compression

    elif rectype == 130:
        # Header Structured Record: free-form payload
        return rectype, data

    elif rectype == 131:
        # Rice Compression Record
        flags, pixel, line = struct.unpack(">HBB", data)
        return rectype, flags, pixel, line

    else:
        # Unmapped/reserved type: return the raw payload so callers can still
        # index result[0] uniformly (the original returned a bare int here).
        return rectype, data
Lucas Teske a révisé ce gist 9 years ago. Aller à la révision
1 file changed, 131 insertions
packetassemble.py(fichier créé)
| @@ -0,0 +1,131 @@ | |||
| 1 | + | #!/usr/bin/env python | |
| 2 | + | ||
| 3 | + | import sys, struct, os | |
| 4 | + | ||
# Human-readable names for LRIT header record types, indexed by the 1-byte
# type field.  Types 8-127 and 131-255 are reserved.
HEADERTYPE_MAP = {
    0: "Primary Header",
    1: "Image Structure",
    2: "Image Navigation",
    3: "Image Data Function",
    4: "Annotation",
    5: "Timestamp",
    6: "Ancillary Text",  # fixed typo: was "Acililary Text"
    7: "Key Header",
    128: "Segment Identification",
    129: "NOAA Specific",
    130: "Header Structured Record"
}

for i in range(8, 128):
    HEADERTYPE_MAP[i] = "Reserved"

for i in range(131, 256):
    HEADERTYPE_MAP[i] = "Reserved"
| 24 | + | ||
# Hard-coded sample LRIT file produced by the channel demuxer.
filename = "channels/53/1702_0_1311_1.lrit"

# Binary mode -- LRIT is a binary format; text mode would corrupt reads on
# platforms that translate line endings.
f = open(filename, "rb")
fsize = os.path.getsize(filename)
readbytes = 0
t = 0  # running total of header bytes consumed by readHeader()
| 31 | + | ||
def readHeader(f):
    """Read one LRIT header record from file object *f* and pretty-print it.

    Advances *f* past the record and adds the record size to the module-level
    running total ``t``.  Output goes to stdout; nothing is returned.

    Fixes over the original: a stray debug ``print size`` in the type-128
    branch was removed; prints use the single-argument parenthesized form
    (identical output under Python 2, also valid Python 3); the builtin
    ``type`` is no longer shadowed.
    """
    global t
    rectype = ord(f.read(1))
    size = struct.unpack(">H", f.read(2))[0]
    data = f.read(size - 3)  # payload excludes the 1-byte type + 2-byte size
    t += size

    if rectype == 0:
        filetypecode, headerlength, datalength = struct.unpack(">BIQ", data)
        print("Header type: %s (%s) File Type Code: %s Header Length %s Data Field Length: %s" % (rectype, HEADERTYPE_MAP[rectype], filetypecode, headerlength, datalength))
    elif rectype == 1:
        bitsperpixel, columns, lines, compression = struct.unpack(">BHHB", data)
        print("Image Structure Header: ")
        print("  Bits Per Pixel: %s" % bitsperpixel)
        print("  Columns: %s" % columns)
        print("  Lines: %s" % lines)
        print("  Compression: %s" % compression)

    elif rectype == 2:
        projname, cfac, lfac, coff, loff = struct.unpack(">32sIIII", data)
        print("Image Navigation Record")
        print("  Projection Name: %s" % projname)
        print("  Column Scaling Factor: %s" % cfac)
        print("  Line Scaling Factor: %s" % lfac)
        print("  Column Offset: %s" % coff)
        print("  Line Offset: %s" % loff)

    elif rectype == 3:
        print("Image Data Function Record")
        print("  Data: {HIDDEN}")
        #print("  Data: \n%s" % data)

    elif rectype == 4:
        print("Annotation Record")
        print("  Data: %s" % data)

    elif rectype == 5:
        # First payload byte skipped -- presumably the CDS time-code ID
        # octet; TODO confirm against the LRIT specification.
        days, ms = struct.unpack(">HI", data[1:])
        print("Timestamp Record")
        print("  Delta from 1 January 1958")
        print("  Days: %s" % days)
        print("  Miliseconds: %s" % ms)

    elif rectype == 6:
        print("Ancillary Text")
        print("  Data: %s" % data)

    elif rectype == 7:
        print("Key Header")
        print("  Data: %s" % data)

    elif rectype == 128:
        imageid, sequence, startcol, startline, maxseg, maxcol, maxrow = struct.unpack(">7H", data)
        print("Segment Identification Header")
        print("  Image Id: %s" % imageid)
        print("  Sequence: %s" % sequence)
        print("  Start Column: %s" % startcol)
        print("  Start Line: %s" % startline)
        print("  Number of Segments: %s" % maxseg)
        print("  Width: %s" % maxcol)
        print("  Height: %s" % maxrow)

    elif rectype == 129:
        print("NOAA Specific Header")
        signature, productId, productSubId, parameter, compression = struct.unpack(">4sHHHB", data)
        print("  Signature: %s" % signature)
        print("  Product ID: %s" % productId)
        print("  Product SubId: %s" % productSubId)
        print("  Parameter: %s" % parameter)
        print("  Compression: %s" % compression)

    elif rectype == 130:
        print("Header Structured Record")
        print("  Data: %s" % data)

    else:
        print("Type not mapped: %s" % rectype)
    print("")
| 111 | + | ||
| 112 | + | data = f.read(10) | |
| 113 | + | ||
| 114 | + | filecounter, Transportlength = struct.unpack(">HQ", data) | |
| 115 | + | ||
| 116 | + | print "File Counter: %s Transport Size: %s" %(filecounter, Transportlength) | |
| 117 | + | ||
| 118 | + | readHeader(f) | |
| 119 | + | readHeader(f) | |
| 120 | + | readHeader(f) | |
| 121 | + | readHeader(f) | |
| 122 | + | readHeader(f) | |
| 123 | + | readHeader(f) | |
| 124 | + | readHeader(f) | |
| 125 | + | readHeader(f) | |
| 126 | + | readHeader(f) | |
| 127 | + | readHeader(f) | |
| 128 | + | ||
| 129 | + | print t | |
| 130 | + | ||
| 131 | + | f.close() | |