上一篇帖子代码贴了太多太长了,所以重新开一篇帖子。
我终于把解调和解码程序合并到一起去了。
/* NOTE(review): the forum software stripped the <...> names from every
 * #include; reconstructed from the identifiers used below -- verify
 * against the original source. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <math.h>
#include <signal.h>
#include <unistd.h>
#include <time.h>
#include <pthread.h>
#include <iostream>
#include <libhackrf/hackrf.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <cmath>
using namespace cv;
// Leftover from an earlier ALSA-based build; never used here.
struct pcm *pcm;
// Destination image: 256 scan lines x 320 pixels, 8-bit BGR.
Mat frame;
// Audio-domain sample rate (Hz) after the final resampling stage.
int SAMPLERATE = 8000;
double oneBitSampleCount; // unused in this build
#define MAXIMUM_BUF_LENGTH (16 * 16384)
using namespace std;
// Set by the SIGINT handler, polled by the main loop.
static volatile bool do_exit = false;
hackrf_device *device;
uint32_t freq;                 // tuner frequency in Hz
uint32_t hardware_sample_rate; // ADC rate = downsample * rate_in
// Raw interleaved I/Q widened to 16 bits. Note the unsigned element type:
// negative samples are carried via two's-complement bits and memcpy'd into
// the signed `lowpassed` buffer below.
uint16_t buf16[MAXIMUM_BUF_LENGTH];
int16_t lowpassed[MAXIMUM_BUF_LENGTH]; // decimated I/Q working buffer
int lp_len;
int rate_in;   // I/Q rate after the boxcar decimator (Hz)
int rate_out;  // demodulated rate before audio resampling (Hz)
int rate_out2; // final audio rate (Hz)
int16_t result_demod[MAXIMUM_BUF_LENGTH]; // FM-demodulated samples
// One decoded brightness value per audio sample for a whole image
// (sized just above imageLengthInSamples).
int luminance[914321];
int luminance_count;
int result_demod_len;
int now_r, now_j;   // boxcar decimator accumulators (I and Q)
int pre_r, pre_j;   // last I/Q pair of the previous buffer, for the demod seam
int prev_index;     // decimator phase counter
int now_lpr;        // audio resampler accumulator
int prev_lpr_index; // audio resampler phase counter
FILE *file;         // only assigned in main(); otherwise unused
int downsample;     // hardware_sample_rate / rate_in ratio
// Round half away from zero. NOTE(review): this redefines the global `round`
// symbol from libm (presumably for a toolchain that lacked it) and shadows
// std::round; on some compilers/libcs this definition may conflict with the
// math.h declaration -- confirm before reuse.
double round(double r)
{
return (r > 0.0) ? floor(r + 0.5) : ceil(r - 0.5);
}
// History of the 4th-order IIR band-pass that isolates the SSTV audio band:
// xvBPV holds the last five gain-scaled inputs, yvBPV the last five outputs.
double xvBPV[5];
double yvBPV[5];
// Run one sample through the band-pass. Behaviour matches the original:
// shift both histories by one, scale the new input by the filter gain,
// then evaluate the recurrence for the new output.
double bandPassSSTVSignal(double sampleIn)
{
    for (int k = 0; k < 4; k++)
    {
        xvBPV[k] = xvBPV[k + 1];
        yvBPV[k] = yvBPV[k + 1];
    }
    xvBPV[4] = sampleIn / 7.627348301;
    yvBPV[4] = (xvBPV[0]+xvBPV[4])-2*xvBPV[2]+(-0.2722149379*yvBPV[0])+(0.3385637917*yvBPV[1])+(-0.8864523063*yvBPV[2])+(0.7199258671*yvBPV[3]);
    return yvBPV[4];
}
// 51-tap FIR low-pass kernel, shared by both quadrature arms of the detector.
double FIRLPCoeffs[51] = { +0.0001082461, +0.0034041195, +0.0063570207, +0.0078081648,
+0.0060550614, -0.0002142384, -0.0104500335, -0.0211855480,
-0.0264527776, -0.0201269304, +0.0004419626, +0.0312014771,
+0.0606261038, +0.0727491887, +0.0537028370, -0.0004362161,
-0.0779387981, -0.1511168919, -0.1829049634, -0.1390189257,
-0.0017097774, +0.2201896764, +0.4894395006, +0.7485289338,
+0.9357596142, +1.0040320616, +0.9357596142, +0.7485289338,
+0.4894395006, +0.2201896764, -0.0017097774, -0.1390189257,
-0.1829049634, -0.1511168919, -0.0779387981, -0.0004362161,
+0.0537028370, +0.0727491887, +0.0606261038, +0.0312014771,
+0.0004419626, -0.0201269304, -0.0264527776, -0.0211855480,
-0.0104500335, -0.0002142384, +0.0060550614, +0.0078081648,
+0.0063570207, +0.0034041195, +0.0001082461,
};
// Delay line for the in-phase arm.
double xvFIRLP1[51];
// 51-tap FIR low-pass, in-phase arm: advance the delay line, scale the new
// sample by the filter gain, then convolve against the kernel.
double FIRLowPass1(double sampleIn)
{
    for (int k = 1; k <= 50; k++)
        xvFIRLP1[k - 1] = xvFIRLP1[k];
    xvFIRLP1[50] = sampleIn / 5.013665674;
    double acc = 0;
    for (int k = 0; k <= 50; k++)
        acc += (FIRLPCoeffs[k] * xvFIRLP1[k]);
    return acc;
}
// Delay line for the quadrature arm.
double xvFIRLP2[51];
// 51-tap FIR low-pass, quadrature arm: identical kernel and gain to
// FIRLowPass1, but with its own delay line.
double FIRLowPass2(double sampleIn)
{
    for (int k = 1; k <= 50; k++)
        xvFIRLP2[k - 1] = xvFIRLP2[k];
    xvFIRLP2[50] = sampleIn / 5.013665674;
    double acc = 0;
    for (int k = 0; k <= 50; k++)
        acc += (FIRLPCoeffs[k] * xvFIRLP2[k]);
    return acc;
}
// moving average
// Incremental moving-sum smoother for the in-phase arm. The buffer has nine
// slots but the element subtracted is the post-shift head, so the effective
// running window is eight samples (the scale is absorbed downstream).
double xvMA1[9];
double yvMA1prev = 0;
double noiseReductionFilter1(double sampleIn)
{
    // Pre-shift xvMA1[1] is exactly what xvMA1[0] becomes after the shift.
    double dropped = xvMA1[1];
    for (int k = 1; k <= 8; k++)
        xvMA1[k - 1] = xvMA1[k];
    xvMA1[8] = sampleIn;
    yvMA1prev = yvMA1prev + sampleIn - dropped;
    return yvMA1prev;
}
// Incremental moving-sum smoother for the quadrature arm; mirrors
// noiseReductionFilter1 with independent state.
double xvMA2[9];
double yvMA2prev = 0;
double noiseReductionFilter2(double sampleIn)
{
    // Pre-shift xvMA2[1] is exactly what xvMA2[0] becomes after the shift.
    double dropped = xvMA2[1];
    for (int k = 1; k <= 8; k++)
        xvMA2[k - 1] = xvMA2[k];
    xvMA2[8] = sampleIn;
    yvMA2prev = yvMA2prev + sampleIn - dropped;
    return yvMA2prev;
}
double oscPhase = 0;          // 2 kHz local-oscillator phase (radians)
double realPartPrev = 0;      // previous filtered in-phase sample
double imaginaryPartPrev = 0; // previous filtered quadrature sample
// FM-demodulate one band-passed audio sample into a 0..255 luminance value.
// The sample is mixed with a 2 kHz quadrature oscillator, each arm is FIR
// low-passed and smoothed, and the per-sample phase change (instantaneous
// frequency) is approximated by the cross product of consecutive I/Q vectors
// normalized by the squared magnitude.
int fmDemodulateLuminance(double sample)
{
oscPhase += (2 * M_PI * 2000) / SAMPLERATE;
double realPart = cos(oscPhase) * sample;
double imaginaryPart = sin(oscPhase) * sample;
// Wrap only after cos/sin have consumed the unwrapped phase.
if (oscPhase >= 2 * M_PI)
oscPhase -= 2 * M_PI;
realPart = FIRLowPass1(realPart);
imaginaryPart = FIRLowPass2(imaginaryPart);
realPart = noiseReductionFilter1(realPart);
imaginaryPart = noiseReductionFilter2(imaginaryPart);
// cross(prev, cur) / |cur|^2 ~ phase step between consecutive samples.
sample = (imaginaryPart*realPartPrev-realPart*imaginaryPartPrev)/(realPart*realPart+imaginaryPart*imaginaryPart);
realPartPrev = realPart;
imaginaryPartPrev = imaginaryPart;
sample += 0.2335; // bring the value above 0
// 0.2335/0.617 map the expected deviation range onto 0..255 -- empirical
// constants, TODO confirm against the SSTV deviation actually used.
// Note: this local `luminance` shadows the global luminance[] array.
int luminance = (int)round((sample/0.617)*255);
luminance = 255-luminance;
if (luminance > 255)
luminance = 255;
if (luminance < 0)
luminance = 0;
return luminance;
}
// Scan-line timing constants (seconds). These durations appear to match the
// Martin M1 SSTV mode: 320 pixels per channel, channels sent in G, B, R
// order -- TODO confirm the mode.
double pixelLengthInS = 0.0004576;
double syncLengthInS = 0.004862;
double porchLengthInS = 0.000572;
double separatorLengthInS = 0.000572;
double channelLengthInS = pixelLengthInS*320;
// Offset of each colour channel from the start of a scan line.
double channelGStartInS = syncLengthInS + porchLengthInS;
double channelBStartInS = channelGStartInS + channelLengthInS + separatorLengthInS;
double channelRStartInS = channelBStartInS + channelLengthInS + separatorLengthInS;
double lineLengthInS = syncLengthInS + porchLengthInS + channelLengthInS + separatorLengthInS + channelLengthInS + separatorLengthInS + channelLengthInS + separatorLengthInS;
// Total audio samples for a complete 256-line image.
double imageLengthInSamples = (lineLengthInS*256)*SAMPLERATE;
void receiveImage()
{
double t, linet;
double nextSyncTime = 0;
for (int s = 0; s < imageLengthInSamples; s++)
{
t = ((double)s)/((double)SAMPLERATE);
int temp_lineLengthInS = (int)(lineLengthInS * 10000000);
int temp_t = (int)(t * 10000000);
linet = ((double)(temp_t % temp_lineLengthInS))/10000000;
int lum = luminance[s];
if (t >= nextSyncTime)
{
nextSyncTime += lineLengthInS;
imshow("frame", frame);
if (waitKey(5) == 'q')
{
break;
}
}
if ((linet >= channelGStartInS && linet < channelGStartInS + channelLengthInS) ||
(linet >= channelBStartInS && linet < channelBStartInS + channelLengthInS) ||
(linet >= channelRStartInS && linet < channelRStartInS + channelLengthInS) )
{
int y = (int)floor(t/lineLengthInS);
if (linet >= channelGStartInS && linet < channelGStartInS + channelLengthInS)
{
int x = (int)floor(((linet-channelGStartInS)/channelLengthInS)*320);
frame.at(y, x)[1] = lum;
}
if (linet >= channelBStartInS && linet < channelBStartInS + channelLengthInS)
{
int x = (int)floor(((linet-channelBStartInS)/channelLengthInS)*320);
frame.at(y, x)[0] = lum;
}
if (linet >= channelRStartInS && linet < channelRStartInS + channelLengthInS)
{
int x = (int)floor(((linet-channelRStartInS)/channelLengthInS)*320);
frame.at(y, x)[2] = lum;
}
}
}
fprintf(stderr, "Finished rx.\n");
}
// SIGINT handler: log and raise the volatile shutdown flag that the main
// loop polls. (NOTE: iostream is not async-signal-safe; acceptable here
// since the handler only runs once at shutdown.)
void sigint_callback_handler(int signum)
{
    (void)signum;
    std::cout << "Caught signal" << std::endl;
    do_exit = true;
}
// Complex multiply: (ar + j*aj) * (br + j*bj) -> (*cr, *cj).
void multiply(int ar, int aj, int br, int bj, int *cr, int *cj)
{
    const int re = ar * br - aj * bj;
    const int im = aj * br + ar * bj;
    *cr = re;
    *cj = im;
}
// Phase difference between two consecutive I/Q samples, scaled so that an
// angle of pi maps to 1<<14. Multiplying the current sample by the conjugate
// of the previous one yields their relative rotation; atan2 extracts it.
int polar_discriminant(int ar, int aj, int br, int bj)
{
    int cr, cj;
    multiply(ar, aj, br, -bj, &cr, &cj);
    const double angle = atan2((double)cj, (double)cr);
    return (int)(angle / 3.14159 * (1 << 14));
}
// HackRF receive callback (runs on the libhackrf USB thread). Performs the
// whole DSP chain in place: 8-bit I/Q -> boxcar decimation -> polar FM
// discriminator -> fractional resampling to audio rate -> SSTV band-pass ->
// luminance decode. Assumes valid_length is an even number of interleaved
// I/Q bytes -- TODO confirm against libhackrf's transfer contract.
int rx_callback(hackrf_transfer* transfer)
{
for (int i = 0; i < transfer->valid_length; i++)
{
// Reinterpret the unsigned byte as signed and offset by one (rtl_fm style).
double sample = (int8_t)(transfer->buffer[i]) + 1;
buf16[i] = (int16_t)sample; //s->buf16[i] = (int16_t)buf[i] - 127; s->buf16[i] -127~128 uint16_t, unsigned for negative?
}
memcpy(lowpassed, buf16, 2*transfer->valid_length);
lp_len = transfer->valid_length;
//low pass //rate = hardware_sample_rate = 6*2M
// Boxcar decimator: sum `downsample` consecutive I/Q pairs, writing the
// decimated stream back into the front of `lowpassed`.
int i=0, i2=0;
while (i < lp_len)
{
now_r += lowpassed[i];
now_j += lowpassed[i+1];
i += 2;
prev_index++;
if (prev_index < downsample)
{
continue;
}
lowpassed[i2] = now_r;
lowpassed[i2+1] = now_j;
prev_index = 0;
now_r = 0;
now_j = 0;
i2 += 2;
}
lp_len = i2;
//fm demod //rate = rate_in = 2M
// Polar discriminator between consecutive I/Q pairs; pre_r/pre_j carry the
// last pair of the previous buffer across the seam.
int i3, pcm;
pcm = polar_discriminant(lowpassed[0], lowpassed[1], pre_r, pre_j);
result_demod[0] = (int16_t)pcm;
for (i3 = 2; i3 < (lp_len-1); i3 += 2)
{
pcm = polar_discriminant(lowpassed[i3], lowpassed[i3+1], lowpassed[i3-2], lowpassed[i3-1]);
result_demod[i3/2] = (int16_t)pcm;
}
pre_r = lowpassed[lp_len - 2];
pre_j = lowpassed[lp_len - 1];
result_demod_len = lp_len/2;
// low pass real //rate = rate_out = 2M
// Fractional resampler (rate_out -> rate_out2): accumulate and emit an
// average every fast/slow input samples.
int i4=0, i5=0;
int fast = rate_out;
int slow = rate_out2;
while (i4 < result_demod_len)
{
now_lpr += result_demod[i4];
i4++;
prev_lpr_index += slow;
if (prev_lpr_index < fast)
{
continue;
}
result_demod[i5] = (int16_t)(now_lpr / (fast/slow));
prev_lpr_index -= fast;
now_lpr = 0;
i5 += 1;
}
result_demod_len = i5;
//rate = rate_out2 = 8k
fprintf(stderr, "result_demod_len: %d\n", result_demod_len);
// Band-pass + FM luminance decode at audio rate; once a full image worth of
// samples has accumulated, render it.
for (int s = 0; s < result_demod_len; s++)
{
double sampleDouble, sample_filtered;
sampleDouble = ((double)result_demod[s]) / 32768.0;
sample_filtered = bandPassSSTVSignal(sampleDouble);
luminance[luminance_count] = fmDemodulateLuminance(sample_filtered);
luminance_count++;
if (luminance_count >= int(imageLengthInSamples))
{
luminance_count = 0;
receiveImage();
}
}
return 0;
}
/*
 * Configure the HackRF (120 MHz, 1.2 Msps = 6 x 200 kHz), start streaming
 * into rx_callback(), then idle until the stream stops or SIGINT arrives.
 *
 * Fixes relative to the original: the return value of hackrf_start_rx()
 * was silently ignored; rate_in (an int) was printed with %u; the unused
 * pcm_name local and the immediately-overwritten `file = stdin` were removed.
 */
int main(int argc, char **argv)
{
    frame = Mat::zeros(256, 320, CV_8UC3);
    signal(SIGINT, &sigint_callback_handler);
    int res;
    freq = 120e6;
    rate_in = 200000;
    rate_out = 200000;
    rate_out2 = 8000;
    file = stdout; // kept for parity with the original; `file` is otherwise unused
    downsample = 6;
    hardware_sample_rate = (uint32_t)(downsample * rate_in);
    res = hackrf_init();
    if( res != HACKRF_SUCCESS )
    {
        cout << "hackrf_init() failed" << endl;
        return EXIT_FAILURE;
    }
    res = hackrf_open(&device);
    if( res != HACKRF_SUCCESS )
    {
        cout << "hackrf_open() failed" << endl;
        return EXIT_FAILURE;
    }
    res = hackrf_set_lna_gain(device, 40);
    if( res != HACKRF_SUCCESS )
    {
        cout << "hackrf_set_lna_gain() failed" << endl;
        return EXIT_FAILURE;
    }
    res = hackrf_set_vga_gain(device, 26);
    if( res != HACKRF_SUCCESS )
    {
        cout << "hackrf_set_vga_gain() failed" << endl;
        return EXIT_FAILURE;
    }
    /* Set the frequency */
    res = hackrf_set_freq(device, freq);
    if( res != HACKRF_SUCCESS )
    {
        cout << "hackrf_set_freq() failed" << endl;
        return EXIT_FAILURE;
    }
    fprintf(stderr, "Oversampling input by: %ix.\n", downsample);
    /* Set the sample rate */
    res = hackrf_set_sample_rate(device, hardware_sample_rate);
    if( res != HACKRF_SUCCESS )
    {
        cout << "hackrf_set_sample_rate() failed" << endl;
        return EXIT_FAILURE;
    }
    res = hackrf_set_baseband_filter_bandwidth(device, hardware_sample_rate);
    if( res != HACKRF_SUCCESS )
    {
        cout << "hackrf_baseband_filter_bandwidth_set() failed" << endl;
        return EXIT_FAILURE;
    }
    fprintf(stderr, "Output at %d Hz.\n", rate_in);
    usleep(100000);
    res = hackrf_start_rx(device, rx_callback, NULL);
    if( res != HACKRF_SUCCESS )
    {
        // BUG FIX: previously this result was ignored and the program would
        // spin forever if streaming never started.
        cout << "hackrf_start_rx() failed" << endl;
        hackrf_close(device);
        hackrf_exit();
        return EXIT_FAILURE;
    }
    while ((hackrf_is_streaming(device) == HACKRF_TRUE) && (do_exit == false))
    {
        usleep(100000);
    }
    if (do_exit)
    {
        fprintf(stderr, "\nUser cancel, exiting...\n");
    }
    else
    {
        fprintf(stderr, "\nLibrary error, exiting...\n");
    }
    res = hackrf_close(device);
    if(res != HACKRF_SUCCESS)
    {
        cout << "hackrf_close() failed" << endl;
    }
    else
    {
        cout << "hackrf_close() done" << endl;
    }
    hackrf_exit();
    cout << "hackrf_exit() done" << endl;
    return 0;
}
g++ decode.cpp -o decode -lhackrf -pthread -lm `pkg-config --cflags --libs opencv`
上面这个是收完一个图片才显示,画面质量没问题,下面这个是收一行显示一行,但是会错位,估计线程锁没搞好。
/* NOTE(review): the forum software stripped the <...> names from every
 * #include; reconstructed from the identifiers used below -- verify
 * against the original source. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <math.h>
#include <signal.h>
#include <unistd.h>
#include <time.h>
#include <pthread.h>
#include <iostream>
#include <libhackrf/hackrf.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <cmath>
using namespace cv;
// Leftover from an earlier ALSA-based build; never used here.
struct pcm *pcm;
// Destination image: 256 scan lines x 320 pixels, 8-bit BGR.
Mat frame;
// Audio-domain sample rate (Hz) after the final resampling stage.
int SAMPLERATE = 8000;
double oneBitSampleCount; // unused in this build
#define MAXIMUM_BUF_LENGTH (16 * 16384)
using namespace std;
// Set by the SIGINT handler, polled by the main loop.
static volatile bool do_exit = false;
hackrf_device *device;
uint32_t freq;                 // tuner frequency in Hz
uint32_t hardware_sample_rate; // ADC rate = downsample * rate_in
// Raw interleaved I/Q widened to 16 bits (unsigned storage, signed bits).
uint16_t buf16[MAXIMUM_BUF_LENGTH];
int16_t lowpassed[MAXIMUM_BUF_LENGTH]; // decimated I/Q working buffer
int lp_len;
int rate_in;   // I/Q rate after the boxcar decimator (Hz)
int rate_out;  // demodulated rate before audio resampling (Hz)
int rate_out2; // final audio rate (Hz)
int16_t result_demod[MAXIMUM_BUF_LENGTH]; // FM-demodulated samples
// In this build only one scan line's worth of entries is used at a time;
// shared between rx_callback (producer) and main (consumer) under `mutex`.
int luminance[914321];
int luminance_count;
int result_demod_len;
int now_r, now_j;   // boxcar decimator accumulators (I and Q)
int pre_r, pre_j;   // last I/Q pair of the previous buffer, for the demod seam
int prev_index;     // decimator phase counter
int now_lpr;        // audio resampler accumulator
int prev_lpr_index; // audio resampler phase counter
int downsample;     // hardware_sample_rate / rate_in ratio
// Producer raises this when a full scan line is buffered; consumer clears it.
bool bufferIsFull = false;
// Guards luminance[], luminance_count and bufferIsFull.
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
// Round half away from zero; shadows std::round (see note in the first build).
double round(double r)
{
return (r > 0.0) ? floor(r + 0.5) : ceil(r - 0.5);
}
// --- DSP helpers (identical to the full-image build) -----------------------
// 4th-order IIR band-pass isolating the SSTV audio band; xvBPV/yvBPV keep the
// input/output history.
double xvBPV[5];
double yvBPV[5];
double bandPassSSTVSignal(double sampleIn)
{
xvBPV[0] = xvBPV[1]; xvBPV[1] = xvBPV[2];
xvBPV[2] = xvBPV[3]; xvBPV[3] = xvBPV[4];
xvBPV[4] = sampleIn / 7.627348301;
yvBPV[0] = yvBPV[1]; yvBPV[1] = yvBPV[2];
yvBPV[2] = yvBPV[3]; yvBPV[3] = yvBPV[4];
yvBPV[4] = (xvBPV[0]+xvBPV[4])-2*xvBPV[2]+(-0.2722149379*yvBPV[0])+(0.3385637917*yvBPV[1])+(-0.8864523063*yvBPV[2])+(0.7199258671*yvBPV[3]);
return yvBPV[4];
}
// 51-tap FIR low-pass kernel shared by both quadrature arms.
double FIRLPCoeffs[51] = { +0.0001082461, +0.0034041195, +0.0063570207, +0.0078081648,
+0.0060550614, -0.0002142384, -0.0104500335, -0.0211855480,
-0.0264527776, -0.0201269304, +0.0004419626, +0.0312014771,
+0.0606261038, +0.0727491887, +0.0537028370, -0.0004362161,
-0.0779387981, -0.1511168919, -0.1829049634, -0.1390189257,
-0.0017097774, +0.2201896764, +0.4894395006, +0.7485289338,
+0.9357596142, +1.0040320616, +0.9357596142, +0.7485289338,
+0.4894395006, +0.2201896764, -0.0017097774, -0.1390189257,
-0.1829049634, -0.1511168919, -0.0779387981, -0.0004362161,
+0.0537028370, +0.0727491887, +0.0606261038, +0.0312014771,
+0.0004419626, -0.0201269304, -0.0264527776, -0.0211855480,
-0.0104500335, -0.0002142384, +0.0060550614, +0.0078081648,
+0.0063570207, +0.0034041195, +0.0001082461,
};
// Delay line + FIR low-pass for the in-phase arm.
double xvFIRLP1[51];
double FIRLowPass1(double sampleIn)
{
for (int i = 0; i < 50; i++)
xvFIRLP1[i] = xvFIRLP1[i+1];
xvFIRLP1[50] = sampleIn / 5.013665674;
double sum = 0;
for (int i = 0; i <= 50; i++)
sum += (FIRLPCoeffs[i] * xvFIRLP1[i]);
return sum;
}
// Delay line + FIR low-pass for the quadrature arm (same kernel).
double xvFIRLP2[51];
double FIRLowPass2(double sampleIn)
{
for (int i = 0; i < 50; i++)
xvFIRLP2[i] = xvFIRLP2[i+1];
xvFIRLP2[50] = sampleIn / 5.013665674;
double sum = 0;
for (int i = 0; i <= 50; i++)
sum += (FIRLPCoeffs[i] * xvFIRLP2[i]);
return sum;
}
// moving average
// Incremental moving-sum smoothers. The buffers have nine slots but the
// subtracted element is the post-shift head, so the effective window is
// eight samples.
double xvMA1[9];
double yvMA1prev = 0;
double noiseReductionFilter1(double sampleIn)
{
for (int i = 0; i < 8; i++)
xvMA1[i] = xvMA1[i+1];
xvMA1[8] = sampleIn;
yvMA1prev = yvMA1prev+xvMA1[8]-xvMA1[0];
return yvMA1prev;
}
double xvMA2[9];
double yvMA2prev = 0;
double noiseReductionFilter2(double sampleIn)
{
for (int i = 0; i < 8; i++)
xvMA2[i] = xvMA2[i+1];
xvMA2[8] = sampleIn;
yvMA2prev = yvMA2prev+xvMA2[8]-xvMA2[0];
return yvMA2prev;
}
// Oscillator/detector state for fmDemodulateLuminance().
double oscPhase = 0;
double realPartPrev = 0;
double imaginaryPartPrev = 0;
// FM-demodulate one band-passed sample into 0..255 luminance: mix with a
// 2 kHz quadrature oscillator, low-pass and smooth each arm, approximate the
// phase step by the normalized cross product of consecutive I/Q vectors,
// then map the deviation range onto 0..255 (empirical constants).
int fmDemodulateLuminance(double sample)
{
oscPhase += (2 * M_PI * 2000) / SAMPLERATE;
double realPart = cos(oscPhase) * sample;
double imaginaryPart = sin(oscPhase) * sample;
if (oscPhase >= 2 * M_PI)
oscPhase -= 2 * M_PI;
realPart = FIRLowPass1(realPart);
imaginaryPart = FIRLowPass2(imaginaryPart);
realPart = noiseReductionFilter1(realPart);
imaginaryPart = noiseReductionFilter2(imaginaryPart);
sample = (imaginaryPart*realPartPrev-realPart*imaginaryPartPrev)/(realPart*realPart+imaginaryPart*imaginaryPart);
realPartPrev = realPart;
imaginaryPartPrev = imaginaryPart;
sample += 0.2335; // bring the value above 0
// Note: this local `luminance` shadows the global luminance[] array.
int luminance = (int)round((sample/0.617)*255);
luminance = 255-luminance;
if (luminance > 255)
luminance = 255;
if (luminance < 0)
luminance = 0;
return luminance;
}
// Scan-line timing constants (seconds); appear to match the Martin M1 SSTV
// mode (320 px per channel, channels in G, B, R order) -- TODO confirm.
double pixelLengthInS = 0.0004576;
double syncLengthInS = 0.004862;
double porchLengthInS = 0.000572;
double separatorLengthInS = 0.000572;
double channelLengthInS = pixelLengthInS*320;
double channelGStartInS = syncLengthInS + porchLengthInS;
double channelBStartInS = channelGStartInS + channelLengthInS + separatorLengthInS;
double channelRStartInS = channelBStartInS + channelLengthInS + separatorLengthInS;
double lineLengthInS = syncLengthInS + porchLengthInS + channelLengthInS + separatorLengthInS + channelLengthInS + separatorLengthInS + channelLengthInS + separatorLengthInS;
// Total audio samples for a complete 256-line image.
double imageLengthInSamples = (lineLengthInS*256)*SAMPLERATE;
// End-of-image hook. In this line-at-a-time build the frame is drawn
// incrementally in main(), so completing an image only logs a notice.
void receiveImage()
{
    fputs("Finished rx.\n", stderr);
}
// SIGINT handler: log and raise the volatile shutdown flag polled by the
// main loop. (iostream is not async-signal-safe; tolerated here since the
// handler runs only once at shutdown.)
void sigint_callback_handler(int signum)
{
    (void)signum;
    std::cout << "Caught signal" << std::endl;
    do_exit = true;
}
// Complex multiply: (ar + j*aj) * (br + j*bj) -> (*cr, *cj).
void multiply(int ar, int aj, int br, int bj, int *cr, int *cj)
{
    const int re = ar * br - aj * bj;
    const int im = aj * br + ar * bj;
    *cr = re;
    *cj = im;
}
// Phase difference between two consecutive I/Q samples, scaled so pi maps
// to 1<<14; multiply by the conjugate of the previous sample, then atan2.
int polar_discriminant(int ar, int aj, int br, int bj)
{
    int cr, cj;
    multiply(ar, aj, br, -bj, &cr, &cj);
    const double angle = atan2((double)cj, (double)cr);
    return (int)(angle / 3.14159 * (1 << 14));
}
// HackRF receive callback (libhackrf USB thread). Same DSP chain as the
// full-image build, but luminance samples are pushed into a one-scan-line
// shared buffer under `mutex`; main() consumes each completed line.
// NOTE(review): if main() has not drained a line before luminance_count
// wraps, the line is overwritten -- a likely cause of the reported skew.
int rx_callback(hackrf_transfer* transfer)
{
for (int i = 0; i < transfer->valid_length; i++)
{
// Reinterpret the unsigned byte as signed and offset by one (rtl_fm style).
double sample = (int8_t)(transfer->buffer[i]) + 1;
buf16[i] = (int16_t)sample; //s->buf16[i] = (int16_t)buf[i] - 127; s->buf16[i] -127~128 uint16_t, unsigned for negative?
}
memcpy(lowpassed, buf16, 2*transfer->valid_length);
lp_len = transfer->valid_length;
//low pass //rate = hardware_sample_rate = 6*2M
// Boxcar decimator: sum `downsample` consecutive I/Q pairs in place.
int i=0, i2=0;
while (i < lp_len)
{
now_r += lowpassed[i];
now_j += lowpassed[i+1];
i += 2;
prev_index++;
if (prev_index < downsample)
{
continue;
}
lowpassed[i2] = now_r;
lowpassed[i2+1] = now_j;
prev_index = 0;
now_r = 0;
now_j = 0;
i2 += 2;
}
lp_len = i2;
//fm demod //rate = rate_in = 2M
// Polar discriminator between consecutive I/Q pairs; pre_r/pre_j carry the
// last pair across buffer boundaries.
int i3, pcm;
pcm = polar_discriminant(lowpassed[0], lowpassed[1], pre_r, pre_j);
result_demod[0] = (int16_t)pcm;
for (i3 = 2; i3 < (lp_len-1); i3 += 2)
{
pcm = polar_discriminant(lowpassed[i3], lowpassed[i3+1], lowpassed[i3-2], lowpassed[i3-1]);
result_demod[i3/2] = (int16_t)pcm;
}
pre_r = lowpassed[lp_len - 2];
pre_j = lowpassed[lp_len - 1];
result_demod_len = lp_len/2;
// low pass real //rate = rate_out = 2M
// Fractional resampler (rate_out -> rate_out2) with averaging.
int i4=0, i5=0;
int fast = rate_out;
int slow = rate_out2;
while (i4 < result_demod_len)
{
now_lpr += result_demod[i4];
i4++;
prev_lpr_index += slow;
if (prev_lpr_index < fast)
{
continue;
}
result_demod[i5] = (int16_t)(now_lpr / (fast/slow));
prev_lpr_index -= fast;
now_lpr = 0;
i5 += 1;
}
result_demod_len = i5;
//rate = rate_out2 = 8k
// Band-pass + luminance decode; the shared line buffer is filled under the
// mutex, one sample per lock acquisition.
for (int s = 0; s < result_demod_len; s++)
{
double sampleDouble, sample_filtered;
sampleDouble = ((double)result_demod[s]) / 32768.0;
sample_filtered = bandPassSSTVSignal(sampleDouble);
pthread_mutex_lock(&mutex);
luminance[luminance_count] = fmDemodulateLuminance(sample_filtered);
luminance_count++;
if (luminance_count >= int( lineLengthInS*SAMPLERATE))
{
// A full scan line is ready; signal main() and start refilling from 0.
bufferIsFull = true;
luminance_count = 0;
}
pthread_mutex_unlock(&mutex);
}
return 0;
}
int main(int argc, char **argv)
{
frame = Mat::zeros(256, 320, CV_8UC3);
//cout << "one line samples:" << lineLengthInS*SAMPLERATE << "one line takes seconds: " << lineLengthInS<< endl;
signal(SIGINT, &sigint_callback_handler);
int res;
freq = 120e6;
rate_in = 200000;
rate_out = 200000;
rate_out2 = 8000;
downsample = 6;
hardware_sample_rate = (uint32_t)(downsample * rate_in);
res = hackrf_init();
if( res != HACKRF_SUCCESS )
{
cout << "hackrf_init() failed" << endl;
return EXIT_FAILURE;
}
res = hackrf_open(&device);
if( res != HACKRF_SUCCESS )
{
cout << "hackrf_open() failed" << endl;
return EXIT_FAILURE;
}
res = hackrf_set_lna_gain(device, 40);
if( res != HACKRF_SUCCESS )
{
cout << "hackrf_set_lna_gain() failed" << endl;
return EXIT_FAILURE;
}
res = hackrf_set_vga_gain(device, 26);
if( res != HACKRF_SUCCESS )
{
cout << "hackrf_set_vga_gain() failed" << endl;
return EXIT_FAILURE;
}
/* Set the frequency */
res = hackrf_set_freq(device, freq);
if( res != HACKRF_SUCCESS )
{
cout << "hackrf_set_freq() failed" << endl;
return EXIT_FAILURE;
}
fprintf(stderr, "Oversampling input by: %ix.\n", downsample);
/* Set the sample rate */
res = hackrf_set_sample_rate(device, hardware_sample_rate);
if( res != HACKRF_SUCCESS )
{
cout << "hackrf_set_sample_rate() failed" << endl;
return EXIT_FAILURE;
}
res = hackrf_set_baseband_filter_bandwidth(device, hardware_sample_rate);
if( res != HACKRF_SUCCESS )
{
cout << "hackrf_baseband_filter_bandwidth_set() failed" << endl;
return EXIT_FAILURE;
}
fprintf(stderr, "Output at %u Hz.\n", rate_in);
usleep(100000);
res = hackrf_start_rx(device, rx_callback, NULL);
int y = 0;
while ((hackrf_is_streaming(device) == HACKRF_TRUE) && (do_exit == false))
{
if (bufferIsFull)
{
pthread_mutex_lock(&mutex);
for (int s = 0; s < int(lineLengthInS*SAMPLERATE); s++)
{
double t = ((double)s)/((double)SAMPLERATE);
int lum = luminance[s];
if ((t >= channelGStartInS && t < channelGStartInS + channelLengthInS) ||
(t >= channelBStartInS && t < channelBStartInS + channelLengthInS) ||
(t >= channelRStartInS && t < channelRStartInS + channelLengthInS) )
{
if (t >= channelGStartInS && t < channelGStartInS + channelLengthInS)
{
int x = (int)floor(((t-channelGStartInS)/channelLengthInS)*320);
frame.at(y, x)[1] = lum;
}
if (t >= channelBStartInS && t < channelBStartInS + channelLengthInS)
{
int x = (int)floor(((t-channelBStartInS)/channelLengthInS)*320);
frame.at(y, x)[0] = lum;
}
if (t >= channelRStartInS && t < channelRStartInS + channelLengthInS)
{
int x = (int)floor(((t-channelRStartInS)/channelLengthInS)*320);
frame.at(y, x)[2] = lum;
}
}
}
y = y + 1;
if (y >=256) y = 0;
bufferIsFull = false;
pthread_mutex_unlock(&mutex);
imshow("frame", frame);
if (waitKey(5) == 'q')
{
do_exit = true;
break;
}
}
}
if (do_exit)
{
fprintf(stderr, "\nUser cancel, exiting...\n");
}
else
{
fprintf(stderr, "\nLibrary error, exiting...\n");
}
res = hackrf_close(device);
if(res != HACKRF_SUCCESS)
{
cout << "hackrf_close() failed" << endl;
}
else
{
cout << "hackrf_close() done" << endl;
}
hackrf_exit();
cout << "hackrf_exit() done" << endl;
return 0;
}
接下去的工作就是参考蓝牙和nrf解调那样,把不同函数合并到同一个解调函数里,以便往portapack上搬了。
合并后的代码如下:
/* NOTE(review): the forum software stripped the <...> names from every
 * #include; reconstructed from the identifiers used below -- verify
 * against the original source. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <math.h>
#include <signal.h>
#include <unistd.h>
#include <time.h>
#include <pthread.h>
#include <iostream>
#include <libhackrf/hackrf.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <cmath>
using namespace cv;
// Leftover from an earlier ALSA-based build; never used here.
struct pcm *pcm;
// Destination image: 256 scan lines x 320 pixels, 8-bit BGR.
Mat frame;
// Audio-domain sample rate (Hz) after the final resampling stage.
int SAMPLERATE = 8000;
double oneBitSampleCount; // unused in this build
#define MAXIMUM_BUF_LENGTH (16 * 16384)
using namespace std;
// Set by the SIGINT handler, polled by the main loop.
static volatile bool do_exit = false;
hackrf_device *device;
uint32_t freq;                 // tuner frequency in Hz
uint32_t hardware_sample_rate; // ADC rate = downsample * rate_in
// Raw interleaved I/Q widened to 16 bits (unsigned storage, signed bits).
uint16_t buf16[MAXIMUM_BUF_LENGTH];
int16_t lowpassed[MAXIMUM_BUF_LENGTH]; // decimated I/Q working buffer
int lp_len;
int rate_in;   // I/Q rate after the boxcar decimator (Hz)
int rate_out;  // demodulated rate before audio resampling (Hz)
int rate_out2; // final audio rate (Hz)
int16_t result_demod[MAXIMUM_BUF_LENGTH]; // FM-demodulated samples
// One scan line's worth of entries used at a time; shared between
// rx_callback (producer) and main (consumer) under `mutex`.
int luminance[914321];
int luminance_count;
int result_demod_len;
int now_r, now_j;   // boxcar decimator accumulators (I and Q)
int pre_r, pre_j;   // last I/Q pair of the previous buffer, for the demod seam
int prev_index;     // decimator phase counter
int now_lpr;        // audio resampler accumulator
int prev_lpr_index; // audio resampler phase counter
int downsample;     // hardware_sample_rate / rate_in ratio
// Producer raises this when a full scan line is buffered; consumer clears it.
bool bufferIsFull = false;
// Guards luminance[], luminance_count and bufferIsFull.
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
// Round half away from zero; shadows std::round (see note in the first build).
double round(double r)
{
return (r > 0.0) ? floor(r + 0.5) : ceil(r - 0.5);
}
// Band-pass filter history (inputs/outputs), used inline in rx_callback.
double xvBPV[5];
double yvBPV[5];
// 51-tap FIR low-pass kernel shared by both quadrature arms.
double FIRLPCoeffs[51] = { +0.0001082461, +0.0034041195, +0.0063570207, +0.0078081648,
+0.0060550614, -0.0002142384, -0.0104500335, -0.0211855480,
-0.0264527776, -0.0201269304, +0.0004419626, +0.0312014771,
+0.0606261038, +0.0727491887, +0.0537028370, -0.0004362161,
-0.0779387981, -0.1511168919, -0.1829049634, -0.1390189257,
-0.0017097774, +0.2201896764, +0.4894395006, +0.7485289338,
+0.9357596142, +1.0040320616, +0.9357596142, +0.7485289338,
+0.4894395006, +0.2201896764, -0.0017097774, -0.1390189257,
-0.1829049634, -0.1511168919, -0.0779387981, -0.0004362161,
+0.0537028370, +0.0727491887, +0.0606261038, +0.0312014771,
+0.0004419626, -0.0201269304, -0.0264527776, -0.0211855480,
-0.0104500335, -0.0002142384, +0.0060550614, +0.0078081648,
+0.0063570207, +0.0034041195, +0.0001082461,
};
// FIR delay lines (in-phase / quadrature arms).
double xvFIRLP1[51];
double xvFIRLP2[51];
// Moving-sum smoother state for each arm.
double xvMA1[9];
double yvMA1prev = 0;
double xvMA2[9];
double yvMA2prev = 0;
// Product-detector state: local-oscillator phase and previous I/Q sample.
double oscPhase = 0;
double realPartPrev = 0;
double imaginaryPartPrev = 0;
// Scan-line timing (seconds); appears to match the Martin M1 SSTV mode
// (320 px per channel, channels in G, B, R order) -- TODO confirm.
double pixelLengthInS = 0.0004576;
double syncLengthInS = 0.004862;
double porchLengthInS = 0.000572;
double separatorLengthInS = 0.000572;
double channelLengthInS = pixelLengthInS*320;
double channelGStartInS = syncLengthInS + porchLengthInS;
double channelBStartInS = channelGStartInS + channelLengthInS + separatorLengthInS;
double channelRStartInS = channelBStartInS + channelLengthInS + separatorLengthInS;
double lineLengthInS = syncLengthInS + porchLengthInS + channelLengthInS + separatorLengthInS + channelLengthInS + separatorLengthInS + channelLengthInS + separatorLengthInS;
// Total audio samples for a complete 256-line image.
double imageLengthInSamples = (lineLengthInS*256)*SAMPLERATE;
// SIGINT handler: log and raise the volatile shutdown flag polled by the
// main loop. (iostream is not async-signal-safe; tolerated for a one-shot
// shutdown path.)
void sigint_callback_handler(int signum)
{
    (void)signum;
    std::cout << "Caught signal" << std::endl;
    do_exit = true;
}
// Complex multiply: (ar + j*aj) * (br + j*bj) -> (*cr, *cj).
void multiply(int ar, int aj, int br, int bj, int *cr, int *cj)
{
    const int re = ar * br - aj * bj;
    const int im = aj * br + ar * bj;
    *cr = re;
    *cj = im;
}
// Phase difference between two consecutive I/Q samples, scaled so pi maps
// to 1<<14; multiply by the conjugate of the previous sample, then atan2.
int polar_discriminant(int ar, int aj, int br, int bj)
{
    int cr, cj;
    multiply(ar, aj, br, -bj, &cr, &cj);
    const double angle = atan2((double)cj, (double)cr);
    return (int)(angle / 3.14159 * (1 << 14));
}
// HackRF receive callback with the whole demodulator merged inline (in
// preparation for a PortaPack port): 8-bit I/Q -> boxcar decimation -> polar
// FM discriminator -> resample to audio rate -> IIR band-pass -> quadrature
// product detector -> luminance, pushed line-by-line into the shared buffer.
// The statement order reproduces the helper functions of the earlier builds
// exactly; do not reorder without re-verifying against them.
int rx_callback(hackrf_transfer* transfer)
{
for (int i = 0; i < transfer->valid_length; i++)
{
// Reinterpret the unsigned byte as signed and offset by one (rtl_fm style).
double sample = (int8_t)(transfer->buffer[i]) + 1;
buf16[i] = (int16_t)sample; //s->buf16[i] = (int16_t)buf[i] - 127; s->buf16[i] -127~128 uint16_t, unsigned for negative?
}
memcpy(lowpassed, buf16, 2*transfer->valid_length);
lp_len = transfer->valid_length;
//low pass //rate = hardware_sample_rate = 6*2M
// Boxcar decimator: sum `downsample` consecutive I/Q pairs in place.
int i=0, i2=0;
while (i < lp_len)
{
now_r += lowpassed[i];
now_j += lowpassed[i+1];
i += 2;
prev_index++;
if (prev_index < downsample)
{
continue;
}
lowpassed[i2] = now_r;
lowpassed[i2+1] = now_j;
prev_index = 0;
now_r = 0;
now_j = 0;
i2 += 2;
}
lp_len = i2;
//fm demod //rate = rate_in = 2M
// Polar discriminator between consecutive I/Q pairs; pre_r/pre_j carry the
// last pair across buffer boundaries.
int i3, pcm;
pcm = polar_discriminant(lowpassed[0], lowpassed[1], pre_r, pre_j);
result_demod[0] = (int16_t)pcm;
for (i3 = 2; i3 < (lp_len-1); i3 += 2)
{
pcm = polar_discriminant(lowpassed[i3], lowpassed[i3+1], lowpassed[i3-2], lowpassed[i3-1]);
result_demod[i3/2] = (int16_t)pcm;
}
pre_r = lowpassed[lp_len - 2];
pre_j = lowpassed[lp_len - 1];
result_demod_len = lp_len/2;
// low pass real //rate = rate_out = 2M
// Fractional resampler (rate_out -> rate_out2) with averaging.
int i4=0, i5=0;
int fast = rate_out;
int slow = rate_out2;
while (i4 < result_demod_len)
{
now_lpr += result_demod[i4];
i4++;
prev_lpr_index += slow;
if (prev_lpr_index < fast)
{
continue;
}
result_demod[i5] = (int16_t)(now_lpr / (fast/slow));
prev_lpr_index -= fast;
now_lpr = 0;
i5 += 1;
}
result_demod_len = i5;
//rate = rate_out2 = 8k
for (int s = 0; s < result_demod_len; s++)
{
double sampleDouble, sample_filtered;
sampleDouble = ((double)result_demod[s]) / 32768.0;
// Inlined bandPassSSTVSignal(): 4th-order IIR band-pass.
xvBPV[0] = xvBPV[1]; xvBPV[1] = xvBPV[2];
xvBPV[2] = xvBPV[3]; xvBPV[3] = xvBPV[4];
xvBPV[4] = sampleDouble / 7.627348301;
yvBPV[0] = yvBPV[1]; yvBPV[1] = yvBPV[2];
yvBPV[2] = yvBPV[3]; yvBPV[3] = yvBPV[4];
yvBPV[4] = (xvBPV[0]+xvBPV[4])-2*xvBPV[2]+(-0.2722149379*yvBPV[0])+(0.3385637917*yvBPV[1])+(-0.8864523063*yvBPV[2])+(0.7199258671*yvBPV[3]);
sample_filtered = yvBPV[4];
pthread_mutex_lock(&mutex);
// Inlined fmDemodulateLuminance(): mix with 2 kHz quadrature oscillator...
double sample_result;
oscPhase += (2 * M_PI * 2000) / SAMPLERATE;
double realPart = cos(oscPhase) * sample_filtered;
double imaginaryPart = sin(oscPhase) * sample_filtered;
if (oscPhase >= 2 * M_PI)
oscPhase -= 2 * M_PI;
// ...FIR low-pass each arm (51 taps)...
for (int i = 0; i < 50; i++)
xvFIRLP1[i] = xvFIRLP1[i+1];
xvFIRLP1[50] = realPart / 5.013665674;
double realPart_lp = 0;
for (int i = 0; i <= 50; i++)
realPart_lp += (FIRLPCoeffs[i] * xvFIRLP1[i]);
for (int i = 0; i < 50; i++)
xvFIRLP2[i] = xvFIRLP2[i+1];
xvFIRLP2[50] = imaginaryPart / 5.013665674;
double imaginaryPart_lp = 0;
for (int i = 0; i <= 50; i++)
imaginaryPart_lp += (FIRLPCoeffs[i] * xvFIRLP2[i]);
// ...moving-sum smooth each arm (effective 8-sample window)...
for (int i = 0; i < 8; i++)
xvMA1[i] = xvMA1[i+1];
xvMA1[8] = realPart_lp;
yvMA1prev = yvMA1prev+xvMA1[8]-xvMA1[0];
double realPart_nr = yvMA1prev;
for (int i = 0; i < 8; i++)
xvMA2[i] = xvMA2[i+1];
xvMA2[8] = imaginaryPart_lp;
yvMA2prev = yvMA2prev+xvMA2[8]-xvMA2[0];
double imaginaryPart_nr = yvMA2prev;
// ...phase step ~ cross(prev, cur) / |cur|^2, then map onto 0..255.
sample_result = (imaginaryPart_nr*realPartPrev-realPart_nr*imaginaryPartPrev)/(realPart_nr*realPart_nr+imaginaryPart_nr*imaginaryPart_nr);
realPartPrev = realPart_nr;
imaginaryPartPrev = imaginaryPart_nr;
sample_result += 0.2335;
int luminance_result = (int)round((sample_result/0.617)*255);
luminance_result = 255-luminance_result;
if (luminance_result > 255)
luminance_result = 255;
if (luminance_result < 0)
luminance_result = 0;
luminance[luminance_count] = luminance_result;
luminance_count++;
if (luminance_count >= int( lineLengthInS*SAMPLERATE))
{
// A full scan line is buffered; signal main() and refill from index 0.
bufferIsFull = true;
luminance_count = 0;
}
pthread_mutex_unlock(&mutex);
}
return 0;
}
int main(int argc, char **argv)
{
frame = Mat::zeros(256, 320, CV_8UC3);
//cout << "one line samples:" << lineLengthInS*SAMPLERATE << "one line takes seconds: " << lineLengthInS<< endl;
signal(SIGINT, &sigint_callback_handler);
int res;
freq = 120e6;
rate_in = 200000;
rate_out = 200000;
rate_out2 = 8000;
downsample = 6;
hardware_sample_rate = (uint32_t)(downsample * rate_in);
res = hackrf_init();
if( res != HACKRF_SUCCESS )
{
cout << "hackrf_init() failed" << endl;
return EXIT_FAILURE;
}
res = hackrf_open(&device);
if( res != HACKRF_SUCCESS )
{
cout << "hackrf_open() failed" << endl;
return EXIT_FAILURE;
}
res = hackrf_set_lna_gain(device, 40);
if( res != HACKRF_SUCCESS )
{
cout << "hackrf_set_lna_gain() failed" << endl;
return EXIT_FAILURE;
}
res = hackrf_set_vga_gain(device, 26);
if( res != HACKRF_SUCCESS )
{
cout << "hackrf_set_vga_gain() failed" << endl;
return EXIT_FAILURE;
}
/* Set the frequency */
res = hackrf_set_freq(device, freq);
if( res != HACKRF_SUCCESS )
{
cout << "hackrf_set_freq() failed" << endl;
return EXIT_FAILURE;
}
fprintf(stderr, "Oversampling input by: %ix.\n", downsample);
/* Set the sample rate */
res = hackrf_set_sample_rate(device, hardware_sample_rate);
if( res != HACKRF_SUCCESS )
{
cout << "hackrf_set_sample_rate() failed" << endl;
return EXIT_FAILURE;
}
res = hackrf_set_baseband_filter_bandwidth(device, hardware_sample_rate);
if( res != HACKRF_SUCCESS )
{
cout << "hackrf_baseband_filter_bandwidth_set() failed" << endl;
return EXIT_FAILURE;
}
fprintf(stderr, "Output at %u Hz.\n", rate_in);
usleep(100000);
res = hackrf_start_rx(device, rx_callback, NULL);
int y = 0;
while ((hackrf_is_streaming(device) == HACKRF_TRUE) && (do_exit == false))
{
if (bufferIsFull)
{
pthread_mutex_lock(&mutex);
for (int s = 0; s < int(lineLengthInS*SAMPLERATE); s++)
{
double t = ((double)s)/((double)SAMPLERATE);
int lum = luminance[s];
if ((t >= channelGStartInS && t < channelGStartInS + channelLengthInS) ||
(t >= channelBStartInS && t < channelBStartInS + channelLengthInS) ||
(t >= channelRStartInS && t < channelRStartInS + channelLengthInS) )
{
if (t >= channelGStartInS && t < channelGStartInS + channelLengthInS)
{
int x = (int)floor(((t-channelGStartInS)/channelLengthInS)*320);
frame.at(y, x)[1] = lum;
}
if (t >= channelBStartInS && t < channelBStartInS + channelLengthInS)
{
int x = (int)floor(((t-channelBStartInS)/channelLengthInS)*320);
frame.at(y, x)[0] = lum;
}
if (t >= channelRStartInS && t < channelRStartInS + channelLengthInS)
{
int x = (int)floor(((t-channelRStartInS)/channelLengthInS)*320);
frame.at(y, x)[2] = lum;
}
}
}
y = y + 1;
if (y >=256) y = 0;
bufferIsFull = false;
pthread_mutex_unlock(&mutex);
imshow("frame", frame);
if (waitKey(5) == 'q')
{
do_exit = true;
break;
}
}
}
if (do_exit)
{
fprintf(stderr, "\nUser cancel, exiting...\n");
}
else
{
fprintf(stderr, "\nLibrary error, exiting...\n");
}
res = hackrf_close(device);
if(res != HACKRF_SUCCESS)
{
cout << "hackrf_close() failed" << endl;
}
else
{
cout << "hackrf_close() done" << endl;
}
hackrf_exit();
cout << "hackrf_exit() done" << endl;
return 0;
}
portapack显示部分可能会参考一下模拟电视解调和SSTV发射。
我试了几天把c++代码移植到portapack上,发现很难搞,图像完全不对,所以打算暂时放弃这种方法。换成fft解调,因为我发现wfm音频解调里上面的音频fft基本就是我要的东西,也是fm解调后再做了fft,能明显看到sstv信号的峰值左右移动。打算结合之前python的fft解调例子往下做。之前移植的代码放在下面。
#include "sstv_collector.hpp"
#include "dsp_fft.hpp"
#include "utility.hpp"
#include "event_m4.hpp"
#include "portapack_shared_memory.hpp"
#include "event_m4.hpp"
/* NOTE(review): header name stripped by the forum; cos()/sin() below need it. */
#include <cmath>
void SSTvCollector::on_message(const Message* const message) {
switch(message->id) {
case Message::ID::UpdateSpectrum:
update();
break;
case Message::ID::SpectrumStreamingConfig:
set_state(*reinterpret_cast(message));
break;
default:
break;
}
}
// Apply a streaming configuration: Running starts the collector, anything
// else stops it.
void SSTvCollector::set_state(const SpectrumStreamingConfigMessage& message) {
    if( message.mode != SpectrumStreamingConfigMessage::Mode::Running ) {
        stop();
    } else {
        start();
    }
}
void SSTvCollector::start() {
streaming = true;
ChannelSpectrumConfigMessage message { &fifo };
shared_memory.application_queue.push(message);
}
// Halt streaming, then discard anything still queued on the input side of
// the FIFO.
void SSTvCollector::stop() {
    streaming = false;
    fifo.reset_in();
}
// Forward the requested decimation factor to the spectrum decimator.
void SSTvCollector::set_decimation_factor(const size_t decimation_factor) {
    channel_spectrum_decimator.set_factor(decimation_factor);
}
/* TODO: Refactor to register task with idle thread?
 * It's sad that the idle thread has to call all the way back here just to
 * perform the deferred task on the buffer of data we prepared.
 */
// Entry point from the baseband processing thread: record the channel
// filter corner frequencies, then run the block through the decimator,
// which hands each decimated buffer to post_message().
void SSTvCollector::feed(
const buffer_c16_t& channel,
const uint32_t filter_pass_frequency,
const uint32_t filter_stop_frequency
) {
// Called from baseband processing thread.
channel_filter_pass_frequency = filter_pass_frequency;
channel_filter_stop_frequency = filter_stop_frequency;
channel_spectrum_decimator.feed(
channel,
[this](const buffer_c16_t& data) {
this->post_message(data);
}
);
}
/*
template
static typename T::value_type spectrum_window_hamming_3(const T& s, const size_t i) {
static_assert(power_of_two(s.size()), "Array size must be power of 2");
constexpr size_t mask = s.size() - 1;
// Three point Hamming window.
return s[i] * 0.54f + (s[(i-1) & mask] + s[(i+1) & mask]) * -0.23f;
};
const auto corrected_sample = spectrum_window_hamming_3(channel_spectrum, i);
*/
void SSTvCollector::post_message(const buffer_c16_t& data) {
    // Called from baseband processing thread.
    //
    // Runs the SSTV demodulation chain over one decimated buffer:
    // band-pass -> mix with a 2 kHz oscillator -> 51-tap FIR low-pass on
    // both quadrature arms -> 9-tap running average -> quadrature FM
    // discriminator -> luminance mapping.  Every second sample (128 of
    // 256) yields one luminance byte, stored in channel_spectrum for
    // update() to emit.
    //
    // FIX: all of the state below used to be plain uninitialized locals.
    // That (a) reads indeterminate values on the first iterations, which
    // is undefined behavior, and (b) resets every filter at each buffer,
    // destroying the sample-to-sample continuity the IIR/FIR/moving-
    // average chain depends on -- consistent with the "output goes blank"
    // symptom described in the surrounding notes.  `static` keeps the
    // state across calls; per the comment above, this only runs on the
    // baseband thread, so no synchronization is needed.
    static double xvBPV[5] = {};              // band-pass input taps
    static double yvBPV[5] = {};              // band-pass output taps
    static const double FIRLPCoeffs[51] = { +0.0001082461, +0.0034041195, +0.0063570207, +0.0078081648,
        +0.0060550614, -0.0002142384, -0.0104500335, -0.0211855480,
        -0.0264527776, -0.0201269304, +0.0004419626, +0.0312014771,
        +0.0606261038, +0.0727491887, +0.0537028370, -0.0004362161,
        -0.0779387981, -0.1511168919, -0.1829049634, -0.1390189257,
        -0.0017097774, +0.2201896764, +0.4894395006, +0.7485289338,
        +0.9357596142, +1.0040320616, +0.9357596142, +0.7485289338,
        +0.4894395006, +0.2201896764, -0.0017097774, -0.1390189257,
        -0.1829049634, -0.1511168919, -0.0779387981, -0.0004362161,
        +0.0537028370, +0.0727491887, +0.0606261038, +0.0312014771,
        +0.0004419626, -0.0201269304, -0.0264527776, -0.0211855480,
        -0.0104500335, -0.0002142384, +0.0060550614, +0.0078081648,
        +0.0063570207, +0.0034041195, +0.0001082461,
    };
    static double xvFIRLP1[51] = {};          // FIR delay line, real arm
    static double xvFIRLP2[51] = {};          // FIR delay line, imaginary arm
    static double xvMA1[9] = {};              // averaging window, real arm
    static double yvMA1prev = 0;              // running average, real arm
    static double xvMA2[9] = {};              // averaging window, imaginary arm
    static double yvMA2prev = 0;              // running average, imaginary arm
    static double oscPhase = 0;               // 2 kHz mixer phase (radians)
    static double realPartPrev = 0;           // previous discriminator inputs
    static double imaginaryPartPrev = 0;
    if( streaming && !channel_spectrum_request_update ) {
        for(size_t s = 0; s < 256; s += 2) {
            // Normalize the 8-bit I sample to roughly [-1, 1).
            const double sampleDouble = ((double)data.p[s].real()) / 128;
            // 4th-order band-pass: shift histories, then apply recurrence.
            xvBPV[0] = xvBPV[1]; xvBPV[1] = xvBPV[2];
            xvBPV[2] = xvBPV[3]; xvBPV[3] = xvBPV[4];
            xvBPV[4] = sampleDouble / 7.627348301;
            yvBPV[0] = yvBPV[1]; yvBPV[1] = yvBPV[2];
            yvBPV[2] = yvBPV[3]; yvBPV[3] = yvBPV[4];
            yvBPV[4] = (xvBPV[0]+xvBPV[4])-2*xvBPV[2]
                     +(-0.2722149379*yvBPV[0])+(0.3385637917*yvBPV[1])
                     +(-0.8864523063*yvBPV[2])+(0.7199258671*yvBPV[3]);
            const double sample_filtered = yvBPV[4];
            // Mix down with a 2 kHz oscillator (8 kHz rate baked in).
            oscPhase += (2 * 3.14159265358 * 2000) / 8000;
            const double realPart = cos(oscPhase) * sample_filtered;
            const double imaginaryPart = sin(oscPhase) * sample_filtered;
            if (oscPhase >= 2 * 3.14159265358)
                oscPhase -= 2 * 3.14159265358;
            // 51-tap FIR low-pass, real arm.
            for (int i = 0; i < 50; i++)
                xvFIRLP1[i] = xvFIRLP1[i+1];
            xvFIRLP1[50] = realPart / 5.013665674;
            double realPart_lp = 0;
            for (int i = 0; i <= 50; i++)
                realPart_lp += (FIRLPCoeffs[i] * xvFIRLP1[i]);
            // 51-tap FIR low-pass, imaginary arm.
            for (int i = 0; i < 50; i++)
                xvFIRLP2[i] = xvFIRLP2[i+1];
            xvFIRLP2[50] = imaginaryPart / 5.013665674;
            double imaginaryPart_lp = 0;
            for (int i = 0; i <= 50; i++)
                imaginaryPart_lp += (FIRLPCoeffs[i] * xvFIRLP2[i]);
            // Running-sum noise reduction on both arms (kept exactly as
            // the original PC implementation computed it).
            for (int i = 0; i < 8; i++)
                xvMA1[i] = xvMA1[i+1];
            xvMA1[8] = realPart_lp;
            yvMA1prev = yvMA1prev + xvMA1[8] - xvMA1[0];
            const double realPart_nr = yvMA1prev;
            for (int i = 0; i < 8; i++)
                xvMA2[i] = xvMA2[i+1];
            xvMA2[8] = imaginaryPart_lp;
            yvMA2prev = yvMA2prev + xvMA2[8] - xvMA2[0];
            const double imaginaryPart_nr = yvMA2prev;
            // Quadrature FM discriminator.  Guard the zeroed start-up
            // state: 0/0 would produce NaN and blank every pixel after it.
            const double denom = realPart_nr*realPart_nr + imaginaryPart_nr*imaginaryPart_nr;
            double sample_result = (denom == 0.0) ? 0.0
                : (imaginaryPart_nr*realPartPrev - realPart_nr*imaginaryPartPrev) / denom;
            realPartPrev = realPart_nr;
            imaginaryPartPrev = imaginaryPart_nr;
            // Map discriminator output to an inverted, clamped 0..255
            // luminance value (offset/scale from the PC implementation).
            sample_result += 0.2335;
            int luminance_result = (int)round((sample_result/0.617)*255);
            luminance_result = 255 - luminance_result;
            if (luminance_result > 255)
                luminance_result = 255;
            if (luminance_result < 0)
                luminance_result = 0;
            channel_spectrum[s/2] = {luminance_result, luminance_result};
        }
        channel_spectrum_sampling_rate = data.sampling_rate;
        channel_spectrum_request_update = true;
        EventDispatcher::events_flag(EVT_MASK_SPECTRUM);
    }
}
void SSTvCollector::update() {
    // Called from idle thread (after EVT_MASK_SPECTRUM is flagged).
    if( streaming && channel_spectrum_request_update ) {
        // The decimated buffer has been demodulated; package the 128
        // precomputed luminance values (held in the real parts of
        // channel_spectrum) into a frame for the application side.
        ChannelSpectrum spectrum;
        spectrum.sampling_rate = channel_spectrum_sampling_rate;
        spectrum.channel_filter_pass_frequency = channel_filter_pass_frequency;
        spectrum.channel_filter_stop_frequency = channel_filter_stop_frequency;
        for(size_t i = 0; i < 128; i++) {
            const unsigned int value = channel_spectrum[i].real();
            spectrum.db[i] = std::min(255U, std::max(0U, value));
        }
        fifo.in(spectrum);
    }
    channel_spectrum_request_update = false;
}
后来我发现fft方法也不太好,因为我仔细看了python用fft解调martin m1图片的过程,如果是采样率是8000,fft计算的长度一般是9个采样点,但是并不是9个连续采样点依次输入就能计算出一个像素点的,有时候会错位一点,如果硬性按照9个采样点,算出一个像素点,得到的图片几乎看不清,只能大概看到一段是彩色的样子。
import signal
import argparse
from sys import argv, exit
import numpy as np
import soundfile
from PIL import Image
from scipy.signal.windows import hann
def handle_sigint(sig, frame):
    """Handle SIGINT (Ctrl-C): report on a fresh line, then exit with status 0."""
    # Both arguments are required by the signal-handler protocol but unused.
    print()
    print("Received interrupt signal, exiting.")
    exit(0)
# Martin M1 timing constants, in seconds (the prose below confirms this
# decoder targets Martin M1 images).
SCAN_TIME = 0.146432    # active video scan of one colour channel
SYNC_PULSE = 0.004862   # horizontal sync pulse
SYNC_PORCH = 0.000572   # porch following the sync pulse
SEP_PULSE = 0.000572    # separator between colour channels
CHAN_TIME = SEP_PULSE + SCAN_TIME   # one channel incl. its separator
# Offsets (from line start) at which each of the three channels begins.
CHAN_OFFSETS = [SYNC_PULSE + SYNC_PORCH]
CHAN_OFFSETS.append(CHAN_OFFSETS[0] + CHAN_TIME)
CHAN_OFFSETS.append(CHAN_OFFSETS[1] + CHAN_TIME)
LINE_TIME = SYNC_PULSE + SYNC_PORCH + 3 * CHAN_TIME   # full scan-line period
PIXEL_TIME = SCAN_TIME / 320   # duration of a single pixel (320 px/line)
def calc_lum(freq):
    """Map an SSTV pixel frequency (1500-2300 Hz) onto a 0-255 luminance byte."""
    # 3.1372549 Hz per luminance step == (2300 - 1500) / 255.
    scaled = (freq - 1500) / 3.1372549
    value = int(round(scaled))
    if value < 0:
        return 0
    if value > 255:
        return 255
    return value
def barycentric_peak_interp(bins, x):
    """Return the interpolated (fractional) index of a spectral peak.

    ``x`` is the index of the largest bin; the peak position is refined
    using its immediate neighbours, clamping at the array edges.
    """
    left = bins[x] if x <= 0 else bins[x - 1]
    right = bins[x] if x + 1 >= len(bins) else bins[x + 1]
    total = right + bins[x] + left
    if total == 0:
        return 0  # degenerate (all-zero neighbourhood) -- erroneous input
    return (right - left) / total + x
def peak_fft_freq(data, rate=None):
    """Finds the peak frequency (Hz) from a section of audio data.

    data: 1-D array of audio samples.
    rate: sample rate in Hz.  Defaults to the module-level ``sample_rate``
          global (set in ``__main__``) so existing one-argument calls keep
          working; passing it explicitly removes the hidden dependency on
          the global and makes the function usable on its own.
    """
    if rate is None:
        rate = sample_rate  # backward-compatible fallback to the global
    # Window to reduce spectral leakage before the FFT.
    windowed_data = data * hann(len(data))
    fft = np.abs(np.fft.rfft(windowed_data))
    # Get index of bin with highest magnitude
    x = np.argmax(fft)
    # Interpolated peak position (fractional bin index)
    peak = barycentric_peak_interp(fft, x)
    # Return frequency in hz
    return peak * rate / len(windowed_data)
if __name__ == "__main__":
    signal.signal(signal.SIGINT, handle_sigint)
    # Load the recording; sample_rate is also read as a module global by
    # peak_fft_freq().
    samples, sample_rate = soundfile.read("mmsstv.wav")
    if samples.ndim > 1: # convert to mono if stereo
        samples = samples.mean(axis=1)
    # Each pixel's frequency is measured over a window ~2.34x the pixel
    # period, centred on the pixel position.
    centre_window_time = (PIXEL_TIME * 2.34) / 2
    pixel_window = round(centre_window_time * 2 * sample_rate)
    #pixel window is the time of the length of a pixel
    #centre_window_time is half of that time
    height = 256
    channels = 3
    width = 320
    # Use list comprehension to init list so we can return data early
    image_data = [[[0 for i in range(width)]
                   for j in range(channels)] for k in range(height)]
    seq_start = 0
    chan = 0
    px = 0
    line = 0
    px_end = 0
    px_pos = 0
    # Walk pixel positions line by line.  Note: the buffer position for
    # each pixel is recomputed from the Martin M1 timing constants, not
    # streamed sequentially -- the prose above explains why naive
    # fixed-stride reads (the commented alternative below) misalign.
    while (px_pos <= len(samples)/2):
        if chan == 0 and px == 0:
            # Start of a new scan line: advance by one full line period.
            seq_start += round(LINE_TIME * sample_rate)
        chan_offset = CHAN_OFFSETS[chan]
        px_pos = round(seq_start + (chan_offset + px *
                       PIXEL_TIME - centre_window_time) *
                       sample_rate)
        # Dominant frequency of the window -> luminance byte.
        freq = peak_fft_freq(samples[px_pos:px_pos + pixel_window])
        image_data[line][chan][px] = calc_lum(freq)
        px = px + 1
        if (px >= width):
            px = 0
            chan = chan + 1
            if (chan >= channels):
                line = line + 1
                chan = 0
                if (line >= height):
                    break
    '''
    i = 0
    chan = 0
    px = 0
    line = 0
    while (i < len(samples)):
        freq = peak_fft_freq(samples[i:i+9])
        #print (i,freq)
        image_data[line][chan][px] = calc_lum(freq)
        #print (line,chan,px)
        i = i + 4
        px = px + 1
        if (px >= width):
            px = 0
            chan = chan + 1
            if (chan >= channels):
                line = line + 1
                chan = 0
                if (line >= height):
                    break
    '''
    image = Image.new("RGB", (width, height))
    pixel_data = image.load()
    print("Drawing image data...")
    for y in range(height):
        for x in range(width):
            # Channels were decoded in transmission order; reorder
            # (chan 2, chan 0, chan 1) into an RGB tuple here.
            pixel = (image_data[y][2][x],image_data[y][0][x],image_data[y][1][x])
            pixel_data[x, y] = pixel
    print("...Done!")
    image.save("result.png")
这样就有一个问题:我只能根据像素点位置去遍历采样缓存,先根据像素点位置算出对应的缓存位置再去取值,而不能直接把缓存依次读出来作为像素点的值。对于Portapack,我只能计算固定长度(好像是32或者256)的FFT值,不能想计算哪一段就算哪一段,而且要从算出来的FFT频谱中得到最高峰的峰值频率也得自己写代码。总之,用Portapack看FM解调后的音频频谱是可以的,但仅限于肉眼观察,不能用它来解码SSTV图。
所以我还是要回到之前的方法。
我一步步绘制各种运算后的变量图,如果什么都不做,时域图形画出来是对的,经过带通以后也能根据我的信号有变化,但是已经不太符合理论预计的样子了,后续再经过变频和低通滤波,我发现画出来的图已经不会根据信号变化动了,再到最后做乘法,图像就是完全空白了。