sub23 = lena_haar(257:512, 257:512);
figure
subplot(4,4,1)
- imshow(sub11, [0, 255])
+ imshow(sub11)
subplot(4,4,2)
- imshow(sub12, [0, 255])
+ imshow(sub12)
subplot(4,4,5)
- imshow(sub13, [0, 255])
+ imshow(sub13)
subplot(4,4,6)
- imshow(sub14, [0, 255])
+ imshow(sub14)
% Now for the bigger subbands we are going to use bigger subplots
subplot(4,4,[3:4 7:8])
- imshow(sub21, [0, 255])
+ imshow(sub21)
subplot(4,4,[9:10 13:14])
- imshow(sub22, [0, 255])
+ imshow(sub22)
subplot(4,4,[11:12 15:16])
- imshow(sub23, [0, 255])
+ imshow(sub23)

% We are going to use the same subplot distribution to show the histogram
% of each sub image
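The histogram plotting itself lies between the two hunks and is not part of this diff; if it reuses the subplot distribution above, each panel would simply plot the histogram of one subband, along the lines of this sketch:

figure
subplot(4,4,1)
histogram(sub11(:))   % same 4x4 layout, one histogram per subband
subplot(4,4,2)
histogram(sub12(:))
subplot(4,4,[3:4 7:8])
histogram(sub21(:))
% ... and so on for sub13, sub14, sub22 and sub23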
% we only have a uniform scalar quantizer, we will use that.
% No quantization for the LL part of the image (sub11)
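quantize_matrix and dequantize_matrix are defined elsewhere in the repository, not in this diff. A minimal sketch of a uniform scalar quantizer that matches the way they are called below (matrix in, target bit depth in, and for dequantization the original bit depth as a third argument) could be the following; the bodies are assumptions, only the call signatures come from the script:

% quantize_matrix.m (sketch, assuming coefficients behave as 8-bit values in [0, 255])
function q = quantize_matrix(m, bits)
    step = 2^(8 - bits);            % width of each of the 2^bits uniform bins
    q = floor(double(m) / step);    % bin index in 0 .. 2^bits - 1
end

% dequantize_matrix.m (sketch, reconstructing each value at the midpoint of its bin)
function m = dequantize_matrix(q, bits, orig_bits)
    step = 2^(orig_bits - bits);
    m = double(q) * step + step/2;
end

The real implementation may treat signed detail coefficients or rounding differently; this only illustrates the uniform scalar quantization the comment refers to.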

-
-
% Need to count all the appearances of each number
entropies = zeros(7,8);
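shannonEntropy is likewise defined outside this diff. Counting the appearances of each value, as the comment above says, and turning the relative frequencies into a first-order entropy could look roughly like this (the body is an assumption, only the name and usage come from the script):

% shannonEntropy.m (sketch): first-order entropy in bits per symbol
function H = shannonEntropy(m)
    symbols = m(:);
    [~, ~, idx] = unique(symbols);   % one index per distinct value
    counts = accumarray(idx, 1);     % appearances of each number
    p = counts / numel(symbols);     % relative frequencies
    H = -sum(p .* log2(p));
end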
- for i = 7:-1:1
-     q_sub11 = quantize_matrix(sub11,8);
-     q_sub12 = quantize_matrix(sub12,i);
-     q_sub13 = quantize_matrix(sub13,i);
-     q_sub14 = quantize_matrix(sub14,i);
-     q_sub21 = quantize_matrix(sub21,i);
-     q_sub22 = quantize_matrix(sub22,i);
-     q_sub23 = quantize_matrix(sub23,i);
-     e_sub11 = shannonEntropy(q_sub11);
-     e_sub12 = shannonEntropy(q_sub12);
-     e_sub13 = shannonEntropy(q_sub13);
-     e_sub14 = shannonEntropy(q_sub14);
-     e_sub21 = shannonEntropy(q_sub21);
-     e_sub22 = shannonEntropy(q_sub22);
-     e_sub23 = shannonEntropy(q_sub23);
-     q_lena = quantize_matrix(lena_haar,i);
-     q_lena(1:128,1:128) = q_sub11;
-     e_lena = shannonEntropy(q_lena);
-     dict = huffmanDict(q_lena);
-     sizeMap = containers.Map('KeyType','double', 'ValueType','any');
-     for row = 1:size(dict,1)
-         keyCell = dict(row,1);
-         valueCell = dict(row,2);
-         code = valueCell{1};
-         key = keyCell{1};
-         sizeMap(key) = size(code,2);
-     end
-     % sizeMap holds the lengths of all the symbols
-     % TODO substitute the symbols by their length and add all the elements
-     % of the matrix, then divide by the numel of the matrix
-     entropies(i,:) = [e_sub11, e_sub12, e_sub13, e_sub14, e_sub21, e_sub22, e_sub23, e_lena];
- end
+ q_sub11 = sub11;
+ q_sub12 = quantize_matrix(sub12,7);
+ q_sub13 = quantize_matrix(sub13,7);
+ q_sub14 = quantize_matrix(sub14,6);
+ q_sub21 = quantize_matrix(sub21,5);
+ q_sub22 = quantize_matrix(sub22,5);
+ q_sub23 = quantize_matrix(sub23,4);
+
+ e_sub11 = shannonEntropy(q_sub11);
+ h_sub11 = numel(huffmanCode(q_sub11))/numel(q_sub11);
+
+ e_sub12 = shannonEntropy(q_sub12);
+ h_sub12 = numel(huffmanCode(q_sub12))/numel(q_sub12);
+
+ e_sub13 = shannonEntropy(q_sub13);
+ h_sub13 = numel(huffmanCode(q_sub13))/numel(q_sub13);
+
+ e_sub14 = shannonEntropy(q_sub14);
+ h_sub14 = numel(huffmanCode(q_sub14))/numel(q_sub14);
+
+ e_sub21 = shannonEntropy(q_sub21);
+ h_sub21 = numel(huffmanCode(q_sub21))/numel(q_sub21);
+
+ e_sub22 = shannonEntropy(q_sub22);
+ h_sub22 = numel(huffmanCode(q_sub22))/numel(q_sub22);
+
+ e_sub23 = shannonEntropy(q_sub23);
+ h_sub23 = numel(huffmanCode(q_sub23))/numel(q_sub23);
+
+ % We need to "stitch" all the pieces of q_lena together
+ q_lena(1:128,1:128) = q_sub11;
+ q_lena(1:128,129:256) = q_sub12;
+ q_lena(129:256,1:128) = q_sub13;
+ q_lena(129:256,129:256) = q_sub14;
+ q_lena(1:256,257:512) = q_sub21;
+ q_lena(257:512,1:256) = q_sub22;
+ q_lena(257:512,257:512) = q_sub23;
+ e_lena = shannonEntropy(q_lena);
+ h_lena = numel(huffmanCode(q_lena))/numel(q_lena); % average Huffman bits per symbol for the full quantized image
+
+ % sizeMap = containers.Map('KeyType','double', 'ValueType','any');
+ % for row = 1:size(dict,1)
+ %     keyCell = dict(row,1);
+ %     valueCell = dict(row,2);
+ %     code = valueCell{1};
+ %     key = keyCell{1};
+ %     sizeMap(key) = size(code,2);
+ % end
+
+ % sizeMap holds the lengths of all the symbols
+ % TODO substitute the symbols by their length and add all the elements
+ % of the matrix, then divide by the numel of the matrix (see the sketch below)
+ entropies = [e_sub11, e_sub12, e_sub13, e_sub14, e_sub21, e_sub22, e_sub23, e_lena;
+              h_sub11, h_sub12, h_sub13, h_sub14, h_sub21, h_sub22, h_sub23, h_lena]';
+
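huffmanCode is another helper that is not part of this diff; judging from how it is used above (numel of its output divided by numel of its input), it appears to return the encoded bit stream. A sketch built on the Communications Toolbox functions huffmandict and huffmanenco might look like this; the body and the toolbox choice are assumptions:

% huffmanCode.m (sketch): Huffman-encode all elements of a matrix
function code = huffmanCode(m)
    symbols = m(:);
    [vals, ~, idx] = unique(symbols);
    p = accumarray(idx, 1) / numel(symbols);   % symbol probabilities
    dict = huffmandict(vals, p);               % Communications Toolbox
    code = huffmanenco(symbols, dict);         % bit stream, one element per bit
end

The average length the TODO above asks for could also be read straight off the dictionary, without materialising the whole bit stream: len = cellfun(@numel, dict(:,2)); avg_bits = sum(p .* len);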
entropies

+ % %%%%%%% SYNTHESIS %%%%%%%%%%%%%
+ dq_lena(1:128,1:128) = q_lena(1:128,1:128);
+ dq_lena(1:128,129:256) = dequantize_matrix(q_lena(1:128,129:256),7,8);
+ dq_lena(129:256,1:128) = dequantize_matrix(q_lena(129:256,1:128),7,8);
+ dq_lena(129:256,129:256) = dequantize_matrix(q_lena(129:256,129:256),6,8);
+ dq_lena(1:256,257:512) = dequantize_matrix(q_lena(1:256,257:512),5,8);
+ dq_lena(257:512,1:256) = dequantize_matrix(q_lena(257:512,1:256),5,8);
+ dq_lena(257:512,257:512) = dequantize_matrix(q_lena(257:512,257:512),4,8);
+
+ synth_lena = haar_reverse_multilevel(dq_lena,2);
+ imshow(synth_lena)
+
+ % %%%%%%% PSNR calculation %%%%%%%%
+ [PSNR,MSE,MAXERR,L2RAT] = measerr(lena_gray_512,synth_lena)
+ % measerr already provides the PSNR but we can calculate it again using the
+ % formula: 10 * log10((255^2)/MSE)
+ PSNR_2 = 10 * log10((255^2)/MSE);
+ % Which gives the exact same result
+ [PSNR, PSNR_2]

+ % END OF FILE