@@ -86,9 +86,11 @@ def build(self, input_shape):
                                             initializer=glorot_normal(seed=self.seed), name="projection_h")
         self.projection_p = self.add_weight(shape=(
             embedding_size, 1), initializer=glorot_normal(seed=self.seed), name="projection_p")
-        self.dropout = tf.keras.layers.Dropout(self.dropout_rate, seed=self.seed)
+        self.dropout = tf.keras.layers.Dropout(
+            self.dropout_rate, seed=self.seed)
 
-        self.tensordot = tf.keras.layers.Lambda(lambda x: tf.tensordot(x[0], x[1], axes=(-1, 0)))
+        self.tensordot = tf.keras.layers.Lambda(
+            lambda x: tf.tensordot(x[0], x[1], axes=(-1, 0)))
 
         # Be sure to call this somewhere!
         super(AFMLayer, self).build(input_shape)
@@ -232,7 +234,8 @@ def build(self, input_shape):
             self.filters.append(self.add_weight(name='filter' + str(i),
                                                 shape=[1, self.field_nums[-1]
                                                        * self.field_nums[0], size],
-                                                dtype=tf.float32, initializer=glorot_uniform(seed=self.seed + i),
+                                                dtype=tf.float32, initializer=glorot_uniform(
+                                                    seed=self.seed + i),
                                                 regularizer=l2(self.l2_reg)))
 
             self.bias.append(self.add_weight(name='bias' + str(i), shape=[size], dtype=tf.float32,
@@ -247,7 +250,8 @@ def build(self, input_shape):
             else:
                 self.field_nums.append(size)
 
-        self.activation_layers = [activation_layer(self.activation) for _ in self.layer_size]
+        self.activation_layers = [activation_layer(
+            self.activation) for _ in self.layer_size]
 
         super(CIN, self).build(input_shape)  # Be sure to call this somewhere!
 
@@ -668,7 +672,8 @@ def build(self, input_shape):
         if self.kernel_type == 'mat':
 
             self.kernel = self.add_weight(shape=(embed_size, num_pairs, embed_size),
-                                          initializer=glorot_uniform(seed=self.seed),
+                                          initializer=glorot_uniform(
+                                              seed=self.seed),
                                           name='kernel')
         elif self.kernel_type == 'vec':
             self.kernel = self.add_weight(shape=(num_pairs, embed_size,), initializer=glorot_uniform(self.seed),
@@ -796,12 +801,15 @@ def build(self, input_shape):
             width = self.kernel_width[i - 1]
             new_filters = self.new_maps[i - 1]
             pooling_width = self.pooling_width[i - 1]
-            conv_output_shape = self._conv_output_shape(pooling_shape, (width, 1))
-            pooling_shape = self._pooling_output_shape(conv_output_shape, (pooling_width, 1))
+            conv_output_shape = self._conv_output_shape(
+                pooling_shape, (width, 1))
+            pooling_shape = self._pooling_output_shape(
+                conv_output_shape, (pooling_width, 1))
             self.conv_layers.append(tf.keras.layers.Conv2D(filters=filters, kernel_size=(width, 1), strides=(1, 1),
                                                            padding='same',
                                                            activation='tanh', use_bias=True))
-            self.pooling_layers.append(tf.keras.layers.MaxPooling2D(pool_size=(pooling_width, 1)))
+            self.pooling_layers.append(
+                tf.keras.layers.MaxPooling2D(pool_size=(pooling_width, 1)))
             self.dense_layers.append(tf.keras.layers.Dense(pooling_shape[1] * embedding_size * new_filters,
                                                            activation='tanh', use_bias=True))
 
@@ -880,3 +888,157 @@ def _pooling_output_shape(self, input_shape, pool_size):
         cols = utils.conv_output_length(cols, pool_size[1], 'valid',
                                         pool_size[1])
         return [input_shape[0], rows, cols, input_shape[3]]
+
+
+class SENETLayer(Layer):
+    """SENETLayer used in FiBiNET.
+
+    Input shape
+        - A list of 3D tensors with shape: ``(batch_size,1,embedding_size)``.
+
+    Output shape
+        - A list of 3D tensors with shape: ``(batch_size,1,embedding_size)``.
+
+    Arguments
+        - **reduction_ratio** : Positive integer, ratio by which the number of
+          fields is compressed in the squeeze step of the attention network.
+
+        - **seed** : A Python integer to use as random seed.
+
+    References
+        - [FiBiNET: Combining Feature Importance and Bilinear feature Interaction for Click-Through Rate Prediction](https://arxiv.org/pdf/1905.09433.pdf) (Tongwen Huang et al.)
+    """
+
+    def __init__(self, reduction_ratio=3, seed=1024, **kwargs):
+        self.reduction_ratio = reduction_ratio
+        self.seed = seed
+        super(SENETLayer, self).__init__(**kwargs)
+
+    def build(self, input_shape):
+
+        if not isinstance(input_shape, list) or len(input_shape) < 2:
+            raise ValueError('A `SENETLayer` layer should be called '
+                             'on a list of at least 2 inputs')
+
+        self.field_size = len(input_shape)
+        self.embedding_size = input_shape[0][-1]
+        reduction_size = max(1, self.field_size // self.reduction_ratio)
+
+        self.W_1 = self.add_weight(shape=(
+            self.field_size, reduction_size), initializer=glorot_normal(seed=self.seed), name="W_1")
+        self.W_2 = self.add_weight(shape=(
+            reduction_size, self.field_size), initializer=glorot_normal(seed=self.seed), name="W_2")
+
+        self.tensordot = tf.keras.layers.Lambda(
+            lambda x: tf.tensordot(x[0], x[1], axes=(-1, 0)))
+
+        # Be sure to call this somewhere!
+        super(SENETLayer, self).build(input_shape)
+
+    def call(self, inputs, training=None, **kwargs):
+
+        if K.ndim(inputs[0]) != 3:
+            raise ValueError(
+                "Unexpected inputs dimensions %d, expect to be 3 dimensions" % (K.ndim(inputs[0])))
+
+        inputs = concat_fun(inputs, axis=1)
+        # Squeeze: summarize each field embedding into one scalar statistic.
+        Z = tf.reduce_mean(inputs, axis=-1)
+
+        # Excitation: two nonlinear projections yield one weight per field.
+        A_1 = tf.nn.relu(self.tensordot([Z, self.W_1]))
+        A_2 = tf.nn.relu(self.tensordot([A_1, self.W_2]))
+        # Re-weight: scale each field embedding by its learned importance.
+        V = tf.multiply(inputs, tf.expand_dims(A_2, axis=2))
+
+        return tf.split(V, self.field_size, axis=1)
+
+    def compute_output_shape(self, input_shape):
+
+        return input_shape
+
+    def compute_mask(self, inputs, mask=None):
+        return [None] * self.field_size
+
+    def get_config(self, ):
+        config = {'reduction_ratio': self.reduction_ratio, 'seed': self.seed}
+        base_config = super(SENETLayer, self).get_config()
+        return dict(list(base_config.items()) + list(config.items()))
+
+
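A minimal usage sketch for the layer added above (an illustration, not part of the commit: `field_embeddings` and the sizes are made up, and it assumes the DeepCTR convention of one `(batch_size, 1, embedding_size)` embedding tensor per field):

```python
import tensorflow as tf

field_size, embedding_size = 4, 8
# Hypothetical per-field embeddings; in a real model these come from the
# feature-embedding lookups.
field_embeddings = [tf.keras.Input(shape=(1, embedding_size))
                    for _ in range(field_size)]

# Squeeze-and-excitation over fields: mean-pool each field to a scalar,
# pass through the two-layer bottleneck (W_1, W_2), and re-scale each
# field embedding by its learned importance. Returns a list of
# (batch_size, 1, embedding_size) tensors, one per field.
senet_embeddings = SENETLayer(reduction_ratio=3)(field_embeddings)
```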
+class BilinearInteraction(Layer):
+    """BilinearInteraction Layer used in FiBiNET.
+
+    Input shape
+        - A list of 3D tensors with shape: ``(batch_size,1,embedding_size)``.
+
+    Output shape
+        - 3D tensor with shape: ``(batch_size,1,field_size*(field_size-1)/2*embedding_size)``.
+
+    Arguments
+        - **bilinear_type** : String, type of bilinear function used in this
+          layer, one of ``"all"``, ``"each"`` or ``"interaction"``.
+
+        - **seed** : A Python integer to use as random seed.
+
+    References
+        - [FiBiNET: Combining Feature Importance and Bilinear feature Interaction for Click-Through Rate Prediction](https://arxiv.org/pdf/1905.09433.pdf) (Tongwen Huang et al.)
+    """
+
+    def __init__(self, bilinear_type="interaction", seed=1024, **kwargs):
+        self.bilinear_type = bilinear_type
+        self.seed = seed
+
+        super(BilinearInteraction, self).__init__(**kwargs)
+
+    def build(self, input_shape):
+
+        if not isinstance(input_shape, list) or len(input_shape) < 2:
+            raise ValueError('A `BilinearInteraction` layer should be called '
+                             'on a list of at least 2 inputs')
+        embedding_size = input_shape[0][-1].value
+
+        if self.bilinear_type == "all":
+            # One weight matrix shared by every field pair.
+            self.W = self.add_weight(shape=(embedding_size, embedding_size), initializer=glorot_normal(
+                seed=self.seed), name="bilinear_weight")
+        elif self.bilinear_type == "each":
+            # One weight matrix per (left) field.
+            self.W_list = [self.add_weight(shape=(embedding_size, embedding_size), initializer=glorot_normal(
+                seed=self.seed), name="bilinear_weight" + str(i)) for i in range(len(input_shape) - 1)]
+        elif self.bilinear_type == "interaction":
+            # One weight matrix per field pair (i, j).
+            self.W_list = [self.add_weight(shape=(embedding_size, embedding_size), initializer=glorot_normal(
+                seed=self.seed), name="bilinear_weight" + str(i) + '_' + str(j))
+                for i, j in itertools.combinations(range(len(input_shape)), 2)]
+        else:
+            raise NotImplementedError
+
+        super(BilinearInteraction, self).build(
+            input_shape)  # Be sure to call this somewhere!
+
+    def call(self, inputs, **kwargs):
+
+        if K.ndim(inputs[0]) != 3:
+            raise ValueError(
+                "Unexpected inputs dimensions %d, expect to be 3 dimensions" % (K.ndim(inputs[0])))
+
+        if self.bilinear_type == "all":
+            p = [tf.multiply(tf.tensordot(v_i, self.W, axes=(-1, 0)), v_j)
+                 for v_i, v_j in itertools.combinations(inputs, 2)]
+        elif self.bilinear_type == "each":
+            p = [tf.multiply(tf.tensordot(inputs[i], self.W_list[i], axes=(-1, 0)), inputs[j])
+                 for i, j in itertools.combinations(range(len(inputs)), 2)]
+        elif self.bilinear_type == "interaction":
+            p = [tf.multiply(tf.tensordot(v[0], w, axes=(-1, 0)), v[1])
+                 for v, w in zip(itertools.combinations(inputs, 2), self.W_list)]
+        else:
+            raise NotImplementedError
+        return concat_fun(p)
+
+    def compute_output_shape(self, input_shape):
+        field_size = len(input_shape)
+        embedding_size = input_shape[0][-1]
+
+        return (None, 1, field_size * (field_size - 1) // 2 * embedding_size)
+
+    def get_config(self, ):
+        config = {'bilinear_type': self.bilinear_type, 'seed': self.seed}
+        base_config = super(BilinearInteraction, self).get_config()
+        return dict(list(base_config.items()) + list(config.items()))
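For context, a rough sketch of how the two new layers compose in a FiBiNET-style interaction block (illustrative only, and assuming the TF 1.x shape API this code targets, since `build` reads `input_shape[0][-1].value`):

```python
import tensorflow as tf

field_size, embedding_size = 4, 8
field_embeddings = [tf.keras.Input(shape=(1, embedding_size))
                    for _ in range(field_size)]

# Each pair (v_i, v_j) contributes p_ij = (v_i . W) * v_j, where W is
# shared ("all"), per-field ("each"), or per-pair ("interaction").
# With 4 fields there are 4*3//2 = 6 pairs, so the output below has
# shape (batch_size, 1, 6 * embedding_size).
senet_out = SENETLayer(reduction_ratio=3)(field_embeddings)
bilinear_out = BilinearInteraction(bilinear_type="interaction")(senet_out)
```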