
    y h"o                     t    d Z ddlZddlZddlZddlmZ ddlZddlZ	ddl
Z
ddlZddlZddlmZ  G d d      Zy)z
Created by Jaided AI
Released Date: 18/08/2022
Description:
DBNet text detection module. 
Many parts of the codes are adapted from https://github.com/MhLiao/DB
    N)Polygon   )Configurablec                       e Zd Z	 	 	 	 	 	 	 ddZd Zd Zd Zd Zd ZddZ	d	 Z
d
 ZddZddZ	 	 	 	 	 ddZd Z	 	 	 ddZ	 	 	 ddZddZd Zd Zd Z	 	 	 	 	 	 	 ddZy)DBNetNc                 D   || _         t        j                  j                  t        j                  j	                  t
              dd      }t        |d      5 }	t        j                  |	      | _	        ddd       |!| j                  | j                  |      | _	        || j                  j                         v r|| _        nAt        dj                  dj                  | j                  j                                           ||| _        nFt        j                  j                  t        j                  j	                  t
              d      | _        |r|| j                  |   d   j                         v r@t        j                  j                  | j                  | j                  |   d   |         }
d	}n,t        j                  j                  | j                  |      }
d
}t        j                  j!                  |
      st#        |j                  ||
            | j%                  | j                  |   d   |
       nd| _        t)        j*                  | j                  d         | _        | j                  d   | _        | j                  d   | _        y# 1 sw Y   &xY w)a  
        DBNet text detector class

        Parameters
        ----------
        backbone : str, optional
            Backbone to use. Options are "resnet18" and "resnet50". The default is "resnet18".
        weight_dir : str, optional
            Path to directory that contains weight files. If set to None, the path will be set
            to "../weights/". The default is None.
        weight_name : str, optional
            Name of the weight to use as specified in DBNet_inference.yaml or a filename 
            in weight_dir. The default is 'pretrained'.
        initialize_model : Boolean, optional
            If True, construct the model and load weight at class initialization.
            Otherwise, only initialize the class without constructing the model.
            The default is True.
        dynamic_import_relative_path : str, optional
            Relative path to 'model/detector.py'. This option is for supporting
            integrating this module into other modules, e.g., easyocr/DBNet.
            This should be left as None when calling this module as a standalone. 
            The default is None.
        device : str, optional
            Device to use. Options are "cuda" and "cpu". The default is 'cuda'.
        verbose : int, optional
            Verbosity level. The default is 0.

        Raises
        ------
        ValueError
            Raised when backbone is invalid.
        FileNotFoundError
            Raised when weight file is not found.

        Returns
        -------
        None.
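
        Examples
        --------
        A minimal construction sketch (illustrative only; it assumes the
        pretrained weight file has already been downloaded into the default
        weight directory):

        >>> detector = DBNet(backbone="resnet18", device="cpu")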
        """
        self.device = device

        config_path = os.path.join(os.path.dirname(__file__), "configs", "DBNet_inference.yaml")
        with open(config_path, 'r') as fid:
            self.configs = yaml.safe_load(fid)

        if dynamic_import_relative_path is not None:
            self.configs = self.set_relative_import_path(self.configs, dynamic_import_relative_path)

        if backbone in self.configs.keys():
            self.backbone = backbone
        else:
            raise ValueError("Invalid backbone. Currently supported backbones are: {}.".format(
                ",".join(self.configs.keys())))

        if weight_dir is not None:
            self.weight_dir = weight_dir
        else:
            self.weight_dir = os.path.join(os.path.dirname(__file__), 'weights')

        if initialize_model:
            if weight_name in self.configs[backbone]['weight'].keys():
                weight_path = os.path.join(self.weight_dir, self.configs[backbone]['weight'][weight_name])
                error_message = ("A weight named {} is listed in DBNet_inference.yaml, "
                                 "but the weight file cannot be found: {}.")
            else:
                weight_path = os.path.join(self.weight_dir, weight_name)
                error_message = ("A weight named {} is not listed in DBNet_inference.yaml "
                                 "and the weight file cannot be found: {}.")
            if not os.path.isfile(weight_path):
                raise FileNotFoundError(error_message.format(weight_name, weight_path))

            self.initialize_model(self.configs[backbone]['model'], weight_path)
        else:
            self.model = None

        self.BGR_MEAN = np.array(self.configs['BGR_MEAN'])
        self.min_detection_size = self.configs['min_detection_size']
        self.max_detection_size = self.configs['max_detection_size']

    def set_relative_import_path(self, configs, dynamic_import_relative_path):
        """
        Create relative import paths for the modules specified under 'class'
        keys in the configuration. This method is recursive.

        Parameters
        ----------
        configs : dict
            Configuration dictionary from .yaml file.
        dynamic_import_relative_path : str, optional
            Relative path to 'model/detector/'. This option is for supporting
            integrating this module into other modules, e.g., easyocr/DBNet.
            This should be left as None when calling this module as a standalone. 
            The default is None.
        
        Returns
        -------
        configs : dict
            Configuration dictionary with correct relative path.
        """
        assert dynamic_import_relative_path is not None
        prefixes = dynamic_import_relative_path.split(os.sep)
        for key, value in configs.items():
            if key == 'class':
                configs.update({key: ".".join(prefixes + [value])})
            elif isinstance(value, dict):
                self.set_relative_import_path(value, dynamic_import_relative_path)

        return configs

    def load_weight(self, weight_path):
        """
        Load weight to model.

        Parameters
        ----------
        weight_path : str
            Path to trained weight.

        Raises
        ------
        RuntimeError
            Raised when the model has not yet been constructed.

        Returns
        -------
        None.
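
        Examples
        --------
        Illustrative only; ``detector`` is an already-constructed DBNet
        instance and the path is a placeholder for a real weight file:

        >>> detector.load_weight('/path/to/pretrained.pth')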
        """
        if self.model is None:
            raise RuntimeError("model has not yet been constructed.")
        self.model.load_state_dict(torch.load(weight_path, map_location=self.device), strict=False)
        self.model.eval()

    def construct_model(self, config):
        """
        Construct text detection model based on the configuration in .yaml file.

        Parameters
        ----------
        config : dict
            Configuration dictionary.

        Returns
        -------
        None.
        """
        self.model = Configurable.construct_class_from_config(config).structure.builder.build(self.device)

    def initialize_model(self, model_config, weight_path):
        """
        Wrapper to initialize the text detection model. This includes
        constructing the model and loading its weight.

        Parameters
        ----------
        model_config : dict
            Configuration dictionary.
        weight_path : str
            Path to trained weight.

        Returns
        -------
        None.
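
        Examples
        --------
        Illustrative only; the weight path is a placeholder:

        >>> detector = DBNet(initialize_model=False, device="cpu")
        >>> detector.initialize_model(detector.configs["resnet18"]["model"],
        ...                           "/path/to/pretrained.pth")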
        """
        self.construct_model(model_config)
        self.load_weight(weight_path)
        if isinstance(self.model.model, torch.nn.DataParallel) and self.device == 'cpu':
            self.model.model = self.model.model.module.to(self.device)

    def get_cv2_image(self, image):
        """
        Load or convert input to OpenCV BGR image numpy array.

        Parameters
        ----------
        image : str, PIL.Image, or np.ndarray
            Image to load or convert.

        Raises
        ------
        FileNotFoundError
            Raised when the input is a path to file (str), but the file is not found.
        TypeError
            Raised when the data type of the input is not supported.

        Returns
        -------
        image : np.ndarray
            OpenCV BGR image.
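
        Examples
        --------
        Illustrative only; ``detector`` is a DBNet instance and 'sample.jpg'
        is a placeholder path:

        >>> bgr_from_path = detector.get_cv2_image('sample.jpg')
        >>> bgr_from_array = detector.get_cv2_image(np.zeros((32, 32, 3), dtype=np.uint8))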
        float32zCannot find {}NzYUnsupport image format. Only path-to-file, opencv BGR image, and PIL image are supported.)r7   strr   r   r!   cv2imreadIMREAD_COLORastyper"   r   r$   ndarrayPILImageasarray	TypeErrorr&   images     r.   get_cv2_imagezDBNet.get_cv2_image   s    * eS!ww~~e$

5#*:*:;BB9M  ((8(?(?(FGGrzz*LL+E  syy/JJu%aDbDj1E  wxxr<   c                    |j                   \  }}}|+t        | j                  t        ||| j                              }||k  rOt        t        j                  |dz        dz        }t        t        j                  ||z  |z  dz        dz        }nNt        t        j                  |dz        dz        }t        t        j                  ||z  |z  dz        dz        }t        j                  |||f      }|||ffS )a  
        Resize image so that its shorter side equals the multiple of 32
        closest to the provided detection_size. If detection_size is not
        provided, each side is resized to its closest multiple of 32. If the
        original size exceeds the min-/max-detection sizes (specified in
        configs.yaml), the image is resized to fall within those limits.

        Parameters
        ----------
        img : np.ndarray
            OpenCV BGR image.
        detection_size : int, optional
            Target detection size. The default is None.

        Returns
        -------
        np.ndarray
            Resized OpenCV BGR image. The width and height of this image should
            be multiples of 32.
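
        Examples
        --------
        Illustrative sizing only: for a 300 x 500 (height x width) image with
        detection_size=640, the shorter side becomes ceil(640 / 32) * 32 = 640
        and the longer side is scaled proportionally and rounded up to a
        multiple of 32 (ceil(640 / 300 * 500 / 32) * 32 = 1088), so the image
        is resized to 640 x 1088.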
            )
shapemaxr   minr   intmathceilrX   resize)	r&   imgdetection_sizeheightwidth_
new_height	new_widthresized_imgs	            r.   resize_imagezDBNet.resize_image   s    , 99q! !8!8#feTMdMd:efNE>TYY~':;b@AJDIIj6&9E&AB&FG"LMIDIInr&9:R?@ITYYy5'86'AB'FG"LMJjjy*&=>VUO++r<   c                     t        j                  |      j                  ddd      j                         j	                  d      S )a4  
        Convert image array (assuming OpenCV BGR format) to image tensor.

        Parameters
        ----------
        image : np.ndarray
            OpenCV BGR image.

        Returns
        -------
        torch.tensor
            Tensor image with 4 dimensions [batch, channel, height, width].
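
        Examples
        --------
        Illustrative only; ``detector`` is a DBNet instance:

        >>> img = np.zeros((320, 480, 3), dtype='float32')  # (H, W, C) BGR image
        >>> detector.image_array2tensor(img).shape
        torch.Size([1, 3, 320, 480])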
        """
        return torch.from_numpy(image).permute(2, 0, 1).float().unsqueeze(0)

    def normalize_image(self, image):
        """
        Normalize image by subtracting the BGR mean and dividing by 255.

        Parameters
        ----------
        image : np.ndarray
            OpenCV BGR image.

        Returns
        -------
        np.ndarray
            OpenCV BGR image.
        """
        return (image - self.BGR_MEAN) / 255.0

    def load_image(self, image_path, detection_size=None):
        """
        Wrapper to load and convert an image to an image tensor

        Parameters
        ----------
        image : path-to-file, PIL.Image, or np.ndarray
            Image to load or convert.
        detection_size : int, optional
            Target detection size. The default is None.

        Returns
        -------
        img : torch.tensor
            Tensor image with 4 dimensions [batch, channel, height, width].
        original_shape : tuple
            A tuple (height, width) of the original input image before resizing.
        """
        img = self.get_cv2_image(image_path)
        img, original_shape = self.resize_image(img, detection_size=detection_size)
        img = self.normalize_image(img)
        img = self.image_array2tensor(img)

        return img, original_shape

    def load_images(self, images, detection_size=None):
        """
        Wrapper to load or convert list of multiple images to a single image 
        tensor. Multiple images are concatenated together on the first dimension.
        
        Parameters
        ----------
        images : a list of path-to-file, PIL.Image, or np.ndarray
            Image to load or convert.
        detection_size : int, optional
            Target detection size. The default is None.

        Returns
        -------
        img : torch.tensor
            A single tensor image with 4 dimensions [batch, channel, height, width].
        original_shape : tuple
            A list of tuples (height, width) of the original input image before resizing.
        """
        images, original_shapes = zip(*[self.load_image(image, detection_size=detection_size)
                                        for image in images])

        return torch.cat(images, dim=0), original_shapes

    def hmap2bbox(self,
                  image_tensor,
                  original_shapes,
                  hmap,
                  text_threshold=0.2,
                  bbox_min_score=0.2,
                  bbox_min_size=3,
                  max_candidates=0,
                  as_polygon=False):
        """
        Translate probability heatmap tensor to text region bounding boxes.

        Parameters
        ----------
        image_tensor : torch.tensor
            Image tensor.
        original_shapes : tuple
            Original size of the image (height, width) of the input image (before
            rounded to the closest multiple of 32).
        hmap : torch.tensor
            Probability heatmap tensor.
        text_threshold : float, optional
            Minimum probability for each pixel of heatmap tensor to be considered
            as a valid text pixel. The default is 0.2.
        bbox_min_score : float, optional
            Minimum score for each detected bounding box to be considered as a
            valid text bounding box. The default is 0.2.
        bbox_min_size : int, optional
            Minimum size for each detected bounding box to be considered as a
            valid text bounding box. The default is 3.
        max_candidates : int, optional
            Maximum number of detected bounding boxes to be considered as 
            candidates for valid text bounding box. Setting it to 0 implies
            no maximum. The default is 0.
        as_polygon : boolean, optional
            If True, return the bounding boxes as fine polygons; otherwise,
            return them as rectangles. The default is False.

        Returns
        -------
        boxes_batch : list of lists
            Bounding boxes of each text box.
        scores_batch : list of floats
            Confidence scores of each text box.

        """
        segmentation = self.binarize(hmap, threshold=text_threshold)
        boxes_batch = []
        scores_batch = []
        for batch_index in range(image_tensor.size(0)):
            height, width = original_shapes[batch_index]
            if as_polygon:
                boxes, scores = self.polygons_from_bitmap(hmap[batch_index],
                                                          segmentation[batch_index],
                                                          width,
                                                          height,
                                                          bbox_min_score=bbox_min_score,
                                                          bbox_min_size=bbox_min_size,
                                                          max_candidates=max_candidates)
            else:
                boxes, scores = self.boxes_from_bitmap(hmap[batch_index],
                                                       segmentation[batch_index],
                                                       width,
                                                       height,
                                                       bbox_min_score=bbox_min_score,
                                                       bbox_min_size=bbox_min_size,
                                                       max_candidates=max_candidates)
            boxes_batch.append(boxes)
            scores_batch.append(scores)

        # Keep only boxes with a positive confidence score for each image.
        filtered_boxes_batch = []
        filtered_scores_batch = []
        for boxes, scores in zip(boxes_batch, scores_batch):
            kept = [(box, score) for (box, score) in zip(boxes, scores) if score > 0]
            if kept:
                kept_boxes, kept_scores = zip(*kept)
            else:
                kept_boxes, kept_scores = [], []
            filtered_boxes_batch.append(list(kept_boxes))
            filtered_scores_batch.append(list(kept_scores))

        return filtered_boxes_batch, filtered_scores_batch

    def binarize(self, tensor, threshold):
        """
        Apply threshold to return boolean tensor.

        Parameters
        ----------
        tensor : torch.tensor
            input tensor.
        threshold : float
            Threshold.

        Returns
        -------
        torch.tensor
            Boolean tensor.

        """
        return tensor > threshold

    def polygons_from_bitmap(self,
                             hmap,
                             segmentation,
                             dest_width,
                             dest_height,
                             bbox_min_score=0.2,
                             bbox_min_size=3,
                             max_candidates=0):
        """

        Parameters
        ----------
        hmap : torch.tensor
            Probability heatmap tensor.
        segmentation : torch.tensor
            Segmentataion tensor.
        dest_width : TYPE
            target width of the output.
        dest_height : TYPE
            target width of the output.
        bbox_min_score : float, optional
            Minimum score for each detected bounding box to be considered as a
            valid text bounding box. The default is 0.2.
        bbox_min_size : int, optional
            Minimum size for each detected bounding box to be considered as a
            valid text bounding box. The default is 3.
        max_candidates : int, optional
            Maximum number of detected bounding boxes to be considered as 
            candidates for valid text bounding box. Setting it to 0 implies
            no maximum. The default is 0.
        
        Returns
        -------
        boxes_batch : list of lists
            Polygon bounding boxes of each text box.
        scores_batch : list of floats
            Confidence scores of each text box.

        r   r      NgMb`?T)rV   rw      rV   rw   g       @)unclip_ratio)rV   r   rw   )r   rN   numpydetachrf   rX   findContoursr[   r$   uint8	RETR_LISTCHAIN_APPROX_SIMPLE	arcLengthapproxPolyDPreshapebox_score_fastuncliplenget_mini_boxesr7   ri   itemcliproundr   tolist)r&   r   r   
dest_widthdest_heightr   r   r   bitmapro   rp   r   r   contoursrq   contourepsilonapproxpointsr   r   ssides                         r.   r   zDBNet.polygons_from_bitmap  sw   P   #q(((!!#))+A.xxz  "((*1-&&CZ)MM3224! A0H !	!GcmmGT::G%%gw=F^^G,F||A"''fnnR.CDE~%||A"kk&sk;s8a< ++b!$C**3;;z+BCHAu}q((j#.'__.
)..0QTU*Z78!ZIC1IQTV+k9:A{LC1ILL&MM% C!	!F f}r<   c                    |j                  d      dk(  sJ |j                         j                         d   }|j                         j                         j                         d   }|j                  \  }	}
t        j                  |dz  j                  t        j                        t
        j                  t
        j                        \  }}|dkD  rt        t        |      |      }nt        |      }t        j                  |ddft        j                        }t        j                  |ft        j                         }t#        |      D ]  }||   }| j%                  |      \  }}||k  r#t        j&                  |      }| j)                  ||j+                  dd            }||k  r`| j-                  |      j+                  ddd      }| j%                  |      \  }}||dz   k  rt        j&                  |      }t/        |t0              s |j3                         }|j3                         }t        j4                  t        j6                  |dddf   |
z  |z        d|      |dddf<   t        j4                  t        j6                  |dddf   |	z  |z        d|      |dddf<   |j                  t        j                        ||ddddf<   |||<    |j9                         |fS )	a  
        Translate a boolean tensor into rectangular bounding boxes indicating text regions.

        Parameters
        ----------
        hmap : torch.tensor
            Probability heatmap tensor.
        segmentation : torch.tensor
            Segmentation tensor.
        dest_width : int or torch.tensor
            Target width of the output.
        dest_height : int or torch.tensor
            Target height of the output.
        bbox_min_score : float, optional
            Minimum score for each detected bounding box to be considered as a
            valid text bounding box. The default is 0.2.
        bbox_min_size : int, optional
            Minimum size for each detected bounding box to be considered as a
            valid text bounding box. The default is 3.
        max_candidates : int, optional
            Maximum number of detected bounding boxes to be considered as 
            candidates for valid text bounding box. Setting it to 0 implies
            no maximum. The default is 0.
        
        Returns
        -------
        boxes_batch : list of lists
            Rectangular (4-point) bounding boxes of each text box.
        scores_batch : list of floats
            Confidence scores of each text box.
        """
        assert segmentation.size(0) == 1
        bitmap = segmentation.cpu().numpy()[0]
        hmap = hmap.cpu().detach().numpy()[0]
        height, width = bitmap.shape
        contours, _ = cv2.findContours((bitmap * 255).astype(np.uint8),
                                       cv2.RETR_LIST,
                                       cv2.CHAIN_APPROX_SIMPLE)
        if max_candidates > 0:
            num_contours = min(len(contours), max_candidates)
        else:
            num_contours = len(contours)
        boxes = np.zeros((num_contours, 4, 2), dtype=np.int16)
        scores = np.zeros((num_contours,), dtype=np.float32)

        for index in range(num_contours):
            contour = contours[index]
            points, sside = self.get_mini_boxes(contour)
            if sside < bbox_min_size:
                continue
            points = np.array(points)
            score = self.box_score_fast(hmap, points.reshape(-1, 2))
            if score < bbox_min_score:
                continue

            box = self.unclip(points).reshape(-1, 1, 2)
            box, sside = self.get_mini_boxes(box)
            if sside < bbox_min_size + 2:
                continue
            box = np.array(box)
            if not isinstance(dest_width, int):
                dest_width = dest_width.item()
                dest_height = dest_height.item()

            box[:, 0] = np.clip(np.round(box[:, 0] / width * dest_width), 0, dest_width)
            box[:, 1] = np.clip(np.round(box[:, 1] / height * dest_height), 0, dest_height)
            boxes[index, :, :] = box.astype(np.int16)
            scores[index] = score

        return boxes.tolist(), scores

    def unclip(self, box, unclip_ratio=1.5):
        # Expand the box outward by a distance proportional to its
        # area/perimeter ratio, following the unclip step of DB post-processing.
        poly = Polygon(box)
        distance = poly.area * unclip_ratio / poly.length
        offset = pyclipper.PyclipperOffset()
        offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
        expanded = np.array(offset.Execute(distance))
        return expanded

    def get_mini_boxes(self, contour):
        # Return the 4 corner points of the minimum-area rectangle around the
        # contour (ordered starting from the top-left corner) and the length
        # of its shorter side.
        bounding_box = cv2.minAreaRect(contour)
        points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0])

        if points[1][1] > points[0][1]:
            index_1 = 0
            index_4 = 1
        else:
            index_1 = 1
            index_4 = 0
        if points[3][1] > points[2][1]:
            index_2 = 2
            index_3 = 3
        else:
            index_2 = 3
            index_3 = 2

        box = [points[index_1], points[index_2], points[index_3], points[index_4]]
        return box, min(bounding_box[1])

    def box_score_fast(self, hmap, box_):
        """
        Calculate the confidence score of a bounding box as the mean heatmap
        probability inside it.

        Parameters
        ----------
        hmap : torch.tensor
            Probability heatmap tensor.
        box_ : list
            Rectangular bounding box.

        Returns
        -------
        float
            Confidence score.
        """
        h, w = hmap.shape[:2]
        box = box_.copy()
        xmin = np.clip(np.floor(box[:, 0].min()).astype(np.int32), 0, w - 1)
        xmax = np.clip(np.ceil(box[:, 0].max()).astype(np.int32), 0, w - 1)
        ymin = np.clip(np.floor(box[:, 1].min()).astype(np.int32), 0, h - 1)
        ymax = np.clip(np.ceil(box[:, 1].max()).astype(np.int32), 0, h - 1)

        mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
        box[:, 0] = box[:, 0] - xmin
        box[:, 1] = box[:, 1] - ymin
        cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)

        return cv2.mean(hmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]

    def image2hmap(self, image_tensor):
        """
        Run the model to obtain a heatmap tensor from an image tensor. The heatmap
        tensor indicates the probability of each pixel being a part of text area.

        Parameters
        ----------
        image_tensor : torch.tensor
            Image tensor.

        Returns
        -------
        torch.tensor
            Probability heatmap tensor.
        """
        return self.model.forward(image_tensor, training=False)

    def inference(self,
                  image,
                  text_threshold=0.2,
                  bbox_min_score=0.2,
                  bbox_min_size=3,
                  max_candidates=0,
                  detection_size=None,
                  as_polygon=False,
                  return_scores=False):
        """
        Wrapper to run the model on an input image to get text bounding boxes.

        Parameters
        ----------
        image : path-to-file, PIL.Image, or np.ndarray
            Image to load or convert.
        text_threshold : float, optional
            Minimum probability for each pixel of heatmap tensor to be considered
            as a valid text pixel. The default is 0.2.
        bbox_min_score : float, optional
            Minimum score for each detected bounding box to be considered as a
            valid text bounding box. The default is 0.2.
        bbox_min_size : int, optional
            Minimum size for each detected bounding box to be considered as a
            valid text bounding box. The default is 3.
        max_candidates : int, optional
            Maximum number of detected bounding boxes to be considered as 
            candidates for valid text bounding box. Setting it to 0 implies
            no maximum. The default is 0.
        detection_size : int, optional
            Target detection size. Please see docstring under method resize_image()
            for explanation. The default is None.
        as_polygon : boolean, optional
            If True, return the bounding boxes as fine polygons; otherwise, return
            them as rectangles. The default is False.
        return_scores : boolean, optional
            If true, return confidence score along with the text bounding boxes.
            The default is False.

        Returns
        -------
        list of lists
            Text bounding boxes. If return_scores is set to true, another list
            of lists will also be returned.

        """
        if not isinstance(image, list):
            image = [image]

        image_tensor, original_shapes = self.load_images(image, detection_size=detection_size)
        with torch.no_grad():
            hmap = self.image2hmap(image_tensor)
            batch_boxes, batch_scores = self.hmap2bbox(image_tensor,
                                                       original_shapes,
                                                       hmap,
                                                       text_threshold=text_threshold,
                                                       bbox_min_score=bbox_min_score,
                                                       bbox_min_size=bbox_min_size,
                                                       max_candidates=max_candidates,
                                                       as_polygon=as_polygon)

        if return_scores:
            return batch_boxes, batch_scores
        else:
            return batch_boxes
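

if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module. It assumes a
    # pretrained resnet18 weight file is present in the default weight
    # directory and that 'sample.jpg' is a readable image file; both names
    # are placeholders for illustration only.
    detector = DBNet(backbone='resnet18', device='cpu')
    boxes_batch, scores_batch = detector.inference('sample.jpg', return_scores=True)
    for box, score in zip(boxes_batch[0], scores_batch[0]):
        print(round(float(score), 3), box)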