Breadth-First Search 
      
    
      
      
      
        Dynamic Programming 
      
    
      
      
      
        Math 
      
    
   
  
    
      
       
     
  
  
    
      
    
    
      
       
     
  
Description 
Given an integer n, return the least number of perfect square numbers that sum to n.
A perfect square  is an integer that is the square of an integer; in other words, it is the product of some integer with itself. For example, 1, 4, 9, and 16 are perfect squares while 3 and 11 are not.
 
Example 1: 
Input:  n = 12
Output:  3
Explanation:  12 = 4 + 4 + 4.
 
Example 2: 
Input:  n = 13
Output:  2
Explanation:  13 = 4 + 9.
 
 
Constraints: 
Solutions 
Solution 1 
Python3 Java C++ Go TypeScript Rust 
class Solution:
    def numSquares(self, n: int) -> int:
        """Return the least number of perfect squares that sum to n.

        2D complete-knapsack DP: dp[i][j] is the fewest squares drawn
        from {1^2, ..., i^2} (unlimited copies of each) that sum to j.
        """
        # Largest base whose square still fits into n.
        m = int(sqrt(n))
        dp = [[inf] * (n + 1) for _ in range(m + 1)]
        dp[0][0] = 0  # zero squares sum to zero
        for i in range(1, m + 1):
            sq = i * i
            for j in range(n + 1):
                # Either skip base i entirely...
                best = dp[i - 1][j]
                # ...or use one more copy of i*i (stay on row i).
                if j >= sq and dp[i][j - sq] + 1 < best:
                    best = dp[i][j - sq] + 1
                dp[i][j] = best
        return dp[m][n]
 
 
 1 
 2 
 3 
 4 
 5 
 6 
 7 
 8 
 9 
10 
11 
12 
13 
14 
15 
16 
17 
18 
class Solution {
    /**
     * Returns the least number of perfect squares that sum to n.
     *
     * 2D complete-knapsack DP: dp[i][j] is the fewest squares drawn from
     * {1^2, ..., i^2} (unlimited copies) that sum to j.
     */
    public int numSquares(int n) {
        final int INF = 1 << 30; // "unreachable"; survives +1 without overflow
        int m = (int) Math.sqrt(n);
        int[][] dp = new int[m + 1][n + 1];
        for (int[] row : dp) {
            Arrays.fill(row, INF);
        }
        dp[0][0] = 0; // zero squares sum to zero
        for (int i = 1; i <= m; i++) {
            int sq = i * i;
            for (int j = 0; j <= n; j++) {
                // Skip base i entirely...
                int best = dp[i - 1][j];
                // ...or take one more copy of i*i (stay on row i).
                if (j >= sq && dp[i][j - sq] + 1 < best) {
                    best = dp[i][j - sq] + 1;
                }
                dp[i][j] = best;
            }
        }
        return dp[m][n];
    }
}
 
 
 1 
 2 
 3 
 4 
 5 
 6 
 7 
 8 
 9 
10 
11 
12 
13 
14 
15 
16 
17 
class Solution {
public:
    /**
     * Returns the least number of perfect squares that sum to n.
     *
     * 2D complete-knapsack DP: f[i][j] is the fewest squares drawn from
     * {1^2, ..., i^2} that sum to j. Uses std::vector instead of the
     * original variable-length array `int f[m+1][n+1]` — VLAs are a
     * compiler extension, not standard C++, and risk stack overflow for
     * large n.
     */
    int numSquares(int n) {
        int m = sqrt(n);
        // 0x3f3f3f3f plays "infinity": large, yet safe to add 1 to
        // without overflowing int (matches the original memset fill).
        vector<vector<int>> f(m + 1, vector<int>(n + 1, 0x3f3f3f3f));
        f[0][0] = 0; // zero squares sum to zero
        for (int i = 1; i <= m; ++i) {
            for (int j = 0; j <= n; ++j) {
                f[i][j] = f[i - 1][j]; // skip base i entirely
                if (j >= i * i) {
                    // or take one more copy of i*i (stay on row i)
                    f[i][j] = min(f[i][j], f[i][j - i * i] + 1);
                }
            }
        }
        return f[m][n];
    }
};
 
 
 1 
 2 
 3 
 4 
 5 
 6 
 7 
 8 
 9 
10 
11 
12 
13 
14 
15 
16 
17 
18 
19 
20 
// numSquares returns the least number of perfect squares that sum to n.
//
// 2D complete-knapsack DP: dp[i][j] is the fewest squares drawn from
// {1^2, ..., i^2} (unlimited copies) that sum to j.
func numSquares(n int) int {
	const unreachable = 1 << 30 // survives +1 without overflow
	m := int(math.Sqrt(float64(n)))
	dp := make([][]int, m+1)
	for i := range dp {
		row := make([]int, n+1)
		for j := range row {
			row[j] = unreachable
		}
		dp[i] = row
	}
	dp[0][0] = 0 // zero squares sum to zero
	for i := 1; i <= m; i++ {
		sq := i * i
		for j := 0; j <= n; j++ {
			// Skip base i entirely...
			best := dp[i-1][j]
			// ...or take one more copy of i*i (stay on row i).
			if j >= sq {
				if cand := dp[i][j-sq] + 1; cand < best {
					best = cand
				}
			}
			dp[i][j] = best
		}
	}
	return dp[m][n]
}
 
 
 1 
 2 
 3 
 4 
 5 
 6 
 7 
 8 
 9 
10 
11 
12 
13 
14 
15 
16 function   numSquares ( n :   number ) :   number   { 
     const   m   =   Math . floor ( Math . sqrt ( n )); 
     const   f :   number [][]   =   Array ( m   +   1 ) 
         . fill ( 0 ) 
         . map (()   =>   Array ( n   +   1 ). fill ( 1   <<   30 )); 
     f [ 0 ][ 0 ]   =   0 ; 
     for   ( let   i   =   1 ;   i   <=   m ;   ++ i )   { 
         for   ( let   j   =   0 ;   j   <=   n ;   ++ j )   { 
             f [ i ][ j ]   =   f [ i   -   1 ][ j ]; 
             if   ( j   >=   i   *   i )   { 
                 f [ i ][ j ]   =   Math . min ( f [ i ][ j ],   f [ i ][ j   -   i   *   i ]   +   1 ); 
             } 
         } 
     } 
     return   f [ m ][ n ]; 
} 
 
 
 1 
 2 
 3 
 4 
 5 
 6 
 7 
 8 
 9 
10 
11 
12 
13 
14 
15 
16 impl   Solution   { 
     pub   fn   num_squares ( n :   i32 )   ->   i32   { 
         let   ( row ,   col )   =   (( n   as   f32 ). sqrt (). floor ()   as   usize ,   n   as   usize ); 
         let   mut   dp   =   vec! [ vec! [ i32 :: MAX ;   col   +   1 ];   row   +   1 ]; 
         dp [ 0 ][ 0 ]   =   0 ; 
         for   i   in   1 ..= row   { 
             for   j   in   0 ..= col   { 
                 dp [ i ][ j ]   =   dp [ i   -   1 ][ j ]; 
                 if   j   >=   i   *   i   { 
                     dp [ i ][ j ]   =   std :: cmp :: min ( dp [ i ][ j ],   dp [ i ][ j   -   i   *   i ]   +   1 ); 
                 } 
             } 
         } 
         dp [ row ][ col ] 
     } 
} 
 
 
 
 
Solution 2 
Python3 Java C++ Go TypeScript Rust 
class Solution:
    def numSquares(self, n: int) -> int:
        """Return the least number of perfect squares that sum to n.

        Space-optimized 1D complete-knapsack DP: dp[j] holds the fewest
        squares summing to j using the bases considered so far.
        """
        m = int(sqrt(n))
        dp = [inf] * (n + 1)
        dp[0] = 0  # zero squares sum to zero
        for i in range(1, m + 1):
            sq = i * i
            # Forward scan lets base i be reused any number of times.
            for j in range(sq, n + 1):
                cand = dp[j - sq] + 1
                if cand < dp[j]:
                    dp[j] = cand
        return dp[n]
 
 
 1 
 2 
 3 
 4 
 5 
 6 
 7 
 8 
 9 
10 
11 
12 
13 
class Solution {
    /**
     * Returns the least number of perfect squares that sum to n.
     *
     * Space-optimized 1D complete-knapsack DP: dp[j] holds the fewest
     * squares summing to j using the bases considered so far.
     */
    public int numSquares(int n) {
        final int INF = 1 << 30; // "unreachable"; survives +1 safely
        int m = (int) Math.sqrt(n);
        int[] dp = new int[n + 1];
        Arrays.fill(dp, INF);
        dp[0] = 0; // zero squares sum to zero
        for (int i = 1; i <= m; i++) {
            int sq = i * i;
            // Forward scan lets base i be reused any number of times.
            for (int j = sq; j <= n; j++) {
                int cand = dp[j - sq] + 1;
                if (cand < dp[j]) {
                    dp[j] = cand;
                }
            }
        }
        return dp[n];
    }
}
 
 
 1 
 2 
 3 
 4 
 5 
 6 
 7 
 8 
 9 
10 
11 
12 
13 
14 
class Solution {
public:
    /**
     * Returns the least number of perfect squares that sum to n.
     *
     * Space-optimized 1D complete-knapsack DP: f[j] is the fewest
     * squares summing to j using the bases considered so far. Uses
     * std::vector instead of the original variable-length array
     * `int f[n+1]` — VLAs are a compiler extension, not standard C++,
     * and risk stack overflow for large n.
     */
    int numSquares(int n) {
        int m = sqrt(n);
        // 0x3f3f3f3f plays "infinity": large, yet safe to add 1 to
        // without overflowing int (matches the original memset fill).
        vector<int> f(n + 1, 0x3f3f3f3f);
        f[0] = 0; // zero squares sum to zero
        for (int i = 1; i <= m; ++i) {
            // Forward scan lets base i be reused any number of times.
            for (int j = i * i; j <= n; ++j) {
                f[j] = min(f[j], f[j - i * i] + 1);
            }
        }
        return f[n];
    }
};
 
 
 1 
 2 
 3 
 4 
 5 
 6 
 7 
 8 
 9 
10 
11 
12 
13 
// numSquares returns the least number of perfect squares that sum to n.
//
// Space-optimized 1D complete-knapsack DP: dp[j] holds the fewest
// squares summing to j using the bases considered so far.
func numSquares(n int) int {
	const unreachable = 1 << 30 // survives +1 without overflow
	m := int(math.Sqrt(float64(n)))
	dp := make([]int, n+1)
	// dp[0] stays 0 (zero squares sum to zero); the rest start unreachable.
	for j := 1; j <= n; j++ {
		dp[j] = unreachable
	}
	for i := 1; i <= m; i++ {
		sq := i * i
		// Forward scan lets base i be reused any number of times.
		for j := sq; j <= n; j++ {
			if cand := dp[j-sq] + 1; cand < dp[j] {
				dp[j] = cand
			}
		}
	}
	return dp[n]
}
 
 
/**
 * Returns the least number of perfect squares that sum to n.
 *
 * Space-optimized 1D complete-knapsack DP: dp[j] holds the fewest
 * squares summing to j using the bases considered so far.
 */
function numSquares(n: number): number {
    const INF = 1 << 30; // "unreachable"; survives +1 safely
    const m = Math.floor(Math.sqrt(n));
    const dp: number[] = new Array<number>(n + 1).fill(INF);
    dp[0] = 0; // zero squares sum to zero
    for (let i = 1; i <= m; i++) {
        const sq = i * i;
        // Forward scan lets base i be reused any number of times.
        for (let j = sq; j <= n; j++) {
            const cand = dp[j - sq] + 1;
            if (cand < dp[j]) {
                dp[j] = cand;
            }
        }
    }
    return dp[n];
}
 
 
 1 
 2 
 3 
 4 
 5 
 6 
 7 
 8 
 9 
10 
11 
12 
13 impl   Solution   { 
     pub   fn   num_squares ( n :   i32 )   ->   i32   { 
         let   ( row ,   col )   =   (( n   as   f32 ). sqrt (). floor ()   as   usize ,   n   as   usize ); 
         let   mut   dp   =   vec! [ i32 :: MAX ;   col   +   1 ]; 
         dp [ 0 ]   =   0 ; 
         for   i   in   1 ..= row   { 
             for   j   in   i   *   i ..= col   { 
                 dp [ j ]   =   std :: cmp :: min ( dp [ j ],   dp [ j   -   i   *   i ]   +   1 ); 
             } 
         } 
         dp [ col ] 
     } 
}