Dynamic programming (DP) is one of the most tested algorithmic topics in FAANG interviews. Rather than memorizing problems, learn the 6 core patterns — nearly every DP problem maps to one of them.
Recognizing DP Problems
A problem is likely DP if:
- It asks for count/sum/min/max of something
- It has overlapping subproblems (same sub-computation repeated)
- It has optimal substructure (optimal solution contains optimal sub-solutions)
- It can be broken into a decision at each step (choose/skip, take/leave)
Pattern 1: Linear DP — Fibonacci / Climbing Stairs
from functools import lru_cache
# --- Memoization (top-down) ---
@lru_cache(maxsize=None)
def fib(n: int) -> int:
    """Return the nth Fibonacci number (fib(1) == fib(2) == 1).

    Top-down recursion; lru_cache memoizes so each subproblem is
    computed once — O(n) time instead of exponential.
    NOTE(review): lines here were garbled by extraction (comparisons lost);
    reconstructed from the standard memoized-Fibonacci pattern.
    """
    if n <= 2:
        return 1
    return fib(n - 1) + fib(n - 2)

# --- Tabulation (bottom-up), O(1) space ---
def climb_stairs(n: int) -> int:
    """Ways to climb n stairs taking 1 or 2 steps at a time.

    Shifted Fibonacci: 1, 2, 3, 5, 8, ... — matches the surviving
    iterative body (a, b seeded to 1, 1; n-1 rolling updates).
    """
    a, b = 1, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return b
# Coin change: min coins to make amount
def coin_change(coins: list, amount: int) -> int:
    """Fewest coins summing to `amount`; -1 if unreachable.

    Unbounded-coin linear DP: O(len(coins) * amount) time, O(amount) space.
    """
    INF = float("inf")
    best = [0] + [INF] * amount  # best[x] = min coins to reach sum x
    for coin in coins:
        for total in range(coin, amount + 1):
            candidate = best[total - coin] + 1
            if candidate < best[total]:
                best[total] = candidate
    return -1 if best[amount] == INF else best[amount]
print(coin_change([1, 5, 11], 15))  # 3 (5 + 5 + 5)
Pattern 2: 0/1 Knapsack
def knapsack_01(weights: list, values: list, capacity: int) -> int:
    """0/1 knapsack, classic 2D table.

    table[i][w] = max value using the first i items within capacity w.
    Decision per item: exclude, or include if it fits.
    """
    n = len(weights)
    table = [[0] * (capacity + 1) for _ in range(n + 1)]
    for i, (wt, val) in enumerate(zip(weights, values), start=1):
        for cap in range(capacity + 1):
            best = table[i - 1][cap]                      # exclude item i
            if wt <= cap:                                 # include item i if it fits
                best = max(best, table[i - 1][cap - wt] + val)
            table[i][cap] = best
    return table[n][capacity]
# Space optimized: O(W) using 1D array (iterate capacity in reverse)
def knapsack_1d(weights: list, values: list, capacity: int) -> int:
    """0/1 knapsack in O(capacity) space.

    Scanning capacity right-to-left means best[cap - wt] still refers to the
    PREVIOUS item row, so each item is used at most once.
    """
    best = [0] * (capacity + 1)
    for wt, val in zip(weights, values):
        cap = capacity
        while cap >= wt:
            take = best[cap - wt] + val
            if take > best[cap]:
                best[cap] = take
            cap -= 1
    return best[capacity]
# Subset sum: can we partition array into two equal-sum subsets?
def can_partition(nums: list) -> bool:
    """True iff nums can be split into two subsets with equal sums.

    Tracks the set of achievable subset sums up to total // 2.
    """
    total = sum(nums)
    if total & 1:          # odd total can never split evenly
        return False
    half = total // 2
    reachable = {0}
    for value in nums:
        grown = set()
        for s in reachable:
            bumped = s + value
            if bumped <= half:   # prune sums past the target
                grown.add(bumped)
        reachable |= grown
    return half in reachable
print(can_partition([1, 5, 11, 5]))  # True: {1,5,5} = {11}
Pattern 3: Longest Common Subsequence / Substring
def lcs(s1: str, s2: str) -> int:
    """Length of the longest common subsequence of s1 and s2.

    Classic two-sequence 2D DP: O(m*n) time and space.
    """
    rows, cols = len(s1), len(s2)
    table = [[0] * (cols + 1) for _ in range(rows + 1)]
    for i, a in enumerate(s1, start=1):
        for j, b in enumerate(s2, start=1):
            if a == b:
                table[i][j] = table[i - 1][j - 1] + 1    # extend the match
            else:
                up, left = table[i - 1][j], table[i][j - 1]
                table[i][j] = up if up >= left else left  # drop one char
    return table[rows][cols]
def longest_common_substring(s1: str, s2: str) -> str:
    """Longest contiguous substring shared by s1 and s2 (unlike LCS, no gaps).

    run[i][j] = length of the common suffix of s1[:i] and s2[:j];
    ties keep the earliest maximum found (strict > comparison).
    """
    rows, cols = len(s1), len(s2)
    run = [[0] * (cols + 1) for _ in range(rows + 1)]
    longest, end_at = 0, 0
    for i, a in enumerate(s1, start=1):
        for j, b in enumerate(s2, start=1):
            if a != b:
                continue  # run[i][j] stays 0: mismatch breaks the run
            run[i][j] = run[i - 1][j - 1] + 1
            if run[i][j] > longest:
                longest, end_at = run[i][j], i
    return s1[end_at - longest:end_at]
def edit_distance(word1: str, word2: str) -> int:
    """Levenshtein distance: min insertions/deletions/substitutions
    turning word1 into word2.

    Uses a rolling 1D row, so space is O(len(word2)).
    """
    m, n = len(word1), len(word2)
    row = list(range(n + 1))      # row 0: "" -> word2[:j] costs j inserts
    for i in range(1, m + 1):
        above = row[:]            # snapshot of row i-1
        row[0] = i                # word1[:i] -> "" costs i deletes
        for j in range(1, n + 1):
            if word1[i - 1] == word2[j - 1]:
                row[j] = above[j - 1]   # match: no edit needed
            else:
                # delete (above[j]) / insert (row[j-1]) / substitute (above[j-1])
                row[j] = 1 + min(above[j], row[j - 1], above[j - 1])
    return row[n]
print(edit_distance("horse", "ros"))  # 3
Pattern 4: Longest Increasing Subsequence (LIS)
from bisect import bisect_left
def lis_n2(nums: list) -> int:
    """Length of the longest strictly increasing subsequence — O(n^2) DP.

    dp[i] = LIS length ending at index i.
    NOTE(review): the inner comparison was garbled by extraction;
    reconstructed from the standard quadratic-LIS recurrence.
    """
    if not nums:
        return 0
    dp = [1] * len(nums)
    for i in range(1, len(nums)):
        for j in range(i):
            if nums[j] < nums[i]:          # nums[i] can extend the IS ending at j
                dp[i] = max(dp[i], dp[j] + 1)
    return max(dp)

def lis_nlogn(nums: list) -> int:
    """LIS length in O(n log n) via patience sorting / binary search."""
    tails = []  # tails[i] = smallest tail of any increasing subseq of length i+1
    for n in nums:
        idx = bisect_left(tails, n)
        if idx == len(tails):
            tails.append(n)     # n extends the longest subsequence found
        else:
            tails[idx] = n      # n is a better (smaller) tail for length idx+1
    return len(tails)
# Russian dolls / box stacking: 2D LIS variant
def max_envelopes(envelopes: list) -> int:
    """Max nesting depth of (width, height) envelopes.

    Sort width ascending with ties by height DESCENDING; then two envelopes
    of equal width can never chain in the height-LIS, so a plain LIS over
    heights gives the answer. Note: sorts `envelopes` in place.
    """
    envelopes.sort(key=lambda wh: (wh[0], -wh[1]))
    heights = [wh[1] for wh in envelopes]
    return lis_nlogn(heights)
print(lis_nlogn([10, 9, 2, 5, 3, 7, 101, 18])) # 4: [2,3,7,101]
Pattern 5: Matrix / Grid DP
def min_path_sum(grid: list) -> int:
    """Min path sum top-left → bottom-right, moving only right or down.

    Rolling 1D row: O(m*n) time, O(n) space.
    """
    rows, cols = len(grid), len(grid[0])
    acc = grid[0][:]                 # first row: only "right" moves possible
    for j in range(1, cols):
        acc[j] += acc[j - 1]
    for i in range(1, rows):
        acc[0] += grid[i][0]         # first column: only "down" moves
        for j in range(1, cols):
            from_up, from_left = acc[j], acc[j - 1]
            acc[j] = grid[i][j] + (from_up if from_up < from_left else from_left)
    return acc[-1]
def unique_paths_with_obstacles(grid: list) -> int:
    """Count right/down paths from top-left to bottom-right; truthy cell = obstacle."""
    rows, cols = len(grid), len(grid[0])
    if grid[0][0] or grid[rows - 1][cols - 1]:
        return 0                     # start or goal blocked: no paths at all
    ways = [1] + [0] * (cols - 1)    # ways[j] = paths reaching current row, col j
    for row in grid:
        for j, cell in enumerate(row):
            if cell:
                ways[j] = 0          # obstacle: nothing passes through
            elif j:
                ways[j] += ways[j - 1]   # come from above (old ways[j]) or the left
    return ways[-1]
def maximal_square(matrix: list) -> int:
    """Area of the largest square of "1" cells.

    side[i][j] = side length of the largest all-"1" square whose
    bottom-right corner is (i, j).
    """
    if not matrix:
        return 0
    rows, cols = len(matrix), len(matrix[0])
    side = [[0] * cols for _ in range(rows)]
    best = 0
    for i, row in enumerate(matrix):
        for j, cell in enumerate(row):
            if cell != "1":
                continue
            if i and j:
                # limited by the three neighboring squares
                side[i][j] = 1 + min(side[i - 1][j], side[i][j - 1], side[i - 1][j - 1])
            else:
                side[i][j] = 1       # first row/column: at most a 1x1 square
            if side[i][j] > best:
                best = side[i][j]
    return best * best
Pattern 6: Interval DP
def burst_balloons(nums: list) -> int:
    """Max coins from bursting all balloons (burst i gains left*i*right).

    Key idea: choose k as the LAST balloon burst in the open interval
    (lo, hi); its neighbors at burst time are then exactly the boundary
    balloons lo and hi. best[lo][hi] covers balloons strictly between them.
    """
    padded = [1] + nums + [1]        # virtual 1-value balloons at both ends
    size = len(padded)
    best = [[0] * size for _ in range(size)]
    for span in range(2, size):      # distance between the interval boundaries
        for lo in range(size - span):
            hi = lo + span
            best[lo][hi] = max(
                padded[lo] * padded[k] * padded[hi] + best[lo][k] + best[k][hi]
                for k in range(lo + 1, hi)
            )
    return best[0][size - 1]
def matrix_chain_order(dims: list) -> int:
    """Min scalar multiplications to multiply a chain of matrices.

    Matrix i (1-based) has dimensions dims[i-1] x dims[i], so a chain of
    len(dims) - 1 matrices. Interval DP over split points: O(n^3).
    """
    count = len(dims) - 1            # number of matrices in the chain
    cost = [[0] * count for _ in range(count)]
    for span in range(2, count + 1):     # chain length being solved
        for i in range(count - span + 1):
            j = i + span - 1
            # try every split k: multiply (i..k) then (k+1..j), then combine
            cost[i][j] = min(
                cost[i][k] + cost[k + 1][j] + dims[i] * dims[k + 1] * dims[j + 1]
                for k in range(i, j)
            )
    return cost[0][count - 1]
print(burst_balloons([3, 1, 5, 8]))  # expected: 167
DP Complexity Summary
| Problem | Time | Space | Pattern |
|---|---|---|---|
| Fibonacci / Coin Change | O(n*k) | O(n) | Linear DP |
| 0/1 Knapsack | O(n*W) | O(W) | Subset selection |
| LCS / Edit Distance | O(m*n) | O(min(m,n)) | Two-sequence DP |
| LIS (patience sort) | O(n log n) | O(n) | Greedy + bisect |
| Grid paths / Max square | O(m*n) | O(n) | 2D DP |
| Balloon burst / MCM | O(n^3) | O(n^2) | Interval DP |
Frequently Asked Questions
How do you recognize a dynamic programming problem?
A problem is likely DP if it asks for a count, minimum, maximum, or existence of something, and has overlapping subproblems (same sub-computation repeated). Key signals: "minimum cost", "number of ways", "longest/shortest", "can you achieve". DP solves it by breaking into subproblems and caching results (memoization or tabulation).
What is the difference between memoization and tabulation?
Memoization (top-down): start with the full problem, recurse, cache results. Natural recursive structure, only solves needed subproblems. Tabulation (bottom-up): start with base cases, build up iteratively. No recursion overhead, better cache performance, easier to optimize space. Both have the same asymptotic complexity.
What is the time complexity of the 0/1 Knapsack problem?
O(n * W) time and O(W) space (with space optimization), where n = number of items and W = capacity. This is pseudo-polynomial — polynomial in the numeric value of W, but exponential in the number of bits needed to represent W. The problem is NP-complete in the general case.
{
  "@context": "https://schema.org",
  "@type": "FAQPage",
  "mainEntity": [
    {
      "@type": "Question",
      "name": "How do you recognize a dynamic programming problem?",
      "acceptedAnswer": {
        "@type": "Answer",
        "text": "A problem is likely DP if it asks for a count, minimum, maximum, or existence of something, and has overlapping subproblems (same sub-computation repeated). Key signals: \"minimum cost\", \"number of ways\", \"longest/shortest\", \"can you achieve\". DP solves it by breaking into subproblems and caching results (memoization or tabulation)."
      }
    },
    {
      "@type": "Question",
      "name": "What is the difference between memoization and tabulation?",
      "acceptedAnswer": {
        "@type": "Answer",
        "text": "Memoization (top-down): start with the full problem, recurse, cache results. Natural recursive structure, only solves needed subproblems. Tabulation (bottom-up): start with base cases, build up iteratively. No recursion overhead, better cache performance, easier to optimize space. Both have the same asymptotic complexity."
      }
    },
    {
      "@type": "Question",
      "name": "What is the time complexity of the 0/1 Knapsack problem?",
      "acceptedAnswer": {
        "@type": "Answer",
        "text": "O(n * W) time and O(W) space (with space optimization), where n = number of items and W = capacity. This is pseudo-polynomial — polynomial in the numeric value of W, but exponential in the number of bits needed to represent W. The problem is NP-complete in the general case."
      }
    }
  ]
}