JarrettYe committed
Commit c42a295
1 Parent(s): 4878178

simulate four ratings

Files changed (1)
app.py +15 -10
app.py CHANGED
@@ -10,6 +10,7 @@ columns = ["difficulty", "stability", "retrievability", "delta_t",
            "reps", "lapses", "last_date", "due", "ivl", "cost", "rand"]
 col = {key: i for i, key in enumerate(columns)}
 
+first_rating_prob = np.array([0.15, 0.2, 0.6, 0.05])
 
 def simulate(w, request_retention=0.9, deck_size=10000, learn_span=100, max_cost_perday=200, max_ivl=36500, recall_cost=10, forget_cost=30, learn_cost=10):
     card_table = np.zeros((len(columns), deck_size))
@@ -21,11 +22,14 @@ def simulate(w, request_retention=0.9, deck_size=10000, learn_span=100, max_cost
     learn_cnt_per_day = np.zeros(learn_span)
     memorized_cnt_per_day = np.zeros(learn_span)
 
-    def cal_next_recall_stability(s, r, d, response):
-        if response == 1:
-            return s * (1 + np.exp(w[8]) * (11 - d) * np.power(s, -w[9]) * (np.exp((1 - r) * w[10]) - 1))
-        else:
-            return np.maximum(0.1, np.minimum(w[11] * np.power(d, -w[12]) * (np.power(s + 1, w[13]) - 1) * np.exp((1 - r) * w[14]), s))
+    def stability_after_success(s, r, d, response):
+        hard_penalty = np.where(response == 1, w[15], 1)
+        easy_bonus = np.where(response == 3, w[16], 1)
+        return s * (1 + np.exp(w[8]) * (11 - d) * np.power(s, -w[9]) * (np.exp((1 - r) * w[10]) - 1) * hard_penalty * easy_bonus)
+
+    def stability_after_failure(s, r, d):
+        return np.maximum(0.1, np.minimum(w[11] * np.power(d, -w[12]) * (np.power(s + 1, w[13]) - 1) * np.exp((1 - r) * w[14]), s))
+
 
     for today in tqdm(range(learn_span)):
         has_learned = card_table[col["stability"]] > 1e-10
@@ -48,11 +52,12 @@ def simulate(w, request_retention=0.9, deck_size=10000, learn_span=100, max_cost
         card_table[col["lapses"]][true_review & forget] += 1
         card_table[col["reps"]][true_review & ~forget] += 1
 
-        card_table[col["stability"]][true_review & forget] = cal_next_recall_stability(
-            card_table[col["stability"]][true_review & forget], card_table[col["retrievability"]][true_review & forget], card_table[col["difficulty"]][true_review & forget], 0)
+        card_table[col["stability"]][true_review & forget] = stability_after_failure(
+            card_table[col["stability"]][true_review & forget], card_table[col["retrievability"]][true_review & forget], card_table[col["difficulty"]][true_review & forget])
 
-        card_table[col["stability"]][true_review & ~forget] = cal_next_recall_stability(
-            card_table[col["stability"]][true_review & ~forget], card_table[col["retrievability"]][true_review & ~forget], card_table[col["difficulty"]][true_review & ~forget], 1)
+        review_ratings = np.random.choice([1, 2, 3], np.sum(true_review & ~forget), p=[0.3, 0.6, 0.1])
+        card_table[col["stability"]][true_review & ~forget] = stability_after_success(
+            card_table[col["stability"]][true_review & ~forget], card_table[col["retrievability"]][true_review & ~forget], card_table[col["difficulty"]][true_review & ~forget], review_ratings)
 
         card_table[col["difficulty"]][true_review & forget] = np.clip(
             card_table[col["difficulty"]][true_review & forget] + 2 * w[6], 1, 10)
@@ -62,7 +67,7 @@ def simulate(w, request_retention=0.9, deck_size=10000, learn_span=100, max_cost
         true_learn = need_learn & (
            np.cumsum(card_table[col["cost"]]) <= max_cost_perday)
         card_table[col["last_date"]][true_learn] = today
-        first_ratings = np.random.randint(0, 4, np.sum(true_learn))
+        first_ratings = np.random.choice(4, np.sum(true_learn), p=first_rating_prob)
         card_table[col["stability"]][true_learn] = np.choose(
             first_ratings, w[:4])
         card_table[col["difficulty"]][true_learn] = w[4] - \
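A minimal, self-contained sketch (not part of the commit) of the behaviour introduced here: first ratings of new cards and ratings of successful reviews are now drawn from probability distributions instead of a uniform randint or a fixed value, and the hard penalty (w[15]) and easy bonus (w[16]) scale the stability increase in stability_after_success. The weight values in w below are made-up placeholders; the real simulator receives its trained FSRS weights through simulate(w, ...).

# Illustrative sketch only -- placeholder weights, not the app's trained values.
import numpy as np

w = np.array([1.0, 2.0, 4.0, 8.0,    # w[0:4]: initial stability per first rating (placeholders)
              5.0, 1.0, 1.0, 0.05,   # difficulty-related weights (placeholders)
              1.5, 0.1, 1.0,         # w[8:11]: stability growth after success (placeholders)
              2.0, 0.1, 0.3, 2.3,    # w[11:15]: stability after a lapse (placeholders)
              0.5, 2.0])             # w[15]: hard penalty, w[16]: easy bonus (placeholders)

first_rating_prob = np.array([0.15, 0.2, 0.6, 0.05])

def stability_after_success(s, r, d, response):
    # Rating 1 (hard) shrinks the stability increase, rating 3 (easy) enlarges it.
    hard_penalty = np.where(response == 1, w[15], 1)
    easy_bonus = np.where(response == 3, w[16], 1)
    return s * (1 + np.exp(w[8]) * (11 - d) * np.power(s, -w[9])
                * (np.exp((1 - r) * w[10]) - 1) * hard_penalty * easy_bonus)

# New cards: first ratings sampled from first_rating_prob, then mapped to an
# initial stability via w[:4], as in the updated learning step.
first_ratings = np.random.choice(4, 5, p=first_rating_prob)
print(first_ratings, np.choose(first_ratings, w[:4]))

# Successful reviews: ratings 1/2/3 give different next stabilities for the
# same state (stability s, retrievability r, difficulty d).
print(stability_after_success(10.0, 0.9, 5.0, np.array([1, 2, 3])))

In the updated review step, the same success formula is applied elementwise to every card recalled that day, with review_ratings drawn from p=[0.3, 0.6, 0.1] over the three success ratings, while forgotten cards keep using stability_after_failure.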