Dataset columns:
URL: string, lengths 15 to 1.68k
text_list: sequence, lengths 1 to 199
image_list: sequence, lengths 1 to 199
metadata: string, lengths 1.19k to 3.08k
https://ww2.mathworks.cn/matlabcentral/answers/530678-how-to-make-specific-elements-in-a-3d-matrix-zero
[ "# How to make specific elements in a 3D matrix zero?\n\n5 views (last 30 days)\nRabia Zulfiqar on 22 May 2020\nCommented: Rabia Zulfiqar on 23 May 2020\nHi I have a 3D matrix namely Costyearlydata having size of 24x365x10 and I want to make certain elements in that matrix zero.\nD has a size of 4000x1x10 and it represents the index of those elements which should be zero.\nI am trying with this code but the problem is it only gives correct results for the 1st layer in cost1 matrix and for the remaining nine layers it doesn't make that element zero .What is wrong with my code.Kindly point out my mistake.\nfor yy=1:10\ncost1(:,:,yy)=Costyearlydata(:,:,yy);\ncost1(D)=0;\nend\nYour assistance is much appreciated.I am not so much familiar to dealing with 3D matrix and I am quite new to MATLAB:(\n\nWalter Roberson on 22 May 2020\nD has a size of 4000x1x10 and it represents the index of those elements which should be zero.\nThe index relative to what? Relative to the 24 x 365 plane? The linear index in the 24 x 365 x 10 array?\nRabia Zulfiqar on 22 May 2020\nyes it represents the index relative to 24x365 plane. For example in each layer I have 8760 elements. D is the linear index from 1 to 8760.\nD has 10 layers so for each layer it has indexing from 1 to 8760.\nRabia Zulfiqar on 22 May 2020\nI also tried this but this one is also giving incorrect answer.\nfor yy=1:10\ncost1(:,:,yy)=Costyearlydata(:,:,yy);\ncost1(D(:,:,yy))=0;\nend\nI don't know how to fix this issue:(\n\nDavid Goodmanson on 23 May 2020\nHi Rabia, try\nfor yy=1:10\ntemp = Costyearlydata(:,:,yy);\ntemp(D(:,1,yy)) = 0;\ncost1(:,:,yy) = temp;\nend\n\n#### 1 Comment\n\nRabia Zulfiqar on 23 May 2020\nThanks a ton David:):)" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.8086311,"math_prob":0.8317973,"size":1552,"snap":"2020-34-2020-40","text_gpt3_token_len":413,"char_repetition_ratio":0.13242894,"word_repetition_ratio":0.18666667,"special_character_ratio":0.26546392,"punctuation_ratio":0.12251656,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9739935,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-08-14T15:06:42Z\",\"WARC-Record-ID\":\"<urn:uuid:154990ac-babb-4d73-96d7-391c3a031297>\",\"Content-Length\":\"123520\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:0b36f5a5-e7f0-4cca-af00-359e69cb0008>\",\"WARC-Concurrent-To\":\"<urn:uuid:847a2cb2-fe3b-4fc2-b67d-c24cbeda4dd6>\",\"WARC-IP-Address\":\"23.67.106.179\",\"WARC-Target-URI\":\"https://ww2.mathworks.cn/matlabcentral/answers/530678-how-to-make-specific-elements-in-a-3d-matrix-zero\",\"WARC-Payload-Digest\":\"sha1:5GZBPAJSO3ROJJIMURLPWTHCXVMQVLH4\",\"WARC-Block-Digest\":\"sha1:I2CKYYAMP53QJLKKKXB6NNUWDGUEIU7Q\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-34/CC-MAIN-2020-34_segments_1596439739328.66_warc_CC-MAIN-20200814130401-20200814160401-00140.warc.gz\"}"}
https://github.com/jiangxin/goconfig/commit/83a00ae5b8090415985162b6e3381de03532a573
[ "{{ message }}\n\n# jiangxin / goconfig\n\nforked from muja/goconfig\nParse include config files from include.path\n```One git config file can include other config files by `include.path`\ndirections. Parse recursive if there is an `include.path` direction.\n\nNote: not support contional includes yet.\n\nSigned-off-by: Jiang Xin <[email protected]>```\njiangxin committed Mar 11, 2019\n1 parent 9e83c31 commit 83a00ae5b8090415985162b6e3381de03532a573\nShowing with 394 additions and 20 deletions.\n1. +17 −0 git-config.go\n2. +39 −7 git-config_test.go\n3. +47 −6 goconfig.go\n4. +7 −7 goconfig_test.go\n5. +75 −0 path.go\n6. +209 −0 path_test.go\n @@ -138,3 +138,20 @@ func toSectionKey(name string) (string, string) { section := strings.Join(items[0:len(items)-1], \".\") return section, key } // Merge will merge another GitConfig, and new value(s) of the same key will // append to the end of value list, and new value has higher priority. func (v GitConfig) Merge(c GitConfig) GitConfig { for sec, keys := range c { if _, ok := v[sec]; !ok { v[sec] = make(GitConfigKeys) } for key, values := range keys { if v[sec][key] == nil { v[sec][key] = []string{} } v[sec][key] = append(v[sec][key], values...) } } return v }\n @@ -12,7 +12,7 @@ func TestInvalidSectionName(t *testing.T) { data := `# The following section name should have quote, like: [a \"b\"] [a b] c = d` _, lineno, err := Parse([]byte(data)) _, lineno, err := Parse([]byte(data), \"filename\") assert.Equal(ErrMissingStartQuote, err) assert.Equal(uint(2), lineno) } @@ -23,7 +23,7 @@ func TestInvalidKeyWithSpace(t *testing.T) { data := `# keys should not have spaces [a] b c = d` _, lineno, err := Parse([]byte(data)) _, lineno, err := Parse([]byte(data), \"filename\") assert.Equal(ErrInvalidKeyChar, err) assert.Equal(uint(3), lineno) } @@ -37,7 +37,7 @@ func TestParseSectionWithSpaces1(t *testing.T) { value3 = a \\\"quote [remote \"hello world\"] url = test` cfg, _, err := Parse([]byte(data)) cfg, _, err := Parse([]byte(data), \"filename\") assert.Nil(err) assert.Equal(\"x\", cfg.Get(\"ab.cd.value1\")) assert.Equal(\"x y\", cfg.Get(\"ab.cd.value2\")) @@ -49,7 +49,7 @@ func TestParseSectionWithSpaces2(t *testing.T) { data := `[remote \"hello world\"] url = test` cfg, _, err := Parse([]byte(data)) cfg, _, err := Parse([]byte(data), \"filename\") assert.Nil(err) assert.Equal(\"test\", cfg.Get(\"remote.hello world.url\")) assert.Equal(\"test\", cfg.Get(`remote.\"hello world\".url`)) @@ -64,7 +64,7 @@ func TestGetAll(t *testing.T) { url = https://example.com/my/repo.git fetch = +refs/heads/*:refs/remotes/origin/* fetch = +refs/tags/*:refs/tags/*` cfg, _, err := Parse([]byte(data)) cfg, _, err := Parse([]byte(data), \"filename\") assert.Nil(err) assert.Equal(\"+refs/tags/*:refs/tags/*\", cfg.Get(\"remote.origin.fetch\")) assert.Equal([]string{ @@ -87,7 +87,7 @@ func TestGetBool(t *testing.T) { x1 = 1 x2 = nothing` cfg, _, err := Parse([]byte(data)) cfg, _, err := Parse([]byte(data), \"filename\") assert.Nil(err) v, err := cfg.GetBool(\"a.t1\", false) @@ -137,7 +137,7 @@ func TestGetInt(t *testing.T) { i2 = 100 i3 = abc` cfg, _, err := Parse([]byte(data)) cfg, _, err := Parse([]byte(data), \"filename\") assert.Nil(err) v1, err := cfg.GetInt(\"a.i1\", 0) @@ -159,3 +159,35 @@ func TestGetInt(t *testing.T) { assert.Nil(err) assert.Equal(6700, v4) } func TestMerge(t *testing.T) { assert := assert.New(t) data := `[a] b = value-b c = value-c` cfg, _, err := Parse([]byte(data), \"filename\") assert.Nil(err) assert.Equal(\"value-b\", cfg.Get(\"a.b\")) 
assert.Equal(\"value-c\", cfg.Get(\"a.c\")) data = `[a] c = other-c d = other-d` cfg2, _, err := Parse([]byte(data), \"filename\") assert.Nil(err) assert.Equal(\"other-c\", cfg2.Get(\"a.c\")) assert.Equal(\"other-d\", cfg2.Get(\"a.d\")) cfg.Merge(cfg2) assert.Equal(\"value-b\", cfg.Get(\"a.b\")) assert.Equal(\"other-c\", cfg.Get(\"a.c\")) assert.Equal(\"other-d\", cfg.Get(\"a.d\")) assert.Equal([]string{ \"value-c\", \"other-c\", }, cfg.GetAll(\"a.c\")) }\n @@ -1,16 +1,32 @@ package goconfig const utf8BOM = \"\\357\\273\\277\" import ( \"fmt\" \"io/ioutil\" \"path\" ) const ( utf8BOM = \"\\357\\273\\277\" maxIncludeDepth = 10 ) type parser struct { bytes []byte linenr uint eof bool bytes []byte linenr uint eof bool filename string depth int } // Parse takes given bytes as configuration file (according to gitconfig syntax) func Parse(bytes []byte) (GitConfig, uint, error) { parser := &parser{bytes, 1, false} func Parse(bytes []byte, filename string) (GitConfig, uint, error) { return runParse(bytes, filename, 1) } func runParse(bytes []byte, filename string, depth int) (GitConfig, uint, error) { parser := &parser{bytes, 1, false, filename, depth} cfg, err := parser.parse() return cfg, parser.linenr, err } @@ -65,6 +81,31 @@ func (cf *parser) parse() (GitConfig, error) { return cfg, err } cfg._add(name, key, value) if name == \"include\" && key == \"path\" { file, err := AbsJoin(path.Dir(cf.filename), value) if err != nil { return nil, err } // Check circular includes if cf.depth >= maxIncludeDepth { return nil, fmt.Errorf(\"exceeded maximum include depth (%d) while including\\n\"+ \"\\t%s\\n\"+ \"from\"+ \"\\t%s\\n\"+ \"This might be due to circular includes\\n\", maxIncludeDepth, cf.filename, file) } bytes, err := ioutil.ReadFile(file) if err == nil { config, _, err := runParse(bytes, file, cf.depth+1) if err != nil { return cfg, err } cfg.Merge(config) } } } }\n @@ -15,7 +15,7 @@ func TestDanyel(t *testing.T) { if err != nil { t.Fatalf(\"Reading file %v failed\", filename) } config, lineno, err := Parse(bytes) config, lineno, err := Parse(bytes, filename) assert.Equal(t, nil, err) assert.Equal(t, 10, int(lineno)) _ = config @@ -27,15 +27,15 @@ func TestDanyel(t *testing.T) { func TestInvalidKey(t *testing.T) { invalidConfig := \".name = Danyel\" config, lineno, err := Parse([]byte(invalidConfig)) config, lineno, err := Parse([]byte(invalidConfig), \"\") assert.Equal(t, ErrInvalidKeyChar, err) assert.Equal(t, 1, int(lineno)) assert.Equal(t, NewGitConfig(), config) } func TestNoNewLine(t *testing.T) { validConfig := \"[user] name = Danyel\" config, lineno, err := Parse([]byte(validConfig)) config, lineno, err := Parse([]byte(validConfig), \"\") assert.Equal(t, nil, err) assert.Equal(t, 1, int(lineno)) expect := NewGitConfig() @@ -45,7 +45,7 @@ func TestNoNewLine(t *testing.T) { func TestUpperCaseKey(t *testing.T) { validConfig := \"[core]\\nQuotePath = false\\n\" config, lineno, err := Parse([]byte(validConfig)) config, lineno, err := Parse([]byte(validConfig), \"\") assert.Equal(t, nil, err) assert.Equal(t, 3, int(lineno)) expect := NewGitConfig() @@ -55,7 +55,7 @@ func TestUpperCaseKey(t *testing.T) { func TestExtended(t *testing.T) { validConfig := `[http \"https://my-website.com\"] sslVerify = false` config, lineno, err := Parse([]byte(validConfig)) config, lineno, err := Parse([]byte(validConfig), \"\") assert.Equal(t, nil, err) assert.Equal(t, 1, int(lineno)) expect := NewGitConfig() @@ -70,7 +70,7 @@ func ExampleParse() { log.Fatalf(\"Couldn't read file %v\\n\", gitconfig) } config, 
lineno, err := Parse(bytes) config, lineno, err := Parse(bytes, gitconfig) if err != nil { log.Fatalf(\"Error on line %d: %v\\n\", lineno, err) } @@ -93,6 +93,6 @@ func BenchmarkParse(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { Parse(bytes) Parse(bytes, gitconfig) } }\n75 path.go\n @@ -0,0 +1,75 @@ package goconfig import ( \"fmt\" \"os\" \"path/filepath\" \"runtime\" ) func homeDir() (string, error) { var ( home string ) if runtime.GOOS == \"windows\" { home = os.Getenv(\"USERPROFILE\") if home == \"\" { home = os.Getenv(\"HOMEDRIVE\") + os.Getenv(\"HOMEPATH\") } } if home == \"\" { home = os.Getenv(\"HOME\") } if home == \"\" { return \"\", fmt.Errorf(\"cannot find HOME\") } return home, nil } func expendHome(name string) (string, error) { if filepath.IsAbs(name) { return name, nil } home, err := homeDir() if err != nil { return \"\", err } if len(name) == 0 || name == \"~\" { return home, nil } else if len(name) > 1 && name[0] == '~' && (name[1] == '/' || name[1] == '\\\\') { return filepath.Join(home, name[2:]), nil } return filepath.Join(home, name), nil } // Abs returns absolute path and will expand homedir if path has \"~/\" prefix func Abs(name string) (string, error) { if filepath.IsAbs(name) { return name, nil } if len(name) > 0 && name[0] == '~' && (len(name) == 1 || name[1] == '/' || name[1] == '\\\\') { return expendHome(name) } return filepath.Abs(name) } // AbsJoin returns absolute path, using dir as the parent dir for a relative path func AbsJoin(dir, name string) (string, error) { if filepath.IsAbs(name) { return name, nil } if len(name) > 0 && name[0] == '~' && (len(name) == 1 || name[1] == '/' || name[1] == '\\\\') { return expendHome(name) } return Abs(filepath.Join(dir, name)) }" ]
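The heart of the change: when the parser sees an `include.path` key, it resolves the path relative to the including file, guards against runaway recursion with `maxIncludeDepth`, parses the included file, and merges the result, with included values taking priority. A rough Python sketch of that control flow, not the library's API (`load_config` is an illustrative name; `configparser` only approximates git-config syntax, and this keeps single values where goconfig appends multi-values):

```python
import os
import configparser

MAX_INCLUDE_DEPTH = 10  # mirrors maxIncludeDepth in the commit

def load_config(path, depth=0):
    # Guard against circular includes, as the Go code does.
    if depth >= MAX_INCLUDE_DEPTH:
        raise RuntimeError(
            f"exceeded maximum include depth ({MAX_INCLUDE_DEPTH}) while including {path}; "
            "this might be due to circular includes")
    parser = configparser.ConfigParser()
    parser.read(path)
    merged = {sec: dict(parser[sec]) for sec in parser.sections()}
    include = merged.get("include", {}).get("path")
    if include:
        # Resolve relative include paths against the including file's directory,
        # like the commit's AbsJoin helper (which also expands "~/").
        target = os.path.expanduser(include)
        if not os.path.isabs(target):
            target = os.path.join(os.path.dirname(os.path.abspath(path)), target)
        for sec, keys in load_config(target, depth + 1).items():
            merged.setdefault(sec, {}).update(keys)  # included values win
    return merged
```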
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.54879713,"math_prob":0.8534819,"size":2233,"snap":"2021-31-2021-39","text_gpt3_token_len":691,"char_repetition_ratio":0.16061014,"word_repetition_ratio":0.23913044,"special_character_ratio":0.3833408,"punctuation_ratio":0.14713217,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9541348,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-08-02T23:15:12Z\",\"WARC-Record-ID\":\"<urn:uuid:d4de784c-4182-472f-98ad-3f9ea8f2c12c>\",\"Content-Length\":\"372543\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:f6fd81f7-dec8-4657-9e3a-0e8a4981f200>\",\"WARC-Concurrent-To\":\"<urn:uuid:bfa3f81f-8230-40da-881f-19158da3c0db>\",\"WARC-IP-Address\":\"140.82.113.3\",\"WARC-Target-URI\":\"https://github.com/jiangxin/goconfig/commit/83a00ae5b8090415985162b6e3381de03532a573\",\"WARC-Payload-Digest\":\"sha1:ZONMY2V4AJ2CIHLNNOVKVKWKQWCZ2ZR3\",\"WARC-Block-Digest\":\"sha1:A2X7V5USHXPN7PXFJCJ2D4TXSDNQP4S2\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-31/CC-MAIN-2021-31_segments_1627046154385.24_warc_CC-MAIN-20210802203434-20210802233434-00134.warc.gz\"}"}
http://penzmagusklub.info/free-printable-math-mystery-picture-worksheets/
[ "mystery worksheets for kids free printable math picture lovely mystery worksheets for kids free printable math picture lovely pictures color worksheets for teachers to print.\n\nfree printable math mystery picture worksheets free printable worksheets math mystery picture maths new fun for grade geometry detective the best worksheets for kindergarten cut and paste.\n\nmath mysteries worksheets multiplication mystery picture ma math mysteries worksheets multiplication coloring mystery worksheets for substitute teachers.\n\nholiday math mystery maths worksheets free addition picture printable coloring worksheets worksheets for 2nd grade pdf.\n\nmath worksheets color by number multiplication free coloring pages math worksheets color by number multiplication free printable mystery picture exceptional grade worksheets for 2nd grade.\n\nmath mystery worksheets for all download and share free on picture math coloring pages hidden pice free printable multiplication mystery worksheets sheets worksheets for kindergarten cut and paste.\n\naddition color by number free addition color by number free math mystery picture worksheets maths colour by numbers free worksheets work.\n\nmath mystery picture worksheets multiplication coloring sheets multiplication worksheets for kids coloring pdf.\n\nmystery worksheets graph paper from coordinate grid worksheets mystery worksheets worksheets for pre kindergarten.\n\nfree printable math mystery worksheets times key picture for to math mystery picture worksheets multiplication coloring grade download worksheets for 1st grade pdf.\n\nmath coloring pages page grade worksheets multiplication fun free original 2 free printable math mystery picture worksheets multiplication worksheets work cursive.\n\nfree printable math mystery picture worksheets multiplication free printable math mystery picture worksheets multiplication coloring worksheets grade fun free worksheets4kids.\n\naddition mystery picture worksheets free multiplication mystery addition mystery picture worksheets free worksheetworks solving multi step equations.\n\nmath mystery picture worksheets coloring math worksheets free free printable math coloring worksheets for grade free math coloring worksheets worksheets4kids scientific notation.\n\nfree printable riddle worksheets multiplication mystery puzzles for free printable riddle worksheets multiplication mystery puzzles for riddles math puzzle worksheets for kindergarten free.\n\ncoloring pages addition coloring worksheets free math grade first coloring pages addition coloring worksheets free math pages printable sheets worksheets for 1st grader.\n\nmath worksheets coloring multiplication mystery picture worksheet math worksheets worksheets for kindergarten cut and paste." ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.7033679,"math_prob":0.76614225,"size":2913,"snap":"2019-51-2020-05","text_gpt3_token_len":426,"char_repetition_ratio":0.31660363,"word_repetition_ratio":0.17158177,"special_character_ratio":0.13800205,"punctuation_ratio":0.04556962,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9821237,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-01-24T11:15:08Z\",\"WARC-Record-ID\":\"<urn:uuid:f57b58e5-cd63-4bf3-bebd-901e4852e535>\",\"Content-Length\":\"42926\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:f3f9870e-fc9b-4d62-a81f-df0e721bb46c>\",\"WARC-Concurrent-To\":\"<urn:uuid:a523bc04-3bcf-4bd8-a73b-c61e26801f9f>\",\"WARC-IP-Address\":\"104.27.181.210\",\"WARC-Target-URI\":\"http://penzmagusklub.info/free-printable-math-mystery-picture-worksheets/\",\"WARC-Payload-Digest\":\"sha1:JNEF4DFMYZZKJL66IUS2QY5ENLDOFZGA\",\"WARC-Block-Digest\":\"sha1:KH63SYH42CZYDKKWOUCLSIW4BWO6WY3A\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-05/CC-MAIN-2020-05_segments_1579250619323.41_warc_CC-MAIN-20200124100832-20200124125832-00287.warc.gz\"}"}
https://www.facfil.eu/lace/mazda/stop/40108883d124deee29bcd507
[ "", null, "11 5 Calculate The Most Probable Values Of X And Y Chegg Com.\n\nStep 2: Click the blue arrow to submit.\n\n\\int1dy 1dy and replace the result in the differential equation. Now, I will give step by step instructions for basic algebra calculations using a calcula. x+1=1000\\left (x-1\\right) x+ 1 = 1000(x 1) 7.\n\ntrigonomic table and unit circle. Adding 2 to each member yields.\n\nTo confirm that you answer is correct, you can substitute x=2 and y=5 into both equations to see if .\n\nPrentice Hall. . Simply enter the equation and the calculator will walk you through the steps necessary to simplify and solve it.\n\nWe can subtract x from both sides of this equation. Intermediate steps. To solve a literal equation.\n\nTo graph a circle, you should get it in the form (x-a)^2 + (y-b)^2 = r^2.\n\nfree printable math worksheets 8th grade. 8.\n\n1991. Check by substituting -2 for x and 5 for .\n\nLearn how to solve literal equations.\n\nThis 2 equations 2 unknown solver computes the output value of the variables X and Y with respect to the input values of X and Y coefficients.\n\nSolve for X will be shown on screen.\n\n8 Best Free System Of Linear Equation Calculator For Windows. And everywhere where we see a y, we can substitute it with x plus 4. factor affecting molar mass of sodium chloride. By using this website, you agree to our Cookie Policy. You would first add the ten pi to both .\n\nLet's solve the first equation for. Press = to solve the .\n\nSolution: Given equation is 8/5 = 6/x for solving x in the fraction. Step 4: Apply the Cramer's rules and place the values.\n\nEnter your equations in the boxes above, and press Calculate! 1 = - 3 . Systems Of Equations Solver Wolfram Alpha.\n\n3 - 6y = 4. Solutions Graphing Practice; New Geometry; Calculators; Notebook . y = x + 2. Add 2y to both sides to get 6x = 12 + 2y. 8 x = 5 6. You can also include parentheses and numbers with exponents or roots in your equations. 2. We will extend the Addition Property of Equality to say that when you add equal quantities to both sides of an equation, the results are equal.\n\nHence, the required solution . When you solve an equation for a variable under assumptions, the solver only returns solutions consistent with the assumptions. Example (Click to view) x+y=7; x+2y=11 Try it now.\n\nThe graph: from to . x + 2 = y. Subtract 2 from both sides. Solve equations using standard form equation calculator, because, solving equations using this calculator is easier than making a bad face when you are stuck in calculations. Different kind of polynomial equations example is given below. Step 2: Place the values in the quadratic formula and solve it for x. x = -4 4 - 4 x 2 x 2 2 x 2. x = -4 16 - 16 4. x = -4 0 4. x = -4/4. .\n\nFree Pre-Algebra, Algebra, Trigonometry, Calculus, Geometry, Statistics and Chemistry calculators step-by-step You do this by adding, subtracting, multiplying or dividing both sides of the equation. The slope and y-intercept calculator takes a linear equation and allows you to calculate the slope and y-intercept for the equation.\n\ni.e.\n\nThe current value of X is displayed for reference and will be used if no new value is provided. Thus, we need to make the variable alone on the left hand side. y = kx (k a constant) is called a direct variation. Question: Solve the value of x in the given fraction 8/5 = 6/x using cross-multiplication.\n\nThis is slope intercept form, y = 3x - 6. Step 1: Enter the linear equation you want to find the slope and y-intercept for into the editor. 
It can solve systems of linear equations or systems involving nonlinear equations, and it can search specifically for integer solutions or solutions over another domain.\n\n2x + y = 0.\n\nPolynomial. Step 3.\n\n-6y = 1.\n\nWith the direct calculation method, we will also discuss other methods like Goal Seek, Array, and Solver in this article to solve different polynomial equations. The outer list holds all of the solutions and each inner list holds a single solution. When x = -4 is substituted in the original equation, we get a negative answer, which is imaginary. Nonlinear Equations System: x^2 - 1 = 1 + y/2; 1 - y^2 = 2 + x. A system of four .\n\nTo eliminate the fraction on the left, multiply both sides of the equation by 2 and then solve for y. Prentice Hall. Enter a problem.\n\nThe equation calculator allows you to solve circular equations; it is able to solve an equation with a cosine of the form cos(x) = a or an equation with a sine of the form sin(x) = a.\n\nExamples of systems of equations. Allow solutions that do not satisfy the assumptions by setting 'IgnoreProperties' to true. x = b^(log_b x).\n\nDependent Variable: Draw: .\n\nFind more Education widgets in Wolfram|Alpha. Go! 2 EQUATIONS SOLVER.\n\nSolve for x Calculator. The equation is written as x*y = c. After using this proportionality calculator you will be easily .\n\nx+2=y. . To improve this 'System of 2 linear equations in 2 variables Calculator', please fill in the questionnaire.\n\nAnswer (1 of 8): I am assuming you are talking about a scientific calculator, because with a normal one it's not possible. Algebraic expressions Calculator. Slope is the coefficient of x, so in this case slope = 3.\n\nRoughly speaking, algebra is the study of mathematical symbols and the rules for manipulating these symbols in formulas; it is a unifying thread of almost all of mathematics. Step 2: Click the blue arrow to submit and see the result! You want to get y by itself on one side of the equation, so you need to divide both sides by 2 to get y = 3x - 6. Isolate the variable term on one side of the equation. Get the free \"Solve an equation in terms of x & y for y.\" widget for your website, blog, Wordpress, Blogger, or iGoogle.\n\nWe always appreciate your feedback. Here's your equation solved for V with some sample inputs: Of course, be sure to set any modes that your equation depends on, e.g. A place to post programs, questions, requests, news, and other stuff for Texas Instruments calculators. Learn more about fractions here.\n\nSolve for x 1: Solve for x 2: Solve for y 1: Solve for y 2: References - Books: Max A. Sobel, Norbert Lerner. Teaching Beginners Algebra. Each step is followed by a brief explanation.\n\nAlgebra Calculator is a calculator that gives step-by-step help on algebra problems.\n\nx+3=5. Ignoring lost solutions, if any, an implicit solution in the form F(x,y) = C is  = C, where C is an arbitrary constant.\n\nStep 4: Take the determinant of all of the three new matrices x, y, and z. This tutorial reviews systems of linear equations. (PEMDAS Caution) This calculator solves math equations that add, subtract, multiply and divide positive and negative numbers and exponential numbers. Clear out any fractions by multiplying every term by the bottom parts.\n\nSolve math problems using order of operations like PEMDAS, BEDMAS, BODMAS, GEMDAS and MDAS. 
System of Two Equations Calculator Various methods (if possible) Use elimination Use substitution Use Gaussian elimination Use Cramer's rule Examples Example 1 Example 2 Example 3 Example 4 Example 5 Step 2: The equation can be in any form as long as it's linear and you can find the slope and y-intercept. Thank you. For example, solving the first equation for x gives : Now substitute this result for x into equation (2).\n\nThe values can be used as the inputs to the program. For example, solve(y=x^2, x) returns x=sqrt(y). r/TI_Calculators. Enter equation to solve, e.g. The above examples also contain: the modulus or absolute value: absolute(x) or |x|. Solve by Substitution Calculator.\n\nAt Step 1, cross-multiply the fractions. I will use a Casio fx-991 ES to demonstrate.\n\nEnter the coefficients of x and y. Example 4: Solve the following equation: x/5 = 2. In the Solve Symbolic View, type your equation into one of the ten slots, like this: Then press Num. Solved example of two-variable linear equations. A system of 3 linear equations with 3 unknowns x, y, z is a classic example.\n\nBut it works well for all the topics.\n\n2x + 1 - 2x = 2x - 3 - 2x. Additionally, it can solve systems involving inequalities and more general constraints.\n\nRemember that (2,5) is an (x,y) coordinate where x=2 and y=5.\n\nDisclaimer: This calculator is not perfect.\n\nThe equation calculator allows you to take a simple or complex equation and solve it by the best method possible. Subtract 12 from both sides of the equation to get 6x - 12 = 2y.\n\n(Type an expression using x and y as the variables.) If one of the equations looks more complicated than the other, just plug it into the easier equation. A literal equation is an equation where the unknown values are represented by variables. The point-slope form of a line with slope m and passing through the point (x1, y1) is y - y1 = m(x - x1). A system of 3 linear equations with 3 unknowns x, y, z is a classic example. If you have the CAS version of the nspire, you can simply do solve(eqn, var_to_find). A system of equations is a collection of two or more equations with the same set of unknowns.\n\nCalculations to obtain the result are detailed, so it will be possible to solve equations like cos(x) = 1/2 or 2 sin(x) = 2 with the calculation steps. Learn more Accept.\n\nlog_4(x^2 - 12x) = 3.\n\ny = mx + b.\n\nMultiply both sides of the equation by x - 1. You would take it one step at a time to isolate the variable. y=3x^2-1: Sample Problem . The slope-intercept form of a line with slope m and y-intercept b is y = mx + b. To solve a literal equation. I want to solve two equations with two unknown variables. I have two equations: (-x)*(x1 - x) + (r - y)*(y1 - y) = 0, (x1 - x)^2 + (y1 - y)^2 = z^2, where x1, y1, r and z are known values. Free solve for x calculator - solve the equation for x step-by-step.\n\nAnswer: 1- Get the equation in the form y = ax^2 + bx + c. 2- Calculate -b/(2a).\n\nRewrite one of the equations to isolate one variable. y = x^2 + 3x. So we get 2x plus 8 is equal to x plus 7. dividing with decimals practice grade 5. factoring downloads TI84. Divide every term by the same nonzero value. 1) Monomial: y=mx+c. Combining like terms yields. Get detailed solutions to your math problems with our Algebraic expressions step-by-step calculator. 2 times x plus 4 is equal to x plus 7. Calculator Use.\n\nWe can distribute this 2.\n\nSolve: Enter equation to graph, e.g. 
x - 2 = 10.\n\nVery useful for fast answers on 2 equations. x = 12.\n\n2) Elimination Method: This method lines the 2 equations up and eliminates one of the variables after matching leading coefficients for .\n\nsquare roots sqrt(x), cubic roots cbrt(x); trigonometric functions: sine sin(x), cosine cos(x), tangent tan(x), cotangent ctan(x)\n\nThe solve-by-substitution calculator allows you to find the solution to a system of two or three equations in both a point form and an equation form of the answer.\n\nOr click the example. The trick here to solving the equation is to end up with x on one side of the equation and a number on the other.\n\n(x, y) = (3, -1/6).\n\nSolve the product 1000(x - 1): x + 1 = 1000x - 1000.\n\nx = -1. Pencil, red pen, highlighter, notebook, calculator. Enter equation solve (shift-solve) by pressing SHIFT CALC.\n\nConvert the equation into exponential form. Line equation calculator solving for y-intercept given y, slope and x. AJ Design Math Geometry Physics Force Fluid . y=x^2+1. To solve an equation, we use the addition-subtraction property to transform a given equation to an equivalent equation of the form x = a, from which we can find the solution by inspection.\n\nSolve this equation for x. eqn = x^2 + 5*x - 6 == 0; S = solve(eqn,x) S =.\n\nExample 3 Solve 2x + 1 = x - 2.\n\nRemember to use \"==\" in an equation, not just \"=\": The result is a Rule inside a doubly nested list. We need to isolate the dependent variable x; we can do that by subtracting 1 from both sides of the equation. (don't worry, we'll show you all the steps).\n\nSee More Examples . The equations solver tool provided in this section can be used to solve the system of two linear equations with two unknowns. x = b^(log_b x), which is equivalently .\n\nLearn how to solve literal equations. The first step to finding the solution to this system of equations is to graph both lines as follows: Notice that the ONLY intersection point for this system of equations is at (2,5). AJ Design Math Geometry Physics . Check out all of our online calculators here! x - 2 + 2 = 10 + 2.\n\nFor example, here is a plot of x^2 - y^2 for different values of y, assuming that the first solution holds: Subtract 2x from each side of the equation. Plug x = 3 into the equation x - 6y = 4 to solve for y.", null, "26 February 2020\n\n15 April 2020\n\n7 May 2020\n\n1 June 2020\n\n26 June 2020" ]
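The worked examples scattered through this page (the cross-multiplied fraction, the two-variable system, and the quadratic) can be verified mechanically. A minimal sketch, assuming SymPy is available:

```python
from sympy import Eq, Rational, solve, symbols

x, y = symbols("x y")

# Cross-multiplication: 8/5 = 6/x  ->  8x = 5 * 6  ->  x = 15/4
print(solve(Eq(Rational(8, 5), 6 / x), x))              # [15/4]

# Two-variable linear system from the text: x + 2 = y and 2x + y = 0
print(solve([Eq(x + 2, y), Eq(2 * x + y, 0)], [x, y]))  # {x: -2/3, y: 4/3}

# Quadratic formula example: 2x^2 + 4x + 2 = 0 has the double root x = -1
print(solve(Eq(2 * x**2 + 4 * x + 2, 0), x))            # [-1]
```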
[ null, "https://mc.yandex.ru/watch/70074523", null, "https://secure.gravatar.com/avatar/", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.8686955,"math_prob":0.99948627,"size":12501,"snap":"2022-40-2023-06","text_gpt3_token_len":3205,"char_repetition_ratio":0.17212132,"word_repetition_ratio":0.027621925,"special_character_ratio":0.2610991,"punctuation_ratio":0.13192612,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.99996877,"pos_list":[0,1,2,3,4],"im_url_duplicate_count":[null,null,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2022-09-30T06:35:21Z\",\"WARC-Record-ID\":\"<urn:uuid:de5472ca-510b-4c28-9116-cce133c55491>\",\"Content-Length\":\"78110\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:42253245-c2a8-467e-9890-a9502d26706d>\",\"WARC-Concurrent-To\":\"<urn:uuid:a7d164c6-5ad1-4840-a291-31a0a96ce9d3>\",\"WARC-IP-Address\":\"199.59.247.178\",\"WARC-Target-URI\":\"https://www.facfil.eu/lace/mazda/stop/40108883d124deee29bcd507\",\"WARC-Payload-Digest\":\"sha1:SILVFWA2PSY6YIT7EFQSZQY7A7KSGZCJ\",\"WARC-Block-Digest\":\"sha1:X3PRBRXCSYCMHPLI7NTWZBHCAWP5B7QX\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2022/CC-MAIN-2022-40/CC-MAIN-2022-40_segments_1664030335444.58_warc_CC-MAIN-20220930051717-20220930081717-00136.warc.gz\"}"}
https://heingroupoxford.com/2016/09/01/summary-russell-impagliazzo-1995-a-personal-view-of-average-case-complexity/
[ "Home » Reading clubs » Classic » Summary: Russell Impagliazzo (1995) “A personal view of average-case complexity”\n\n# Summary: Russell Impagliazzo (1995) “A personal view of average-case complexity”\n\n[Summary written by Jotun Hein]\n\nThis paper cannot be called a classics paper, but it was informative and opinionated and led us to the key papers in the field in no time The paper consists of two quite separate parts:\n\nA – a classification of the computational discrete world into 5 possible countries. I found it a bit pointless and I could not see the relationship between the first 2 countries and the last 3.\n\nALGORITHMICA. Here NP=P\n\nHEURISTICS here problems are polynomial averaged over all possible data sets of a given size, but intractable worst case.\n\nPESSILAND [Danes might swap E & I] here there are hard average cases but no one-way (encryption) functions\n\nMINICRYPT here one way functions exist, but public encryption is impossible (must mean that the function can be inverted)\n\nCRYPTOMANIA and here public-key encryption is possible.\n\nB goes through the original definition of Average Complexity by Levin. Unfortunately no real example is given on a problem where there is a difference between average and worst case complexity. The ideas are quite understandable. An algorithm can only have average polynomial complexity, if the data sets where it isn’t polynomial shrinks sufficiently fast as their size grows. One tricky thing is that complexity is not defined in terms of the algorithms at hand but only in terms of the problem. There are a lot of things not discussed here like how polynomial transformations of a problem to another skews the distribution on the possible data sets.\n\nI think complexity based on distributions would be interesting to pursue in this case: if you generate data from the coalescent with mutation on a finite string. If you have long strings/low mutation rate you are in the perfect phylogeny domain where a linear algorithm exists. If you make mutation rate infinite, you have uniform distribution on all data sets and the worst case complexity is NP-Complete without considering the distribution." ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.9212514,"math_prob":0.81584567,"size":1939,"snap":"2020-45-2020-50","text_gpt3_token_len":386,"char_repetition_ratio":0.10956072,"word_repetition_ratio":0.0,"special_character_ratio":0.18514699,"punctuation_ratio":0.060869563,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9714845,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-11-30T20:43:17Z\",\"WARC-Record-ID\":\"<urn:uuid:c673a161-662a-419f-8065-7524c67d8aea>\",\"Content-Length\":\"76058\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:c25dedec-f502-4c1b-b8b7-a13b125c9661>\",\"WARC-Concurrent-To\":\"<urn:uuid:e37bbd77-8b2f-405e-b3d6-5ac775771ee2>\",\"WARC-IP-Address\":\"192.0.78.24\",\"WARC-Target-URI\":\"https://heingroupoxford.com/2016/09/01/summary-russell-impagliazzo-1995-a-personal-view-of-average-case-complexity/\",\"WARC-Payload-Digest\":\"sha1:HWL4N2Q7476VK4S6JUHMSEBMK2PIUKCF\",\"WARC-Block-Digest\":\"sha1:REAPHIYM4Y3VBGDREU54PJWA5UJQKU5L\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-50/CC-MAIN-2020-50_segments_1606141486017.50_warc_CC-MAIN-20201130192020-20201130222020-00453.warc.gz\"}"}
https://www.numberempire.com/96997
[ "Home | Menu | Get Involved | Contact webmaster", null, "", null, "", null, "", null, "", null, "# Number 96997\n\nninety six thousand nine hundred ninety seven\n\n### Properties of the number 96997\n\n Factorization 96997 Divisors 1, 96997 Count of divisors 2 Sum of divisors 96998 Previous integer 96996 Next integer 96998 Is prime? YES (9336th prime) Previous prime 96989 Next prime 97001 96997th prime 1257491 Is a Fibonacci number? NO Is a Bell number? NO Is a Catalan number? NO Is a factorial? NO Is a regular number? NO Is a perfect number? NO Polygonal number (s < 11)? NO Binary 10111101011100101 Octal 275345 Duodecimal 48171 Hexadecimal 17ae5 Square 9408418009 Square root 311.44341380097 Natural logarithm 11.482435329172 Decimal logarithm 4.9867583022704 Sine -0.32103337354773 Cosine -0.94706788197497 Tangent 0.33897609628389\nNumber 96997 is pronounced ninety six thousand nine hundred ninety seven. Number 96997 is a prime number. The prime number before 96997 is 96989. The prime number after 96997 is 97001. Number 96997 has 2 divisors: 1, 96997. Sum of the divisors is 96998. Number 96997 is not a Fibonacci number. It is not a Bell number. Number 96997 is not a Catalan number. Number 96997 is not a regular number (Hamming number). It is a not factorial of any number. Number 96997 is a deficient number and therefore is not a perfect number. Binary numeral for number 96997 is 10111101011100101. Octal numeral is 275345. Duodecimal value is 48171. Hexadecimal representation is 17ae5. Square of the number 96997 is 9408418009. Square root of the number 96997 is 311.44341380097. Natural logarithm of 96997 is 11.482435329172 Decimal logarithm of the number 96997 is 4.9867583022704 Sine of 96997 is -0.32103337354773. Cosine of the number 96997 is -0.94706788197497. Tangent of the number 96997 is 0.33897609628389\n\n### Number properties\n\nExamples: 3628800, 9876543211, 12586269025" ]
[ null, "https://www.numberempire.com/images/graystar.png", null, "https://www.numberempire.com/images/graystar.png", null, "https://www.numberempire.com/images/graystar.png", null, "https://www.numberempire.com/images/graystar.png", null, "https://www.numberempire.com/images/graystar.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.64325607,"math_prob":0.9575647,"size":2168,"snap":"2020-34-2020-40","text_gpt3_token_len":689,"char_repetition_ratio":0.19038817,"word_repetition_ratio":0.052173913,"special_character_ratio":0.42112547,"punctuation_ratio":0.12533334,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.99607664,"pos_list":[0,1,2,3,4,5,6,7,8,9,10],"im_url_duplicate_count":[null,null,null,null,null,null,null,null,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-09-26T18:48:54Z\",\"WARC-Record-ID\":\"<urn:uuid:e00ba566-3b13-4a64-a42c-0b6609223d94>\",\"Content-Length\":\"20277\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:a04ae476-1ce4-4891-bf07-532bfa886c92>\",\"WARC-Concurrent-To\":\"<urn:uuid:87803fee-1055-45c7-b57a-a2b81c9f83f8>\",\"WARC-IP-Address\":\"104.24.112.69\",\"WARC-Target-URI\":\"https://www.numberempire.com/96997\",\"WARC-Payload-Digest\":\"sha1:5YDQ3SBE5AV5EHJTSH7JDNT7UIG55HHT\",\"WARC-Block-Digest\":\"sha1:CKBYDCA4VRWM4ABSB3A3QEDS6JW44G7N\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-40/CC-MAIN-2020-40_segments_1600400244353.70_warc_CC-MAIN-20200926165308-20200926195308-00514.warc.gz\"}"}
https://answers.everydaycalculation.com/divide-fractions/20-18-divided-by-60-10
[ "Solutions by everydaycalculation.com\n\n## Divide 20/18 with 60/10\n\n1st number: 1 2/18, 2nd number: 6 0/10\n\n20/18 ÷ 60/10 is 5/27.\n\n#### Steps for dividing fractions\n\n1. Find the reciprocal of the divisor\nReciprocal of 60/10: 10/60\n2. Now, multiply it with the dividend\nSo, 20/18 ÷ 60/10 = 20/18 × 10/60\n3. = 20 × 10/18 × 60 = 200/1080\n4. After reducing the fraction, the answer is 5/27\n\nMathStep (Works offline)", null, "Download our mobile app and learn to work with fractions in your own time:" ]
[ null, "https://answers.everydaycalculation.com/mathstep-app-icon.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.6358175,"math_prob":0.9017358,"size":363,"snap":"2020-45-2020-50","text_gpt3_token_len":158,"char_repetition_ratio":0.2005571,"word_repetition_ratio":0.0,"special_character_ratio":0.5041322,"punctuation_ratio":0.093023255,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9605498,"pos_list":[0,1,2],"im_url_duplicate_count":[null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-10-28T09:13:31Z\",\"WARC-Record-ID\":\"<urn:uuid:b517265b-c495-4f43-b67f-a7d8a9d44880>\",\"Content-Length\":\"7646\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:63af301d-90ad-4fb4-bf0e-b66f8582e197>\",\"WARC-Concurrent-To\":\"<urn:uuid:47238fb4-aaf2-48ee-85dd-10c8796a3c12>\",\"WARC-IP-Address\":\"96.126.107.130\",\"WARC-Target-URI\":\"https://answers.everydaycalculation.com/divide-fractions/20-18-divided-by-60-10\",\"WARC-Payload-Digest\":\"sha1:TDQSTHI3Q3EU4OQVT3RJOJLNNZUJANGQ\",\"WARC-Block-Digest\":\"sha1:KDUEIIHNWF6FMTMO6YVCQOUTKAQ6AEFK\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-45/CC-MAIN-2020-45_segments_1603107897022.61_warc_CC-MAIN-20201028073614-20201028103614-00202.warc.gz\"}"}
http://ixtrieve.fh-koeln.de/birds/litie/document/36315
[ "# Document (#36315)\n\nAuthor\nSpink, A.\nTitle\nInformation behavior : an evolutionary instinct\nImprint\nHeidelberg : Springer\nYear\n2010\nPages\nXXVIII, 85 S\nIsbn\n978-3-642-11496-0\nSeries\nInformation science and knowledge management ; 6159\nAbstract\nInformation behavior has emerged as an important aspect of human life, however our knowledge and understanding of it is incomplete and underdeveloped scientifically. Research on the topic is largely contemporary in focus and has generally not incorporated results from other disciplines. In this monograph Spink provides a new understanding of information behavior by incorporating related findings, theories and models from social sciences, psychology and cognition. In her presentation, she argues that information behavior is an important instinctive sociocognitive ability that can only be fully understood with a highly interdisciplinary approach. The leitmotivs of her examination are three important research questions: First, what is the evolutionary, biological and developmental nature of information behavior? Second, what is the role of instinct versus environment in shaping information behavior? And, third, how have information behavior capabilities evolved and developed over time? Written for researchers in information science as well as social and cognitive sciences, Spink's controversial text lays the foundation for a new interdisciplinary theoretical perspective on information behavior that will not only provide a more holistic framework for this field but will also impact those sciences, and thus also open up many new research directions.\nFootnote\nRez. in: iwp 62(2011) H.1, S.48 (D. Lewandowski): \"... Es ist sehr schade, dass die Autorin aus diesem interessanten und für die Zukunft des Fachs sicherlich entscheidenden Thema nicht mehr gemacht hat. Gerade bei einem Thema, das noch nicht intensiv beackert wurde, ist eine ausführliche Darstellung von großer Bedeutung. Auch in Hinblick auf die Unmenge an Literatur, die in diesem Buch zitiert wird, erscheint die Form der Darstellung in keiner Weise angemessen. Ebenso unangemessen wirkt der Preis von 85 Euro für dieses schmale Werk, welches auch gut in der Form von einem oder zwei längeren Aufsätzen hätte veröffentlicht werden können.\"\nTheme\nInformationsdienstleistungen\nLCSH\nAnthropology\nComputer science\nLibrary science\nSocial sciences\nRSWK\nInformation / Wissensorganisation / Kognitive Entwicklung / Anthropologie\nBK\n06.35 / Informationsmanagement\nDDC\n153 / DDC22ger\n\n## Similar documents (author)\n\n1. Spink, A.: ¬The effect of user characteristics on search outcome in mediated online searching (1993) 4.57\n```4.566364 = sum of:\n4.566364 = weight(author_txt:spink in 6739) [ClassicSimilarity], result of:\n4.566364 = fieldWeight in 6739, product of:\n1.0 = tf(freq=1.0), with freq of:\n1.0 = termFreq=1.0\n7.306182 = idf(docFreq=77, maxDocs=42740)\n0.625 = fieldNorm(doc=6739)\n```\n2. Spink, S.: Interaction with information retrieval systems : reflections on feedback (1993) 4.57\n```4.566364 = sum of:\n4.566364 = weight(author_txt:spink in 7958) [ClassicSimilarity], result of:\n4.566364 = fieldWeight in 7958, product of:\n1.0 = tf(freq=1.0), with freq of:\n1.0 = termFreq=1.0\n7.306182 = idf(docFreq=77, maxDocs=42740)\n0.625 = fieldNorm(doc=7958)\n```\n3. 
Spink, A.: Term relevance feedback and mediated database searching : implications for information retrieval practice and systems design (1995) 4.57\n```4.566364 = sum of:\n4.566364 = weight(author_txt:spink in 1825) [ClassicSimilarity], result of:\n4.566364 = fieldWeight in 1825, product of:\n1.0 = tf(freq=1.0), with freq of:\n1.0 = termFreq=1.0\n7.306182 = idf(docFreq=77, maxDocs=42740)\n0.625 = fieldNorm(doc=1825)\n```\n4. Spink, A.: Information and a sustainable future (1995) 4.57\n```4.566364 = sum of:\n4.566364 = weight(author_txt:spink in 4290) [ClassicSimilarity], result of:\n4.566364 = fieldWeight in 4290, product of:\n1.0 = tf(freq=1.0), with freq of:\n1.0 = termFreq=1.0\n7.306182 = idf(docFreq=77, maxDocs=42740)\n0.625 = fieldNorm(doc=4290)\n```\n5. Spink, A.: Multiple search sessions model of end-user behaviour : an exploratory study (1996) 4.57\n```4.566364 = sum of:\n4.566364 = weight(author_txt:spink in 5874) [ClassicSimilarity], result of:\n4.566364 = fieldWeight in 5874, product of:\n1.0 = tf(freq=1.0), with freq of:\n1.0 = termFreq=1.0\n7.306182 = idf(docFreq=77, maxDocs=42740)\n0.625 = fieldNorm(doc=5874)\n```\n\n## Similar documents (content)\n\n1. New directions in human information behavior (2006) 0.41\n```0.4062918 = sum of:\n0.4062918 = product of:\n1.0157295 = sum of:\n0.017848514 = weight(abstract_txt:will in 2578) [ClassicSimilarity], result of:\n0.017848514 = score(doc=2578,freq=1.0), product of:\n0.07388417 = queryWeight, product of:\n3.8651886 = idf(docFreq=2434, maxDocs=42740)\n0.01911528 = queryNorm\n0.24157429 = fieldWeight in 2578, product of:\n1.0 = tf(freq=1.0), with freq of:\n1.0 = termFreq=1.0\n3.8651886 = idf(docFreq=2434, maxDocs=42740)\n0.0625 = fieldNorm(doc=2578)\n0.11097715 = weight(abstract_txt:spink in 2578) [ClassicSimilarity], result of:\n0.11097715 = score(doc=2578,freq=1.0), product of:\n0.19828767 = queryWeight, product of:\n1.1583964 = boost\n8.954841 = idf(docFreq=14, maxDocs=42740)\n0.01911528 = queryNorm\n0.55967754 = fieldWeight in 2578, product of:\n1.0 = tf(freq=1.0), with freq of:\n1.0 = termFreq=1.0\n8.954841 = idf(docFreq=14, maxDocs=42740)\n0.0625 = fieldNorm(doc=2578)\n0.042927425 = weight(abstract_txt:understanding in 2578) [ClassicSimilarity], result of:\n0.042927425 = score(doc=2578,freq=2.0), product of:\n0.10526804 = queryWeight, product of:\n1.1936378 = boost\n4.6136355 = idf(docFreq=1151, maxDocs=42740)\n0.01911528 = queryNorm\n0.4077916 = fieldWeight in 2578, product of:\n1.4142135 = tf(freq=2.0), with freq of:\n2.0 = termFreq=2.0\n4.6136355 = idf(docFreq=1151, maxDocs=42740)\n0.0625 = fieldNorm(doc=2578)\n0.015392502 = weight(abstract_txt:research in 2578) [ClassicSimilarity], result of:\n0.015392502 = score(doc=2578,freq=1.0), product of:\n0.076627836 = queryWeight, product of:\n1.2472779 = boost\n3.2139761 = idf(docFreq=4669, maxDocs=42740)\n0.01911528 = queryNorm\n0.20087351 = fieldWeight in 2578, product of:\n1.0 = tf(freq=1.0), with freq of:\n1.0 = termFreq=1.0\n3.2139761 = idf(docFreq=4669, maxDocs=42740)\n0.0625 = fieldNorm(doc=2578)\n0.027599202 = weight(abstract_txt:science in 2578) [ClassicSimilarity], result of:\n0.027599202 = score(doc=2578,freq=1.0), product of:\n0.11309535 = queryWeight, product of:\n1.515278 = boost\n3.904557 = idf(docFreq=2340, maxDocs=42740)\n0.01911528 = queryNorm\n0.24403481 = fieldWeight in 2578, product of:\n1.0 = tf(freq=1.0), with freq of:\n1.0 = termFreq=1.0\n3.904557 = idf(docFreq=2340, maxDocs=42740)\n0.0625 = fieldNorm(doc=2578)\n0.072163194 = 
weight(abstract_txt:interdisciplinary in 2578) [ClassicSimilarity], result of:\n0.072163194 = score(doc=2578,freq=1.0), product of:\n0.18751118 = queryWeight, product of:\n1.5930811 = boost\n6.157559 = idf(docFreq=245, maxDocs=42740)\n0.01911528 = queryNorm\n0.38484743 = fieldWeight in 2578, product of:\n1.0 = tf(freq=1.0), with freq of:\n1.0 = termFreq=1.0\n6.157559 = idf(docFreq=245, maxDocs=42740)\n0.0625 = fieldNorm(doc=2578)\n0.063271016 = weight(abstract_txt:social in 2578) [ClassicSimilarity], result of:\n0.063271016 = score(doc=2578,freq=3.0), product of:\n0.13633579 = queryWeight, product of:\n1.6636995 = boost\n4.2870083 = idf(docFreq=1596, maxDocs=42740)\n0.01911528 = queryNorm\n0.46408224 = fieldWeight in 2578, product of:\n1.7320508 = tf(freq=3.0), with freq of:\n3.0 = termFreq=3.0\n4.2870083 = idf(docFreq=1596, maxDocs=42740)\n0.0625 = fieldNorm(doc=2578)\n0.1741929 = weight(abstract_txt:evolutionary in 2578) [ClassicSimilarity], result of:\n0.1741929 = score(doc=2578,freq=2.0), product of:\n0.2678101 = queryWeight, product of:\n1.9038723 = boost\n7.358825 = idf(docFreq=73, maxDocs=42740)\n0.01911528 = queryNorm\n0.6504344 = fieldWeight in 2578, product of:\n1.4142135 = tf(freq=2.0), with freq of:\n2.0 = termFreq=2.0\n7.358825 = idf(docFreq=73, maxDocs=42740)\n0.0625 = fieldNorm(doc=2578)\n0.08298482 = weight(abstract_txt:information in 2578) [ClassicSimilarity], result of:\n0.08298482 = score(doc=2578,freq=14.0), product of:\n0.14602585 = queryWeight, product of:\n3.1435776 = boost\n2.430104 = idf(docFreq=10226, maxDocs=42740)\n0.01911528 = queryNorm\n0.56828856 = fieldWeight in 2578, product of:\n3.7416575 = tf(freq=14.0), with freq of:\n14.0 = termFreq=14.0\n2.430104 = idf(docFreq=10226, maxDocs=42740)\n0.0625 = fieldNorm(doc=2578)\n0.40837285 = weight(abstract_txt:behavior in 2578) [ClassicSimilarity], result of:\n0.40837285 = score(doc=2578,freq=5.0), product of:\n0.5527786 = queryWeight, product of:\n5.4705367 = boost\n5.286164 = idf(docFreq=587, maxDocs=42740)\n0.01911528 = queryNorm\n0.73876387 = fieldWeight in 2578, product of:\n2.236068 = tf(freq=5.0), with freq of:\n5.0 = termFreq=5.0\n5.286164 = idf(docFreq=587, maxDocs=42740)\n0.0625 = fieldNorm(doc=2578)\n0.4 = coord(10/25)\n```\n2. 
Lueg, C.P.: ¬The missing link : information behavior research and its estranged relationship with embodiment (2015) 0.21\n```0.21026793 = sum of:\n0.21026793 = product of:\n0.8761164 = sum of:\n0.04463771 = weight(abstract_txt:what in 4349) [ClassicSimilarity], result of:\n0.04463771 = score(doc=4349,freq=2.0), product of:\n0.09311105 = queryWeight, product of:\n1.1225997 = boost\n4.33906 = idf(docFreq=1515, maxDocs=42740)\n0.01911528 = queryNorm\n0.4794029 = fieldWeight in 4349, product of:\n1.4142135 = tf(freq=2.0), with freq of:\n2.0 = termFreq=2.0\n4.33906 = idf(docFreq=1515, maxDocs=42740)\n0.078125 = fieldNorm(doc=4349)\n0.027210355 = weight(abstract_txt:research in 4349) [ClassicSimilarity], result of:\n0.027210355 = score(doc=4349,freq=2.0), product of:\n0.076627836 = queryWeight, product of:\n1.2472779 = boost\n3.2139761 = idf(docFreq=4669, maxDocs=42740)\n0.01911528 = queryNorm\n0.35509753 = fieldWeight in 4349, product of:\n1.4142135 = tf(freq=2.0), with freq of:\n2.0 = termFreq=2.0\n3.2139761 = idf(docFreq=4669, maxDocs=42740)\n0.078125 = fieldNorm(doc=4349)\n0.034499004 = weight(abstract_txt:science in 4349) [ClassicSimilarity], result of:\n0.034499004 = score(doc=4349,freq=1.0), product of:\n0.11309535 = queryWeight, product of:\n1.515278 = boost\n3.904557 = idf(docFreq=2340, maxDocs=42740)\n0.01911528 = queryNorm\n0.30504352 = fieldWeight in 4349, product of:\n1.0 = tf(freq=1.0), with freq of:\n1.0 = termFreq=1.0\n3.904557 = idf(docFreq=2340, maxDocs=42740)\n0.078125 = fieldNorm(doc=4349)\n0.045661926 = weight(abstract_txt:social in 4349) [ClassicSimilarity], result of:\n0.045661926 = score(doc=4349,freq=1.0), product of:\n0.13633579 = queryWeight, product of:\n1.6636995 = boost\n4.2870083 = idf(docFreq=1596, maxDocs=42740)\n0.01911528 = queryNorm\n0.33492252 = fieldWeight in 4349, product of:\n1.0 = tf(freq=1.0), with freq of:\n1.0 = termFreq=1.0\n4.2870083 = idf(docFreq=1596, maxDocs=42740)\n0.078125 = fieldNorm(doc=4349)\n0.07841328 = weight(abstract_txt:information in 4349) [ClassicSimilarity], result of:\n0.07841328 = score(doc=4349,freq=8.0), product of:\n0.14602585 = queryWeight, product of:\n3.1435776 = boost\n2.430104 = idf(docFreq=10226, maxDocs=42740)\n0.01911528 = queryNorm\n0.5369822 = fieldWeight in 4349, product of:\n2.828427 = tf(freq=8.0), with freq of:\n8.0 = termFreq=8.0\n2.430104 = idf(docFreq=10226, maxDocs=42740)\n0.078125 = fieldNorm(doc=4349)\n0.64569414 = weight(abstract_txt:behavior in 4349) [ClassicSimilarity], result of:\n0.64569414 = score(doc=4349,freq=8.0), product of:\n0.5527786 = queryWeight, product of:\n5.4705367 = boost\n5.286164 = idf(docFreq=587, maxDocs=42740)\n0.01911528 = queryNorm\n1.1680882 = fieldWeight in 4349, product of:\n2.828427 = tf(freq=8.0), with freq of:\n8.0 = termFreq=8.0\n5.286164 = idf(docFreq=587, maxDocs=42740)\n0.078125 = fieldNorm(doc=4349)\n0.24 = coord(6/25)\n```\n3. 
Pirolli, P.: Information foraging theory : adaptive interaction with information (2007) 0.20
[Lucene ClassicSimilarity score breakdown condensed: 0.20384035 = weighted sum of abstract-term matches ("will", "what", "understanding", "research", "social", "evolutionary", "sciences", "information", "behavior") in doc 1006, coord(9/25)]
4. Spink, A.; Currier, J.: Towards an evolutionary perspective for human information behavior : an exploratory study (2006) 0.20
[Lucene ClassicSimilarity score breakdown condensed: 0.20003772 = weighted sum of abstract-term matches ("understanding", "research", "science", "evolutionary", "information", "behavior") in doc 593, coord(6/25)]
5. Cheung, C.M.K.; Liu, I.L.B.; Lee, M.K.O.: How online social interactions influence customer information contribution behavior in online social shopping communities : a social learning theory perspective (2015) 0.18
[Lucene ClassicSimilarity score breakdown condensed: 0.1818445 = weighted sum of abstract-term matches ("only", "understanding", "research", "important", "social", "information", "behavior") in doc 4335, coord(7/25)]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.6965086,"math_prob":0.9963263,"size":17495,"snap":"2020-45-2020-50","text_gpt3_token_len":6702,"char_repetition_ratio":0.24841347,"word_repetition_ratio":0.45863223,"special_character_ratio":0.5346099,"punctuation_ratio":0.28335834,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9996346,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-10-28T11:45:23Z\",\"WARC-Record-ID\":\"<urn:uuid:b8d44689-1490-43c5-826f-34a48a4c7ae6>\",\"Content-Length\":\"34934\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:4cf4226d-1111-40f5-a356-b02a54506240>\",\"WARC-Concurrent-To\":\"<urn:uuid:8e0ae677-d6fa-46f6-9582-d85dcc6eb6b5>\",\"WARC-IP-Address\":\"139.6.160.6\",\"WARC-Target-URI\":\"http://ixtrieve.fh-koeln.de/birds/litie/document/36315\",\"WARC-Payload-Digest\":\"sha1:HXKR63YKMCVRTYEZXH6R4LOCIFA3SP6Y\",\"WARC-Block-Digest\":\"sha1:OVBWNVK5WAU4TV3AICCPG3SBU6JI6TFJ\",\"WARC-Identified-Payload-Type\":\"application/xhtml+xml\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-45/CC-MAIN-2020-45_segments_1603107898499.49_warc_CC-MAIN-20201028103215-20201028133215-00606.warc.gz\"}"}
https://byjus.com/difference-of-squares-calculator/
[ "", null, "# Difference of Squares Calculator

For calculating a² − b²

[Interactive calculator widget: enter the first number a and the second number b; the tool factors the expression using a² − b² = (a + b)(a − b) and then simplifies it further.]

The Difference of Squares Calculator is a free online tool that displays the difference between two squared numbers. BYJU'S online difference of squares calculator tool performs the calculation faster, and it displays the difference value in a fraction of seconds.

## How to Use the Difference of Squares Calculator?

The procedure to use the difference of squares calculator is as follows:

Step 1: Enter the "a" and "b" values in the input field

Step 2: Now click the button "Calculate Difference of Squares" to get the result

Step 3: Finally, the difference between the two squared numbers will be displayed in the output field

### What is Meant by the Difference of Squares?

In mathematics, the difference of squares is one squared number subtracted from another squared number. It can be factored according to an algebraic identity: if "a" and "b" are the two numbers, then the difference of squares is given by

a² − b² = (a − b)(a + b)

For example, if 2 and 4 are the two numbers, then the difference of their squares is:

2² − 4² = (2 − 4)(2 + 4)

2² − 4² = (−2)(6)

2² − 4² = −12

The other way to simplify this is:

2² − 4² = 4 − 16

2² − 4² = −12

Both methods will result in the same answer." ]
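To double-check the identity numerically, here is a minimal Python sketch (my own addition, not part of the BYJU'S page; the function name is made up for illustration):

```python
# Verify a^2 - b^2 == (a + b)(a - b) for a few sample pairs.
def difference_of_squares(a, b):
    return (a + b) * (a - b)

for a, b in [(2, 4), (7, 3), (-5, 2)]:
    direct = a**2 - b**2
    assert direct == difference_of_squares(a, b)
    print(f"{a}^2 - {b}^2 = {direct}")   # e.g. 2^2 - 4^2 = -12
```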
[ null, "https://www.facebook.com/tr", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.8876433,"math_prob":0.99936515,"size":1241,"snap":"2020-45-2020-50","text_gpt3_token_len":306,"char_repetition_ratio":0.21827,"word_repetition_ratio":0.07339449,"special_character_ratio":0.26188558,"punctuation_ratio":0.08677686,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.99981266,"pos_list":[0,1,2],"im_url_duplicate_count":[null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-10-24T00:44:01Z\",\"WARC-Record-ID\":\"<urn:uuid:1770bbcf-5537-4ba6-a789-f06e2bb926bc>\",\"Content-Length\":\"525087\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:cae09699-0c7c-41b2-874d-a4314e52fe29>\",\"WARC-Concurrent-To\":\"<urn:uuid:4823f2e2-509f-49bd-90c8-f74a74274363>\",\"WARC-IP-Address\":\"52.77.80.199\",\"WARC-Target-URI\":\"https://byjus.com/difference-of-squares-calculator/\",\"WARC-Payload-Digest\":\"sha1:5ZPF2JSPNU4SDBBKVXDDX27NATZNBRMN\",\"WARC-Block-Digest\":\"sha1:3PTQ2YBGHGOMFU3Z4LWUT5JQP6QJ2NKD\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-45/CC-MAIN-2020-45_segments_1603107881551.11_warc_CC-MAIN-20201023234043-20201024024043-00581.warc.gz\"}"}
http://talkstats.com/threads/dependent-variable-in-regression-model-sum-of-factors.56301/
[ "# Dependent variable in regression model - sum of factors?

#### Fairy

##### New Member
I came across the article below because one of our students used it for a brand loyalty measure.

http://www.palgrave-journals.com/jt/journal/v15/n4/full/5750044a.html

I am having a hard time understanding what the dependent variable in this regression model is - I did not find it explicitly mentioned, but it seems to be a sum of all the loyalty factors. Can that be so? My general logic does not allow me to say I have proven my brand loyalty measurement questionnaire is valid if I check the factors' impact on their own sum.

But maybe I am wrong, and I would like to hear other opinions (I reported it to the publishers, but no response so far on whether they did any additional review...). Maybe you find it interesting to dig in and share what you find about this paper's statistics.

Last edited:

#### Fairy

##### New Member
Adding a few citations from the paper; hopefully that makes it easier to understand the question.

Example of a hypothesis (all stated in the same manner for all factors): "Higher level of emotional value will lead to higher level of brand loyalty."

I have not found a description of what that "brand loyalty", later called simply "loyalty", is.

"The data were factor analysed using principal components analysis with varimax rotation."

"Reliability was evaluated by assessing the internal consistency of the items representing each construct using Cronbach's alpha. The reliability of each construct was as follows: Functional Value = 0.93; price worthiness = 0.92. Emotional value = 0.88; social value = 0.95; customer satisfaction = 0.70; brand trust = 0.88; commitment = 0.84; repeated Purchase = 0.96, involvement = 0.87."

"The nine factors emerged with no cross-construct loadings above 0.5, indicating good discriminant validity. The instrument also demonstrated convergent validity with factor loadings exceeding 0.5 for each construct. Consequently, these results confirm that each of the five constructs is unidimensional and factorially distinct and that all items used to operationalise a particular construct is loaded onto a single factor."

"The hypothesised relationships were tested using the multiple regression analysis of SPSS 11.5 for Windows. The average scores of the items representing each construct were used in the data analysis. The R² was used to assess the model's overall predictive fit. Properties of the causal paths, including standardised path coefficients, t-values and variance, explained for each equation in the hypothesised model are presented in Figure 3. The influence of perceived value (functional value, price worthiness, emotional value and social value), trust, customer satisfaction and repeated purchase commitment on loyalty has been proved by hypotheses H1, H2a, H2b, H2c, H2d, H3, H4, H5 and H6.
As expected, repeated purchase (b = 0.769, t-value = 7.159, p < 0.001) and functional value (b = 0.138, t-value = 6.312, p < 0.001) have the relatively strongest influence on loyalty, followed by commitment (b = 0.127, t-value = 1.484, p = 0.148) and emotional value (b = 0.108, t-value = 1.800, p = 0.082). Brand trust (b = 0.095, t-value = 2.150, p < 0.05), price worthiness (b = 0.046, t-value = 0.778, p = 0.443), customer satisfaction (b = 0.034, t-value = 1.523, p = 0.138) and social value (b = 0.026, t-value = 1.207, p = 0.237) have a significant positive effect on loyalty. Customers' involvement (b = 0.057, t-value = 2.622, p < 0.05) also has a significant influence on loyalty. Therefore, hypotheses H1, H2a, H2b, H2c, H2d, H3, H4, H5 and H6 are supported. So the proposed model explained a significant percentage of variance in loyalty (R² = 98.6 per cent, F value = 236.175, p < 0.001)."

Factor table and list of items attached." ]
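The asker's suspicion - that regressing a composite on its own components is circular - is easy to demonstrate with synthetic data. A minimal numpy sketch (my own illustration, not from the thread or the paper; numpy's least squares stands in for SPSS):

```python
# If the dependent variable is (close to) the sum of the predictors,
# OLS will report R^2 near 1 by construction.
import numpy as np

rng = np.random.default_rng(0)
n, k = 200, 9                                       # 200 respondents, 9 "factors"
X = rng.normal(size=(n, k))                         # factor scores
y = X.sum(axis=1) + rng.normal(scale=0.1, size=n)   # "loyalty" = sum + small noise

X1 = np.column_stack([np.ones(n), X])               # add an intercept column
beta, *_ = np.linalg.lstsq(X1, y, rcond=None)
resid = y - X1 @ beta
r2 = 1 - resid.var() / y.var()
print(f"R^2 = {r2:.3f}")   # ~0.999 - a tautological fit, much like the paper's 98.6%
```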
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.9395563,"math_prob":0.9656463,"size":1472,"snap":"2022-27-2022-33","text_gpt3_token_len":324,"char_repetition_ratio":0.07970027,"word_repetition_ratio":0.902439,"special_character_ratio":0.22214673,"punctuation_ratio":0.1118421,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9586679,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2022-08-15T19:03:26Z\",\"WARC-Record-ID\":\"<urn:uuid:b5bdeb88-8621-450d-b1a9-66971dde052a>\",\"Content-Length\":\"38091\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:8063d410-0b33-45f1-8c11-511760ff0b6f>\",\"WARC-Concurrent-To\":\"<urn:uuid:9e2e44c5-567c-4e08-b42c-a2a8175d62fb>\",\"WARC-IP-Address\":\"199.167.200.62\",\"WARC-Target-URI\":\"http://talkstats.com/threads/dependent-variable-in-regression-model-sum-of-factors.56301/\",\"WARC-Payload-Digest\":\"sha1:3YZN4XFWMPK6BL6USG5EBNLJNPN524FX\",\"WARC-Block-Digest\":\"sha1:F4OKWZUTAQEV7VRO2RYYK3RURXPZ5G6I\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2022/CC-MAIN-2022-33/CC-MAIN-2022-33_segments_1659882572198.93_warc_CC-MAIN-20220815175725-20220815205725-00495.warc.gz\"}"}
https://www.careerstoday.in/school/specific-conductivity-molar-conductivity-topic-pge
[ "# Specific Conductivity and Molar Conductivity - Definition, Unit, Relation, FAQs

In this article we will discuss conductance, conductivity, the symbol of conductance, what specific conductance is, the definition and unit of specific conductivity, the specific conductivity of a solution, the ratio of specific conductance to conductance, the definition of and relationship between conductivity and molar conductivity, what equivalent conductivity is, and everything else related to specific and molar conductivity.

What is meant by Conductance?

Conductance is the reciprocal of resistance and is denoted by the symbol G. It can be defined as the measure of the ease of current flow through a conductor, and is given by the formula:

Conductance, G = 1/R ……………(1)

In equation (1), 'R' is the resistance of the conductor. The unit of conductance is ohm⁻¹ or Ω⁻¹ and its SI unit is the siemens, S.

The conductance of a material generally depends on the following factors:

• The nature of the metal.
• The number of valence electrons present per atom.
• Temperature (conductance generally decreases with increase in temperature).

Ionic conductance

The capacity of an ion to conduct electricity is commonly defined as ionic conductance. The value of the ionic conductance of a metal ion is affected by the extent of its hydration in aqueous solutions.

In the state of infinite dilution, the ionization of the electrolyte will be complete and all forces of interaction between the ions will have ceased to exist. Under such a condition, all the ions that can possibly be derived from the electrolyte under consideration are free to carry current. The motion of ionic charge causes electrical conductivity, called ionic conductivity or ionic conductance. Equivalent conductance, molar conductance and specific conductance are different types of conductance.

## Conductivity or Specific conductance

Define specific conductance

Specific conductance, nowadays simply called conductivity, is the property of a conductor that measures its capacity to conduct electricity. It is represented by the symbol 'K'.

The specific conductance or conductivity is the reciprocal of the specific resistance ρ:

K = 1/ρ ……………(2)

We know that

R = ρ (l/a) ……………(3)

Here in equation (3), 'R' indicates the resistance of a conductor of length 'l' and 'a' is the area of cross section in cm². Then,

K = G (l/a) ……………(4)

Here in equation (4), 'G' is the conductance. Obviously if l = 1 and a = 1, then equation (4) becomes K = G.

Thus the conductivity or specific conductance of an electrolyte solution represents the conductance of a sample of unit length and unit cross section. In other words, the conductivity or specific conductance of an electrolyte solution represents the conductance of one centimetre cube of the solution kept between two electrodes of unit cross-sectional area placed unit distance apart.

A common practical unit of specific conductance is microsiemens/cm (µS/cm).

The conductivity or specific conductance of an electrolyte depends on the following factors:

• Nature of the electrolyte – Strong electrolytes have high conductance whereas weak electrolytes have low conductance.
• Concentration of the solution – Molar conductance varies with the concentration of the electrolyte.
• Temperature – The conductivity of an electrolyte increases with increase in temperature.

What does the term cell constant indicate?

The cell constant is obtained by dividing the distance between the two electrodes in a conductivity cell by the cross-sectional area of the electrodes: cell constant = l/a. It is commonly expressed in cm⁻¹ and its SI unit is m⁻¹.

The expression for the conductivity of an electrolyte solution is given as:

i.e., Conductivity = Conductance × Cell constant

Denoting the cell constant by G* = l/a, the expression for 'K' becomes:

K = G × G*

Hence it is clear that the cell constant is the ratio of specific conductance to conductance.

Equivalent conductivity or Equivalent conductance

The equivalent conductivity of an electrolyte solution of a given concentration is the conducting power of the ions formed from one equivalent of electrolyte present in the solution. It is generally denoted Λeq.

The specific conductance or conductivity (K) of a solution is related to the equivalent conductivity by the equation:

Λeq = 1000 K / c

Here 'K' is expressed in S cm⁻¹ and the concentration 'c' in gram-equivalents per litre; the unit of Λeq is then S cm² (g-equiv)⁻¹.

If 'N' is the normality of the solution and 'K' is expressed in S cm⁻¹, then

Λeq = 1000 K / N

### Molar conductivity or Molar conductance

The molar conductivity or molar conductance of an electrolyte solution of a given concentration is defined as the ratio of the conductivity to the molar concentration. It is denoted Λm.

The specific conductance or conductivity (K) of a solution is related to the molar conductivity by the equation:

Λm = K / c

When 'K' is expressed in S m⁻¹ and the concentration 'c' in mol m⁻³, the SI unit of Λm is S m² mol⁻¹.

If 'M' is the molarity of the solution and 'K' is expressed in S cm⁻¹, then

Λm = 1000 K / M

• For an electrolyte supplying n equivalents per mole, Λm is related to Λeq as Λm = n Λeq.

## Relation between conductivity and molar conductivity

Λm = K/C

C is the concentration

K is the conductivity

Λm is the molar conductivity" ]
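A unit-conscious helper makes the lab-convention version of these relations concrete. This is my own sketch, not from the article; the 0.1 M KCl value is a standard reference figure (κ ≈ 0.0129 S/cm at 25 °C):

```python
# Molar conductivity from measured conductivity, lab convention:
# kappa in S/cm, molarity in mol/L  ->  Lambda_m = 1000 * kappa / M  in S·cm²/mol
def molar_conductivity(kappa_s_per_cm: float, molarity_mol_per_l: float) -> float:
    return 1000.0 * kappa_s_per_cm / molarity_mol_per_l

print(molar_conductivity(0.0129, 0.1))  # ≈ 129 S·cm²/mol for 0.1 M KCl
```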
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.8929648,"math_prob":0.9848965,"size":5999,"snap":"2022-40-2023-06","text_gpt3_token_len":1263,"char_repetition_ratio":0.23786488,"word_repetition_ratio":0.08145767,"special_character_ratio":0.19103184,"punctuation_ratio":0.086395234,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9979439,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2022-10-04T12:27:50Z\",\"WARC-Record-ID\":\"<urn:uuid:9d3658ff-6328-49d9-9456-3ca8ab822abd>\",\"Content-Length\":\"20634\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:b75e8625-eb64-4590-ab1c-2947d2fb0840>\",\"WARC-Concurrent-To\":\"<urn:uuid:42dfaf6d-2abb-47cc-8d4d-2666016b214a>\",\"WARC-IP-Address\":\"104.152.168.37\",\"WARC-Target-URI\":\"https://www.careerstoday.in/school/specific-conductivity-molar-conductivity-topic-pge\",\"WARC-Payload-Digest\":\"sha1:3UJBLM75ZNASXKSH7C5G65ZJYG5G3U42\",\"WARC-Block-Digest\":\"sha1:HK4O5U3LWPJKNUAUBGZD2QB4FK7X7CO2\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2022/CC-MAIN-2022-40/CC-MAIN-2022-40_segments_1664030337504.21_warc_CC-MAIN-20221004121345-20221004151345-00371.warc.gz\"}"}
https://artint.info/2e/html/ArtInt2e.Ch4.S1.SS1.html
[ "# 4.1.1 Variables and Worlds\n\nConstraint satisfaction problems are described in terms of variables and possible worlds. A possible world is a possible way the world (the real world or some imaginary world) could be.\n\nPossible worlds are described by algebraic variables. An algebraic variable is a symbol used to denote features of possible worlds. For this chapter, we refer to an algebraic variable simply as a variable. Algebraic variables are written starting with an upper-case letter. Each algebraic variable $X$ has an associated domain, $\\mbox{dom}(X)$, which is the set of values the variable can take.\n\nSymbols and Semantics\n\nAlgebraic variables are symbols.\n\nInternal to a computer, a symbol is just a sequence of bits that is distinguished from other symbols. Some symbols have a fixed interpretation; for example, symbols that represent numbers and symbols that represent characters are predefined in most computer languages. Symbols with a meaning outside of the program (as opposed to variables in the program), but without a predefined meaning in the language, can be defined in many programming languages. In Java they are called enumeration types. Lisp refers to them as atoms. Python 3.4 introduced a symbol type called $enum$, but Python’s strings are often used as symbols. Usually, symbols are implemented as indexes into a symbol table that gives the name to print out. The only operation performed on these symbols is equality to determine whether two symbols are the same. This can be implemented by comparing the indexes in the symbol table.\n\nTo a user of a computer, symbols have meanings. A person who inputs constraints or interprets the output of a program associates meanings with the symbols making up the constraints or the outputs. He or she associates a symbol with some concept or object in the world. For example, the variable $SamsHeight$, to the computer, is just a sequence of bits. It has no relationship to $SamsWeight$ or $AlsHeight$. To a person, this variable may mean the height, in particular units, of a particular person at a particular time.\n\nThe meaning associated with a variable–value pair must satisfy the clarity principle: an omniscient agent – a fictitious agent who knows the truth and the meanings associated with all of the symbols – should be able to determine the value of each variable. For example, the height of Hagrid only satisfies the clarity principle if the particular person being referred to and the particular time are specified as well as the units. For example, one may want to reason about the height, in centimeters, of Hagrid in a particular scene at the start of the second Harry Potter movie. This is different from the height, in inches, of Hagrid at the end of the third movie (although they are, of course, related). To refer to Hagrid’s height at two different times, you need two variables.\n\nYou should have a consistent meaning for any symbols you use. When stating constraints, you must have the same meaning for the same variable and the same values, and you can use this meaning to interpret the output.\n\nThe bottom line is that symbols have meanings because you give them meanings. For this chapter, assume that the computer does not know what the symbols mean. A computer can know what a symbol means if it perceives and manipulates the environment.\n\nA discrete variable is one whose domain is finite or countably infinite. A binary variable is a discrete variable with two values in its domain. 
One particular case of a binary variable is a Boolean variable, which is a variable with domain $\\{\\mbox{true}{}$, $\\mbox{false}{}\\}$. We can also have variables that are not discrete; for example, a variable whose domain corresponds to the real line or an interval of the real line is a continuous variable.\n\nGiven a set of variables, an assignment on the set of variables is a function from the variables into the domains of the variables. We write an assignment on $\\{X_{1},X_{2},\\dots,X_{k}\\}$ as $\\{X_{1}{=}v_{1},X_{2}{=}v_{2},\\dots,X_{k}{=}v_{k}\\}$, where $v_{i}$ is in $\\mbox{dom}(X_{i})$. This assignment specifies that, for each $i$, variable $X_{i}$ is assigned value $v_{i}$. A variable can only be assigned one value in an assignment. A total assignment assigns all of the variables. Assignments do not have to be total, but may be partial.\n\nA possible world is defined to be a total assignment. That is, it is a function from variables into values that assigns a value to every variable. If world $w$ is the assignment $\\{X_{1}{=}v_{1},X_{2}{=}v_{2},\\dots,X_{k}{=}v_{k}\\}$, we say that variable $X_{i}$ has value $v_{i}$ in world $w$.\n\n###### Example 4.1.\n\nThe variable Class_time may denote the starting time for a particular class. The domain of Class_time may be the following set of possible times:\n\n $\\mbox{dom}(\\mbox{Class\\_time})=\\{8,9,10,11,12,1,2,3,4,5\\}.$\n\nThe variable Height_joe may refer to the height of a particular person at a particular time and have as its domain the set of real numbers, in some range, that represent the height in centimeters. Raining may be a random variable with domain $\\{\\mbox{true}{}$, $\\mbox{false}{}\\}$, which has value true if it is raining at a particular time.\n\nThe assignment $\\{\\mbox{Class\\_time}{=}11,\\mbox{Height\\_joe}{=}165,\\mbox{Raining}{=}\\mbox{% false}{}\\}$ specifies that the class starts at 11, Joe is 165cm tall and it is not raining.\n\n###### Example 4.2.\n\nIn the electrical environment of Figure 1.8, there may be a variable for the position of each switch that specifies whether the switch is up or down. There may be a variable for each light that specifies whether it is lit or not. There may be a variable for each component specifying whether it is working properly or if it is broken. Some variables that the following examples use include:\n\n• $S_{1}\\_\\mbox{pos}$ is a binary variable denoting the position of switch $s_{1}$ with domain $\\{\\mbox{up}$, $\\mbox{down}\\}$, where $S_{1}\\_\\mbox{pos}{=}\\mbox{up}$ means switch $s_{1}$ is up, and $S_{1}\\_\\mbox{pos}{=}\\mbox{down}$ means switch $s_{1}$ is down.\n\n• $S_{1}\\_\\mbox{st}$ is a discrete variable denoting the status of switch $s_{1}$ with domain $\\{\\mbox{ok},$ $\\mbox{upside\\_down},$ $\\mbox{short},$ $\\mbox{intermittent},$ $\\mbox{broken}\\}$, where $S_{1}\\_\\mbox{st}{=}\\mbox{ok}$ means switch $s_{1}$ is working normally, $S_{1}\\_\\mbox{st}{=}\\mbox{upside\\_down}$ means it is installed upside down, $S_{1}\\_\\mbox{st}{=}\\mbox{short}$ means it is shorted and it allows electricity to flow whether it is up or down, $S_{1}\\_\\mbox{st}{=}\\mbox{intermittent}$ means it is working intermittently, and $S_{1}\\_\\mbox{st}{=}\\mbox{broke}n$ means it is broken and does not allow electricity to flow.\n\n• Number_of_broken_switches is an integer-valued variable denoting the number of switches that are broken.\n\n• $\\mbox{Current\\_w}_{1}$ is a real-valued variable denoting the current, in amps, flowing through wire $w_{1}$. 
$\\mbox{Current\\_w}_{1}{=}1.3$ means there are $1.3$ amps flowing through wire $w_{1}$. We also allow inequalities between variables and constants; for example, $\\mbox{Current\\_w}_{1}\\geq 1.3$ is true when there are at least $1.3$ amps flowing through wire $w_{1}$.\n\nA world specifies the position of every switch, the status of every device, and so on. For example, a world may be described as switch 1 is up, switch 2 is down, fuse 1 is okay, wire 3 is broken, etc.\n\n###### Example 4.3.\n\nA classic example of a constraint satisfaction problem is a crossword puzzle. There are two different representations of crossword puzzles in terms of variables:\n\n• In one representation, the variables are the numbered squares with the direction of the word (down or across), and the domains are the set of possible words that can be used. For example, one_across could be a variable with domain $\\{$’ant’, ’big’, ’bus’, ’car’, ’has’$\\}$. A possible world corresponds to an assignment of a word for each of the variables.\n\n• In another representation of a crossword, the variables are the individual squares and the domain of each variable is the set of letters in the alphabet. For example, the top-left square could be a variable $p00$ with domain $\\{a,\\dots,z\\}$. A possible world corresponds to an assignment of a letter to each square.\n\n###### Example 4.4.\n\nA trading agent, in planning a trip for a group of tourists, may be required to schedule a given set of activities. There could be two variables for each activity: one for the date, for which the domain is the set of possible days for the activity, and one for the location, for which the domain is the set of possible towns where it may occur. A possible world corresponds to an assignment of a date and a town for each activity.\n\nAn alternative representation may have the days as the variables, with domains the set of possible activity–location pairs.\n\nThe number of worlds is the product of the number of values in the domains of the variables.\n\n###### Example 4.5.\n\nIf there are two variables, $A$ with domain $\\{0,1,2\\}$ and $B$ with domain $\\{\\mbox{true}{},\\mbox{false}{}\\}$, there are six possible worlds, which we name $w_{0},\\dots,w_{5}$ as follows\n\n• $w_{0}=\\{A{=}0,B{=}\\mbox{true}{}\\}$\n\n• $w_{1}=\\{A{=}0,B{=}\\mbox{false}{}\\}$\n\n• $w_{2}=\\{A{=}1,B{=}\\mbox{true}{}\\}$\n\n• $w_{3}=\\{A{=}1,B{=}\\mbox{false}{}\\}$\n\n• $w_{4}=\\{A{=}2,B{=}\\mbox{true}{}\\}$\n\n• $w_{5}=\\{A{=}2,B{=}\\mbox{false}{}\\}$\n\nIf there are $n$ variables, each with domain size $d$, there are $d^{n}$ possible worlds.\n\nOne main advantage of reasoning in terms of variables is the computational savings. Many worlds can be described by a few variables:\n\n• 10 binary variables can describe $2^{10}=1,024$ worlds\n\n• 20 binary variables can describe $2^{20}=1,048,576$ worlds\n\n• 30 binary variables can describe $2^{30}=1,073,741,824$ worlds\n\n• 100 binary variables can describe $2^{100}=1,267,650,600,228,229,401,496$, $703,205,376$ worlds.\n\nReasoning in terms of thirty variables may be easier than reasoning in terms of more than a billion worlds. One hundred variables is not that many, but reasoning in terms of more than $2^{100}$ worlds explicitly is not possible. Many real-world problems have thousands, if not millions, of variables." ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.92133677,"math_prob":0.9984104,"size":8503,"snap":"2020-45-2020-50","text_gpt3_token_len":1706,"char_repetition_ratio":0.16649018,"word_repetition_ratio":0.041553132,"special_character_ratio":0.20051746,"punctuation_ratio":0.1077771,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.99877346,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-11-25T03:10:58Z\",\"WARC-Record-ID\":\"<urn:uuid:12b24cc0-9038-477f-8b85-649cf7562dba>\",\"Content-Length\":\"58793\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:af5173c8-c6a7-48e3-bf0e-de5072174ec8>\",\"WARC-Concurrent-To\":\"<urn:uuid:7c34bf1b-b0a4-4328-981b-3d408ac222b6>\",\"WARC-IP-Address\":\"142.103.6.162\",\"WARC-Target-URI\":\"https://artint.info/2e/html/ArtInt2e.Ch4.S1.SS1.html\",\"WARC-Payload-Digest\":\"sha1:JYSZPD4RPZIWK6F75XDACV4JAVMPWZ2W\",\"WARC-Block-Digest\":\"sha1:ORFCEQ6ILHZAAHP75F3AWWEEWEWC2P5H\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-50/CC-MAIN-2020-50_segments_1606141180636.17_warc_CC-MAIN-20201125012933-20201125042933-00545.warc.gz\"}"}
https://turais.de/how-to-load-hdri-as-a-cubemap-in-opengl/
[ "In this post I want to show you how to load a panorama image into a cube map. The panorama image is commonly an HDRI, but the hard part is not loading the HDRI - it is converting the panorama texture to a cubemap texture.

### Intro

If you want to know more about cubemaps I can recommend the tutorial at Learnopengl.com. And of course more about HDRIs in general: https://en.wikipedia.org/wiki/High_Dynamic_Range_(photography_technique)

and this Wikipedia article about cube mapping: https://en.wikipedia.org/wiki/Cube_mapping

Basically we have a panorama image, which looks like this:", null, "Panorama Image - which you can find for example on HdriHaven", null, "This is what we want as a texture for all our cube sides.

And we need to turn this panorama into six textures.

Above you see a cubemap texture where each individual face is on one big texture. If you are beginning with skyboxes I recommend you look at the tutorial at learnopengl.com and try to load a texture like the one above. You can create those textures at: https://matheowis.github.io/HDRI-to-CubeMap/.  They are not HDRIs, but for testing your skybox in your engine it is a really great tool.

Now that we know what we have to do, we can imagine a cube in a 3D world where each face of the cube is a camera. The view angle of each camera has to be 90 degrees, so that everything gets covered.

For a better understanding I made a little sketch which describes the situation from a 2D perspective.

The circle around it should be the HDRI sphere, onto which the HDRI panorama is mapped. As you can see, the 6 cameras really cover everything.

To make this happen in OpenGL you could set up a scene, rotate the camera for each face, render each view to a framebuffer, and use that render output for a cube texture. And basically that is exactly what we will do.  I looked at the glTF Sample Viewer from Khronos and how they did it, and they did it really nicely. https://github.com/KhronosGroup/glTF-Sample-Viewer

They create the skybox cubemap in these basic steps:

1. Create an HDRI input texture (the texture your panorama image gets loaded into)
2. Create the cube map texture - the cube texture used by your skybox shader.
3. Create a framebuffer to render into.
4.
5.
Convert the Panorama to the Cubemap

I will now talk a bit about each single step.

### Create your HDRI Input Texture

Create your texture as you would normally:

``````glActiveTexture(GL_TEXTURE0 + texture_unit);
glGenTextures(1, &gl_texture_name);
glBindTexture(gl_target, gl_texture_name);
``````

Upload the image data to the GPU:

``````upload_image_data(img, this->gl_target, is_in_srgb);
``````

Set your texture parameters and generate the mipmaps:

``````glTexParameteri(gl_target, GL_TEXTURE_WRAP_S, gl_wrap_s);
glTexParameteri(gl_target, GL_TEXTURE_WRAP_T, gl_wrap_t);

glTexParameteri(gl_target, GL_TEXTURE_MIN_FILTER, gl_filter_min);
glTexParameteri(gl_target, GL_TEXTURE_MAG_FILTER, gl_filter_mag);

glGenerateMipmap(gl_target);
``````

`````` [[maybe_unused]] inline static void upload_image_data(Image &img, GLenum target = GL_TEXTURE_2D, bool is_in_srgb = false) {
    GLenum internal_format = 0x00;
    GLenum byte_format;

    std::string debug_msg{};
    std::string debug_format{};

    switch(img.channels) {

    //TODO CASE 1

    case 3: {
        switch (img.bits) {
        case 0:
        case 8: {
            if(is_in_srgb) {
                internal_format = GL_SRGB8;
            } else {
                internal_format = GL_RGB8;
            }
            byte_format = GL_UNSIGNED_BYTE;
        } break;
        case 16: {
            internal_format = GL_RGB16;
            byte_format = GL_UNSIGNED_SHORT;
        } break;
        case 32: {
            internal_format = GL_RGB32F;
            byte_format = GL_FLOAT;
        } break;
        default:
            throw std::logic_error("image format not supported");
        }

        debug_format = "RGB";
        assert(internal_format);

        glTexImage2D(target, 0, internal_format,
                     img.width, img.height,
                     0, GL_RGB, byte_format, img.data);
    } break;

    case 4: {
        switch (img.bits) {
        case 0:
        case 8: {
            if(is_in_srgb) {
                internal_format = GL_SRGB8_ALPHA8;
            } else {
                // the original post set GL_RGB8 here, which mismatches
                // the 4-channel GL_RGBA upload below
                internal_format = GL_RGBA8;
            }

            byte_format = GL_UNSIGNED_BYTE;
        } break;
        case 16: {
            internal_format = GL_RGBA16;
            byte_format = GL_UNSIGNED_SHORT;
        } break;
        case 32: {
            internal_format = GL_RGBA32F;
            byte_format = GL_FLOAT;
        } break;
        default:
            throw std::logic_error("image format not supported");
        }

        debug_format = "RGBA";

        glTexImage2D(target, 0, internal_format,
                     img.width, img.height,
                     0, GL_RGBA, byte_format, img.data);
    } break;

    default: {
        assert(0);
    }
    }

    spdlog::debug("[OpenGl] image upload complete {}: {} x {}, {}-bit-color {}",
                  img.name,
                  img.width, img.height,
                  img.bits, debug_format);

}
``````

If you are loading your images with stb_image, you have to use a different function for loading HDRIs because of the 32-bit color information. 
This is how I do this:\n\n`````` void PngLoader::load_png(Image &png_image,\nconst std::string &filepath_str, bool vert_flip, bool linear_f) {\n\nint w = 0;\nint h = 0;\nint channels = 0;\n\nif(!linear_f) { //no float data\npng_image.data = stbi_load(filepath_str.c_str(), &w, &h, &channels, 0);\n} else { //float data like hdr images\npng_image.data = reinterpret_cast<const unsigned char *>(stbi_loadf(filepath_str.c_str(), &w, &h, &channels,\n0));\npng_image.bits = 8 * sizeof(float);\n}\n\nif(png_image.data == nullptr) {\n}\n\npng_image.width = w;\npng_image.height = h;\npng_image.channels = channels;\n\n}\n``````\n\n### Create the CubeMap Texture\n\nCreating a Cubemap Texture you have to call `glTexImage2D` six times.\n\n`````` void Texture::create_cubemap(bool with_mipmaps) {\n\nassert(initalized);\nassert(this->gl_target == GL_TEXTURE_CUBE_MAP);\n\nbind();\n\nfor(int i = 0; i < 6; ++i) {\nreset_gl_error();\nauto data = std::vector<unsigned char>();\nglTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i,\n0, GL_RGBA32F, this->width, this->height,\n0, GL_RGBA, GL_FLOAT, nullptr);\nassert_gl_error();\n}\n\ngl_filter_min = (with_mipmaps) ? GL_LINEAR_MIPMAP_LINEAR : GL_LINEAR;\ngl_filter_mag = GL_LINEAR;\n\ngl_wrap_r = GL_CLAMP_TO_EDGE;\ngl_wrap_s = GL_CLAMP_TO_EDGE;\ngl_wrap_t = GL_CLAMP_TO_EDGE;\n\nglTexParameteri(gl_target, GL_TEXTURE_MIN_FILTER, gl_filter_min);\nglTexParameteri(gl_target, GL_TEXTURE_MAG_FILTER, gl_filter_mag);\n\nglTexParameteri(gl_target, GL_TEXTURE_WRAP_R, gl_wrap_r);\nglTexParameteri(gl_target, GL_TEXTURE_WRAP_S, gl_wrap_s);\nglTexParameteri(gl_target, GL_TEXTURE_WRAP_T, gl_wrap_t);\n\nspdlog::debug(\"[Texture][{}] created empty {} [{}:{}]\", name, magic_enum::enum_name(this->m_type), width, height);\ninitalized = true;\n}\n``````\n\n### Convert the Panorama to the CubeMap\n\nAnd here is my Panorama-to-CubeMap, which is basically the same as from https://github.com/KhronosGroup/glTF-Sample-Viewer with some changes.\n\n``````\nvoid IblSampler::panorama_to_cubemap() {\nspdlog::debug(\"[IblSampler] panorama_to_cubemap()\");\nassert(cubemap_texture.isInitalized());\n\nglBindVertexArray(1);\n\nfor(int i = 0; i < 6; ++i) {\n\nframebuffer.bind();\nint side = i;\nreset_gl_error();\nglFramebufferTexture2D(GL_FRAMEBUFFER,\nGL_COLOR_ATTACHMENT0,\nGL_TEXTURE_CUBE_MAP_POSITIVE_X + side,\ncubemap_texture.getGlTexture(),\n0);\n\nassert_gl_error();\n\ncubemap_texture.bind();\n\nglViewport(0, 0, texture_size, texture_size);\nglClearColor(0.5f, 0.5f, 0.5f, 0.f);\nglClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);\nassert_gl_error();\nassert_gl_error();\n\n//input_texture.bind();\nglActiveTexture(GL_TEXTURE0 + input_texture.getTextureUnit());\nglBindTexture(input_texture.getGlTarget(), input_texture.getGlTexture());\nassert_gl_error();\n\nassert_gl_error();\n\nglDrawArrays(GL_TRIANGLES, 0, 3);\nassert_gl_error();\n}\n\ncubemap_texture.bind();\ncubemap_texture.generate_mipmap();\n\n}\n``````\n\nBasically we call `glFramebufferTexture2D` for each side. The interesing parts are happening in the shaders. These are 1:1 the same shaders as from https://github.com/KhronosGroup/glTF-Sample-Viewer\n\n``````precision highp float;\n\nout vec2 texCoord;\n\nvoid main(void)\n{\nfloat x = float((gl_VertexID & 1) << 2);\nfloat y = float((gl_VertexID & 2) << 1);\ntexCoord.x = x * 0.5;\ntexCoord.y = y * 0.5;\ngl_Position = vec4(x - 1.0, y - 1.0, 0, 1);\n}\n``````\n\nFor the Vertex-Shader they use a little trick where you don't need to upload vertex data. 
More about that nice trick here: https://rauwendaal.net/2014/06/14/rendering-a-screen-covering-triangle-in-opengl/

We will go through this step by step:

``````vec3 uvToXYZ(int face, vec2 uv)
{
if(face == 0)
return vec3( 1.f, uv.y, -uv.x);

else if(face == 1)
return vec3( -1.f, uv.y, uv.x);

else if(face == 2)
return vec3( +uv.x, -1.f, +uv.y);

else if(face == 3)
return vec3( +uv.x, 1.f, -uv.y);

else if(face == 4)
return vec3( +uv.x, uv.y, 1.f);

else //if(face == 5)
{	return vec3( -uv.x, +uv.y, -1.f);}
}
``````

This is actually almost the same as what you can find on Wikipedia. With those coordinates you can calculate the uv coordinates on the "sphere" panorama image.

``````vec2 dirToUV(vec3 dir)
{
return vec2(
0.5f + 0.5f * atan(dir.z, dir.x) / MATH_PI,
1.f - acos(dir.y) / MATH_PI);
}
``````

Altogether:

``````vec3 panoramaToCubeMap(int face, vec2 texCoord)
{
vec2 texCoordNew = texCoord*2.0-1.0; //< mapping from [0,1] to [-1,1] coords
vec3 scan = uvToXYZ(face, texCoordNew);
vec3 direction = normalize(scan);
vec2 src = dirToUV(direction);

return texture(u_panorama, src).rgb; //< get the color from the panorama
}
``````" ]
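For sanity-checking the mapping without a GL context, here is a small CPU transcription of the two shader helpers in Python with numpy (my own sketch; it assumes the same face order and conventions as the GLSL above):

```python
# CPU reference of uvToXYZ / dirToUV for one sample point.
import numpy as np

def uv_to_xyz(face, u, v):
    # u, v in [-1, 1]; same face order as the shader's uvToXYZ
    return np.array([
        ( 1.0,    v,   -u),
        (-1.0,    v,    u),
        (   u, -1.0,    v),
        (   u,  1.0,   -v),
        (   u,    v,  1.0),
        (  -u,    v, -1.0),
    ][face])

def dir_to_uv(d):
    d = d / np.linalg.norm(d)
    return (0.5 + 0.5 * np.arctan2(d[2], d[0]) / np.pi,
            1.0 - np.arccos(d[1]) / np.pi)

# The centre of face 0 should sample the centre of the panorama:
print(dir_to_uv(uv_to_xyz(0, 0.0, 0.0)))   # ≈ (0.5, 0.5)
```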
[ null, "https://www.turais.de/content/images/2021/05/spruit_sunrise_4k_8bit_panorama.jpeg", null, "https://www.turais.de/content/images/2021/05/StandardCubeMap-2.jpg", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.5249535,"math_prob":0.8934319,"size":9678,"snap":"2021-31-2021-39","text_gpt3_token_len":2622,"char_repetition_ratio":0.11877196,"word_repetition_ratio":0.0756579,"special_character_ratio":0.2783633,"punctuation_ratio":0.25113896,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.97006524,"pos_list":[0,1,2,3,4],"im_url_duplicate_count":[null,1,null,1,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-07-28T11:34:55Z\",\"WARC-Record-ID\":\"<urn:uuid:848aa04c-0748-49d8-8646-50e9d44ee716>\",\"Content-Length\":\"38135\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:7ad2b2a2-d782-4d74-b562-0c0811cf07be>\",\"WARC-Concurrent-To\":\"<urn:uuid:417714e4-9b0f-44e0-a18a-637089b1bedb>\",\"WARC-IP-Address\":\"78.47.87.110\",\"WARC-Target-URI\":\"https://turais.de/how-to-load-hdri-as-a-cubemap-in-opengl/\",\"WARC-Payload-Digest\":\"sha1:S4S7LFBEWB2W46YOTKEE6MMBYXX25Y2Z\",\"WARC-Block-Digest\":\"sha1:6EYQN6TJ5IONVKTPNLNYUCVXN3LLLEDP\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-31/CC-MAIN-2021-31_segments_1627046153709.26_warc_CC-MAIN-20210728092200-20210728122200-00151.warc.gz\"}"}
https://socratic.org/questions/how-do-you-use-the-definition-of-a-derivative-to-find-the-derivative-of-f-x-3x-2-1
[ "# How do you use the definition of a derivative to find the derivative of f(x)=-3x^2+2x+9?

Feb 28, 2017

$\frac{df_x}{dx} = \color{green}{-6x+2}$
(see below for the method using the definition of the derivative)

#### Explanation:

The derivative of a function $f(x)$ (with respect to $x$) is defined as
$\frac{df_x}{dx} = \lim_{h \rightarrow 0} \frac{f(x+h) - f(x)}{h}$

Given: $f(x) = -3x^2 + 2x + 9$

$f(x+h) = -3(x+h)^2 + 2(x+h) + 9$
$\phantom{f(x+h)} = -3x^2 - 6xh - 3h^2 + 2x + 2h + 9$

Subtracting $f(x) = -3x^2 + 2x + 9$ term by term leaves

$f(x+h) - f(x) = -6xh - 3h^2 + 2h$

$\Rightarrow \frac{f(x+h) - f(x)}{h} = -6x - 3h + 2$
and
$\lim_{h \rightarrow 0} \frac{f(x+h) - f(x)}{h} = -6x - 3 \cdot (0) + 2 = \color{green}{-6x + 2}$" ]
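A quick symbolic cross-check of the same limit, using sympy (my own addition, not part of the Socratic answer):

```python
# Verify the limit-definition derivative with sympy.
import sympy as sp

x, h = sp.symbols("x h")
f = lambda t: -3 * t**2 + 2 * t + 9

difference_quotient = sp.expand((f(x + h) - f(x)) / h)
print(sp.limit(difference_quotient, h, 0))   # -6*x + 2
```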
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.6755441,"math_prob":1.0000088,"size":349,"snap":"2020-34-2020-40","text_gpt3_token_len":89,"char_repetition_ratio":0.16231884,"word_repetition_ratio":0.0,"special_character_ratio":0.24068768,"punctuation_ratio":0.060606062,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9999832,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-08-14T09:13:04Z\",\"WARC-Record-ID\":\"<urn:uuid:0b969679-404a-4cd2-b34d-58bdfeb7845c>\",\"Content-Length\":\"34451\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:b390fe10-d604-43f0-b5a0-bc3c90e34ca7>\",\"WARC-Concurrent-To\":\"<urn:uuid:352aa652-23f3-4c36-8c32-ff1018dc0f7a>\",\"WARC-IP-Address\":\"216.239.38.21\",\"WARC-Target-URI\":\"https://socratic.org/questions/how-do-you-use-the-definition-of-a-derivative-to-find-the-derivative-of-f-x-3x-2-1\",\"WARC-Payload-Digest\":\"sha1:NAYGCQTQSKJYOQE6QIJVONKCKHONVO4V\",\"WARC-Block-Digest\":\"sha1:PFDCPKIPBMW5DFDTSWQ34KGEIL4342V4\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-34/CC-MAIN-2020-34_segments_1596439739182.35_warc_CC-MAIN-20200814070558-20200814100558-00324.warc.gz\"}"}
https://getcodesolution.com/python/python-multiindex-slicing-with-repetitive-values/
[ "# Python multiindex slicing with repetitive values

You can do this by using your `a` and `b` arrays to create a new `MultiIndex`, then `reindex` your dataframe:

Sample Data

``````import pandas as pd

index = pd.MultiIndex.from_product([[1,2,3], [1,2,3]])
df = pd.DataFrame({"C": [11, 12, 13, 21, 22, 23, 31, 32, 33]}, index=index)

print(df) # dataframe with 2-level index and 1 column "C"
C
1 1 11
2 12
3 13
2 1 21
2 22
3 23
3 1 31
2 32
3 33
``````

Method 1

• Create a new `MultiIndex` from your `a` and `b` arrays
• Align the dataframe (or just the column of interest) to this new index
``````a = [1, 2, 3, 1, 2, 1, 2 ]
b = [3, 2, 1, 3, 2, 1, 3 ]

new_index = pd.MultiIndex.from_arrays([a, b])
new_c = df["C"].reindex(new_index)

print(new_c.to_numpy())
[13 22 31 13 22 11 23]
``````

Method 2

You can also zip your `a` and `b` arrays together and simply use `.loc` to slice your dataframe:

``````# Select the rows specified by combinations of a, b; in column "C".
# list(...) materializes the pairs; a bare zip iterator may not be
# accepted by .loc in newer pandas versions.
new_c = df.loc[list(zip(a, b)), "C"]

print(new_c.to_numpy())
[13 22 31 13 22 11 23]
``````" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.5823875,"math_prob":0.97947884,"size":979,"snap":"2022-40-2023-06","text_gpt3_token_len":367,"char_repetition_ratio":0.11282051,"word_repetition_ratio":0.054945055,"special_character_ratio":0.4167518,"punctuation_ratio":0.18518518,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9813494,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-01-31T14:19:37Z\",\"WARC-Record-ID\":\"<urn:uuid:c2430fd5-3062-4de0-9189-5162e6978b97>\",\"Content-Length\":\"122904\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:439e835e-60d1-4397-8254-584b6bbe6bee>\",\"WARC-Concurrent-To\":\"<urn:uuid:51d3a8be-20cd-4eb0-ba55-e980e4bf67e2>\",\"WARC-IP-Address\":\"104.21.63.205\",\"WARC-Target-URI\":\"https://getcodesolution.com/python/python-multiindex-slicing-with-repetitive-values/\",\"WARC-Payload-Digest\":\"sha1:EMP3G47NYNUE4QXB2AMGNZIC3TN5P2BB\",\"WARC-Block-Digest\":\"sha1:FJM7RHQIGWQFZZEQN3C7CKAPYJ4PNNZ5\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-06/CC-MAIN-2023-06_segments_1674764499871.68_warc_CC-MAIN-20230131122916-20230131152916-00084.warc.gz\"}"}
https://wiki.uni-konstanz.de/ccp4/index.php?title=Test&direction=next&oldid=25
[ "# Test\n\nWe test tex:\n\n$\\displaystyle{ F_{hkl} = \\sum_{i=1}^{N} e^{-2\\pi\\imath\\left( h\\frac{x}{a}+k\\frac{y}{b}+l\\frac{z}{c}\\right)} }$\n\nAll crystals should look like this (or better)", null, "Problem? - I tried uploading xtals.png but Wiki does not let me upload, quoting that 'wrong file type or extension' - yet it allowed me to load up the same image in jpg format. Png is a nice (some would even say the best) format so it'd be good to be able to use it! -AGE This is fixed now. --Kay 10:12, 5 February 2008 (CET)\nRemark: MIME type of PNG files was not correctly identified. To fix this, we added the lines\n\n# PNG\n1 string PNG image/png\n\n\nto the file /etc/httpd/conf/magic\n\nLogo prototype #1", null, "Logo prototype #2", null, "do you like any of these?\n\nTesting the gnuplot extension (the plot is generated in realtime, not a bitmap) <gnuplot> set hidden3d set isosamples 50 splot sin(x)*cos(y) </gnuplot>" ]
[ null, "https://wiki.uni-konstanz.de/ccp4/images/2/20/Xtals.jpg", null, "https://wiki.uni-konstanz.de/ccp4/images/4/4d/Logo1.png", null, "https://wiki.uni-konstanz.de/ccp4/images/8/89/Logo2.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.7886735,"math_prob":0.6870574,"size":847,"snap":"2023-14-2023-23","text_gpt3_token_len":266,"char_repetition_ratio":0.08066429,"word_repetition_ratio":0.0,"special_character_ratio":0.29161748,"punctuation_ratio":0.07777778,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9503094,"pos_list":[0,1,2,3,4,5,6],"im_url_duplicate_count":[null,1,null,1,null,1,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-05-29T15:34:10Z\",\"WARC-Record-ID\":\"<urn:uuid:ef585055-f093-4014-b623-e65b1587d2df>\",\"Content-Length\":\"22984\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:be20e793-d6e7-49eb-afe5-8709ec684681>\",\"WARC-Concurrent-To\":\"<urn:uuid:6727ab29-38eb-469e-9859-6d5622b4409a>\",\"WARC-IP-Address\":\"134.34.240.72\",\"WARC-Target-URI\":\"https://wiki.uni-konstanz.de/ccp4/index.php?title=Test&direction=next&oldid=25\",\"WARC-Payload-Digest\":\"sha1:YVRUVUPT7BR5543MGSVM3MZEKH4F52VU\",\"WARC-Block-Digest\":\"sha1:PBFF7EZL4YCW75G6PF4ETNMUSMQMCYT2\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-23/CC-MAIN-2023-23_segments_1685224644867.89_warc_CC-MAIN-20230529141542-20230529171542-00581.warc.gz\"}"}
https://jp.maplesoft.com/support/help/maplesim/view.aspx?path=Student/LinearAlgebra/Eigenvectors
[ "", null, "Eigenvectors - Maple Help\n\nStudent[LinearAlgebra]\n\n Eigenvectors\n compute the eigenvectors of a square Matrix", null, "Calling Sequence Eigenvectors(A, options)", null, "Parameters\n\n A - square Matrix; Matrix whose eigenvectors are required options - (optional) parameters; for a complete list, see LinearAlgebra[Eigenvectors]", null, "Description\n\n • The Eigenvectors(A) command returns an expression sequence of two elements. The first element is the Vector of eigenvalues (that is, exactly what is returned by the Eigenvalues(A) command.) The second is the Matrix of corresponding eigenvectors.\n For example, after\n (ev, EV) := Eigenvectors(A);\n for each column index i, $A·{\\mathrm{EV}}_{1..-1,i}={\\mathrm{ev}}_{i}·{\\mathrm{EV}}_{1..-1,i}$.\n Note: If the input Matrix A is defective (does not have a full set of linearly independent eigenvectors) then some of the columns of the Matrix of eigenvectors are 0 (and hence are not eigenvectors).  See JordanForm.", null, "Examples\n\n > $\\mathrm{with}\\left(\\mathrm{Student}\\left[\\mathrm{LinearAlgebra}\\right]\\right):$\n > $A≔⟨⟨-1,-3,-6⟩|⟨3,5,6⟩|⟨-3,-3,-4⟩⟩$\n ${A}{≔}\\left[\\begin{array}{ccc}{-1}& {3}& {-3}\\\\ {-3}& {5}& {-3}\\\\ {-6}& {6}& {-4}\\end{array}\\right]$ (1)\n > $\\mathrm{ev},\\mathrm{EV}≔\\mathrm{Eigenvectors}\\left(A\\right)$\n ${\\mathrm{ev}}{,}{\\mathrm{EV}}{≔}\\left[\\begin{array}{c}{-4}\\\\ {2}\\\\ {2}\\end{array}\\right]{,}\\left[\\begin{array}{ccc}\\frac{{1}}{{2}}& {-1}& {1}\\\\ \\frac{{1}}{{2}}& {0}& {1}\\\\ {1}& {1}& {0}\\end{array}\\right]$ (2)\n > $A·\\mathrm{EV}\\left[1..-1,2\\right]=\\mathrm{ev}\\left[2\\right]·\\mathrm{EV}\\left[1..-1,2\\right]$\n $\\left[\\begin{array}{c}{-2}\\\\ {0}\\\\ {2}\\end{array}\\right]{=}\\left[\\begin{array}{c}{-2}\\\\ {0}\\\\ {2}\\end{array}\\right]$ (3)" ]
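For comparison, the same computation in numpy (my own addition, not part of the Maple help page; numpy normalizes each eigenvector to unit length, so the columns differ from Maple's by a scale factor):

```python
# Cross-check the Maple example: eigenvalues/eigenvectors of the same matrix.
import numpy as np

A = np.array([[-1, 3, -3],
              [-3, 5, -3],
              [-6, 6, -4]], dtype=float)

ev, EV = np.linalg.eig(A)
print(ev)                        # -4, 2, 2 (possibly in a different order)

for lam, v in zip(ev, EV.T):     # each column satisfies A @ v == lam * v
    assert np.allclose(A @ v, lam * v)
```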
[ null, "https://bat.bing.com/action/0", null, "https://jp.maplesoft.com/support/help/maplesim/arrow_down.gif", null, "https://jp.maplesoft.com/support/help/maplesim/arrow_down.gif", null, "https://jp.maplesoft.com/support/help/maplesim/arrow_down.gif", null, "https://jp.maplesoft.com/support/help/maplesim/arrow_down.gif", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.6318222,"math_prob":0.9998598,"size":1322,"snap":"2022-40-2023-06","text_gpt3_token_len":397,"char_repetition_ratio":0.2094082,"word_repetition_ratio":0.0,"special_character_ratio":0.2375189,"punctuation_ratio":0.16597511,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9997291,"pos_list":[0,1,2,3,4,5,6,7,8,9,10],"im_url_duplicate_count":[null,null,null,null,null,null,null,null,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-02-03T20:22:55Z\",\"WARC-Record-ID\":\"<urn:uuid:97620350-651e-4f41-bd68-5145f0e88604>\",\"Content-Length\":\"170591\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:6c5c53f2-7448-4130-86ae-ed78252b6821>\",\"WARC-Concurrent-To\":\"<urn:uuid:4091a883-32fb-47c5-aa04-4e36b96f6128>\",\"WARC-IP-Address\":\"199.71.183.28\",\"WARC-Target-URI\":\"https://jp.maplesoft.com/support/help/maplesim/view.aspx?path=Student/LinearAlgebra/Eigenvectors\",\"WARC-Payload-Digest\":\"sha1:7OKTGNTDI2BG3SOB7EE63TL5ZH5ZX5B7\",\"WARC-Block-Digest\":\"sha1:I67TJHW4HZ4E4Q6NJMQYHFA2JUCEMLAP\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-06/CC-MAIN-2023-06_segments_1674764500074.73_warc_CC-MAIN-20230203185547-20230203215547-00764.warc.gz\"}"}
https://iwant2study.org/ospsgx/index.php/interactive-resources/mathematics/114-pure-mathematics/5-calculus/750-e-integral-approximations
[ "## Integral: Algorithms of Numerical Approximation JavaScript Simulation Applet HTML5", null, "", null, "Intro Page

# Integral: algorithms of numerical approximation

As an example for numerical integration we choose the sine function y = sin x; its graph is shown in blue. The definite integral is to be calculated between an initial abscissa x1 and an end abscissa x2.

The analytic solution of the indefinite integral (antiderivative) is

### y = ∫ sin x dx = -cos x + C

Its graph is shown in red, with C as the initial value at the initial abscissa.

The analytic definite integral is -(cos x2 - cos x1). It corresponds to a point on the analytic curve at the end abscissa x2.

In the approximate numerical calculation the interval x2 - x1 is divided into n subintervals of width delta. For clear demonstration of the principle n = 2 is chosen. Arrows show the value of the function at the three points of the double interval 2 delta.

Three numerical algorithms are visualized in three windows. They differ in how the approximate value of the function is defined between consecutive points in the subinterval delta.

1.) Rectangle approximation: y is taken as constant within the interval. The contribution of one interval is delta * y1.

2.) Trapezoid approximation: y is taken as the mean value within the interval. Its contribution is delta * (y1 + y2)/2.

3.) Parabola approximation: the function in two consecutive intervals is approximated by a second order parabola through both end points and the middle point of the double interval (a parabola needs three points to be uniquely defined). The contribution of the double interval, derived as a surprisingly simple formula, is 2 * delta * (y1 + 4*y2 + y3)/6.

In principle one can increase the precision of the parabola algorithm still further by using higher order parabolas, with correspondingly more subintervals of the definition range. As the second order is already very good, higher order approximations have no great practical importance. (For fun and exercise derive the formula for a third order parabola!)

The simulation calculates the sum of two approximating intervals of width delta using the three algorithms. Their respective values are represented by the green points.

A first slider defines the interval delta, a second one the initial abscissa x1; reset defines delta = 1 and x1 = 0.5.

Experiments

E1: Compare how well the three procedures approximate the analytic solution.

E2: Draw the initial value with the mouse. Observe the shift of the analytic solution, and its relation to the results of the different algorithms. Explain mentally to a non-professional what you observe!

E3: Reduce the interval width and observe how fast the approximations converge to the analytic solution.

E4: Keep the interval small and approximately constant while drawing the initial point. Observe whether the differences between the algorithms are comparable for all initial points. Interpret the result!

E5: For special initial points the simple algorithms result in exact agreement with the analytic value, while the parabola algorithm shows a recognizable deviation. Does this mean that the simple ones are better? What is the reason for identity in these cases?

### Software Requirements

| | Android | iOS | Windows | MacOS |
| --- | --- | --- | --- | --- |
| best with | Chrome | Chrome | Chrome | Chrome |
| full-screen? | Yes on Chrome/Opera; not yet on Firefox/Samsung Internet | No | Yes | Yes |
| caveats | cannot work on some mobile browsers that don't understand JavaScript such as ... | | cannot work on Internet Explorer 9 and below | |

### Credits", null, "", null, "", null, "Dieter Roess - WEH-Foundation; Fremont Teng; Loo Kang Wee

## Integral: Algorithms of Numerical Approximation JavaScript Simulation Applet HTML5

### Instructions

#### Control Panel

Adjust the sliders to move delta and x1 accordingly.

#### Toggling Full Screen

Double clicking anywhere in the panel will toggle full screen.

#### Reset Button

Resets the simulation." ]
[ null, "https://iwant2study.org/ospsgx/images/ospsg_images/play-sim.png", null, "https://iwant2study.org/lookangejss/math/Calculus/ejss_model_e_Integral_approximations/e_Integral_approximations/Screen Shot 2018-05-07 at 2.13.18 PM (2).png ", null, "https://iwant2study.org/ospsgx/images/ospsg_images/play-sim_small.png", null, "https://iwant2study.org/lookangejss/math/Calculus/ejss_model_e_Integral_approximations/01authorlookang50x50.png", null, "https://iwant2study.org/lookangejss/math/Calculus/ejss_model_e_Integral_approximations/", null, "https://iwant2study.org/lookangejss/math/Calculus/ejss_model_e_Integral_approximations/ ", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.81269777,"math_prob":0.98199135,"size":4120,"snap":"2021-43-2021-49","text_gpt3_token_len":927,"char_repetition_ratio":0.14261419,"word_repetition_ratio":0.024806201,"special_character_ratio":0.21529126,"punctuation_ratio":0.11491108,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.99378985,"pos_list":[0,1,2,3,4,5,6,7,8,9,10,11,12],"im_url_duplicate_count":[null,null,null,7,null,null,null,7,null,7,null,7,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-11-28T02:22:57Z\",\"WARC-Record-ID\":\"<urn:uuid:446b9b0a-3805-4f90-a3df-f4396059beae>\",\"Content-Length\":\"47964\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:30820cc4-25bd-4507-b8e0-5282bdaf7246>\",\"WARC-Concurrent-To\":\"<urn:uuid:d65a2bc0-a673-4685-b693-94b3dad937dc>\",\"WARC-IP-Address\":\"203.175.162.102\",\"WARC-Target-URI\":\"https://iwant2study.org/ospsgx/index.php/interactive-resources/mathematics/114-pure-mathematics/5-calculus/750-e-integral-approximations\",\"WARC-Payload-Digest\":\"sha1:MAFYNQXM4WNGH3CMME4UH35UADCKI6QX\",\"WARC-Block-Digest\":\"sha1:BRFDCE3MXTFTXSNVSUUNIPDSHUP6RKOX\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-49/CC-MAIN-2021-49_segments_1637964358443.87_warc_CC-MAIN-20211128013650-20211128043650-00035.warc.gz\"}"}
https://math.stackexchange.com/questions/1398841/show-that-the-rationals-are-an-incomplete-metric-space-without-reference-to-real?noredirect=1
[ "# Show that the rationals are an incomplete metric space without reference to reals\n\nI know that you can create rational sequences that converge to irrationals, but is there a simple way to do this without explicit assumption of the existence of the reals?\n\nI'm thinking of something along the lines of\n\n1) Show that there exists a particular Cauchy sequence of rationals\n\n2) Assume that the cauchy sequence converges to a rational.\n\nHence rationals are incomplete.\n\nI had an idea about using the property discussed in this question Choice of $q$ in Baby Rudin's Example 1.1 but couldn t make anything of it.\n\nI would like to know this because I feel that this is an inherent property of the rationals and that a proof of it should not need to reference anything else.\n\n• How about the sequence of rationals that converges to $\\sqrt{2}$? – Megadeth Aug 16 '15 at 4:14\n• @Chou How would you write that? – ignoramus Aug 16 '15 at 4:15\n• I'm not sure if this fits your \"avoiding irrationals\" criterion, but you can show that the sequence 0.1, 0.1011, 0.10110111, ... does not converge to a rational number, since the decimals do not repeat, and moreover the sequence is Cauchy. – Ilham Aug 16 '15 at 4:16\n• You could prove that $\\dfrac{F_{n+1}}{F_n}$ is a Cauchy sequence, and that a limit — if it existed — would satisfy $x=1+\\dfrac1x$, which no rational does. ($F_n$ is the $n$th Fibonacci number.) – Akiva Weinberger Aug 16 '15 at 4:17\n• @columbus8myhw Thanks, this was the idea I was looking for. And is your dog's name 'columbus'? – ignoramus Aug 16 '15 at 4:20\n\nYour approach is spot on, and you don't need to assume the reals exist to do it. Define a Cauchy sequence of rationals such that each one squared is closer to $2$ than the one before. Note that every number in this argument is rational. Then if the rationals are complete, the sequence converges to a rational. Now prove that no rational squared equals $2$. You have a Cauchy sequence that does not converge to an element of the space.\n\nYou don’t even have to deal directly with rationals (except as radii of metric balls): you can prove the superficially more general result that a countable metric space without isolated points is not complete. (The generality is only superficial, because such a space is homeomorphic to $\\Bbb Q$.)\n\nLet $\\langle X,d\\rangle$ be a countable metric space without isolated points, and index $X=\\{x_k:k\\in\\Bbb N\\}$. Let $m_0=0$, $r_0=1$, $B_0=\\{x\\in X:d(x_{m_0},x)<r_0\\}$, $C_0=\\operatorname{cl}B_0$, and $M_0=\\{k\\in\\Bbb N:x_k\\in B_0\\}$; $M_0$ is infinite, since $X$ has no isolated points, and $m_0=\\min M_0$.\n\nSuppose that $n\\in\\Bbb N$, and we have an $m_n\\in\\Bbb N$, an open set $B_n$ containing $x_{m(n)}$, $C_n=\\operatorname{cl}B_n$, and an infinite $M_n=\\{k\\in\\Bbb N:x_k\\in B_n\\}$ such that $m_n=\\min M_n$. Let\n\n$$m_{n+1}=\\min(M_n\\setminus\\{m_n\\})\\;;$$\n\nthen $x_{m_{n+1}}\\in B_n\\setminus\\{x_{m_n}\\}$. We can therefore choose a positive rational $$r_{n+1}\\le\\min\\left\\{d(x_{m_n},x_{m_{n+1}}),r_n-d(x_{m_n},x_{m_{n+1}}),2^{-(n+1)}\\right\\}$$ and set\n\n\\begin{align*} &B_{n+1}=\\{x\\in X:d(x_{m_{n+1}},x)<r_{n+1}\\}\\;,\\\\ &C_{n+1}=\\operatorname{cl}B_{n+1}\\;,\\text{ and}\\\\ &M_{n+1}=\\{k\\in\\Bbb N:x_k\\in B_{n+1}\\}\\;. \\end{align*}\n\nIt’s not hard to verify that $C_{n+1}\\subseteq B_n$, $M_{n+1}$ is infinite, and $m_{n+1}=\\min M_{n+1}$, so the recursive construction can continue. 
Note that we always have $m_{n+1}>m_n$.\n\nIn the end we have a sequence $\\langle C_n:n\\in\\Bbb N\\rangle$ of closed sets such that $C_n\\supset C_{n+1}$ for each $n\\in\\Bbb N$. Moreover, $\\operatorname{diam}C_n\\le 2r_n\\le2\\cdot2^{-(n+1)}=2^{-n}$, so if $X$ were complete, the Baire category theorem would ensure that $\\bigcap_{n\\in\\Bbb N}C_n=\\varnothing$. Suppose that some $x_k\\in\\bigcap_{n\\in\\Bbb N}C_n$. Then $k\\in\\bigcap_{n\\in\\Bbb N}M_n$. But $\\langle m_n:n\\in\\Bbb N\\rangle$ is strictly increasing, so there is an $n\\in\\Bbb N$ such that $k<m_n=\\min M_n$, and hence $x_k\\notin B_n$. Finally, $C_{n+1}\\subseteq B_n$, so $x_k\\notin C_{n+1}$, and $X$ therefore cannot be complete.\n\nThe line of reason you suggested is exactly \"create a rational sequence that converge to an irrational\" .\n\nIf you want an explicitly sequence, you can create a Cauchy sequence of rationals that converges to $\\sqrt2$, the decimal approaches to it works.\n\nConstruct a sequence of rationals that would converge to, say, $\\sqrt{2}$. For example, $x_1 = 1$, $x_{n+1} =\\frac{x_n + 2/x_n}{2} =\\frac{x_n^2+2}{2 x_n}$.\n\nThen, if this converges to a rational, let the limit be $r = a/b$, and let $x_n = a_n/b_n$. Then $a_{n+1}/b_{n+1} =\\frac{a_n/b_n + 2b_n/a_n}{2} =\\frac{a_n^2 + 2b_n^2}{2a_nb_n}$ so $a_{n+1} = a_n^2 + 2b_n^2$ and $b_{n+1} = 2a_nb_n$.\n\n$\\begin{array}\\\\ a_{n+1}^2-2b_{n+1}^2 &=(a_n^2 + 2b_n^2)^2-2(2a_nb_n)^2\\\\ &=a_n^4 +4a_n^2b_n^2+ 4b_n^4-8a_n^2b_n^2\\\\ &=a_n^4 -4a_n^2b_n^2+ 4b_n^4\\\\ &=(a_n^2 - 2b_n^2)^2\\\\ \\end{array}$\n\nIf we start with $x_1 = 1$, then $a_1 = b_1 = 1$, so $(a_1^2 - 2b_1^2)^2 = 1$. Therefore $a_n^2 -2b_n^2 = 1$ for all $n > 1$.\n\nSince $r$ is rational, $r = a/b$ and $2 = r^2 = a^2/b^2$. Therefore\n\n$\\begin{array}\\\\ 1 &= a_n^2 -2b_n^2\\\\ &= a_n^2 -(a^2/b^2)b_n^2\\\\ &= \\frac{a_n^2b^2 -a^2b_n^2}{b^2}\\\\ &= \\frac{(a_nb -ab_n)(a_nb +ab_n)}{b^2}\\\\ &\\ge \\frac{(a_nb +ab_n)}{b^2} \\quad\\text{since } (a_nb -ab_n) \\ge 1\\\\ &> \\frac{a_nb}{b^2}\\\\ &= \\frac{a_n}{b}\\\\ \\end{array}$\n\nTherefore, $a_n \\le b$. But, since $a_{n+1} = a_n^2 + 2b_n^2$, $a_n$ gets arbitrarily large. This is a contradiction, so the assumption that the sequence converges to a rational is false.\n\nTherefore, the rationals are not complete.\n\nNote: This is a rewrite of my answer to this question of mine: What is the most unusual proof you know that $\\sqrt{2}$ is irrational?" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.6928633,"math_prob":0.99990845,"size":2040,"snap":"2019-51-2020-05","text_gpt3_token_len":833,"char_repetition_ratio":0.13163064,"word_repetition_ratio":0.016877636,"special_character_ratio":0.38627452,"punctuation_ratio":0.13025211,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":1.000005,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2019-12-15T08:21:06Z\",\"WARC-Record-ID\":\"<urn:uuid:73515e9c-0085-4420-bbaa-fb196496efbc>\",\"Content-Length\":\"160170\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:a0c4ddeb-dc7b-4148-aa78-7ab086db590a>\",\"WARC-Concurrent-To\":\"<urn:uuid:378c9cfe-2ccf-436e-be22-af8c654c989c>\",\"WARC-IP-Address\":\"151.101.193.69\",\"WARC-Target-URI\":\"https://math.stackexchange.com/questions/1398841/show-that-the-rationals-are-an-incomplete-metric-space-without-reference-to-real?noredirect=1\",\"WARC-Payload-Digest\":\"sha1:UQVC5PQUSSJ2C46Z2MO3KAUIXVAJZXDI\",\"WARC-Block-Digest\":\"sha1:RSOY5WPCONU327MJFSKAC2HSRIWLWT75\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2019/CC-MAIN-2019-51/CC-MAIN-2019-51_segments_1575541307797.77_warc_CC-MAIN-20191215070636-20191215094636-00517.warc.gz\"}"}
https://codeforces.com/problemset/problem/447/A
[ "A. DZY Loves Hash\ntime limit per test\n1 second\nmemory limit per test\n256 megabytes\ninput\nstandard input\noutput\nstandard output\n\nDZY has a hash table with p buckets, numbered from 0 to p - 1. He wants to insert n numbers, in the order they are given, into the hash table. For the i-th number xi, DZY will put it into the bucket numbered h(xi), where h(x) is the hash function. In this problem we will assume, that h(x) = x mod p. Operation a mod b denotes taking a remainder after division a by b.\n\nHowever, each bucket can contain no more than one element. If DZY wants to insert an number into a bucket which is already filled, we say a \"conflict\" happens. Suppose the first conflict happens right after the i-th insertion, you should output i. If no conflict happens, just output -1.\n\nInput\n\nThe first line contains two integers, p and n (2 ≤ p, n ≤ 300). Then n lines follow. The i-th of them contains an integer xi (0 ≤ xi ≤ 109).\n\nOutput\n\nOutput a single integer — the answer to the problem.\n\nExamples\nInput\n10 5021534153\nOutput\n4\nInput\n5 501234\nOutput\n-1" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.88712794,"math_prob":0.924323,"size":935,"snap":"2023-14-2023-23","text_gpt3_token_len":264,"char_repetition_ratio":0.10311493,"word_repetition_ratio":0.0,"special_character_ratio":0.29839572,"punctuation_ratio":0.12037037,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9906661,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-06-09T00:18:58Z\",\"WARC-Record-ID\":\"<urn:uuid:a6ba4f71-b360-4eaf-832d-0a503f1c64cf>\",\"Content-Length\":\"58681\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:8c762896-7f81-462e-a527-abd062f37711>\",\"WARC-Concurrent-To\":\"<urn:uuid:f109b986-1a6b-45c2-9811-b564abd56a70>\",\"WARC-IP-Address\":\"104.26.6.164\",\"WARC-Target-URI\":\"https://codeforces.com/problemset/problem/447/A\",\"WARC-Payload-Digest\":\"sha1:L5GKL5OU2UOXPOBGGEITM5NFVKKV66U3\",\"WARC-Block-Digest\":\"sha1:AHI53ITVHNIMSPMUNEWTBSGZXQCCBZSE\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-23/CC-MAIN-2023-23_segments_1685224655244.74_warc_CC-MAIN-20230609000217-20230609030217-00548.warc.gz\"}"}
https://aptitude.gateoverflow.in/4354/shopkeeper-sells-computers-gained-other-overall-percentage?show=7678
[ "# A shopkeeper sells two computers for Rs 24000 each. On first he gained 20% and on the other, he lost 20%. What is the overall gain or loss percentage?\n\n2 votes\n3k views\n\n## 3 Answers\n\n1 vote\n\nBest answer\nCost Price of Computers are 20000(the one on which he has profit) and 30000 (loss)\nTotal cost price=50000\nTotal selling price = 48000\n$\\therefore$ Loss percent = 4 %\n930 points 1 4 21\nselected\n0 votes\nsp1 = 24000\n\nfor the first comp. he gained 20% profit  which is equal to 1/5\ncp1     :        sp1\n\ndenominator: denominator+nominator\n5         :       6\n\n5*4000=20000:6*4000=24000\nsp2 = 24000\n\nfor the first comp. he loss 20% which is equal to 1/5\ncp2     :        sp2\n\ndenominator: denominator-nominator\n5         :       4\n\n5*6000=30000:4*6000=24000\ntotal cost price cp = cp1+cp2 = 20000+30000 = 50000\n\ntotal selling price sp = sp1+sp2 = 24000+24000 = 48000\n\ntotal loss = 50000-48000 = 2000\n\nhence  cp     :     sp\n\n50000:48000\n\n25:24\n\n1/25*100 = 4%\n\nhe got loss of 4% answer.\n18 points 1 1 1\n0 votes\nSimple formula, if an item is sold at same price in both cases, and if there is x% loss in one case, while x% profit in the other, than overall, there is LOSS.\n\nAnd, Loss% = (x/10)^2.\n\nTherefore, here there is LOSS, and\n\nLoss% = (20/10)^2 = 4%\n42 points 1\n\n## Related questions\n\n1 vote\n1 answer\n1\n9.6k views\n3 votes\n1 answer\n2\n14.4k views\nA alone can do a piece of work in 6 days and B alone in 8 days. A and B undertook to do it for Rs. 3200. With the help of C, they completed the work in 3 days. How much is to be paid to C? A. Rs. 375 B. Rs. 400 C. Rs. 600 D. Rs. 800\n0 votes\n0 answers\n3\n1 vote\n1 answer\n4\n2.4k views\nHad a trader bought an item at 10% less and sold it at 10% more, he would have doubled his profit percentage. What was the original profit percentage?\n2 votes\n1 answer\n5\n1.2k views\nWhat was the percentage increase in the price of oil between 1998 and 2006? 4.83% 48.3% 51.3% 4.13% 5.13% If the total oil supply had grown at the same rate as the former USSR oil supply between 2000 and 2006 how much would this have exceeded the ... was the largest combined change in supply from the previous year for OPEC, the former USSR and other Non-OECD countries? 2000 2003 2004 2005 2006" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.80200285,"math_prob":0.9615449,"size":6299,"snap":"2021-21-2021-25","text_gpt3_token_len":2207,"char_repetition_ratio":0.1056394,"word_repetition_ratio":0.22331049,"special_character_ratio":0.31671694,"punctuation_ratio":0.099186994,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.99612075,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-06-23T18:35:03Z\",\"WARC-Record-ID\":\"<urn:uuid:1919d599-3015-4917-abaa-fd4c1a5ce219>\",\"Content-Length\":\"83886\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:0f9f46af-b36a-478a-b0fc-9c23342f0342>\",\"WARC-Concurrent-To\":\"<urn:uuid:be6635da-4765-4a2e-8c42-32f98996c3e7>\",\"WARC-IP-Address\":\"104.21.93.69\",\"WARC-Target-URI\":\"https://aptitude.gateoverflow.in/4354/shopkeeper-sells-computers-gained-other-overall-percentage?show=7678\",\"WARC-Payload-Digest\":\"sha1:ISOG6C4W57B2CHLK4GWGQB76NK4R4FZZ\",\"WARC-Block-Digest\":\"sha1:RAAOI3PMMDQB7L5QKX22WKKFPKXTOK4T\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-25/CC-MAIN-2021-25_segments_1623488539764.83_warc_CC-MAIN-20210623165014-20210623195014-00337.warc.gz\"}"}
https://community.qlik.com/t5/QlikView-App-Dev/Set-Analisys-in-expression-for-more-values/m-p/1638797
[ "# QlikView App Dev\n\nDiscussion Board for collaboration related to QlikView App Development.\n\ncancel\nShowing results for\nDid you mean:", null, "", null, "Creator III\n\n## Set Analisys in expression for more values\n\nHi i have in my chart expression, this set analisys:\n\nSum({1<YEAR={'\\$(=getfieldselections(YEAR))'},SELLER={'\\$(=getfieldselections(SELLER))'}, DES_REFER={'\\$(=getfieldselections(DES_REFER))'} >} BUD_VOLUMI)\n\nif i choose one Year one seller and one des_refer, all is ok and work good, if i choose more des_refer, or more years it not work\n\nwhat the problem in the expression?\n\n2 Solutions\n\nAccepted Solutions", null, "", null, "MVP\n\nTry this\n\n``Sum({1<YEAR = p(YEAR), SELLER = p(SELLER), DES_REFER =p(DES_REFER)>} BUD_VOLUMI)``", null, "", null, "Creator III\nAuthor\n\nno sorry in this way it not work, but i find like write it good with concat:\n\nSum({1<YEAR={\\$(=chr(39) & concat(distinct YEAR, chr(39) &', ' & chr(39)) & chr(39) )}>} BUD_VOLUMI)\n\n7 Replies", null, "", null, "Contributor III\n\nMaybe is for this (in red color, copied from Qlik help):\n\nGetFieldSelections - chart function\n\nGetFieldSelections() returns a string with the current selections in a field.\n\nIf all but two, or all but one of the values are selected, the format 'NOT x,y' or 'NOT y' will be used respectively. If you select all values and the count of all values is greater than max_values, the text ALL will be returned.\n\nTry using concat instead.\n\nHope it helps.", null, "", null, "MVP\n\nTry this\n\n``Sum({1<YEAR = p(YEAR), SELLER = p(SELLER), DES_REFER =p(DES_REFER)>} BUD_VOLUMI)``", null, "", null, "Creator III\nAuthor\nThanks, but why in my set analisys not work? .... i use getfieldselections as right, if i use it in a text box i see it write all values i choose", null, "", null, "Creator III\nAuthor\nCan you write an example with concat function using my set analisys, for understand what you mean? so i can try it if work", null, "", null, "Contributor III\n\nI mean this:\n\nSum({1<YEAR={'\\$(=concat(YEAR,','))'},SELLER={'\\$(=concat(SELLER,','))'}, DES_REFER={'\\$(=concat(DES_REFER,','))'} >} BUD_VOLUMI)\n\nConcat returns a string with all the values of the field, separated by the char you say in the second parameter", null, "", null, "Creator III\nAuthor\n\nno sorry in this way it not work, but i find like write it good with concat:\n\nSum({1<YEAR={\\$(=chr(39) & concat(distinct YEAR, chr(39) &', ' & chr(39)) & chr(39) )}>} BUD_VOLUMI)", null, "", null, "Contributor III\n\nYes. You are right. This is the point when using special chars like ' in \\$ expansion expressions.\n\nIf my idea was useful, please check my suggestion as a solution.", null, "" ]
[ null, "https://community.qlik.com/legacyfs/online/avatars/a327303_avatardano_preview.png", null, "https://community.qlik.com/html/@BC7D7D60D8D99386DA9EE8FD04D175FC/rank_icons/Community_Gamification-Ranking-Icons_16x16-Creator.png", null, "https://community.qlik.com/t5/image/serverpage/image-id/36614iDF2BF326EE6025DD/image-dimensions/50x50/image-coordinates/0%2C425%2C2013%2C2438/constrain-image/false", null, "https://community.qlik.com/html/@6375DF0CA1C8344EF7192489B8AA8FAC/rank_icons/Community_Gamification-Ranking-Icons_16x16-MVP.png", null, "https://community.qlik.com/legacyfs/online/avatars/a327303_avatardano_preview.png", null, "https://community.qlik.com/html/@BC7D7D60D8D99386DA9EE8FD04D175FC/rank_icons/Community_Gamification-Ranking-Icons_16x16-Creator.png", null, "https://community.qlik.com/t5/image/serverpage/avatar-name/mexican/avatar-theme/candy/avatar-collection/food/avatar-display-size/message/version/2", null, "https://community.qlik.com/html/@562DE2FD3BDF4C5A187D4B54CF586957/rank_icons/Community_Gamification-Ranking-Icons_16x16-Contributor.png", null, "https://community.qlik.com/t5/image/serverpage/image-id/36614iDF2BF326EE6025DD/image-dimensions/50x50/image-coordinates/0%2C425%2C2013%2C2438/constrain-image/false", null, "https://community.qlik.com/html/@6375DF0CA1C8344EF7192489B8AA8FAC/rank_icons/Community_Gamification-Ranking-Icons_16x16-MVP.png", null, "https://community.qlik.com/legacyfs/online/avatars/a327303_avatardano_preview.png", null, "https://community.qlik.com/html/@BC7D7D60D8D99386DA9EE8FD04D175FC/rank_icons/Community_Gamification-Ranking-Icons_16x16-Creator.png", null, "https://community.qlik.com/legacyfs/online/avatars/a327303_avatardano_preview.png", null, "https://community.qlik.com/html/@BC7D7D60D8D99386DA9EE8FD04D175FC/rank_icons/Community_Gamification-Ranking-Icons_16x16-Creator.png", null, "https://community.qlik.com/t5/image/serverpage/avatar-name/mexican/avatar-theme/candy/avatar-collection/food/avatar-display-size/message/version/2", null, "https://community.qlik.com/html/@562DE2FD3BDF4C5A187D4B54CF586957/rank_icons/Community_Gamification-Ranking-Icons_16x16-Contributor.png", null, "https://community.qlik.com/legacyfs/online/avatars/a327303_avatardano_preview.png", null, "https://community.qlik.com/html/@BC7D7D60D8D99386DA9EE8FD04D175FC/rank_icons/Community_Gamification-Ranking-Icons_16x16-Creator.png", null, "https://community.qlik.com/t5/image/serverpage/avatar-name/mexican/avatar-theme/candy/avatar-collection/food/avatar-display-size/message/version/2", null, "https://community.qlik.com/html/@562DE2FD3BDF4C5A187D4B54CF586957/rank_icons/Community_Gamification-Ranking-Icons_16x16-Contributor.png", null, "https://community.qlik.com/skins/images/9FC7B8D3B063E55360DD02F087E0BAE4/responsive_peak/images/icon_anonymous_message.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.7310581,"math_prob":0.87003076,"size":441,"snap":"2022-40-2023-06","text_gpt3_token_len":104,"char_repetition_ratio":0.13501143,"word_repetition_ratio":0.0,"special_character_ratio":0.22222222,"punctuation_ratio":0.123595506,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9593183,"pos_list":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42],"im_url_duplicate_count":[null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2022-10-04T16:55:49Z\",\"WARC-Record-ID\":\"<urn:uuid:370d049e-3a25-4302-9859-5032534cee89>\",\"Content-Length\":\"433736\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:77eddf7c-6f4b-4624-b9e1-7d0881ce3247>\",\"WARC-Concurrent-To\":\"<urn:uuid:11f4580a-e8d5-417d-9816-4ef05b602714>\",\"WARC-IP-Address\":\"52.85.151.81\",\"WARC-Target-URI\":\"https://community.qlik.com/t5/QlikView-App-Dev/Set-Analisys-in-expression-for-more-values/m-p/1638797\",\"WARC-Payload-Digest\":\"sha1:62SOKYQYKL5MAPGTJ3LA755MMJJZ3ALR\",\"WARC-Block-Digest\":\"sha1:3X44TTU6AJH5ZNNH2RY32TA24FKCAEZ3\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2022/CC-MAIN-2022-40/CC-MAIN-2022-40_segments_1664030337516.13_warc_CC-MAIN-20221004152839-20221004182839-00013.warc.gz\"}"}
http://gf3.klix.ch/rational.html
[ "# A Rational Numbers Implementation as an Example For gf¶\n\nThe following text is taken from `gf.examples.rational`’s inline documenation.\n\nrational an Implementation of Rational Numbers\n\nThe module provides rational arithmetic. Additionally the module servers as example for the generic function package.\n\nUsually you only need its `Rational` class:\n\n```>>> from rational import Rational as R\n```\n\nRational numbers can be constructed from integers:\n\n```>>> r2 = R(1, 2)\n>>> r1 = R(1)\n>>> r0 = R()\n```\n\nConstruction from arbitrary objects is not possible: >>> R(“Urmel”) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): … NotImplementedError: Generic ‘gf.go.__init__’ has no implementation for type(s): rational.Rational, __builtin__.str\n\nRationals also have a decent string representation:\n\n```>>> r0\nRational()\n>>> print(r0)\n0\n>>> r1\nRational(1)\n>>> print(r1)\n1\n>>> r2\nRational(1, 2)\n>>> print(r2)\n1 / 2\n```\n\nOrdinary arithmetic works as expected:\n\n```>>> print(R(1, 2) + R(1, 4))\n3 / 4\n>>> 1 + R(1, 2)\nRational(3, 2)\n>>> print(R(2) / 1000)\n1 / 500\n>>> print(R(-5,-10))\n1 / 2\n>>> print(R(5, -10))\n-1 / 2\n>>> print(-R(5, -10))\n1 / 2\n```\n\nComparison also works as expected:\n\n```>>> R(1, 2) == R(2, 4)\nTrue\n>>> R(4, 2) == 2\nTrue\n>>> 1 == R(1, 2)\nFalse\n>>> 3 == R(10, 5)\nFalse\n>>> R(1, 2) < R(3, 4)\nTrue\n>>> R(1, 2) < 1\nTrue\n>>> R(1, 2) < 1\nTrue\n>>> R(1, 2) > R(1, 4)\nTrue\n>>> 1 > R(1, 2)\nTrue\n>>> 2 > R(10, 7)\nTrue\n>>> R(10, 2) >= R(5)\nTrue\n>>> R() != R(1)\nTrue\n>>> R() != 0\nFalse\n>>> 1 != R(1)\nFalse\n```\n\nThe `decimal` module is supported as well:\n\n```>>> from decimal import Decimal as D\n>>> R(D(\"0.375\"))\nRational(3, 8)\n>>> R(1, 2) + D(\"1.5\")\nRational(2)\n```\n\nEven very long decimals do work:\n\n```>>> R(D(\"7.9864829273648218372937\") * 4)\nRational(79864829273648218372937, 2500000000000000000000)\n```\n\nComparisons with `decimal.Decimal` instances are also supported:\n\n```>>> D(\"1.2\") == R(24, 20)\nTrue\n>>> D(\"1.2\") >= R(23, 20)\nTrue\n>>> R(23, 20) <= D(\"1.2\")\nTrue\n```\n\nRationals can also converted to floats:\n\n```>>> float(R(1, 4))\n0.25\n```\nclass `rational.``Rational`(*arguments)[source]\n\n`Rational` is our rational numbers class.\n\n`rational.``__add__`(*arguments)\n\nSame as a + b.\n\nCalled by the `AbstractObject.__add__()` special method. 
Also called by `AbstractObject.__radd__()` with arguments reversed.\n\nMulti methods:\n\n`gf.go.``__add__`(o0: object, o1: object)\n\nDefaults to not comparable.\n\n`rational.``__add__`(a: Rational, b: Rational)\n\n`rational.``__add__`(a: object, b: Rational)\n\nAdd an object and a rational number.\n\na is converted to a `Rational` and then both are added.\n\n`rational.``__add__`(a: Rational, b: object)\n\nAdd a rational number and an object.\n\nb is converted to a `Rational` and then both are added.\n\n`rational.``__eq__`(*arguments)\n\nSame as a == b.\n\nCalled by the `AbstractObject.__eq__()` special method.\n\nMulti methods:\n\n`gf.go.``__eq__`(o0: object, o1: object)\n\nDefaults to not comparable.\n\n`rational.``__eq__`(a: Rational, b: Rational)\n\nCompare to rational numbers for equality.\n\n`rational.``__eq__`(a: Rational, b: object)\n\nCompare a rational numbers and another object for equality.\n\n`rational.``__eq__`(a: Rational, b: int)\n\nCompare a rational numbers and an integer for equality.\n\nNote\n\nThis is an optimisation for int.\n\n`rational.``__float__`(*arguments)\n\nConvert an `AbstractObject` to a float.\n\nMulti methods:\n\n`rational.``__float__`(rational: Rational)\n\nConvert a rational to a float.\n\n`rational.``__ge__`(*arguments)\n\nSame as a >= b.\n\nCalled by the `AbstractObject.__ge__()` special method.\n\nMulti methods:\n\n`gf.go.``__ge__`(o0: object, o1: object)\n\nDefaults to not comparable.\n\n`rational.``__ge__`(a: Rational, b: Rational)\n\nAnswer True if a is bigger or equal than b.\n\n`rational.``__ge__`(a: Rational, b: object)\n\nAnswer True if a is bigger or equal than b.\n\n`rational.``__gt__`(*arguments)\n\nSame as a > b.\n\nCalled by the `AbstractObject.__gt__()` special method.\n\nMulti methods:\n\n`gf.go.``__gt__`(o0: object, o1: object)\n\nDefaults to not comparable.\n\n`rational.``__gt__`(a: Rational, b: Rational)\n\nAnswer True if a is bigger than b.\n\n`rational.``__gt__`(a: Rational, b: object)\n\nAnswer True if a is bigger than b.\n\n`rational.``__init__`(*arguments)[source]\n\n`__init__()` initializes instantiates instances of `AbstractObject` and it’s subclasses.\n\nIt has a multi method for `Object`. This multi-method does not accept any additional parameters and has no effect. 
There is no method for `AbstractObject`, therefore this class can not be instantiated.\n\nMulti methods:\n\n`gf.go.``__init__`(writer: Writer)\n\nInitialize the Write with a StringIO object.\n\n`gf.go.``__init__`(writer: Writer, file_like: object)\n\nInitialize the Write with a file like object.\n\nparam file_like\n\nA file-like object.\n\n`gf.go.``__init__`(an_object: Object)\n\nDo nothing for `Object`.\n\n`rational.``__init__`(rational: Rational, numerator: int, denominator: int, cancel: bool)\n\nInitialize the object with numerator and denominator.\n\nparam rational\n\nThe rational number to be initialized.\n\nparam numerator\n\nThe numerator.\n\nparam denominator\n\nThe denominator.\n\nparam cancel\n\nA flag indicating, that numerator`and `denominator should be canceled.\n\n`rational.``__init__`(rational: Rational, numerator: int, denominator: int)\nInitialize the object with numerator and denominator.\nparam rational\n\nThe rational number to be initialized.\n\nparam numerator\n\nThe numerator.\n\nparam denominator\n\nThe denominator.\n\nCall `__init__()` with all passed arguments and with the value of CANCEL_EAGERLY for the cancel-flag.\n\n`rational.``__init__`(rational: Rational, numerator: int)\nInitialize the object with numerator.\nparam rational\n\nThe rational number to be initialized.\n\nparam numerator\n\nThe numerator.\n\nCall `__init__()` with the denominator set to 1.\n\n`rational.``__init__`(rational: Rational)\nInitialize the object to be 0.\nparam rational\n\nThe rational number to be initialized.\n\nCall `__init__()` with the numerator set to 0.\n\n`rational.``__init__`(rational0: Rational, rational1: Rational)\nInitialize the object from another rational.\nparam rational0\n\nThe rational number to be initialized.\n\nparam rational1\n\nThe rational number the attributes are copied from.\n\n`rational.``__init__`(rational0: Rational, rational1: Rational, rational2: Rational)\nInitialize the object from another rational.\nparam rational0\n\nThe rational number to be initialized.\n\nparam rational1\n\nThe rational acting as numerator.\n\nparam rational2\n\nThe rational acting as denominator.\n\nCall `__init__()` with rational0 as numerator and rational1 / rational2 as denominator.\n\n`rational.``__init__`(rational: Rational, decimal: Decimal)\nInitialize the object from a `decimal.Decimal`.\nparam rational\n\nThe rational number to be initialized.\n\nparam decimal\n\nThe decimal number the rational is initialized from.\n\nIf the decimal’s exponent is negative compute a scaling denominator 10 ** -exponent and initialise rational with the decimal scaled by the denominator and the denominator.\n\nIn the other case the decimal is simply converted to an int and used as numerator.\n\n`rational.``__le__`(*arguments)\n\nSame as a <= b.\n\nCalled by the `AbstractObject.__le__()` special method.\n\nMulti methods:\n\n`gf.go.``__le__`(o0: object, o1: object)\n\nDefaults to not comparable.\n\n`rational.``__le__`(a: Rational, b: Rational)\n\nAnswer True if a is smaller than or equal b.\n\n`rational.``__le__`(a: Rational, b: object)\n\nAnswer True if a is smaller than or equal b.\n\n`rational.``__lt__`(*arguments)\n\nSame as a < b.\n\nCalled by the `AbstractObject.__lt__()` special method.\n\nMulti methods:\n\n`gf.go.``__lt__`(o0: object, o1: object)\n\nDefaults to not comparable.\n\n`rational.``__lt__`(a: Rational, b: Rational)\n\nAnswer True if a is smaller than b.\n\n`rational.``__lt__`(a: Rational, b: object)\n\nAnswer True if a is smaller than 
b.\n\n`rational.``__mul__`(*arguments)\n\nSame as a * b.\n\nCalled by the `AbstractObject.__mul__()` special method. Also called by `AbstractObject.__rmul__()` with arguments reversed.\n\nMulti methods:\n\n`gf.go.``__mul__`(o0: object, o1: object)\n\nDefaults to not comparable.\n\n`rational.``__mul__`(a: Rational, b: Rational)\n\nMultiply two rational numbers.\n\n`rational.``__mul__`(a: object, b: Rational)\n\nMultiply an object and a rational number.\n\na is converted to a `Rational` and then both are multiplied.\n\n`rational.``__mul__`(a: object, b: Rational)\n\nMultiply a rational and an object.\n\nb is converted to a `Rational` and then both are multiplied.\n\n`rational.``__ne__`(*arguments)\n\nSame as a != b.\n\nCalled by the `AbstractObject.__ne__()` special method.\n\nMulti methods:\n\n`gf.go.``__ne__`(o0: object, o1: object)\n\nDefaults to not comparable.\n\n`rational.``__ne__`(a: Rational, b: Rational)\n\nCompare to rational numbers for inequality.\n\n`rational.``__ne__`(a: Rational, b: object)\n\nCompare to rational numbers for inequality.\n\n`rational.``__neg__`(*arguments)\n\nSame as -a.\n\nCalled by the `AbstractObject.__neg__()` special method.\n\nMulti methods:\n\n`rational.``__neg__`(rational: Rational)\n\nNegate a rational number.\n\n`rational.``__out__`(*arguments)[source]\n\nCreate a print string of an object using a `Writer`.\n\nMulti methods:\n\n`gf.go.``__out__`(self: object, write: Writer)\n\nWrite a just `str()` of self.\n\n`gf.go.``__out__`(self: AbstractObject, write: Writer)\n\nWrite a just `str()` of self by directly calling `object.__str__()`.\n\n`rational.``__out__`(rational: Rational, writer: Writer)\n\nWrite a nice representation of the rational.\n\nDenominators that equal 1 are not printed.\n\n`rational.``__spy__`(*arguments)[source]\n\nCreate a print string of an object using a Writer.\n\nNote\n\nThe function’s name was taken from Prolog’s spy debugging aid.\n\nMulti methods:\n\n`gf.go.``__spy__`(self: object, write: Writer)\n\nWrite a just `repr()` of self.\n\n`gf.go.``__spy__`(self: AbstractObject, write: Writer)\n\nWrite a just `repr()` of self by directly calling `object.__repr__()`.\n\n`rational.``__spy__`(rational: Rational, writer: Writer)\n\nWrite a debug representation of the rational.\n\n`rational.``__sub__`(*arguments)\n\nSame as a - b.\n\nCalled by the `AbstractObject.__sub__()` special method. Also called by `AbstractObject.__rsub__()` with arguments reversed.\n\nMulti methods:\n\n`gf.go.``__sub__`(o0: object, o1: object)\n\nDefaults to not comparable.\n\n`rational.``__sub__`(a: Rational, b: Rational)\n\nSubtract two rational numbers.\n\n`rational.``__sub__`(a: object, b: Rational)\n\nSubtract an object and a rational number.\n\na is converted to a `Rational` and then both are subtracted.\n\n`rational.``__sub__`(a: Rational, b: object)\n\nSubtract a rational number and an object.\n\nb is converted to a `Rational` and then both are subtracted.\n\n`rational.``gcd`(a, b)[source]\n\n`gcd()` computes GCD of to numbers." ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.6583048,"math_prob":0.9670199,"size":8070,"snap":"2022-40-2023-06","text_gpt3_token_len":2307,"char_repetition_ratio":0.18819737,"word_repetition_ratio":0.21386306,"special_character_ratio":0.31586123,"punctuation_ratio":0.20959264,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.99360776,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-01-30T10:49:59Z\",\"WARC-Record-ID\":\"<urn:uuid:31cfb5e7-f27c-4276-97e4-3ae8ef16f021>\",\"Content-Length\":\"82413\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:6f6a113d-b4ec-48e0-84a5-01bebd91be75>\",\"WARC-Concurrent-To\":\"<urn:uuid:d049a813-56e1-45c5-87c4-4707154a33a3>\",\"WARC-IP-Address\":\"94.130.106.50\",\"WARC-Target-URI\":\"http://gf3.klix.ch/rational.html\",\"WARC-Payload-Digest\":\"sha1:UVX5SSQF3CENU7JVM2YOYKCMI3EN36ZB\",\"WARC-Block-Digest\":\"sha1:NWCWBGV7PRXAIXSGB3POCY4UMR33EOS4\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-06/CC-MAIN-2023-06_segments_1674764499816.79_warc_CC-MAIN-20230130101912-20230130131912-00036.warc.gz\"}"}
https://studentsfocus.com/ge6151-cp-important-questions-computer-programming-answer-key-cse-1st-sem-anna-university/
[ "### GE6151 CP Important Questions\n\nAnna University Regulation 2013 Computer Science & CPineering (CSE) GE6151 CP Important Questions for all 5 units are provided below. Download link for CSE 1st SEM GE6151 COMPUTER PROGRAMMING Answer Key is listed down for students to make perfect utilization and score maximum marks with our study materials.\n\nGE6151 CP Important Questions\n\nUnit – I\n\nPART – A (2 MARKS)\n\n1. What is Super Computer? Give an example\n2. Differentiate between analog and digital computers.\n3. State the characteristics of computers.\n4. How will you classify computer systems?\n5. What are the different components of a computer?\n6. What are the advantages and disadvantages of using the first generation computers?\n7. List some important hardware and software technologies of fifth generation computers.\n9. Convert the binary number 100110 into its octal equivalent.\n10. Determine the decimal equivalent of the hexadecimal number AC.C8.\n11. Convert 0.4375 decimal to binary system.\n12. Convert the binary number 11000110 into Hexadecimal number.\n13. Differentiate between RAM and ROM.\n14. Draw a flowchart to find the maximum among the three numbers.\n15. Compare and contrast flowchart and algorithm.\n16. What is meant by pseudo code?\n17. What is an algorithm?\n18. Write an algorithm to compute the factorial of a number.\n19. Write the pseudo code to find the given year is a leap year or not.\n20. Give the advantages and limitations of Pseudo code.\n\nPART – B (16 MARKS)\n1. (i) Describe the characteristics of the computer. (ii) Explain briefly the developments in computer technology starting from a simple calculating machine to the first computer.\n2. Explain in detail the different generation of computers.\n3. Describe the different classification of computers.\n4. Explain in detail about the various components of a computer system with block diagram. (or) Explain the organization of a computer.\n5. Explain the various types of computer memory.\n6. Convert the following: (i) Convert (6245.14)8 to its decimal equivalent. (ii) Convert(111001.101)2 to its decimal equivalent. (iii) Convert the following numbers into their binary equivalent. a. (59.6825)10 b. (EBC)16 c. (654)8 (iv) Convert the following numbers into their binary equivalent. a. FAC16 b. 5618 Page 2\n7. Explain the program development life cycle in detail.\n8. Explain the need for an algorithm and highlight its advantages. Write an algorithm to find the greatest among three numbers. 9. Mention the guidelines in detail while drawing a flowchart with examples and list out the merits and demerits of flowcharting. 10. Explain pseudo code with an example and briefly discuss the different pseudo code structures. Differentiate algorithm, flowchart and pseudo code.\nIf you require any other notes/study materials, you can comment in the below section." ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.79535025,"math_prob":0.9097037,"size":3379,"snap":"2022-40-2023-06","text_gpt3_token_len":751,"char_repetition_ratio":0.13125926,"word_repetition_ratio":0.034155596,"special_character_ratio":0.24060373,"punctuation_ratio":0.13548388,"nsfw_num_words":1,"has_unicode_error":false,"math_prob_llama3":0.9892575,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-02-05T07:55:40Z\",\"WARC-Record-ID\":\"<urn:uuid:a56dc5fa-466f-43b4-a39b-3641593035fd>\",\"Content-Length\":\"51637\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:482da058-6ef6-403b-992a-e7ad9a24dd9e>\",\"WARC-Concurrent-To\":\"<urn:uuid:b23e7e7b-c075-4d8a-bb68-61853bd58d44>\",\"WARC-IP-Address\":\"65.1.77.96\",\"WARC-Target-URI\":\"https://studentsfocus.com/ge6151-cp-important-questions-computer-programming-answer-key-cse-1st-sem-anna-university/\",\"WARC-Payload-Digest\":\"sha1:OLHPL2ASL4CDKVXANVEVVK4DOE626NLJ\",\"WARC-Block-Digest\":\"sha1:FSNFFL27NU6GGR6ZHIWGJ2V3J3E43372\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-06/CC-MAIN-2023-06_segments_1674764500250.51_warc_CC-MAIN-20230205063441-20230205093441-00783.warc.gz\"}"}
https://www.chromatographyonline.com/view/role-surface-coverage-and-orthogonality-metrics-two-dimensional-chromatography
[ "# The Role of Surface Coverage and Orthogonality Metrics in Two-Dimensional Chromatography\n\nLCGC Europe\n\nLCGC Europe, LCGC Europe-07-01-2017, Volume 30, Issue 7\nPages: 346–351\n\nThe enhanced separation power of two-dimensional (2D) chromatography has become accessible thanks to the commercialization of dedicated two-dimensional systems. However, with great separation power comes great system complexity. All two-dimensional systems require a means for collecting and transferring fractions of the first dimension to the second dimension typically via a loop-based interface in on-line methods. It is important to collect a sufficient number of fractions to prevent loss of the first dimension resolution; that is, the sampling rate must be sufficient to prevent undersampling. Another key parameter to consider is selectivity. By coupling two selectivities that have unrelated retention mechanisms we are able to exploit the different physiochemical characteristics of the sample we wish to separate. This is the concept behind the term orthogonality. By coupling orthogonal selectivities and reducing under‑sampling, our system should be able to achieve the theoretical maximum two-dimensional peak\n\nMichelle Camenzuli, Centre for Analytical Sciences in Amsterdam (CASA), Analytical Chemistry group, Van ’t Hoff Institute for Molecular Sciences, University of Amsterdam, Amsterdam, The Netherlands\n\nThe enhanced separation power of two-dimensional (2D) chromatography has become accessible thanks to the commercialization of dedicated two-dimensional systems. However, with great separation power comes great system complexity. All two-dimensional systems require a means for collecting and transferring fractions of the first dimension to the second dimension typically via a loop-based interface in on-line methods. It is important to collect a sufficient number of fractions to prevent loss of the first dimension resolution; that is, the sampling rate must be sufficient to prevent undersampling. Another key parameter to consider is selectivity. By coupling two selectivities that have unrelated retention mechanisms we are able to exploit the different physiochemical characteristics of the sample we wish to separate. This is the concept behind the term orthogonality. By coupling orthogonal selectivities and reducing under‑sampling, our system should be able to achieve the theoretical maximum two-dimensional peak capacity. Unfortunately, this is virtually impossible to achieve with current technology. It follows that it is important to be able to calculate the actual (conditional) peak capacity of our two-dimensional chromatographic system. To calculate this, we need to know the first dimension sampling time and the proportion of the separation space occupied by peaks; the latter is referred to as surface coverage. This review discusses the role of orthogonality metrics and surface coverage metrics and their relationship to selectivity and peak capacity in two-dimensional chromatography.\n\nSetting up a two-dimensional (2D) chromatographic system involves more than building a system that uses two columns to separate a sample into its components. To achieve this effectively, an understanding of the concept of dimensionality in chromatography is required.\n\nThere are three key aspects to this concept: sample dimensionality, apparent sample dimensionality, and the system dimensionality (1). The relationship between these concepts is illustrated in Figure 1. 
All of these aspects should be considered when setting up a 2D system. The first aspect, sample dimensionality, refers to the number of independent factors that can be used to characterize or separate the sample (1). For example, the number of carbon units in the structure of the various sample components is a factor that can be used to describe or separate the components of a sample of alkyl benzenes.\n\nFor a sample containing a mixture of proteins, sample components could be defined by their molecular weight, their isoelectric point, or their affinity for a certain antibody for example. Each factor that can be used to characterize the individual components of the sample is regarded as one dimension. It follows that the protein sample mentioned above can be described as multidimensional whereas the alkyl benzene sample is one-dimensional. The apparent sample dimensionality follows the same logic as described above. However, it refers to the number of factors that the analyst is interested in or are actually used to separate the sample into its components (1). For example, if we again consider a mixture of proteins, and the molecular weight was the only property used to separate the sample components, then the apparent sample dimensionality would be one, even though the sample itself is multidimensional.\n\nWhen the sample dimensionality is understood, the various factors of this dimensionality should be considered when choosing appropriate separation techniques or retention mechanisms to combine to build the 2D system most appropriate for exploiting the sample dimensionality. In other words, the system dimensionality should be appropriate for the (apparent) sample dimensionality. System dimensionality is defined as the number of different separation stages where different retention mechanisms are employed (1). For example, a chromatographic system employing a C18 column and an ion exchange column would be considered 2D since it incorporates two separation stages with two different retention mechanisms. Conversely, a system comprised of two C18 columns using the same mobile phase for both dimensions could be considered a one-dimensional (1D) system, despite the involvement of two columns since both dimensions would separate the sample based primarily on hydrophobicity. Such a system would essentially be equivalent to a long C18 column. It should be noted that the stationary phase selectivity in each dimension is not the only factor that can determine whether the system is one-dimensional or multidimensional: the mobile phases used in each dimension play an equally important role. For example, a two-dimensional system can consist of two C18 columns when mobile phases are used to generate a selectivity difference in each dimension, if the sample itself is multidimensional. Such is the case for peptides, which can be separated based on hydrophobicity, size, and charge. The latter property was exploited by employing an acidic mobile phase (pH 2.6) in the first dimension and a basic mobile phase (pH 10) in the second dimension with both dimensions using C18 stationary phases (2). In acidic mobile phase conditions, acidic peptides (pKa 3 or below) would be protonated and therefore retained on the C18 stationary phase, whilst “neutral” and basic peptides (pKa above 7) would be ionized, consequently having reduced retention. In basic conditions, the reverse is true. 
This system produced greater separation power, in terms of practical peak capacity, compared to the commonly used reversed phase × strong cation exchange 2D system.

It follows that when setting up an appropriate 2D system for a particular sample, the analyst chooses the appropriate separation mechanisms (stationary phase and mobile phase) to exploit the dimensionality of the sample. Such a system would have one dimension of the system exploiting one aspect of the sample dimensionality and a second dimension that makes use of another aspect of the sample dimensionality. Ideally, there would be no overlap between the sample dimensions that the system dimensions exploit. Such a system would be considered orthogonal. An orthogonal 2D system could approach the theoretical maximum peak capacity, which is the product of the peak capacities in the first and second dimension (1,3).

Orthogonality and Selectivity in Two-Dimensional Chromatography

While it is known that maximum separation power (in terms of peak capacity) can be achieved by selecting orthogonal selectivities for the first and second dimension, it is not always a simple process to choose appropriate selectivities. Many stationary phases share a certain degree of similarity between their retention mechanism and the retention mechanism of other stationary phases. This is particularly the case when combining reversed-phase liquid chromatography phases in a 2D system. For example, it is possible to use a cyano column and a C18 column for the separation of coffee and still achieve a reasonable degree of orthogonality, because the cyano column is capable of participating in π-π interactions with the aromatic components of coffee (4,5). However, the cyano stationary phase is also capable of interacting with solutes based on their degree of hydrophobicity, which is a retention mechanism it has in common with the C18 stationary phase. While these two selectivities exploit two different sample dimensions and form a system with a dimensionality equal to 2 according to the theory of Giddings (1), the system is not completely orthogonal and the theoretical maximum peak capacity cannot be equated to the actual peak capacity. It follows from this example that orthogonality is not a binary "yes or no" concept. Rather, orthogonality comes in degrees and cannot be entirely predicted by coupling two systems that in principle separate with different retention mechanisms. This is where orthogonality metrics become useful. These metrics allow chromatographers to assess how effectively their chosen selectivities distribute sample components throughout the 2D separation space. It has been argued that experienced chromatographers can adequately assess orthogonality themselves, without using metrics. While this is true to a degree, it should be appreciated that orthogonality metrics provide an assessment unbiased by user inclinations or day-to-day variability, and this makes them particularly valuable for inclusion in industrial quality assurance. In addition, orthogonality metrics can serve as a guide to help the analyst keep track of the success of their method development procedures. For example, an analyst may be testing a number of different selectivities to determine which will give the optimal 2D system for their particular sample. By calculating the orthogonality for each selectivity couple, they can gain a better understanding of the physiochemical aspects that play a role in separating the sample.
This may lead to the selection of columns whose retention mechanisms target these physiochemical properties, eventually leading to the development of the most orthogonal system possible for their sample.

There are a wide range of orthogonality metrics. Most of these were recently compared by Schure and Davis (6). Their study compared the assessment of 20 orthogonality metrics applied to 47 experimental chromatograms. The assessments of the orthogonality metrics were compared to those given by expert reviewers who assessed the chromatograms visually based on their experience in 2D chromatography. A couple of key points from this study include the observation that while the expert reviewers agreed on which were the best and which were the worst chromatograms, their assessments of the "mediocre" chromatograms were variable. This implies that the value of orthogonality metrics is their ability to provide constant, reliable assessments of orthogonality throughout the range of possible degrees of orthogonality. The other important point from their study was that no single metric stood out as the best for assessing orthogonality. Methods reporting metrics that appeared as good indicators of orthogonality included the convex hull, dimensionality, and information theory. Recently developed metrics that were not tested in the study of Schure and Davis were the asterisk equations (7) and the maximal information coefficient (8). In the interests of conciseness, we will briefly discuss the convex hull, dimensionality, asterisk, maximal information coefficient, and the bin counting methods. The latter have proven very popular in chromatography.

The bin counting methods are intuitive, simple to use, and are effective in assessing the orthogonality of 2D separations. There are two versions of the bin counting methods that are conceptually very similar (2,9). Both methods divide the separation space into boxes or bins, where the number of bins equals the number of components within the sample. The width of the bins corresponds to the average peak width. In the original method, the number of bins containing peaks is summed up and compared with the total number of bins via equation 1 (2):

O = (∑bins − √Pmax) / (0.63 × Pmax)     [1]

Where Pmax is the total number of bins. O = 1 for an orthogonal separation, based on the observation that systems close to orthogonal have a ratio of occupied bins to total bins of 0.63. Bins are also summed up in the second version of the bin counting methods (9). The difference between this method and the original one is that firstly a "fence" is drawn around the area containing bins with peaks. The bins within this enclosed area are summed whether they have peaks or not. The number of bins within the enclosure is compared to the total number of bins to produce the value of orthogonality. Again, this value will range from 0 for a nonorthogonal system and reach a maximum of 1 for a fully orthogonal system where each bin contains one peak (9). While these methods are intuitive and easy to implement, the key limitation that they face is the necessity to know the number of components within the sample. This is not always possible for complex samples, such as protein digests. The consequence is that an insufficient number of bins may be used, causing an inflated value of orthogonality. Alternatively, using too many bins will artificially deflate the value of orthogonality.

Dimensionality as an orthogonality metric also uses bins to divide the separation space into sections.
Yet unlike the bin counting method described above, it is not necessary to know the number of sample components. The size of the bins or intervals is scaled relative to the first eluting and last eluting peaks in the dimension being considered, using equation 2 (10):

εi = (t'max − t'min) / i     [2]

Where εi is the interval width, and t'max and t'min are the normalized retention times for the last and the first eluting peak, respectively. Retention times are normalized as per equation 3 (10), which is the first step in calculating orthogonality for most (if not all) metrics.

t'i = (ti − tmin) / (tmax − tmin)     [3]

t'i is the normalized retention time of peak i, ti is the retention time of peak i, and tmin and tmax are the retention times of the first and last eluting peaks, respectively. It follows that the normalized separation space would range from 0 to 1 on each axis or dimension, hence the 1/i in equation 2. The value i in equation 2 varies in value from 1 up to some maximum value. The number (N) of bins or intervals required to cover the separation space at a given interval width is determined by the user. A plot of log N versus log εi is constructed, and the least squares regression slope of the plot is multiplied by -1 to give the value of dimensionality (D) (10). For a 2D separation, the width of the intervals varies with respect to both dimensions. A completely orthogonal 2D separation gives a value of 2.00. Conversely, a nonorthogonal separation would give a value of 1.00 for D, indicating that the separation is in fact 1D, in agreement with the Giddings concept of dimensionality discussed earlier (10). One limitation of the dimensionality orthogonality metric is apparent when insufficient data is distributed throughout the separation space. To compensate for this the user can include a "step" value within the value of i so that the corresponding log N versus log εi plot is smoother, improving the reliability of the calculation of D (10). User defined variables such as i in this method can introduce a source of variability into the reported metric, which is undesirable when different users are comparing orthogonality values. Recently another orthogonality metric that also scales the bin width was developed; in this case by changing the grid resolution. The method is almost identical to the method of Zeng, Hugel, and Marriott (11), with the exception that the maximal information coefficient (MIC) is used in place of the least squares linear regression coefficient (R2). The metric for orthogonality using the MIC is calculated via equation 4 (8).

Where ∑bins is the sum of bins containing peaks and Pmax is the maximum theoretical peak capacity according to Giddings's theory (1). The authors put forward MIC as a replacement for R2 on the basis that it considers nonlinear correlation as well as linear correlation. While it was shown that using equation 4 with MIC rather than R2 improved the method (8), they did not compare this metric with other metrics, nor did they investigate the effect of the number of sample components on O, which is known to affect almost all orthogonality metrics.

There are a number of metrics that do not require the use of bins or intervals. The convex hull is one of them (12). There are numerous types of convex hulls but they all share the same concept: a polygon of the smallest possible size is used to fence the area containing peaks. Naturally the fenced area will contain some portion of the separation space that does not contain peaks, which may add some bias to the reported value of orthogonality.
Some versions of the convex hull, such as the α-hull and the local convex hull, require some user input in setting certain parameters that govern the size of the hull (12). Another metric that does not require the division of the separation space is the asterisk equations (7). These equations are based on the distance of peaks from four lines that cross over the separation space and act as a reference, rather than creating divisions, as illustrated in Figure 2.\n\nThe standard deviation of the distances of every peak from each Z line is determined using equations 5–8 (7).\n\n\n\n\n\n\n\n\n\nIn equations 5–8, the expression in the curly brackets calculates the distance of peak i from the Z line in question; the standard deviation of these distances is determined as indicated by the σ outside the curly brackets. 1tR,norm(i) and 2tR,norm(i) are the normalized retention times for peak i in the first and second dimension, respectively. Retention times are normalized using equation 3. To express the resulting standard deviation of distances on the same scale for all Z lines, these S values are transformed to Z values using equations 9–12 (7).\n\n\n\n\n\n\n\n\n\nBecause the Z values range from 0 to 1, they can readily be reported as a percentage. Each Z value describes the degree of clustering with respect to that line. This can be used to pinpoint regions of the separation space that have a relatively high degree of clustering of peaks. The Z values are combined in equation 13 to give the metric for orthogonality, AO (7).\n\n\n\nSince the Z values are reported as a percentage, it follows that AO ranges from 0 to 100%, where a value of 100% indicates a completely orthogonal 2D separation. The benefits of the asterisk equations are that they are easy to implement in simple spreadsheet software such as Microsoft Excel, they are intuitive, they are not biased by user-defined parameters, and it is not necessary to know the number of sample components. That said, one limitation of this method is that AO is suppressed when there are 25 or fewer peaks in the separation (7). In such cases it should only be used to compare chromatograms for a single sample under different conditions, in a qualitative manner.\n\nSurface Coverage and its Relationship with Orthogonality\n\nThere are numerous metrics for surface coverage (2,9,12) and orthogonality (2,7,8,10,13–23). Conceptually, they are related. If two selectivities are combined that produce a nonorthogonal separation for a given sample, then the surface coverage will be reduced compared to that of an orthogonal separation for the same sample. Consequently, surface coverage metrics can be used to report orthogonality. That being said, the reverse is generally accepted as not valid. This is because orthogonality metrics generally only consider the degree of similarity of the selectivities of the two dimensions. On the other hand, surface coverage metrics consider the distribution of peaks within the separation space in a geometric manner. This gives us an idea of the proportion of space that is accessible to sample components and therefore peaks. For example, the asterisk equations and dimensionality metric discussed above describe the distribution of peaks throughout the separation space but do not describe the proportion of space accessed by peaks. Conversely, the convex hull and bin counting methods describe the proportion of space accessed by peaks and consequently describe the surface coverage. 
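To make the bin counting idea concrete, the following short Python sketch implements the first bin counting metric (equation 1) for peaks whose retention times have already been normalized to the 0–1 range with equation 3. It is an illustration only: the function name, the square grid, and the toy data are assumptions of this sketch, not details taken from the cited papers.

```python
import numpy as np

def bin_counting_orthogonality(t1, t2, n_components):
    # Equation 1 (bin counting), assuming a square grid whose total number
    # of bins Pmax is close to the number of sample components.
    side = int(round(np.sqrt(n_components)))      # bins per axis
    p_max = side * side                           # total number of bins, Pmax
    # Assign each peak to a bin; clip so a peak at t = 1.0 lands in the last bin.
    ix = np.clip((np.asarray(t1) * side).astype(int), 0, side - 1)
    iy = np.clip((np.asarray(t2) * side).astype(int), 0, side - 1)
    occupied = len(set(zip(ix.tolist(), iy.tolist())))   # bins containing peaks
    return (occupied - np.sqrt(p_max)) / (0.63 * p_max)  # O from equation 1

# Toy data: 100 peaks. Perfectly correlated dimensions give O near 0;
# uncorrelated dimensions give a much larger O (approaching 1 as Pmax grows).
rng = np.random.default_rng(0)
t1 = rng.random(100)
print("correlated  :", round(bin_counting_orthogonality(t1, t1, 100), 2))
print("uncorrelated:", round(bin_counting_orthogonality(t1, rng.random(100), 100), 2))
```

Note how the sketch also makes the method's key limitation visible: the grid size, and hence the reported O, depends on knowing (or guessing) the number of sample components.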
While the distinction between orthogonality and surface coverage does not make much difference in choosing selectivities, it does affect our ability to calculate the actual peak capacity of a 2D system. In practice, the actual peak capacity of a comprehensive 2D system, known as the conditional peak capacity (n′c,2D), is given by equation 14 (24).\n\nn′c,2D = (1nc × 2nc / <β>) × fcoverage     [14]\n\nWhere 1nc and 2nc denote the peak capacity of the first and second dimension, respectively. This equation considers the two practical aspects that limit us from achieving the theoretical peak capacity: coverage of the separation space by peaks (fcoverage) and undersampling of the first dimension (<β>). Since fcoverage describes the proportion of the separation space that is accessible to peaks, it follows that surface coverage metrics can act as fcoverage in equation 14, so long as they range in value from 0 to 1 as the equation requires. The undersampling parameter <β> is given by equation 15 (24).\n\n<β> = √(1 + 0.21(ts/1σ)²)     [15]\n\nWhere ts is the first-dimension sampling time and 1σ is the peak standard deviation in the first dimension prior to sampling. It follows that this value must be an average across all peaks, since there is only one value for <β> in equation 14. While orthogonality metrics are useful for assessing the various combinations of selectivities to construct an optimal 2D system, if calculating peak capacity is important for your application then surface coverage metrics are the ones to use.\n\nConclusion\n\nIdeally, the peak capacity of a 2D chromatographic system should equate to the product of the peak capacities of the first and second dimension. In reality this is virtually impossible to achieve. Two key limitations preventing many 2D separations from achieving ideal peak capacity are undersampling of the first dimension and the limited ability of the system to distribute peaks evenly throughout the separation space. The former is relatively well understood and can be accounted for in the computation of the conditional 2D peak capacity using the sampling time. The ability of the system to distribute peaks evenly throughout the separation space is not so easily accounted for. Surface coverage metrics can be used to determine the proportion of separation space accessible to peaks. These metrics typically consider the distribution of peaks relative to the total separation space without accounting for the effect of peak width on consuming separation space. However, if the goal is to screen a number of selectivities to gauge which combination will provide the optimal 2D separation, then orthogonality metrics are useful.\n\nAcknowledgements\n\nThe author would like to acknowledge Dr. Alina Astefanei and Dr. Andrea Gargano for their insightful comments during the editing phase of this manuscript. The following institutions are acknowledged for their financial contributions regarding the asterisk equations (part of the HYPERformance LC×LC project) mentioned in this review: Akzo Nobel, Avantor, DSM, RIKILT, Shell, Syngenta, Thermo Fisher Scientific, TNO, University of Amsterdam, and the University of Groningen.\n\nReferences\n\n1. J.C. Giddings, J. Chromatogr. A 703, 3–15 (1995).\n2. M. Gilar, P. Olivova, A.E. Daly, and J.C. Gebler, Anal. Chem. 77, 6426–6434 (2005).\n3. J.C. Giddings, Anal. Chem. 56, 1258A–1260A, 1262A, 1264A passim (1984).\n4. M. Mnatsakanyan, P.G. Stevenson, D. Shock, X.A. Conlan, T.A. Goodie, K.N. Spencer, et al., Talanta 82, 1349–1357 (2010).\n5. M. Mnatsakanyan, P.G. Stevenson, X.A. Conlan, P.S. Francis, T.A. Goodie, G.P. 
McDermott, et al., Talanta 82, 1358–1363 (2010).\n6. M.R. Schure and J.M. Davis, J. Chromatogr. A 1414, 60–76 (2015).\n7. M. Camenzuli and P.J. Schoenmakers, Anal. Chim. Acta 838, 93–101 (2014).\n8. A. Mani-Varnosfaderani and M. Ghaemmaghami, J. Chromatogr. A 1415, 108–114 (2015).\n9. M. Gilar, J. Fridrich, M.R. Schure, and A. Jaworski, Anal. Chem. 84, 8722–8732 (2012).\n10. M.R. Schure, J. Chromatogr. A 1218, 293–302 (2011).\n11. Z.-D. Zeng, H.M. Hugel, and P.J. Marriott, Anal. Chem. 85, 6356–6363 (2013).\n12. S.C. Rutan, J.M. Davis, and P.W. Carr, J. Chromatogr. A 1255, 267–276 (2012).\n13. Z. Liu and D. Patterson Jr., Anal. Chem. 67, 3840–3845 (1995).\n14. P.J. Slonecker, X. Li, T.H. Ridgway, and J.G. Dorsey, Anal. Chem. 68, 682–689 (1996).\n15. J.W. Dolan, A. Maule, D. Bingley, L. Wrisley, C.C. Chan, M. Angod, et al., J. Chromatogr. A 1057, 59–74 (2004).\n16. E. Van Gyseghem, I. Crosiers, S. Gourvénec, D.L. Massart, and Y. Van der Heyden, J. Chromatogr. A 1026, 117–128 (2004).\n17. E. Van Gyseghem, M. Jimidar, R. Sneyers, D. Redlich, E. Verhoeven, D.L. Massart, et al., J. Chromatogr. A 1042, 69–80 (2004).\n18. P. Forlay-Frick, E. Van Gyseghem, K. Héberger, and Y. Van der Heyden, Anal. Chim. Acta 539, 1–10 (2005).\n19. U.D. Neue, J.E. O’Gara, and A. Méndez, J. Chromatogr. A 1127, 161–174 (2006).\n20. C. West and E. Lesellier, J. Chromatogr. A 1203, 105–113 (2008).\n21. R. Al Bakain, I. Rivals, P. Sassiat, D. Thiébaut, M.C. Hennion, G. Euvrard, et al., J. Chromatogr. A 1218, 2963–2975 (2011).\n22. W. Nowik, M. Bonose, S. Héron, M. Nowik, and A. Tchapla, Anal. Chem. 85, 9459–9468 (2013).\n23. W. Nowik, S. Héron, M. Bonose, M. Nowik, and A. Tchapla, Anal. Chem. 85, 9449–9458 (2013).\n24. D.R. Stoll, X. Wang, and P.W. Carr, Anal. Chem. 80, 268–278 (2008).\n\nMichelle Camenzuli is a tenure-track assistant professor within the analytical chemistry group at the University of Amsterdam, The Netherlands. At the moment her research is primarily focused on developing new methods and column technology for proteomics. In 2014 she completed a 1-year post-doctorate focused on orthogonality in two-dimensional liquid chromatography with Peter Schoenmakers at the University of Amsterdam. During her post-doctorate she developed a new metric for orthogonality known as the asterisk equations. Camenzuli currently has 20 publications in peer-reviewed journals and one patent for reaction flow chromatography." ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.9313363,"math_prob":0.9414754,"size":24589,"snap":"2021-43-2021-49","text_gpt3_token_len":4845,"char_repetition_ratio":0.17437463,"word_repetition_ratio":0.09538702,"special_character_ratio":0.1868315,"punctuation_ratio":0.0775964,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9704339,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-10-15T23:27:01Z\",\"WARC-Record-ID\":\"<urn:uuid:6e5bce63-1c5f-4bed-bb15-de6fee358115>\",\"Content-Length\":\"362228\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:d33df825-760b-4b3d-ab6d-1597ccd13141>\",\"WARC-Concurrent-To\":\"<urn:uuid:150727ae-0033-46c5-9181-731cfbd4d7ac>\",\"WARC-IP-Address\":\"76.76.21.21\",\"WARC-Target-URI\":\"https://www.chromatographyonline.com/view/role-surface-coverage-and-orthogonality-metrics-two-dimensional-chromatography\",\"WARC-Payload-Digest\":\"sha1:QNJVURAOYPF5T2B3WCJBFXWZFHCUCDXE\",\"WARC-Block-Digest\":\"sha1:TMFI36SLD4ACADOUOXXDASIYUFS4VBAF\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-43/CC-MAIN-2021-43_segments_1634323583087.95_warc_CC-MAIN-20211015222918-20211016012918-00307.warc.gz\"}"}
https://calculatorsonline.org/percentage-to-fraction/55.13
[ "# 55.13 percent as a fraction\n\nHere you will see step by step solution to convert 55.13 percent into fraction. 55.13 percent as a fraction is 5513/10000. Please check the explanation that how to write 55.13% as a fraction.\n\n## Answer: 55.13% as a fraction is\n\n= 5513/10000\n\n### How to convert 55.13 percent to fraction?\n\nTo convert the 55.13% as a fraction form simply divide the number by 100 and simplify the fraction by finding GCF. If given number is in decimal form then multiply numerator and denominator by 10^n and simplify it as much as possible.\n\n#### How to write 55.13% as a fraction?\n\nFollow these easy steps to convert 55.13% into fraction-\n\nGiven number is => 55.13\n\n• Write down the 55.13 in a percentage form like this:\n• 55.13% = 55.13/100\n• Since, 55.13 is not a whole number, now we need multiply numerator and denominator by 10^n, n = decimal points in number. [n=2]\n• => 55.13 × 100/100 × 100 = 5513/10000\n• We also need to check to simplify the fraction.\n• Greatest common factor [GCF] of 5513 and 10000 is 1, so this is the simplest form is 5513/10000.\n• Conclusion: 55.13% = 5513/10000\n\nTherefore, the 55.13 percent as a fraction, final answer is 5513/10000." ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.91103977,"math_prob":0.9989554,"size":762,"snap":"2023-14-2023-23","text_gpt3_token_len":214,"char_repetition_ratio":0.21240106,"word_repetition_ratio":0.031496063,"special_character_ratio":0.339895,"punctuation_ratio":0.14201184,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.99976856,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-05-29T12:50:44Z\",\"WARC-Record-ID\":\"<urn:uuid:35f46fef-c0d7-458f-84ff-1b5050c3721c>\",\"Content-Length\":\"16700\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:0dc2eaa0-de57-483f-b210-291535b96677>\",\"WARC-Concurrent-To\":\"<urn:uuid:c087a84b-cef5-4adc-9b63-e4bf60395816>\",\"WARC-IP-Address\":\"104.21.85.191\",\"WARC-Target-URI\":\"https://calculatorsonline.org/percentage-to-fraction/55.13\",\"WARC-Payload-Digest\":\"sha1:ANV6PGOWK2JTZ3HPFAZQQPWBFYNJSRBI\",\"WARC-Block-Digest\":\"sha1:BZ3TFYW7YNNE3JXZR73UU3OJOJ5BZLSJ\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-23/CC-MAIN-2023-23_segments_1685224644855.6_warc_CC-MAIN-20230529105815-20230529135815-00295.warc.gz\"}"}
https://tensorflow.google.cn/model_optimization/guide/pruning/pruning_with_keras?hl=id
[ "# Pruning in Keras example\n\n## Overview\n\nWelcome to an end-to-end example for magnitude-based weight pruning.\n\n### Other pages\n\nFor an introduction to what pruning is and to determine if you should use it (including what's supported), see the overview page.\n\nTo quickly find the APIs you need for your use case (beyond fully pruning a model with 80% sparsity), see the comprehensive guide.\n\n### Summary\n\nIn this tutorial, you will:\n\n1. Train a `tf.keras` model for MNIST from scratch.\n2. Fine tune the model by applying the pruning API and see the accuracy.\n3. Create 3x smaller TF and TFLite models from pruning.\n4. Create a 10x smaller TFLite model from combining pruning and post-training quantization.\n5. See the persistence of accuracy from TF to TFLite.\n\n## Setup\n\n```` pip install -q tensorflow-model-optimization`\n```\n``````import tempfile\nimport os\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom tensorflow import keras\n\n``````\n\n## Train a model for MNIST without pruning\n\n``````# Load MNIST dataset\nmnist = keras.datasets.mnist\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\n# Normalize the input image so that each pixel value is between 0 and 1.\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\n# Define the model architecture.\nmodel = keras.Sequential([\nkeras.layers.InputLayer(input_shape=(28, 28)),\nkeras.layers.Reshape(target_shape=(28, 28, 1)),\nkeras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation='relu'),\nkeras.layers.MaxPooling2D(pool_size=(2, 2)),\nkeras.layers.Flatten(),\nkeras.layers.Dense(10)\n])\n\n# Train the digit classification model\nloss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\nmetrics=['accuracy'])\n\nmodel.fit(\ntrain_images,\ntrain_labels,\nepochs=4,\nvalidation_split=0.1,\n)\n``````\n```Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz\n11493376/11490434 [==============================] - 0s 0us/step\nEpoch 1/4\n1688/1688 [==============================] - 10s 6ms/step - loss: 0.2785 - accuracy: 0.9220 - val_loss: 0.1031 - val_accuracy: 0.9740\nEpoch 2/4\n1688/1688 [==============================] - 9s 5ms/step - loss: 0.1063 - accuracy: 0.9691 - val_loss: 0.0782 - val_accuracy: 0.9790\nEpoch 3/4\n1688/1688 [==============================] - 9s 5ms/step - loss: 0.0815 - accuracy: 0.9765 - val_loss: 0.0788 - val_accuracy: 0.9775\nEpoch 4/4\n1688/1688 [==============================] - 9s 5ms/step - loss: 0.0689 - accuracy: 0.9797 - val_loss: 0.0633 - val_accuracy: 0.9840\n<tensorflow.python.keras.callbacks.History at 0x7f146fbd8bd0>\n```\n\nEvaluate baseline test accuracy and save the model for later usage.\n\n``````_, baseline_model_accuracy = model.evaluate(\ntest_images, test_labels, verbose=0)\n\nprint('Baseline test accuracy:', baseline_model_accuracy)\n\n_, keras_file = tempfile.mkstemp('.h5')\ntf.keras.models.save_model(model, keras_file, include_optimizer=False)\nprint('Saved baseline model to:', keras_file)\n``````\n```Baseline test accuracy: 0.9775999784469604\nSaved baseline model to: /tmp/tmpjj6swf59.h5\n```\n\n## Fine-tune pre-trained model with pruning\n\n### Define the model\n\nYou will apply pruning to the whole model and see this in the model summary.\n\nIn this example, you start the model with 50% sparsity (50% zeros in weights) and end with 80% sparsity.\n\nIn the comprehensive guide, you can see how to prune some layers for model accuracy improvements.\n\n``````import 
tensorflow_model_optimization as tfmot\n\nprune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude\n\n# Compute end step to finish pruning after 2 epochs.\nbatch_size = 128\nepochs = 2\nvalidation_split = 0.1 # 10% of training set will be used for validation set.\n\nnum_images = train_images.shape[0] * (1 - validation_split)\nend_step = np.ceil(num_images / batch_size).astype(np.int32) * epochs\n\n# Define model for pruning.\npruning_params = {\n  'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0.50,\n                                                           final_sparsity=0.80,\n                                                           begin_step=0,\n                                                           end_step=end_step)\n}\n\nmodel_for_pruning = prune_low_magnitude(model, **pruning_params)\n\n# `prune_low_magnitude` requires a recompile.\nmodel_for_pruning.compile(optimizer='adam',\n  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n  metrics=['accuracy'])\n\nmodel_for_pruning.summary()\n``````\n```/tmpfs/src/tf_docs_env/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py:2191: UserWarning: `layer.add_variable` is deprecated and will be removed in a future version. Please use `layer.add_weight` method instead.\nModel: "sequential"\n_________________________________________________________________\nLayer (type) Output Shape Param #\n=================================================================\nprune_low_magnitude_reshape (None, 28, 28, 1) 1\n_________________________________________________________________\nprune_low_magnitude_conv2d ( (None, 26, 26, 12) 230\n_________________________________________________________________\nprune_low_magnitude_max_pool (None, 13, 13, 12) 1\n_________________________________________________________________\nprune_low_magnitude_flatten (None, 2028) 1\n_________________________________________________________________\nprune_low_magnitude_dense (P (None, 10) 40572\n=================================================================\nTotal params: 40,805\nTrainable params: 20,410\nNon-trainable params: 20,395\n_________________________________________________________________\n```\n\n### Train and evaluate the model against baseline\n\nFine-tune with pruning for two epochs.\n\n`tfmot.sparsity.keras.UpdatePruningStep` is required during training, and `tfmot.sparsity.keras.PruningSummaries` provides logs for tracking progress and debugging.\n\n``````logdir = tempfile.mkdtemp()\n\ncallbacks = [\n  tfmot.sparsity.keras.UpdatePruningStep(),\n  tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),\n]\n\nmodel_for_pruning.fit(train_images, train_labels,\n  batch_size=batch_size, epochs=epochs, validation_split=validation_split,\n  callbacks=callbacks)\n``````\n```Epoch 1/2\nWARNING:tensorflow:From /tmpfs/src/tf_docs_env/lib/python3.7/site-packages/tensorflow/python/ops/array_ops.py:5049: calling gather (from tensorflow.python.ops.array_ops) with validate_indices is deprecated and will be removed in a future version.\nInstructions for updating:\nThe `validate_indices` argument has no effect. Indices are always validated on CPU and never validated on GPU.\n3/422 [..............................] - ETA: 12s - loss: 0.0628 - accuracy: 0.9896 WARNING:tensorflow:Callback method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0075s vs `on_train_batch_end` time: 0.0076s). 
Check your callbacks.\n422/422 [==============================] - 5s 9ms/step - loss: 0.0797 - accuracy: 0.9771 - val_loss: 0.0828 - val_accuracy: 0.9790\nEpoch 2/2\n422/422 [==============================] - 3s 8ms/step - loss: 0.0971 - accuracy: 0.9741 - val_loss: 0.0839 - val_accuracy: 0.9775\n<tensorflow.python.keras.callbacks.History at 0x7f12e4502910>\n```\n\nFor this example, there is minimal loss in test accuracy after pruning, compared to the baseline.\n\n``````_, model_for_pruning_accuracy = model_for_pruning.evaluate(\n  test_images, test_labels, verbose=0)\n\nprint('Baseline test accuracy:', baseline_model_accuracy)\nprint('Pruned test accuracy:', model_for_pruning_accuracy)\n``````\n```Baseline test accuracy: 0.9775999784469604\nPruned test accuracy: 0.972100019454956\n```\n\nThe logs show the progression of sparsity on a per-layer basis.\n\n``````#docs_infra: no_execute\n%tensorboard --logdir={logdir}\n``````\n\nFor non-Colab users, you can see the results of a previous run of this code block on TensorBoard.dev.\n\n## Create 3x smaller models from pruning\n\nBoth `tfmot.sparsity.keras.strip_pruning` and applying a standard compression algorithm (e.g. via gzip) are necessary to see the compression benefits of pruning.\n\n• `strip_pruning` is necessary since it removes every tf.Variable that pruning only needs during training, which would otherwise add to model size during inference\n• Applying a standard compression algorithm is necessary since the serialized weight matrices are the same size as they were before pruning. However, pruning makes most of the weights zeros, which is added redundancy that algorithms can utilize to further compress the model.\n\nFirst, create a compressible model for TensorFlow.\n\n``````model_for_export = tfmot.sparsity.keras.strip_pruning(model_for_pruning)\n\n_, pruned_keras_file = tempfile.mkstemp('.h5')\ntf.keras.models.save_model(model_for_export, pruned_keras_file, include_optimizer=False)\nprint('Saved pruned Keras model to:', pruned_keras_file)\n``````\n```WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\nSaved pruned Keras model to: /tmp/tmp22u333hk.h5\n```\n\nThen, create a compressible model for TFLite.\n\n``````converter = tf.lite.TFLiteConverter.from_keras_model(model_for_export)\npruned_tflite_model = converter.convert()\n\n_, pruned_tflite_file = tempfile.mkstemp('.tflite')\n\nwith open(pruned_tflite_file, 'wb') as f:\n  f.write(pruned_tflite_model)\n\nprint('Saved pruned TFLite model to:', pruned_tflite_file)\n``````\n```WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. 
`model.compile_metrics` will be empty until you train or evaluate the model.\nINFO:tensorflow:Assets written to: /tmp/tmp51falze0/assets\nSaved pruned TFLite model to: /tmp/tmpehx5la2i.tflite\n```\n\nDefine a helper function to actually compress the models via gzip and measure the zipped size.\n\n``````def get_gzipped_model_size(file):\n  # Returns size of gzipped model, in bytes.\n  import os\n  import zipfile\n\n  _, zipped_file = tempfile.mkstemp('.zip')\n  with zipfile.ZipFile(zipped_file, 'w', compression=zipfile.ZIP_DEFLATED) as f:\n    f.write(file)\n\n  return os.path.getsize(zipped_file)\n``````\n\nCompare and see that the models are 3x smaller from pruning.\n\n``````print("Size of gzipped baseline Keras model: %.2f bytes" % (get_gzipped_model_size(keras_file)))\nprint("Size of gzipped pruned Keras model: %.2f bytes" % (get_gzipped_model_size(pruned_keras_file)))\nprint("Size of gzipped pruned TFlite model: %.2f bytes" % (get_gzipped_model_size(pruned_tflite_file)))\n``````\n```Size of gzipped baseline Keras model: 78211.00 bytes\nSize of gzipped pruned Keras model: 25797.00 bytes\nSize of gzipped pruned TFlite model: 24995.00 bytes\n```\n\n## Create a 10x smaller model from combining pruning and quantization\n\nYou can apply post-training quantization to the pruned model for additional benefits.\n\n``````converter = tf.lite.TFLiteConverter.from_keras_model(model_for_export)\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]\nquantized_and_pruned_tflite_model = converter.convert()\n\n_, quantized_and_pruned_tflite_file = tempfile.mkstemp('.tflite')\n\nwith open(quantized_and_pruned_tflite_file, 'wb') as f:\n  f.write(quantized_and_pruned_tflite_model)\n\nprint('Saved quantized and pruned TFLite model to:', quantized_and_pruned_tflite_file)\n\nprint("Size of gzipped baseline Keras model: %.2f bytes" % (get_gzipped_model_size(keras_file)))\nprint("Size of gzipped pruned and quantized TFlite model: %.2f bytes" % (get_gzipped_model_size(quantized_and_pruned_tflite_file)))\n``````\n```WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.\nWARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. 
`model.compile_metrics` will be empty until you train or evaluate the model.\nINFO:tensorflow:Assets written to: /tmp/tmp6tzu3z87/assets\nINFO:tensorflow:Assets written to: /tmp/tmp6tzu3z87/assets\nSaved quantized and pruned TFLite model to: /tmp/tmp0mvlkin1.tflite\nSize of gzipped baseline Keras model: 78211.00 bytes\nSize of gzipped pruned and quantized TFlite model: 8031.00 bytes\n```\n\n## See persistence of accuracy from TF to TFLite\n\nDefine a helper function to evaluate the TF Lite model on the test dataset.\n\n``````import numpy as np\n\ndef evaluate_model(interpreter):\n  input_index = interpreter.get_input_details()[0]["index"]\n  output_index = interpreter.get_output_details()[0]["index"]\n\n  # Run predictions on every image in the "test" dataset.\n  prediction_digits = []\n  for i, test_image in enumerate(test_images):\n    if i % 1000 == 0:\n      print('Evaluated on {n} results so far.'.format(n=i))\n    # Pre-processing: add batch dimension and convert to float32 to match with\n    # the model's input data format.\n    test_image = np.expand_dims(test_image, axis=0).astype(np.float32)\n    interpreter.set_tensor(input_index, test_image)\n\n    # Run inference.\n    interpreter.invoke()\n\n    # Post-processing: remove batch dimension and find the digit with highest\n    # probability.\n    output = interpreter.tensor(output_index)\n    digit = np.argmax(output()[0])\n    prediction_digits.append(digit)\n\n  print('\\n')\n  # Compare prediction results with ground truth labels to calculate accuracy.\n  prediction_digits = np.array(prediction_digits)\n  accuracy = (prediction_digits == test_labels).mean()\n  return accuracy\n``````\n\nYou evaluate the pruned and quantized model and see that the accuracy from TensorFlow persists to the TFLite backend.\n\n``````interpreter = tf.lite.Interpreter(model_content=quantized_and_pruned_tflite_model)\ninterpreter.allocate_tensors()\n\ntest_accuracy = evaluate_model(interpreter)\n\nprint('Pruned and quantized TFLite test_accuracy:', test_accuracy)\nprint('Pruned TF test accuracy:', model_for_pruning_accuracy)\n``````\n```Evaluated on 0 results so far.\nEvaluated on 1000 results so far.\nEvaluated on 2000 results so far.\nEvaluated on 3000 results so far.\nEvaluated on 4000 results so far.\nEvaluated on 5000 results so far.\nEvaluated on 6000 results so far.\nEvaluated on 7000 results so far.\nEvaluated on 8000 results so far.\nEvaluated on 9000 results so far.\n\nPruned and quantized TFLite test_accuracy: 0.9722\nPruned TF test accuracy: 0.972100019454956\n```\n\n## Conclusion\n\nIn this tutorial, you saw how to create sparse models with the TensorFlow Model Optimization Toolkit API for both TensorFlow and TFLite. You then combined pruning with post-training quantization for additional benefits.\n\nYou created a 10x smaller model for MNIST, with minimal accuracy difference.\n\nWe encourage you to try this new capability, which can be particularly important for deployment in resource-constrained environments." ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.64714646,"math_prob":0.8600812,"size":14159,"snap":"2022-27-2022-33","text_gpt3_token_len":3525,"char_repetition_ratio":0.17859414,"word_repetition_ratio":0.11689962,"special_character_ratio":0.30037433,"punctuation_ratio":0.2106383,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.97514766,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2022-07-01T11:50:11Z\",\"WARC-Record-ID\":\"<urn:uuid:37afa648-0c01-419c-9c66-38d3f6173aec>\",\"Content-Length\":\"112111\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:e668c7bf-d6f8-4495-91a3-0c0483dd5442>\",\"WARC-Concurrent-To\":\"<urn:uuid:740843a0-eea9-41be-9a38-332a143c55f8>\",\"WARC-IP-Address\":\"172.253.115.94\",\"WARC-Target-URI\":\"https://tensorflow.google.cn/model_optimization/guide/pruning/pruning_with_keras?hl=id\",\"WARC-Payload-Digest\":\"sha1:N4O7UWOHGDM4LVBVYFSS7YTUDFXGDOOE\",\"WARC-Block-Digest\":\"sha1:WNGSRHOLYX4KV6FS3D6LXJVR6S33EV7P\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2022/CC-MAIN-2022-27/CC-MAIN-2022-27_segments_1656103940327.51_warc_CC-MAIN-20220701095156-20220701125156-00095.warc.gz\"}"}
https://rdrr.io/cran/ccid/man/match.cpt.ts.html
[ "match.cpt.ts: Associating the change-points with the component time series In ccid: Cross-Covariance Isolate Detect: a New Change-Point Method for Estimating Dynamic Functional Connectivity\n\nDescription\n\nThis function performs a contrast function based approach in order to match each change-point and time series. In simple terms, for a given change-point set this function associates each change-point with the respective data sequence (or sequences) from which it was detected.\n\nUsage\n\n 1 2 3 4 5 6 7 8 match.cpt.ts( X, cpt, thr_const = 1, thr_fin = thr_const * sqrt(2 * log(nrow(X))), scales = -1, count = 5 )\n\nArguments\n\n X A numerical matrix representing the multivariate periodograms. Each column contains a different periodogram which is the result of applying the wavelet transformation to the initial multivariate time series. cpt A positive integer vector with the locations of the change-points. If missing, then our approach with the L_2 aggregation is called internally to extract the change-points in X. thr_const A positive real number with default value equal to 1. It is used to define the threshold; see thr_fin. thr_fin With T the length of the data sequence, this is a positive real number with default value equal to thr_const * log(T). It is the threshold, which is used in the detection process. scales Negative integers for the wavelet scales used to create the periodograms, with a small negative integer representing a fine scale. The default value is equal to -1. count Positive integer with default value equal to 5. It can be used so that the function will return only the count most important matches of each change-points with the time series.\n\nValue\n\nA list with the following components:\n\n time_series_indicator A list of matrices. There are as many matrices as the number of change-points. Each change-point has its own matrix, with each row of the matrix representing the associated combination of time series that are associated with the respective change-point. most_important A list of matrices. There are as many matrices as the number of change-points. Each change-point has its own matrix, with each row of the matrix representing the associated combination of time series that are associated with the respective change-point. It shows the count most important time series combinations for each change-point.\n\nAuthor(s)\n\nAndreas Anastasiou, [email protected]\n\nReferences\n\n“Cross-covariance isolate detect: a new change-point method for estimating dynamic functional connectivity”, Anastasiou et al (2020), preprint <doi:10.1101/2020.12.20.423696>.\n\nExamples\n\n 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 set.seed(1) num.nodes <- 40 # number of nodes etaA.1 <- 0.95 etaA.2 <- 0.05 pcor1 <- GeneNet::ggm.simulate.pcor(num.nodes, etaA = etaA.1) pcor2 <- GeneNet::ggm.simulate.pcor(num.nodes, etaA = etaA.2) n <- 100 data1 <- GeneNet::ggm.simulate.data(n, pcor1) data2 <- GeneNet::ggm.simulate.data(n, pcor2) X <- rbind(data1, data2, data1, data2) ## change-points at 100, 200, 300 sgn <- sign(stats::cor(X)) M1 <- match.cpt.ts(t(hdbinseg::gen.input(x = t(X),scales = -1, sq = TRUE, diag = FALSE, sgn = sgn))) M1\n\nccid documentation built on Dec. 20, 2021, 5:08 p.m." ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.7615917,"math_prob":0.98748595,"size":2839,"snap":"2022-05-2022-21","text_gpt3_token_len":757,"char_repetition_ratio":0.12063492,"word_repetition_ratio":0.18721461,"special_character_ratio":0.26910883,"punctuation_ratio":0.1634103,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9894955,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2022-01-27T16:05:36Z\",\"WARC-Record-ID\":\"<urn:uuid:02ffb7ce-9a33-4c1f-b554-bfd36c067a8f>\",\"Content-Length\":\"39932\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:c1ee0dba-0778-4b0d-acdd-51a5c32558e0>\",\"WARC-Concurrent-To\":\"<urn:uuid:ddb025af-d365-4af0-95ae-523382da4523>\",\"WARC-IP-Address\":\"51.81.83.12\",\"WARC-Target-URI\":\"https://rdrr.io/cran/ccid/man/match.cpt.ts.html\",\"WARC-Payload-Digest\":\"sha1:PFMJJJYH3M66WZETD352CHCJAJLRHOOR\",\"WARC-Block-Digest\":\"sha1:UB2KWSJFRFP3LXVUHUZ4ZYQBK2PGJF7N\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2022/CC-MAIN-2022-05/CC-MAIN-2022-05_segments_1642320305266.34_warc_CC-MAIN-20220127133107-20220127163107-00567.warc.gz\"}"}
https://www.coolmath.com/prealgebra/14-intro-to-radicals/05-radicals-multiplying-with-FOIL-01-80
[ "We're going to need this for the next lesson.\n\nWe'll need to multiply guys like", null, "There's an easy process we can use called FOIL.\n\n F = First O = Outer I = Inner L = Last\n\nYou can use this process anytime you multiply two things that are added or subtracted inside of two sets of parentheses.  Boy, that sounds confusing!  Like this:", null, "" ]
[ null, "https://www.coolmath.com/sites/default/files/images/05-Radicals-01.gif", null, "https://www.coolmath.com/sites/default/files/images/05-Radicals-02.gif", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.90673274,"math_prob":0.8330975,"size":337,"snap":"2023-40-2023-50","text_gpt3_token_len":91,"char_repetition_ratio":0.11111111,"word_repetition_ratio":0.0,"special_character_ratio":0.25816023,"punctuation_ratio":0.092307694,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9766456,"pos_list":[0,1,2,3,4],"im_url_duplicate_count":[null,4,null,4,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-10-01T08:29:26Z\",\"WARC-Record-ID\":\"<urn:uuid:a5bd014a-ffc8-4a6b-8fc2-06cdc85c2576>\",\"Content-Length\":\"28290\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:96fd2f3e-cb99-4a0b-b501-a2a14c1c2263>\",\"WARC-Concurrent-To\":\"<urn:uuid:85025f10-1393-4f41-9829-9191b49031f5>\",\"WARC-IP-Address\":\"172.64.152.72\",\"WARC-Target-URI\":\"https://www.coolmath.com/prealgebra/14-intro-to-radicals/05-radicals-multiplying-with-FOIL-01-80\",\"WARC-Payload-Digest\":\"sha1:J77VV3QGCDPUKK64G4P2VOVWM3J4BKFL\",\"WARC-Block-Digest\":\"sha1:SNOTPN7SI6H6EQ5UCDFIUBLHUOHIN2ZX\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-40/CC-MAIN-2023-40_segments_1695233510810.46_warc_CC-MAIN-20231001073649-20231001103649-00790.warc.gz\"}"}
https://web2.0calc.com/questions/help_39291
[ "+0\n\n# help\n\n0\n43\n3\n\nFind the sum of three positive consecutive integers a, b, c such that a^2 + b^2 + c^2 = 110.\n\nJan 5, 2020\n\n#1\n0\n\nJust try a few numbers such as: 5^2 + 6^2 + 7^2 = 110\n\nJan 5, 2020\n#2\n0\n\nFind the sum of three positive consecutive integers a, b, c such that a^2 + b^2 + c^2 = 110.\n\na, b, & c are consecutive, and a2 + b2 + c2  =  110\n\nA third of 110 is about 33 so start from there.\n\nThe closest square is 36, so the next two going up would be 49 and 64.\n\nYou can easily see that that total is way too large, try the other direction.\n\nThe closest square is 36, so the next two going down would be 25 and 16.\n\nThat total is way too small, so try the one below 36 and the one above it.\n\n36 + 25 + 49  = 110  ...  Like the Baby Bear's porridge, it's just right.\n\nSo a, b, & c are 5, 6, & 7, respectively, and so 5 + 6 + 7  = 18  and there's your final answer.\n\n.\n\nJan 5, 2020\n#3\n+1\n\nMiddle number = x   the other two numbers are   x-1    and x+1\n\n(x-1)^2 + x^2 + (x+1)^2 = 110\n\nx^2 - 2x +1    + x^2   +  x^2 + 2x +1 = 110\n\n3x^2 +2 = 110\n\n3x^2 = 108\n\nx^2 = 36\n\nx =      x-1 =   x+1 = 7         5+6+7 =18\n\nJan 5, 2020\nedited by ElectricPavlov  Jan 5, 2020" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.91744286,"math_prob":0.9998932,"size":938,"snap":"2019-51-2020-05","text_gpt3_token_len":353,"char_repetition_ratio":0.12098501,"word_repetition_ratio":0.2090909,"special_character_ratio":0.4466951,"punctuation_ratio":0.12757201,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.99996746,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-01-19T23:01:03Z\",\"WARC-Record-ID\":\"<urn:uuid:944e1afe-986a-4ff6-938c-f874086bf872>\",\"Content-Length\":\"25504\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:2e21f1f8-af23-4c19-b0c1-f5d9ddf8ad55>\",\"WARC-Concurrent-To\":\"<urn:uuid:d068cc25-5a56-4998-a691-08738a77c47c>\",\"WARC-IP-Address\":\"116.202.113.245\",\"WARC-Target-URI\":\"https://web2.0calc.com/questions/help_39291\",\"WARC-Payload-Digest\":\"sha1:GVRGW5G7XWZDK6MNG7HEQTPDYTNQXP2H\",\"WARC-Block-Digest\":\"sha1:AYIOYBXIACCBNAXPD52CJBFC3J253AZI\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-05/CC-MAIN-2020-05_segments_1579250595282.35_warc_CC-MAIN-20200119205448-20200119233448-00175.warc.gz\"}"}
http://lbartman.com/worksheet/number-sequence-worksheets-for-kindergarten.php
[ "## lbartman.com - the pro math teacher\n\n• Subtraction\n• Multiplication\n• Division\n• Decimal\n• Time\n• Line Number\n• Fractions\n• Math Word Problem\n• Kindergarten\n• a + b + c\n\na - b - c\n\na x b x c\n\na : b : c\n\n# Number Sequence Worksheets For Kindergarten\n\nPublic on 04 Oct, 2016 by Cyun Lee\n\n###", null, "1000 images about números series on pinterest skip counting\n\nName : __________________\n\nSeat Num. : __________________\n\nDate : __________________\n\n### HOW MANY STARS EACH LINE ?\n\n......\n......\n......\n......\n......\nshow printable version !!!hide the show\n\n## RELATED POST\n\nNot Available\n\n## POPULAR\n\nmath for kindergarten free worksheets\n\nmath worksheets kindergarten addition and subtraction\n\nmultiplying and dividing decimal worksheets\n\nyear 7 math worksheets" ]
[ null, "https://s-media-cache-ak0.pinimg.com/originals/f7/af/ea/f7afea8819876644e6608d0a9c0d6928.gif", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.8317319,"math_prob":0.8412293,"size":807,"snap":"2020-45-2020-50","text_gpt3_token_len":161,"char_repetition_ratio":0.18804483,"word_repetition_ratio":0.0,"special_character_ratio":0.25154895,"punctuation_ratio":0.121212125,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9839711,"pos_list":[0,1,2],"im_url_duplicate_count":[null,1,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-10-22T20:21:21Z\",\"WARC-Record-ID\":\"<urn:uuid:656a9628-206a-4f74-966a-d15a2a8a0b04>\",\"Content-Length\":\"43801\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:66684a4b-9ae3-4c2c-8a49-b70016cbbba6>\",\"WARC-Concurrent-To\":\"<urn:uuid:dfa080e7-f91a-41b7-9cc2-938ab8f20494>\",\"WARC-IP-Address\":\"45.76.71.7\",\"WARC-Target-URI\":\"http://lbartman.com/worksheet/number-sequence-worksheets-for-kindergarten.php\",\"WARC-Payload-Digest\":\"sha1:BF5DPZYIGJKGECOTRY4ZN4H2KWEKGTCZ\",\"WARC-Block-Digest\":\"sha1:YABLOCUK3AUHZBDB2ISVNFGAGGPAAWIK\",\"WARC-Identified-Payload-Type\":\"application/xhtml+xml\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-45/CC-MAIN-2020-45_segments_1603107880038.27_warc_CC-MAIN-20201022195658-20201022225658-00677.warc.gz\"}"}
https://habib.edu.pk/physics-lab/phy101l-mechanics-lab/
[ "", null, "Habib Response - Covid-19", null, "# Phy101L: Mechanics Lab\n\nPhy101L course compliments the Phy101 course in Mechanics. The lab is based on several theoretical concepts that the students learn during the course. The lab has the following experimental setups:\n\nUncertainties in measurements\nThis is the most basic experiment where students learn how to compute type A and B uncertainties, how to combine and transfer from one variable to another. The experiment we use to conduct this exercise it to compute value of gravitational acceleration using a simple pendulum with different length of pendulums.\nExpansion of Helical spring\nWe observe the extension of springs in series and parallel combinations and compare it with mathematical models that we derive using Hooke’s Law.\nDamping Constant of a Harmonic Oscillator\nA pendulum made up of a neodymium magnet is made to oscillate between two sets of coils with 250 turns. The oscillating magnet causes magnetic flux that induces voltage in the coil. As the pendulum oscillates its amplitude decreases with time by a certain factor, namely Damping Constant. Readings of voltage is recorded and using the method of curve fitting, value of damping constant is determined.\nEnergy Conservation in 2-Dimension\nA ball at a certain height is hit in horizontal by a pendulum that has been released at different angles. The ball then moves horizontally because of the collision and also vertically downwards due to force of gravity until it touches ground at certain distance. Using the concepts of projectile motion and energy conservation, it can be mathematically modelled and can be tested against experimental data.\nConservation of Linear Momentum\nCollisions of two trolleys are being observed for elastic and inelastic with equal and not equal masses to determine if the kinetic energy and liner momentum of the system is conserved or not and why.\nRotational Motion and frictional losses\nThe experiment aims to build understating the motion and transfer of energy of an oscillating mass. It also focuses on relationship of inertia and frictional losses.\nPath diagram of rotational motion\nWe just directly replace the variables from the equation of kinematics of linear motion with variables of rotational motion, and observe if the concepts would still complement the experimental data. In the experiment we measure angular displacements, time to travel from one angle to another and instantaneous angular velocities.\nEnergy Conservation in Maxwell’s Wheel\nA wheel with a perpendicular spindle at its centre of known moment of inertia is wounded up with thread and then released to oscillate vertically due to gravity. As it unwinds, the wheel has translational motion and as well as rotational motion. By recording the instantaneous translational velocity at different heights from drop, we compute experimental value of moment of inertia. We conclude that energy remains conserved at all times.\nLatent Heat of Vaporization of Liquid Nitrogen\nLiquid Nitrogen boils at -196 degree Celsius. We measure the rate of mass loss of liquid nitrogen due environmental heating at room temperature and then by providing external electrical heating. 
By computing the difference in the rates and the power of the electrical heating, and using the formula for the latent heat of vaporization, we determine the experimental value and then compare it with the literature value.\nNewton’s Law of Cooling\nWe determine the coefficient of convective heat transfer between a hot metallic cylinder, as it cools down, and the surrounding air. We also determine how the coefficient of convective heat transfer changes under different conditions.\nDetermination of Curie temperature of Kanthal D-wire\nThe Curie temperature of a material is the temperature at which the material loses its permanent magnetism. In this experiment we heat a spring made of Kanthal D-wire by electric heating up to the point where it no longer responds to an external magnetic field. The Curie temperature is then determined for a range of voltages to get a sufficiently accurate value.\nVibrations on a String and Resonance\nIn this experiment standing waves are induced and observed on a steel string. We identify at what frequencies resonance occurs in the given strings, and compare experimental data with mathematical relationships.", null ]
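As a sketch of the curve-fitting step mentioned for the damped-oscillator experiment above, the code below fits an exponentially decaying envelope A(t) = A0·exp(-γt) to simulated amplitude readings. The model form, names, and numbers are illustrative assumptions, not the lab's actual data or procedure:

```python
import numpy as np
from scipy.optimize import curve_fit

def envelope(t, a0, gamma):
    # Amplitude envelope of a damped harmonic oscillator: A(t) = A0 * exp(-gamma * t)
    return a0 * np.exp(-gamma * t)

# Simulated peak-amplitude readings (e.g. induced-coil voltages) at successive swings.
t = np.linspace(0.0, 20.0, 21)                     # seconds
rng = np.random.default_rng(1)
v = envelope(t, 2.0, 0.15) + rng.normal(0.0, 0.02, t.size)

popt, pcov = curve_fit(envelope, t, v, p0=(1.0, 0.1))
a0_fit, gamma_fit = popt
gamma_err = np.sqrt(np.diag(pcov))[1]              # 1-sigma uncertainty on gamma
print(f"damping constant = {gamma_fit:.3f} +/- {gamma_err:.3f} 1/s")
```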
[ null, "https://www.facebook.com/tr", null, "https://habib.edu.pk/wp-content/uploads/2017/06/Slider_Inner_101PhysicsL.jpg", null, "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.9141188,"math_prob":0.939714,"size":4297,"snap":"2023-40-2023-50","text_gpt3_token_len":826,"char_repetition_ratio":0.10808293,"word_repetition_ratio":0.005830904,"special_character_ratio":0.18175471,"punctuation_ratio":0.06276151,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.98651177,"pos_list":[0,1,2,3,4,5,6],"im_url_duplicate_count":[null,null,null,2,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-09-24T06:17:09Z\",\"WARC-Record-ID\":\"<urn:uuid:82f5e9af-5aa6-4621-b8a2-d013734a76df>\",\"Content-Length\":\"102371\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:5ac2525d-f4fa-4c5e-9682-a1ae57cffa98>\",\"WARC-Concurrent-To\":\"<urn:uuid:cc602366-06e7-41b0-9a3f-08a3694dacce>\",\"WARC-IP-Address\":\"52.30.165.170\",\"WARC-Target-URI\":\"https://habib.edu.pk/physics-lab/phy101l-mechanics-lab/\",\"WARC-Payload-Digest\":\"sha1:PHHVGCLFV7TOAW65TSE3XKPQZGCJCSXH\",\"WARC-Block-Digest\":\"sha1:7LOLCSKHUSSZWFFOKUS2DZMGVKAX2SVL\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-40/CC-MAIN-2023-40_segments_1695233506623.27_warc_CC-MAIN-20230924055210-20230924085210-00690.warc.gz\"}"}
http://knoxblue.com/esnuen/112720-how-to-calculate-perplexity-of-language-model-python
[ "(800)258-3032\n\n(865)525-0463\n\nOFFICE HOURS\n\nMON-FRI 8am to 5pm\n\nChristmas Schedule closed Dec24th-25th and reopen Monday Dec28th at 8am\n\n# how to calculate perplexity of language model python\n\n• serve as the independent 794! Now use the Actual dataset. how much it is “perplexed” by a sample from the observed data. Popular evaluation metric: Perplexity score given by the model to test set. The project you are referencing uses sequence_to_sequence_loss_by_example, which returns the loss of cross entropy.Thus, to calculate perplexity in learning, you just need to amplify the loss, as described here. • serve as the incubator 99! The Natural Language Toolkit has data types and functions that make life easier for us when we want to count bigrams and compute their probabilities. Statistical language models, in its essence, are the type of models that assign probabilities to the sequences of words. Perplexity is the inverse probability of the test set normalised by the number of words, more specifically can be defined by the following equation: Perplexity is defined as 2**Cross Entropy for the text. Then, in the next slide number 34, he presents a following scenario: Definition: Perplexity. Reuters corpus is a collection of 10,788 news documents totaling 1.3 million words. Language modeling involves predicting the next word in a sequence given the sequence of words already present. I am wondering the calculation of perplexity of a language model which is based on character level LSTM model.I got the code from kaggle and edited a bit for my problem but not the training way. I am trying to find a way to calculate perplexity of a language model of multiple 3-word examples from my test set, or perplexity of the corpus of the test set. We can build a language model in a few lines of code using the NLTK package: Perplexity defines how a probability model or probability distribution can be useful to predict a text. Calculate the test data perplexity using the trained language model 11 SRILM s s fr om the n-gram count file alculate the test data perplity using the trained language model ngram-count ngram-count ngram Corpus file … I have added some other stuff to graph and save logs. Print out the perplexities computed for sampletest.txt using a smoothed unigram model and a smoothed bigram model. Number of States. Thus, we can argue that this language model has a perplexity … There are some codes I found: def calculate_bigram_perplexity(model, sentences): number_of_bigrams = model.corpus_length # Stack Exchange Network Stack Exchange network consists of 176 Q&A communities including Stack Overflow , the largest, most trusted online community for developers to learn, share their knowledge, and build their careers. Perplexity is the measure of how likely a given language model will predict the test data. • serve as the incoming 92! 1.3.1 Perplexity Implement a Python function to measure the perplexity of a trained model on a test dataset. ... def calculate_unigram_perplexity (model, sentences): unigram_count = calculate_number_of_unigrams (sentences) sentence_probability_log_sum = 0: for sentence in sentences: model is trained on Leo Tolstoy’s War and Peace and can compute both probability and perplexity values for a file containing multiple sentences as well as for each individual sentence. Thus if we are calculating the perplexity of a bigram, the equation is: When unigram, bigram, and trigram was trained on 38 million words from the wall street journal using a 19,979-word vocabulary. 
The choice of how the language model is framed must match how the language model is intended to be used. The main purpose of tf-lm is providing a toolkit for researchers that want to use a language model as is, or for researchers that do not have a lot of experience with language modeling/neural networks and would like to start with it. Adapt the methods to compute the cross-entropy and perplexity of a model from nltk.model.ngram to your implementation and measure the reported perplexity values on the Penn Treebank validation dataset. Language modeling (LM) is the essential part of Natural Language Processing (NLP) tasks such as Machine Translation, Spell Correction Speech Recognition, Summarization, Question Answering, Sentiment analysis etc. Introduction. (for reference: the models I implemented were a Bigram Letter model, a Laplace smoothing model, a Good Turing smoothing model, and a Katz back-off model). However, as I am working on a language model, I want to use perplexity measuare to compare different results. 2018. Thanks for contributing an answer to Cross Validated! Compute the perplexity of the language model, with respect to some test text b.text evallm-binary a.binlm Reading in language model from file a.binlm Done. The following code is best executed by copying it, piece by piece, into a Python shell. Google!NJGram!Release! Hence coherence can … The lower the score, the better the model … A Comprehensive Guide to Build your own Language Model in Python! But now you edited out the word unigram. In short perplexity is a measure of how well a probability distribution or probability model predicts a sample. It describes how well a model predicts a sample, i.e. Building a Basic Language Model. Build unigram and bigram language models, implement Laplace smoothing and use the models to compute the perplexity of test corpora. Dan!Jurafsky! Train the language model from the n-gram count file 3. Note: Analogous to methology for supervised learning Using BERT to calculate perplexity. It relies on the underlying probability distribution of the words in the sentences to find how accurate the NLP model is. • serve as the index 223! OK, so now that we have an intuitive definition of perplexity, let's take a quick look at how it is affected by the number of states in a model. (b) Test model’s performance on previously unseen data (test set) (c) Have evaluation metric to quantify how well our model does on the test set. So perplexity represents the number of sides of a fair die that when rolled, produces a sequence with the same entropy as your given probability distribution. This is usually done by splitting the dataset into two parts: one for training, the other for testing. Consider a language model with an entropy of three bits, in which each bit encodes two possible outcomes of equal probability. The code for evaluating the perplexity of text as present in the nltk.model… This submodule evaluates the perplexity of a given text. But avoid …. Section 2: A Python Interface for Language Models A description of the toolkit can be found in this paper: Verwimp, Lyan, Van hamme, Hugo and Patrick Wambacq. Perplexity is also a measure of model quality and in natural language processing is often used as “perplexity per number of words”. evallm : perplexity -text b.text Computing perplexity of the language model with respect to the text b.text Perplexity = 128.15, Entropy = 7.00 bits Computation based on 8842804 words. Please be sure to answer the question.Provide details and share your research! 
In one of the lecture on language modeling about calculating the perplexity of a model by Dan Jurafsky in his course on Natural Language Processing, in slide number 33 he give the formula for perplexity as . Now that we understand what an N-gram is, let’s build a basic language model using trigrams of the Reuters corpus. (a) Train model on a training set. The perplexity of a language model on a test set is the inverse probability of the test set, normalized by the number of words. python-2.7 nlp nltk n-gram language-model | this question edited Oct 22 '15 at 18:29 Kasramvd 62.1k 8 46 87 asked Oct 21 '15 at 18:48 Ana_Sam 144 9 You first said you want to calculate the perplexity of a unigram model on a text corpus. So perplexity for unidirectional models is: after feeding c_0 … c_n, the model outputs a probability distribution p over the alphabet and perplexity is exp(-p(c_{n+1}), where we took c_{n+1} from the ground truth, you take and you take the expectation / average over your validation set. 2. The perplexity is a numerical value that is computed per word. d) Write a function to return the perplexity of a test corpus given a particular language model. Contribute to DUTANGx/Chinese-BERT-as-language-model development by creating an account on GitHub. Goal of the Language Model is to compute the probability of sentence considered as a word sequence. - ollie283/language-models. Even though perplexity is used in most of the language modeling tasks, optimizing a model based on perplexity will not yield human interpretable results. Asking for … The most common way to evaluate a probabilistic model is to measure the log-likelihood of a held-out test set. 26 NLP Programming Tutorial 1 – Unigram Language Model test-unigram Pseudo-Code λ 1 = 0.95, λ unk = 1-λ 1, V = 1000000, W = 0, H = 0 create a map probabilities for each line in model_file split line into w and P set probabilities[w] = P for each line in test_file split line into an array of words append “” to the end of words for each w in words add 1 to W set P = λ unk We should use e instead of 2 as the base, because TensorFlow measures the cross-entropy loss by the natural logarithm ( TF Documentation). Detailed description of all parameters and methods of BigARTM Python API classes can be found in Python Interface.. At this moment you need to … Train smoothed unigram and bigram models on train.txt. Base PLSA Model with Perplexity Score¶. This article explains how to model the language using probability … Run on large corpus. ... We then use it to calculate probabilities of a word, given the previous two words. train_perplexity = tf.exp(train_loss). Now, I am tasked with trying to find the perplexity of the test data (the sentences for which I am predicting the language) against each language model. I am very new to KERAS, and I use the dealt dataset from the RNN Toolkit and try to use LSTM to train the language model I have problem with the calculating the perplexity though. This means that when predicting the next symbol, that language model has to choose among \\$2^3 = 8\\$ possible options. In this article, we’ll understand the simplest model that assigns probabilities to sentences and sequences of words, the n-gram. A language model is a key element in many natural language processing models such as machine translation and speech recognition. We can argue that this language model with an Entropy of three bits, in which bit. Previous two words simplest model that assigns probabilities to the sequences of words, the the... 
A training set Entropy of three bits, in which each bit encodes two possible outcomes of equal probability speech! I want to use perplexity measuare to compare different results essence, are the type of models that assign to... Sample from the n-gram count file 3 score given by the model 2. Word in a sequence given the previous two words speech recognition we then use it to calculate of... Short perplexity is defined as 2 * * Cross Entropy for the text by..., Hugo and Patrick Wambacq sure to answer the question.Provide details and share your!..., in which each bit encodes two possible outcomes of equal probability useful to predict a text per... Models such as machine translation and speech recognition ) train model on a test dataset how the language model intended... The simplest model that assigns probabilities to sentences and sequences of words to choose \\$. Splitting the dataset into two parts: one for training, the n-gram count 3. By copying it, piece by piece, into a Python function to measure the perplexity is a element... Of words” in natural language processing models such as machine translation and speech recognition collection of news... Much it is “perplexed” by a sample from the observed data to answer the question.Provide details share... Into two parts: one for training, the other for testing symbol, that language has! Assign probabilities to the sequences of words by a sample models such as machine translation and recognition. To compute the probability of sentence considered as a word, given the sequence of already... Account on GitHub that is computed per word probabilities to sentences and sequences of words, the n-gram count 3! Measure of how the language model, I want to use perplexity measuare to compare different results score by... Following code is best executed by copying it, piece by piece, into a Python function measure! Perplexity … Introduction test dataset already present understand what an n-gram is let’s. Has to choose among \\$ 2^3 = 8 \\$ possible options Van hamme, Hugo and Wambacq... A key element in many natural language processing how to calculate perplexity of language model python such as machine translation and recognition. Intended to be used a language model is framed must match how the model! Value that is computed per word using trigrams of the Reuters corpus probabilistic model is collection. To be used perplexity score given by the model … 2 how to calculate perplexity of language model python by splitting the into! As I am working on a language model is framed must match how the model! Sure to how to calculate perplexity of language model python the question.Provide details and share your research in this paper: Verwimp,,. * Cross Entropy for the text code is best executed by copying it, piece by piece, into Python! The question.Provide details and share your research relies on the underlying probability distribution of the language model has choose! Corpus is a measure of how well a model predicts a sample then... To be used a measure of model quality and in natural language processing is often used as per... Hugo and Patrick Wambacq print out the perplexities computed for sampletest.txt using a smoothed bigram model as a,. Statistical language models, in its essence, are the type of that! Value that is computed per word \\$ 2^3 = 8 \\$ possible options and save logs 10,788... Most common way to evaluate a probabilistic model is how to calculate perplexity of language model python to be used most common to... 
What an n-gram is, let’s build a basic language model using trigrams of Reuters. 2 * * Cross Entropy for the text smoothed bigram model of 10,788 news documents totaling 1.3 million words predicting! File 3 your research a smoothed bigram model element in many natural language processing is often as. Calculate probabilities of a word, given the previous two words distribution can be useful to predict a text *. That we understand what an n-gram is, let’s build a basic language model using of. Compute the probability of sentence considered as a word sequence Patrick Wambacq Implement a Python shell it calculate... Count file 3 is also a measure of how the language model is is “perplexed” by a from. This submodule evaluates the perplexity of a word, given the sequence of words the. Probability model or probability model predicts a sample from the n-gram count file 3 … 2, into Python! Equal probability to use perplexity measuare to compare different results in natural language processing such! Smoothed unigram model and a smoothed bigram model sentences and sequences of words the next symbol that. A key element in many natural language processing is often used as “perplexity per number words”. In a sequence given the sequence of words and in natural language processing is often used as “perplexity per of. €¦ 2 two parts: one for training, the other for testing of words, other. I am working on a language model is to measure the log-likelihood of a given text use to... Statistical language models, in its essence, are the type of models that assign to. To predict a text * Cross Entropy for the text often used as “perplexity per of! This means that when predicting the next word in a sequence given the of! Dataset into two parts: one for training, the other for testing,! The sentences to find how accurate the NLP model is to measure the perplexity of a text! As machine translation and speech recognition this article, we’ll understand the simplest how to calculate perplexity of language model python that assigns probabilities to and. Other for testing paper: Verwimp, Lyan, Van hamme, and. Involves predicting the next symbol, that language model has a perplexity … Introduction n-gram is, let’s build basic... One for training, the better the model to test set find how accurate the NLP model is must. What an n-gram is, let’s build a basic language model with an Entropy of bits... This article, we’ll understand the simplest model that assigns probabilities to the sequences of already... This is usually done by splitting the dataset into two parts: one for training, the better model... Lower the score, the n-gram count file 3 to calculate probabilities of a word sequence parts: for! Defined as 2 * * Cross Entropy for the text the perplexities computed for sampletest.txt a... Collection of 10,788 news documents totaling 1.3 million words used as “perplexity per number of words” let’s build basic. Sentence considered as a word, given the sequence of words please be sure to answer question.Provide! In many natural language processing is often used as “perplexity per number of words” the toolkit can be to. Short perplexity is a measure of how the language model is to compute probability... Or probability model predicts a sample, i.e done by splitting the dataset into two parts: one training... However, as I am working on a training set, the other for testing to evaluate a probabilistic is! 
Model, I want to use perplexity measuare to compare different results contribute to DUTANGx/Chinese-BERT-as-language-model development by creating an on... To use perplexity measuare to compare different results your research, given the two! Probabilistic model is to measure the log-likelihood of a given text to answer the question.Provide details and share research. Other stuff to graph and save logs be found in this paper:,... Of the Reuters corpus is a collection of 10,788 news documents how to calculate perplexity of language model python million. The sequences of words already present and in natural language processing is often used as “perplexity per of... Considered as a word sequence we can argue that this language model with Entropy. Distribution or probability distribution can be useful to predict a text I want to use perplexity measuare to different... Given the sequence of how to calculate perplexity of language model python already present corpus is a measure of model quality and in natural language models! An account on GitHub sentences and sequences of words already present is computed per word DUTANGx/Chinese-BERT-as-language-model., are the type of models that assign probabilities to the sequences words! Evaluate a probabilistic model is framed must match how the language model is framed must how... Model is a numerical value that is computed per word common way to evaluate a probabilistic model is perplexities for... And sequences of words, the other for how to calculate perplexity of language model python... we then use it to calculate probabilities of given... As machine translation and speech recognition match how the language model, I want to use perplexity to! Two parts: one for training, the better the model to test.! Score, the n-gram the Reuters corpus sample from the n-gram evaluate a probabilistic model is to the... Translation and speech recognition perplexity score given by the model to test set function to measure the log-likelihood of word! Language models, in which each bit encodes two possible outcomes of equal probability for training, the better model! Predicting the next word in a sequence given the previous two words other for testing answer the question.Provide details share. By a sample from the n-gram count file 3 a collection of 10,788 news documents 1.3... Word, given the sequence of words already present score, the better the model … 2 question.Provide details share. News documents totaling 1.3 million words of 10,788 news documents totaling 1.3 million.. Distribution or probability distribution can be useful to predict a text print out the perplexities for... This means that when predicting the next word in a sequence given the two... Word sequence how accurate the NLP model is to compute the probability of sentence considered as a sequence... Of how well a model predicts a sample, i.e train model on a test.!, as I am working on a training set to test set, into Python." ]
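The snippet below is a minimal sketch of that computation: a Laplace-smoothed bigram model scored by perplexity. The toy corpora, function names, and whitespace tokenization are stand-ins of mine, not part of NLTK, tf-lm, or any toolkit mentioned above.

```python
import math
from collections import Counter

def train_bigram_laplace(tokens):
    """Count unigrams and bigrams; the vocabulary is the set of training words."""
    unigrams = Counter(tokens)
    bigrams = Counter(zip(tokens[:-1], tokens[1:]))
    return unigrams, bigrams, len(unigrams)

def bigram_prob(w1, w2, unigrams, bigrams, vocab):
    """P(w2 | w1) with add-one (Laplace) smoothing."""
    return (bigrams[(w1, w2)] + 1) / (unigrams[w1] + vocab)

def perplexity(test_tokens, unigrams, bigrams, vocab):
    """2 ** cross-entropy, where cross-entropy is the average -log2 P per bigram."""
    log_sum, n = 0.0, 0
    for w1, w2 in zip(test_tokens[:-1], test_tokens[1:]):
        log_sum += math.log2(bigram_prob(w1, w2, unigrams, bigrams, vocab))
        n += 1
    return 2 ** (-log_sum / n)

train = "the cat sat on the mat the dog sat on the rug".split()
test = "the cat sat on the rug".split()
uni, bi, vocab = train_bigram_laplace(train)
print(perplexity(test, uni, bi, vocab))  # lower is better
```

Replacing math.log2 and the final 2 ** (...) with math.log and math.exp gives the natural-log variant, which matches the tf.exp(train_loss) convention described above.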
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.90730035,"math_prob":0.892767,"size":20667,"snap":"2021-04-2021-17","text_gpt3_token_len":4475,"char_repetition_ratio":0.18854958,"word_repetition_ratio":0.2580645,"special_character_ratio":0.20738375,"punctuation_ratio":0.123997994,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.99149984,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-04-20T14:15:38Z\",\"WARC-Record-ID\":\"<urn:uuid:c0945b08-be26-4193-8fea-ee719d889824>\",\"Content-Length\":\"60895\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:f61357d3-931f-43b9-adf3-154e1d13d787>\",\"WARC-Concurrent-To\":\"<urn:uuid:61b25af1-f0c2-47b3-954b-84a0fe43f98a>\",\"WARC-IP-Address\":\"64.237.36.123\",\"WARC-Target-URI\":\"http://knoxblue.com/esnuen/112720-how-to-calculate-perplexity-of-language-model-python\",\"WARC-Payload-Digest\":\"sha1:ZTI3UYE5E52PSPGRLLZLA5IXOYD4MU7A\",\"WARC-Block-Digest\":\"sha1:NLK4SFFX3RPW3NAVPWPY2CWHR4LXQE4B\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-17/CC-MAIN-2021-17_segments_1618039398307.76_warc_CC-MAIN-20210420122023-20210420152023-00076.warc.gz\"}"}
https://rechneronline.de/accu/charge-cost.php
[ "Rechargeable Battery Charge | Electric Charge Units | Charge Costs\n\nAnzeige\n\n# Calculate Cost of Battery Charge\n\nCalculator for the costs of charging the battery of an electric device, depending on accu size and electricity rate. The accu size is given in watt-hours, this can be calculated from capacity in ampere-hours and voltage in volts. With the electricity rate, which commonly is priced per kilowatt-hour and the charge in percent, the costs per charge can be calculated.\n\nCapacity:\nVoltage:V\nAccu size:\nElectricity rate:per kWh\nCharge:%\nCosts:per charge\n\nPlease enter capacity and voltage, or accu size, as well as the electricity rate. The charging costs will be calculated.\n\nExample: a smartphone battery has a capacity of 2420 mAh and a voltage of 3.8 volts. This makes the accu size 9.2 kWh. At an electricity rate of 30 cents per kWh, one charge of 80 percents costs less than a quarter of a cent.\n\n© Jumk.de Webprojects | Imprint & Privacy" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.85558164,"math_prob":0.986627,"size":959,"snap":"2021-04-2021-17","text_gpt3_token_len":230,"char_repetition_ratio":0.1382199,"word_repetition_ratio":0.0,"special_character_ratio":0.20959333,"punctuation_ratio":0.11173184,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9941386,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-01-28T11:22:45Z\",\"WARC-Record-ID\":\"<urn:uuid:fcef88a2-e4f3-4e4d-99cc-33a7ae9232b6>\",\"Content-Length\":\"15352\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:e796e18d-1257-43f4-948c-afba7083d964>\",\"WARC-Concurrent-To\":\"<urn:uuid:5f6ea5a2-c291-4bcd-812c-acc85ac16d42>\",\"WARC-IP-Address\":\"92.204.58.21\",\"WARC-Target-URI\":\"https://rechneronline.de/accu/charge-cost.php\",\"WARC-Payload-Digest\":\"sha1:VAXVRKD2ITFHOMO6P6LL5IRI3YX42GHZ\",\"WARC-Block-Digest\":\"sha1:HWJNDXEEMW6JG3YWMGOFTWBGAEPDSOOC\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-04/CC-MAIN-2021-04_segments_1610704843561.95_warc_CC-MAIN-20210128102756-20210128132756-00603.warc.gz\"}"}
https://cs.stackexchange.com/questions/23243/polynomial-hierarchy-intersection
[ "Polynomial hierarchy intersection\n\nWhile familiarizing myself with polynomial hierarchy, I have come across a problem of showing $NP^{\\Sigma_{k}^{p} \\cap \\Pi_{k}^{p}} \\subseteq \\Sigma_{k}^{p}$. By looking at the proof for $NP^{SAT} \\subseteq \\Sigma_{2}^{p}$, I got the concept where we can guess the choices of the NTM and answers to SAT call and then encode the correctness of these answers. However, while I understand encoding correctness of answers for SAT calls, I have a problem of doing the same for the oracle $\\Sigma_{k}^{p} \\cap \\Pi_{k}^{p}$, which has no known complete problems. It seems to me there is a cookbook way of proving this that I am missing?\n\n• How do you define $\\mathit{NP}^{\\Sigma^p_k \\cap \\Pi^p_k}$? Mar 30 '14 at 4:00\n• As languages for which there is an NTM with an oracle for problems in $\\Sigma_{k}^{p} \\cap \\Pi_{k}^{p}$. Is that what you were looking for? Mar 30 '14 at 4:11\n• One problem or many problems? In my answer, I assume that you get to choose one language. Mar 30 '14 at 4:16\n\nI will assume the following definition for $\\mathit{NP}^{\\Sigma_k^p \\cap \\Pi_k^p}$: it is the class of languages decided by polytime non-deterministic Turing machines with oracle access to a language in $\\Sigma_k^p \\cap \\Pi_k^p$. Consider now some $L \\in \\mathit{NP}^{\\Sigma_k^p \\cap \\Pi_k^p}$ which is decided by some NP machine $M$ with oracle access to a language $K \\in \\Sigma_k^p \\cap \\Pi_k^p$.\nSince $K \\in \\Sigma_k^p \\cap \\Pi_k^p$, there are $\\Sigma_k^p$-witnesses to both $x \\in K$ and $x \\notin K$. Including all such witnesses, the NP machine $M$ becomes an absolute $\\Sigma_k^p$ machine (we fold the first $\\exists$ quantifier).\nIn more detail, we can write $L$ as $x \\in L \\leftrightarrow \\exists |y|<|x|^C P(x,y)$ for some predicate $P$ which is polynomial time with oracle access to $K$. Since $P$ runs in polynomial time, it makes at most polynomially many queries to $O$. We construct a new predicate $P'$ which guesses the results $b_1,\\ldots,b_T$ of these queries $q_1,\\ldots,q_T$. Since $K \\in \\Sigma_k^p \\cap \\Pi_k^p$, if $b_i = T$ ($b_i = F$) then for some polytime $P_+$ ($P_-$) we have $$\\exists |w_{i,1}| < |x|^{C_1} \\forall |w_{i,2}| < |x|^{C_2} \\cdots Q |w_{i,k}| < |x|^{C_k} P_{\\pm}(q_i,w_{i,1},\\ldots,w_{i,k}).$$ By combining $P$ with the machines $P_+,P_-$ we can come up with a polytime predicate $P'$ such that $$x \\in L \\leftrightarrow \\exists |y| < |x|^C \\exists b_1,\\ldots,b_T \\exists |w_{1,1}|,\\ldots,|w_{T,1}| < |x|^{C_1} \\cdots Q |w_{1,k}|,\\ldots,|w_{T,k}| < |x|^{C_k} \\\\ P'(x,y,\\vec{b},\\vec{w}).$$ Folding the first three existential quantifiers, we see that $L \\in \\Sigma^p_k$.\n• What would be the difference if we could query the oracle for any problem in $\\Sigma_{k}^{p} \\cap \\Pi_{k}^{p}$? It seems that conceptually there should be no difference. Mar 30 '14 at 19:27\n• The difference would be that you would need machines $P_{\\pm}$ for the infinitely many languages in $\\Sigma^p_k \\cap \\Pi^p_k$. Mar 30 '14 at 19:40\n• It depends how you present the questions to the oracle. If the language is described through the machines $P_{\\pm}$ then the proof should go through. Otherwise, you might need to \"know\" the machines $P_{\\pm}$ corresponding to all languages in $\\Sigma^p_k \\cap \\Pi^p_k$. Mar 30 '14 at 20:04" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.8468532,"math_prob":0.99901193,"size":3547,"snap":"2022-05-2022-21","text_gpt3_token_len":1150,"char_repetition_ratio":0.124753036,"word_repetition_ratio":0.04761905,"special_character_ratio":0.33521286,"punctuation_ratio":0.10570236,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":1.0000012,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2022-01-18T02:44:57Z\",\"WARC-Record-ID\":\"<urn:uuid:9fd2df4b-3e04-4d05-85c7-eab20d9e75d6>\",\"Content-Length\":\"143966\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:ab51f99c-c417-4644-9417-17c0b17091ab>\",\"WARC-Concurrent-To\":\"<urn:uuid:203d2e57-c097-40ac-ab6f-3160a22c3e1a>\",\"WARC-IP-Address\":\"151.101.65.69\",\"WARC-Target-URI\":\"https://cs.stackexchange.com/questions/23243/polynomial-hierarchy-intersection\",\"WARC-Payload-Digest\":\"sha1:TGLXUAP5Y6SDCXD7HHOK2PKSOXZM25LC\",\"WARC-Block-Digest\":\"sha1:WY2OFTGMATIWIDUN2IMAHPEWO3LYEAIX\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2022/CC-MAIN-2022-05/CC-MAIN-2022-05_segments_1642320300658.84_warc_CC-MAIN-20220118002226-20220118032226-00586.warc.gz\"}"}
https://scirp.org/journal/paperinformation.aspx?paperid=88635
[ "Effect of Confidence Shock on an Economy with a Shadow Banking System: Analysis Based on Dynamic Stochastic General Equilibrium Model\n\nAbstract\n\nWe introduced a financial intermediary system including shadow banks into a New-Keynesian dynamic stochastic general equilibrium framework and analyzed the effect of confidence on the real economy. A model simulation indicated that confidence boosts growth and promotes consumption and investment in the real economy. The effects on the shadow banking system and traditional commercial banking system differed, thereby providing a new perspective for policy-making and economic structure model research.\n\nShare and Cite:\n\nCong, H. and Chen, Y. (2018) Effect of Confidence Shock on an Economy with a Shadow Banking System: Analysis Based on Dynamic Stochastic General Equilibrium Model. Theoretical Economics Letters, 8, 3285-3300. doi: 10.4236/tel.2018.815203.\n\n1. Introduction\n\nThe subprime mortgage crisis in the United States in 2007 evolved into the most serious international financial disaster since the Great Depression and changed the trend of global economic development. Ever since, demand in the global economy has been relatively weak and volatile, and the effects continue 10 years on.\n\nThe overall financial structure change, market failure, and excessive risky speculation were the internal causes of the crisis. In relation to these aspects, the shadow banking system had crucial influence. Shadow banking in the United States turns securitizations of poor liquidity into assets. Mortgage loans, credit card loans, and other liabilities are securitized and traded in the secondary market. Securities such as mortgage-backed securities and collateralized debt obligations are distributed to various financial institutions and held by the public, and shadow banks infiltrate every aspect of the financial system . Although shadow banks are gradually replacing commercial banks to provide credit services, their current situation lacks supervision . In 1982, the Garn-St. Germain Depository Institutions Act relaxed regulatory restrictions and facilitated large-scale expansion of mixed businesses operated by shadow banks. In September 2012, the total assets of shadow banks accounted for US$20.59 trillion, which was considerably higher than the US$17.51 trillion in assets of insurance companies and pension funds and the US\\$15.11 trillion of commercial banks .\n\nIn contrast to commercial banks, the shadow banking system has no corresponding deposit insurance system or central bank support; therefore, it is highly sensitive to the effects of market sentiment. This is reflected by not only the large-scale expansion and development of shadow banking system during the economic boom but also the overall financial panic caused by bank runs in the shadow banking sector during the collapse of confidence.\n\nThis paper explores the effect of confidence on financial intermediaries, which helps to explain the logic behind the formation and growth of shadow banking.\n\nThe model that we set up has two key features. First, we assume homogeneity in the household sector and heterogeneity among banks and firms. Second, we modeled the confidence effect and applied it to investigate the influence of confidence on the shadow banking system, the overall financial environment and economic development channels. 
Diverse responses of different sectors help to identify the impact of a confidence shock and make it possible to shed light on the transmission channel of shadow banks under economic fluctuation.\n\nThe simulation results indicate the existence of channels linking confidence with different financial intermediaries. We also propose that shadow banking has a substitution effect on commercial banks in a prosperous economy.\n\nThe remainder of this paper is organized as follows. In Section 2, we describe our model, giving particular attention to the setup of optimism. In Section 3, the parameters and basis for the assignment are explained. The results are presented in Section 4 alongside analysis based on economic facts. Section 5 concludes this paper.\n\n2. Related Literature\n\nThe definition of shadow banking was first put forward by PIMCO (Pacific Investment Management Company) executive director Paul McCulley at the Federal Reserve's annual meeting in 2008. It can broadly be described as "credit intermediation involving entities and activities outside the regular banking system".\n\nFrom the start of the crisis there has been an explosion of literature about shadow banking. Most of the early literature focuses on the role of shadow banking in the crisis. Pozsar argued that the shadow banking system was a network of highly levered off-balance-sheet vehicles that was at the heart of the credit crisis. Adrian and Shin analyzed the rise and impact of shadow banking from the perspective of securitization. A comprehensive overview of the shadow banking system can be found in Pozsar, Adrian, Ashcraft, and Boesky and in Adrian and Ashcraft.\n\nThe dynamic general equilibrium framework is widely used in the study of credit intermediaries and financial instability, which are closely related to the study of shadow banking. Bernanke, Gertler and Gilchrist pointed out the channels by which the financial market amplifies market shocks. This financial accelerator is also triggered by the shadow banking sector, because shadow banks can also create credit. Christiano, Motto and Rostagno built on the basic structure of Smets and Wouters, enlarged with Bernanke's approach. Taking the activity of shadow banking into consideration, they found that liquidity constraints and shocks that alter the perception of the market are the determinants of economic fluctuations. A simplified framework developed by Verona, Martins and Drumond features over-optimism and over-leveraging in the course of the boom. A large body of literature studies the confidence effect, especially banking panics and confidence collapse (see Diamond and Dybvig, Gorton et al., Ferrante).\n\nIn summary, previous studies mainly focused on financial instability and the confidence effect on the whole economy. This paper combines these two important topics and investigates the channel through which confidence affects the shadow banking sector and therefore changes the economic structure.\n\n3. Model\n\nThis study modified Verona's model framework, which follows those of Christiano et al. and Bernanke et al., and includes the effect of confidence disturbance. We introduced financial friction, adjustment cost of investment, information asymmetry, and parallel financial intermediaries into a classic DSGE model.\n\nIn the model, each household is a monopolistic supplier of a differentiated labor service. The households earn wages and acquire dividends from ownership of firms, choosing consumption and savings.
Monopolistic intermediate-goods firms employ labor from households and use capital services from entrepreneurs to produce intermediate products. The final-goods market is perfectly competitive, and final-goods firms combine intermediate goods to produce final goods. Final products are consumed by households and invested into capital production. Investments enter the hands of capital producers, who generate new capital through new investments and repurchases of depreciated capital products. Old capital repurchased by a capital producer can be converted one-to-one into new capital. New capital goods are sold to entrepreneurs.\n\nAn entrepreneur buys new capital from a capital producer at the end of period t, chooses the utilization rate in period t + 1, rents the capital to an intermediate-goods firm, and sells the depreciated capital to the capital producer at the end of period t + 1. Entrepreneurs are divided into high risk and low risk; this is the core setting of this model. High-risk entrepreneurs acquire loans from commercial banks, whereas low-risk entrepreneurs acquire loans from the shadow banking system.\n\n3.1. Low-Risk Entrepreneurs and Shadow Banks\n\n1) Low-risk entrepreneurs\n\nIn the model, the low-risk type is denoted by L, and l indexes an individual low-risk entrepreneur. The proportion of low-risk entrepreneurs among all entrepreneurs is $1-\eta$, consistent with the aggregation used below. Each low-risk entrepreneur decides the capital utilization rate $u_{t}^{L,l}$, the scale of borrowing, and the amount of capital purchased in each period. At the beginning of each period, low-risk entrepreneurs use the stock capital $\bar{K}_{t}^{L,l}$ purchased at the end of the preceding period to generate capital services. That is, capital services for intermediate-goods production are $K_{t}^{L,l}=u_{t}^{L,l}\bar{K}_{t}^{L,l}$. The cost of providing capital services increases with the utilization rate of capital. At the end of the process, the capital of a low-risk entrepreneur depreciates at rate $\delta$.
The entrepreneur sells the depreciated capital to a capital producer, repays the loan from the preceding period, acquires a loan for the next period, and purchases stock capital for the next period.\n\nThe liability is\n\n$B_{t+1}^{L,l}=Q_{\bar{K},t}\bar{K}_{t+1}^{L,l}-N_{t+1}^{L,l}$\n\nwhere $Q_{\bar{K},t}$ denotes the price of capital, $B_{t+1}^{L,l}$ denotes the liability of low-risk entrepreneur l, and $N_{t+1}^{L,l}$ is the net worth of low-risk entrepreneur l.\n\nThe utilization rate cost is\n\n$a(u_{t}^{L,l})=\frac{r^{L,l}}{\sigma_{a}^{L}}\left[\mathrm{e}^{\sigma_{a}^{L}(u_{t}^{L,l}-1)}-1\right]$\n\nwhere $r_{t}^{k,L}$ denotes the real rental rate of capital services.\n\nConsidering optimal capital utilization, low-risk entrepreneurs solve the maximization problem\n\n$\Pi_{t}^{L,l}=\left[u_{t}^{L,l}r_{t}^{k,L}-a(u_{t}^{L,l})\right]\bar{K}_{t}^{L,l}P_{t}+(1-\delta)Q_{\bar{K},t}\bar{K}_{t}^{L,l}-Q_{\bar{K},t}\bar{K}_{t+1}^{L,l}-R_{t}^{sb}\left(Q_{\bar{K},t-1}\bar{K}_{t}^{L,l}-N_{t}^{L,l}\right)$\n\n$\max_{\{u_{t}^{L,l},\bar{K}_{t}^{L,l}\}}\sum_{i=0}^{\infty}E_{t}\left\{\beta^{i}\Pi_{t+i}^{L,l}\right\}$\n\nwhere $P_{t}$ is the price level and $R_{t}^{sb}$ denotes the loan rate of the shadow banking system.\n\nThe first-order conditions with respect to $u_{t}^{L,l}$ and $\bar{K}_{t}^{L,l}$ are\n\n$r_{t}^{k,L}=a'(u_{t}^{L,l})$,\n\nwhere $a'(u_{t}^{L,l})$ is the first derivative of the utilization cost function, and\n\n$Q_{\bar{K},t}=\beta E_{t}\left\{\left[u_{t+1}^{L,l}r_{t+1}^{k,L}-a(u_{t+1}^{L,l})\right]P_{t+1}+(1-\delta)Q_{\bar{K},t+1}-R_{t+1}^{sb}Q_{\bar{K},t}\right\}$\n\nIn each period, entrepreneur l's equity is\n\n$V_{t}^{L,l}=\left\{\left[u_{t}^{L,l}r_{t}^{k,L}-a(u_{t}^{L,l})\right]P_{t}+(1-\delta)Q_{\bar{K},t}\right\}\bar{K}_{t}^{L,l}-(1+R_{t}^{sb})\left(Q_{\bar{K},t-1}\bar{K}_{t}^{L,l}-N_{t}^{L,l}\right)$\n\nSuppose that in each period, entrepreneurs exit the market with probability $1-\gamma^{L}$ and transfer their assets to the shareholders, namely households. Simultaneously, a new entrepreneur is born with probability $1-\gamma^{L}$ and receives net worth $W_{t}^{e,L}$ from households.\n\nTherefore, $N_{t+1}^{L,l}=\gamma^{L}V_{t}^{L,l}+W_{t}^{e,L}$.\n\nShadow banks have a certain bargaining power; therefore, entrepreneurs choose the optimal loan according to its interest rate cost when selecting a shadow bank, whereas shadow banks adjust their interest rates to maximize their own profits.
Taking shadow bank z as an example, entrepreneurs solve\n\n$\min_{B_{t+1}^{L,l}(z)}\int_{0}^{1}\left[1+R_{t+1}^{sb}(z)\right]B_{t+1}^{L,l}(z)\,\text{d}z$\n\nsubject to $B_{t+1}^{L,l}={\left\{\int_{0}^{1}\left[B_{t+1}^{L,l}(z)\right]^{\frac{\epsilon_{t+1}^{sb}-1}{\epsilon_{t+1}^{sb}}}\text{d}z\right\}}^{\frac{\epsilon_{t+1}^{sb}}{\epsilon_{t+1}^{sb}-1}}$.\n\nIn these equations, $\epsilon_{t+1}^{sb}$ is the interest rate elasticity of the demand for funds.\n\nSuppose $1+R_{t+1}^{sb}={\left\{\int_{0}^{1}\left[1+R_{t+1}^{sb}(z)\right]^{1-\epsilon_{t+1}^{sb}}\text{d}z\right\}}^{\frac{1}{1-\epsilon_{t+1}^{sb}}}$.\n\nThen, $B_{t+1}^{L,l}(z)={\left(\frac{1+R_{t+1}^{sb}(z)}{1+R_{t+1}^{sb}}\right)}^{-\epsilon_{t+1}^{sb}}B_{t+1}^{L,l}$.\n\nShadow bank z maximizes its profits:\n\n$\max_{R_{t+1}^{sb}(z)}\Pi_{t+1}^{SB}=\left[1+R_{t+1}^{sb}(z)\right]B_{t+1}^{L,l}(z)-\left[1+R_{t+1}^{f}\right]B_{t+1}^{L,l}(z)$\n\nsubject to\n\n$B_{t+1}^{L,l}(z)={\left(\frac{1+R_{t+1}^{sb}(z)}{1+R_{t+1}^{sb}}\right)}^{-\epsilon_{t+1}^{sb}}B_{t+1}^{L,l}$\n\nwhere $R_{t+1}^{f}$ is the base rate (i.e., the central bank's target nominal interest rate).\n\nThe first-order condition is\n\n${\left(\frac{1+R_{t+1}^{sb}(z)}{1+R_{t+1}^{sb}}\right)}^{-\epsilon_{t+1}^{sb}}-\epsilon_{t+1}^{sb}\frac{1+R_{t+1}^{sb}(z)-(1+R_{t+1}^{f})}{1+R_{t+1}^{sb}}{\left(\frac{1+R_{t+1}^{sb}(z)}{1+R_{t+1}^{sb}}\right)}^{-\epsilon_{t+1}^{sb}-1}=0$.\n\nAccording to the symmetric equilibrium condition, the following formula can be derived:\n\n$1+R_{t+1}^{sb}=\frac{\epsilon_{t+1}^{sb}}{\epsilon_{t+1}^{sb}-1}\left(1+R_{t+1}^{f}\right)$.\n\nThe profit of the shadow bank is $\Pi_{t+1}^{SB}=\left(R_{t+1}^{sb}-R_{t+1}^{f}\right)(1-\eta)B_{t+1}^{L,l}$.\n\nTo introduce the effects of optimism and confidence shock, we first assume $1+R_{t+1}^{sb}=\frac{\epsilon^{sb,c}}{\epsilon^{sb,c}-1}\left(1+R_{t+1}^{f}\right)$, where the elasticity $\epsilon^{sb,c}$ is constant.\n\nThe elasticity is affected by optimism, denoted as $\chi_{t}$:\n\n$\epsilon_{t+1}^{sb}=\epsilon^{sb,c}\cdot(1+\chi_{t})$.\n\nHere, $\chi_{t}$ indicates the overall feeling of optimism in the society. When this value is higher than the steady-state optimism level because of an increase in net assets, the risk preference of operators rises and interest rates fall; the whole economy then enters a growth period or even a bubble period.\n\nUnder this assumption, a confidence shock can be expressed as\n\n$\chi_{t}=\rho_{\chi}\chi_{t-1}+(1-\rho_{\chi})\left[\bar{\chi}+\alpha_{\chi}\left(N_{t+1}^{H,h}-N^{H,h}\right)\right]+\epsilon_{t}^{sp}$\n\nwhere $\epsilon_{t}^{sp}$ is the shock to the overall economy, $N^{H,h}$ is the steady-state level of net worth, $\rho_{\chi}$ captures the degree of persistence in optimism, and $\alpha_{\chi}$ is the sensitivity of optimism with respect to the deviation of the entrepreneur's net worth.
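A minimal sketch of how the optimism process and the implied shadow-bank lending rate interact. All parameter values are illustrative stand-ins rather than the paper's calibration, and the net-worth deviation series is a random placeholder instead of a model-consistent path:

```python
import numpy as np

# Illustrative values only -- not the paper's calibration.
rho_chi, chi_bar, alpha_chi = 0.9, 0.1, 0.05
eps_sbc = 200.0        # steady-state interest elasticity of loan demand
r_f = 0.01             # policy (base) rate per period
T = 40

rng = np.random.default_rng(0)
chi = np.zeros(T)
r_sb = np.zeros(T)
net_worth_gap = rng.normal(0.0, 0.2, T)   # stand-in for N_{t+1}^{H,h} - N^{H,h}

for t in range(1, T):
    # chi_t = rho*chi_{t-1} + (1-rho)*[chi_bar + alpha*(N gap)] + shock
    chi[t] = (rho_chi * chi[t - 1]
              + (1 - rho_chi) * (chi_bar + alpha_chi * net_worth_gap[t])
              + rng.normal(0.0, 0.01))
    eps_sb = eps_sbc * (1 + chi[t])       # optimism raises the elasticity...
    r_sb[t] = eps_sb / (eps_sb - 1) * (1 + r_f) - 1  # ...and lowers the markup

print(r_sb[1:6])   # the shadow-bank lending rate falls as optimism rises
```

The markup formula is the symmetric-equilibrium condition derived above; with a higher elasticity, the markup over the base rate shrinks, which is the channel the paper attributes to optimism.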
3.2. High-Risk Entrepreneurs and Commercial Banks\n\n1) High-risk entrepreneurs\n\nThe setting of high-risk entrepreneurs is essentially the same as that of low-risk entrepreneurs; H denotes the high-risk type and h indexes an individual high-risk entrepreneur. The proportion of high-risk entrepreneurs among all entrepreneurs is $\eta$. High-risk entrepreneurs must consider the utilization rate of capital, the cost of which is $a(u_{t}^{H,h})$, and capital services earn a real rental rate of $r_{t}^{k,H}$. The depreciation rate of capital is $\delta$. The balance sheet is similar to that of low-risk entrepreneurs and can be expressed as $B_{t+1}^{H,h}=Q_{\bar{K},t}\bar{K}_{t+1}^{H,h}-N_{t+1}^{H,h}$.\n\nIn contrast to low-risk entrepreneurs, the capital stock of high-risk entrepreneurs is subject to a stochastic shock $\omega_{t}^{H,h}$ at each stage, following a log-normal distribution: $\ln(\omega_{t}^{H,h})\sim N(-\frac{\sigma^{2}}{2},\sigma^{2})$. Consequently, the capital services of high-risk entrepreneur h are $K_{t}^{H,h}=u_{t}^{H,h}\omega_{t}^{H,h}\bar{K}_{t}^{H,h}$.\n\nThe return on capital services of high-risk entrepreneur h is\n\n$1+R_{t}^{k,H,h}=\omega_{t}^{H,h}\,\frac{\left[u_{t}^{H,h}r_{t}^{k,H}-a(u_{t}^{H,h})\right]P_{t}+(1-\delta)Q_{\bar{K},t}}{Q_{\bar{K},t-1}}$.\n\nBecause $\omega_{t}^{H,h}$ follows a log-normal distribution with unit mean and all entrepreneurs are symmetrical, this can be rewritten in terms of the aggregate return\n\n$1+R_{t}^{k,H}=\frac{\left[u_{t}^{H,h}r_{t}^{k,H}-a(u_{t}^{H,h})\right]P_{t}+(1-\delta)Q_{\bar{K},t}}{Q_{\bar{K},t-1}}$\n\nTherefore, the optimal choice of high-risk entrepreneurs with respect to the capital utilization rate is $\max_{\{u_{t}^{H,h}\}}\left[u_{t}^{H,h}r_{t}^{k,H}-a(u_{t}^{H,h})\right]\bar{K}_{t}^{H,h}P_{t}$.\n\nThe first-order condition is $r_{t}^{k,H}=a'(u_{t}^{H,h})$.\n\n2) Commercial banks\n\nWhen commercial banks make loan decisions, they know that high-risk entrepreneurs face risk shocks. When the risk shock faced by an entrepreneur is so severe that the entrepreneur can only declare bankruptcy and hand over the remaining value to the commercial bank, the bank bears some of the losses and pays a monitoring cost $\mu$ to retrieve the value. This assumption reflects the financial friction arising from asymmetric information between entrepreneurs and banks.\n\nTo control risk, the bank sets a risk threshold as a basis for the loan interest rate.
Suppose $Z_{t+1}^{H,h}$ is the gross interest rate on a loan; the threshold of risk $\bar{\omega}_{t+1}^{H,h}$ is defined by\n\n$\bar{\omega}_{t+1}^{H,h}\left(1+R_{t+1}^{k,H}\right)Q_{\bar{K},t}\bar{K}_{t+1}^{H,h}=Z_{t+1}^{H,h}B_{t+1}^{H,h}$.\n\nAccording to the perfect-competition zero-profit condition,\n\n$\left[1-F(\bar{\omega}_{t+1}^{H,h})\right]Z_{t+1}^{H,h}B_{t+1}^{H,h}+(1-\mu)\int_{0}^{\bar{\omega}_{t+1}^{H,h}}\omega_{t+1}^{H,h}\,\text{d}F(\omega_{t+1}^{H,h})\left(1+R_{t+1}^{k,H}\right)Q_{\bar{K},t}\bar{K}_{t+1}^{H,h}=\left(1+R_{t+1}^{f}\right)B_{t+1}^{H,h}$\n\nSimilar to low-risk entrepreneurs, in each period the high-risk entrepreneurs exit with probability $1-\gamma^{H}$ and transfer their assets to the shareholders, namely households. Simultaneously, a new entrepreneur is born with probability $1-\gamma^{H}$ and acquires net worth $W_{t}^{e,H}$ from households.\n\nThe process can be expressed as\n\n$V_{t}^{H,h}=\left(1+R_{t}^{k,H}\right)Q_{\bar{K},t-1}\bar{K}_{t}^{H,h}-\left[1+R_{t}^{f}+\frac{\mu\int_{0}^{\bar{\omega}_{t}^{H,h}}\omega_{t}^{H,h}\,\text{d}F(\omega_{t}^{H,h})\left(1+R_{t}^{k,H}\right)Q_{\bar{K},t-1}\bar{K}_{t}^{H,h}}{Q_{\bar{K},t-1}\bar{K}_{t}^{H,h}-N_{t}^{H,h}}\right]\left(Q_{\bar{K},t-1}\bar{K}_{t}^{H,h}-N_{t}^{H,h}\right)$\n\n$N_{t+1}^{H,h}=\gamma^{H}V_{t}^{H,h}+W_{t}^{e,H}$.\n\n3.3. Capital Producers\n\nCapital producers are set up following the classic model. Capital producers are perfectly competitive; old capital and new investment $I_{t}$ can be converted one-to-one into new capital, whereas investment carries an adjustment cost:\n\n$F(I_{t},I_{t-1})=\left[1-\frac{\psi}{2}{\left(\frac{I_{t}}{I_{t-1}}-1\right)}^{2}\right]I_{t}$\n\nThe new assets generated by capital goods producers are\n\n$(1-\delta)\bar{K}_{t}+F(I_{t},I_{t-1})$\n\nThe maximization problem of capital producers is\n\n$\max_{\{I_{t+\tau},\bar{K}_{t+\tau}\}}E_{t}\sum_{\tau=0}^{\infty}\beta^{\tau}\lambda_{t+\tau}\left\{Q_{\bar{K},t+\tau}\left[(1-\delta)\bar{K}_{t+\tau}+F(I_{t+\tau},I_{t+\tau-1})\right]-Q_{\bar{K},t+\tau}(1-\delta)\bar{K}_{t+\tau}-P_{t+\tau}I_{t+\tau}\right\}$\n\nThe first-order condition is\n\n$E_{t}\left[\lambda_{t}\left(Q_{\bar{K},t}F_{1,t}-P_{t}\right)+\beta\lambda_{t+1}Q_{\bar{K},t+1}F_{2,t+1}\right]=0$\n\nThe aggregate capital stock evolves according to\n\n$\eta\bar{K}_{t+1}^{H,h}+(1-\eta)\bar{K}_{t+1}^{L,l}=(1-\delta)\left[\eta\bar{K}_{t}^{H,h}+(1-\eta)\bar{K}_{t}^{L,l}\right]+\left[1-\frac{\psi}{2}{\left(\frac{I_{t}}{I_{t-1}}-1\right)}^{2}\right]I_{t}$
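A small numerical illustration of the capital law of motion with the Christiano-style adjustment cost above; delta and psi here are stand-in values, not the calibrated ones from Table 1:

```python
# Illustrative parameters only.
delta, psi = 0.025, 2.0

def new_capital(k_bar, i_t, i_prev):
    """K' = (1 - delta) * K + [1 - psi/2 * (I_t / I_{t-1} - 1)^2] * I_t"""
    adj = 1.0 - 0.5 * psi * (i_t / i_prev - 1.0) ** 2
    return (1.0 - delta) * k_bar + adj * i_t

k = 100.0
i_path = [2.5, 2.5, 3.0, 3.0]   # an investment spurt in the third period
for i_prev, i_t in zip(i_path[:-1], i_path[1:]):
    k = new_capital(k, i_t, i_prev)
    print(round(k, 3))
# In the period investment jumps, part of it is lost to the adjustment
# cost; once investment growth is steady again, the cost vanishes.
```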
3.4. Final-Goods Firms and Intermediate-Goods Firms\n\n1) Final-goods firms\n\nThe final-goods firms aggregate the intermediate goods $Y_{i,t}$ to obtain the final output $Y_{t}$, selling part to the households for consumption and part to the capital producer as investment for the production of capital goods.\n\nThe production function is\n\n$Y_{t}={\left[\int_{0}^{1}Y_{i,t}^{\frac{1}{\lambda_{f}}}\,\text{d}i\right]}^{\lambda_{f}}$\n\nwhere $\lambda_{f}$, $\infty>\lambda_{f}\ge 1$, is the markup for the intermediate-goods firms.\n\nThe optimal choice for a final-goods firm is\n\n$Y_{i,t}={\left(\frac{P_{i,t}}{P_{t}}\right)}^{\frac{\lambda_{f}}{1-\lambda_{f}}}Y_{t}$\n\n2) Intermediate-goods firms\n\nAs monopolistically competitive enterprises, intermediate-goods firms have bargaining power. They employ labor $L_{i,t}$ provided by households at cost $W_{t}$ and rent the capital services of entrepreneurs to produce heterogeneous goods. For a given output, an intermediate-goods firm minimizes the cost of production, with technology\n\n$Y_{i,t}={\left(K_{i,t}\right)}^{\alpha}{\left(L_{i,t}\right)}^{1-\alpha}$.\n\nThus, firm i's optimal demand for capital and labor services solves the following minimization problem:\n\n$\min_{\{L_{i,t},K_{i,t}^{H},K_{i,t}^{L}\}}\frac{W_{t}L_{i,t}}{P_{t}}+K_{i,t}^{H}r_{t}^{k,H}+K_{i,t}^{L}r_{t}^{k,L}$\n\nsubject to $K_{i,t}={\left[\eta^{1-\rho}{\left(K_{i,t}^{H}\right)}^{\rho}+(1-\eta)^{1-\rho}{\left(K_{i,t}^{L}\right)}^{\rho}\right]}^{\frac{1}{\rho}}$\n\n$Y_{i,t}={\left(K_{i,t}\right)}^{\alpha}{\left(L_{i,t}\right)}^{1-\alpha}$\n\nwhere $\alpha$, $0<\alpha<1$, denotes the capital share of production.\n\nThe first-order conditions with respect to $K_{i,t}^{H}$ and $K_{i,t}^{L}$ are\n\n$r_{t}^{k,H}=\frac{W_{t}}{P_{t}}{\left(Y_{i,t}\right)}^{\frac{1}{1-\alpha}}\frac{\alpha}{1-\alpha}\eta^{1-\rho}{\left[\eta^{1-\rho}{\left(K_{i,t}^{H}\right)}^{\rho}+(1-\eta)^{1-\rho}{\left(K_{i,t}^{L}\right)}^{\rho}\right]}^{\frac{\alpha}{\rho(1-\alpha)}-1}{\left(K_{i,t}^{H}\right)}^{\rho-1}$\n\n$r_{t}^{k,L}=\frac{W_{t}}{P_{t}}{\left(Y_{i,t}\right)}^{\frac{1}{1-\alpha}}\frac{\alpha}{1-\alpha}(1-\eta)^{1-\rho}{\left[\eta^{1-\rho}{\left(K_{i,t}^{H}\right)}^{\rho}+(1-\eta)^{1-\rho}{\left(K_{i,t}^{L}\right)}^{\rho}\right]}^{\frac{\alpha}{\rho(1-\alpha)}-1}{\left(K_{i,t}^{L}\right)}^{\rho-1}$\n\nBy combining these equations, we can derive the no-arbitrage condition:\n\n$\frac{r_{t}^{k,H}}{r_{t}^{k,L}}={\left(\frac{\eta}{1-\eta}\right)}^{1-\rho}{\left(\frac{K_{i,t}^{H}}{K_{i,t}^{L}}\right)}^{\rho-1}={\left(\frac{u_{t}^{H,h}\bar{K}_{t}^{H,h}}{u_{t}^{L,l}\bar{K}_{t}^{L,l}}\right)}^{\rho-1}$\n\nBecause all firms face the same input prices and have access to the same production technology, the real marginal cost $s_{t}$ is identical across firms.
By integrating the first-order conditions and the no-arbitrage condition into the cost function, we get\n\n$s_{t}=\frac{\rho}{\rho+\alpha(1-\rho)}{\left[\frac{\tilde{w}_{t}}{1-\alpha}\right]}^{1-\frac{\alpha}{\rho+\alpha(1-\rho)}}{\left[\frac{\alpha}{r_{t}^{k,H}}{\left(u_{t}^{H,h}\bar{K}_{t}^{H,h}\right)}^{\rho-1}\right]}^{-\frac{\alpha}{\rho+\alpha(1-\rho)}}{\left(Y_{t}\right)}^{\frac{\alpha(\rho-1)}{\rho+\alpha(1-\rho)}}$\n\n$s_{t}=\frac{r_{t}^{k,H}}{\alpha{\left(\frac{h_{t}}{K_{t}}\right)}^{1-\alpha}{\left(u_{t}^{H,h}\bar{K}_{t}^{H,h}\right)}^{\rho-1}{\left[\eta{\left(u_{t}^{H,h}\bar{K}_{t}^{H,h}\right)}^{\rho}+(1-\eta){\left(u_{t}^{L,l}\bar{K}_{t}^{L,l}\right)}^{\rho}\right]}^{\frac{1}{\rho}-1}}$.\n\nIntermediate-goods firms follow the assumption of Calvo; firms can adjust their price with probability $1-\xi_{p}$. In addition, firms that cannot reset their price to the optimal level change their price according to past inflation $\pi_{t-1}$:\n\n$P_{t}=P_{t-1}{\left(\bar{\pi}\right)}^{\iota}{\left(\pi_{t-1}\right)}^{1-\iota}$,\n\nwhere $\bar{\pi}$ is the steady-state level of inflation.\n\nBased on the aforementioned assumptions, rational intermediate-goods firms optimize their profits through\n\n$E_{t}\sum_{\tau=0}^{\infty}{\left(\beta\xi_{p}\right)}^{\tau}\lambda_{t+\tau}Y_{t+\tau}P_{t+\tau}\left[{\left(\frac{P_{i,t+\tau}}{P_{t+\tau}}\right)}^{1+\frac{\lambda_{f}}{1-\lambda_{f}}}-s_{t+\tau}{\left(\frac{P_{i,t+\tau}}{P_{t+\tau}}\right)}^{\frac{\lambda_{f}}{1-\lambda_{f}}}\right]$.\n\nFinally, we get\n\n$\frac{K_{p,t}}{F_{p,t}}={\left[\frac{1-\xi_{p}{\left(\frac{\tilde{\pi}_{t}}{\pi_{t}}\right)}^{\frac{1}{1-\lambda_{f}}}}{1-\xi_{p}}\right]}^{1-\lambda_{f}}$\n\nwhere\n\n$K_{p,t}=\lambda_{n,t}Y_{t}\lambda_{f}s_{t}+\beta\xi_{p}{\left(\frac{\pi_{t}^{1-\iota}}{\pi_{t+1}}\right)}^{-\frac{\lambda_{f}}{\lambda_{f}-1}}K_{p,t+1}$\n\n$F_{p,t}=\lambda_{n,t}Y_{t}+\beta\xi_{p}{\left(\frac{\pi_{t}^{1-\iota}}{\pi_{t+1}}\right)}^{\frac{1}{1-\lambda_{f}}}F_{p,t+1}$.
3.5. Households\n\nThe utility function of households is\n\n$\max_{\{C_{t+\tau},D_{t+\tau}\}}E_{t}\sum_{\tau=0}^{\infty}\beta^{\tau}\left[\ln\left(C_{t+\tau}-bC_{t+\tau-1}\right)-\psi_{L}\frac{h_{j,t+\tau}^{1+\sigma_{L}}}{1+\sigma_{L}}\right]$\n\nwhere $C_{t}$ denotes consumption, $h_{j,t}$ is the amount of labor supplied, $\sigma_{L}$ is the elasticity of the labor supplied, and $\psi_{L}$ is the preference parameter that governs the disutility of supplying labor.\n\nThe budget constraint of households is\n\n$\left(1+R_{t}^{f}\right)D_{t-1}+\left(1+R_{t}^{b}\right)BD_{t-1}+W_{j,t}h_{j,t}+\left(1-\gamma^{L}\right)(1-\eta)V_{t}^{L,l}+\left(1-\gamma^{H}\right)\eta V_{t}^{H,h}+\Pi_{t}^{IGF}+\Pi_{t}^{SB}=D_{t}+BD_{t}+P_{t}C_{t}+\eta W_{t}^{e,H}+(1-\eta)W_{t}^{e,L}$\n\nwhere $R_{t}^{f}$ and $R_{t}^{b}$ are the deposit rate at commercial banks and the return rate of shadow bank bonds, respectively; $D_{t-1}$ is the deposit in commercial banks; $BD_{t-1}$ is the bond of the shadow banking system held by the household; $\Pi_{t}^{IGF}$ represents the profit of intermediate-goods firms; and $\Pi_{t}^{SB}$ indicates the profit of the shadow banking sector.\n\nThe first-order conditions can be calculated as follows:\n\n$\frac{1}{C_{t}-bC_{t-1}}-\beta b\frac{1}{C_{t+1}-bC_{t}}=\lambda_{t}P_{t}$\n\n$\lambda_{t}=\beta E_{t}\left[\left(1+R_{t+1}^{f}\right)\lambda_{t+1}\right]$\n\n$\lambda_{t}=\beta E_{t}\left[\left(1+R_{t+1}^{b}\right)\lambda_{t+1}\right]$.\n\nAccording to these equations, $R_{t}^{f}=R_{t}^{b}$, which means that at the equilibrium, households have no opportunity for arbitrage.\n\nFor the setting of the labor market, we introduce the hypothesis of labor heterogeneity to render the model closer to the real economy and improve simulation accuracy. Households can adjust their wage levels with probability $1-\xi_{w}$.
The aggregate labor demand is\n\n$L_{i,t}={\left[\int_{0}^{1}{\left(h_{j,t}\right)}^{\frac{1}{\lambda_{w}}}\,\text{d}j\right]}^{\lambda_{w}}$\n\nThe wage level is\n\n$W_{t}={\left[\int_{0}^{1}{\left(W_{j,t}\right)}^{\frac{1}{1-\lambda_{w}}}\,\text{d}j\right]}^{1-\lambda_{w}}$.\n\nIntermediate-goods firms make decisions based on the following formula:\n\n$h_{j,t}={\left(\frac{W_{j,t}}{W_{t}}\right)}^{\frac{\lambda_{w}}{1-\lambda_{w}}}L_{i,t}$.\n\nThe optimal choice of households with bargaining power in the pricing of labor is\n\n$\max_{\{W_{j,t}\}}E_{t}\sum_{\tau=0}^{\infty}{\left(\beta\xi_{w}\right)}^{\tau}\left[-\psi_{L}\frac{h_{j,t+\tau}^{1+\sigma_{L}}}{1+\sigma_{L}}+\lambda_{t+\tau}W_{j,t+\tau}h_{j,t+\tau}\right]$\n\nsubject to $h_{j,t}={\left(\frac{W_{j,t}}{W_{t}}\right)}^{\frac{\lambda_{w}}{1-\lambda_{w}}}L_{i,t}$.\n\nHouseholds that cannot adjust their wages to the optimal level follow the dynamic process\n\n$W_{j,t}=W_{j,t-1}{\left(\bar{\pi}\right)}^{\iota_{w}}{\left(\pi_{t-1}\right)}^{1-\iota_{w}}$.\n\nTo simplify the notation and exploit symmetry, define $W_{j,t+\tau}\equiv\tilde{W}_{t+\tau}$, $h_{j,t}\equiv h_{t}$, $\tilde{w}_{t+\tau}\equiv\frac{W_{t+\tau}}{P_{t+\tau}}$, $\lambda_{n,t+\tau}\equiv\lambda_{t+\tau}P_{t+\tau}$, $\pi_{t+\tau}\equiv\frac{P_{t+\tau}}{P_{t+\tau-1}}$, $\tilde{\pi}_{w}\equiv{\left(\bar{\pi}\right)}^{\iota_{w}}{\left(\pi_{t-1}\right)}^{1-\iota_{w}}$, and $X_{t,\tau}\equiv\frac{\tilde{\pi}_{w,t+\tau}\cdots\tilde{\pi}_{w,t+1}}{\pi_{t+\tau}\cdots\pi_{t+1}}$.\n\nFinally, we obtain the following first-order conditions for the maximization of household income:\n\n$K_{w,t}=\frac{F_{w,t}\tilde{w}_{t}}{\psi_{L}}{\left[\frac{1-\xi_{w}{\left(\frac{\tilde{\pi}_{w,t}}{\pi_{w,t}}\right)}^{\frac{1}{1-\lambda_{w}}}}{1-\xi_{w}}\right]}^{\lambda_{w}(1+\sigma_{L})-1}$\n\n$K_{w,t}=h_{t}^{1+\sigma_{L}}+\beta\xi_{w}{\left[\frac{\pi_{t}^{1-\iota_{w}}}{\pi_{t+1}\tilde{w}_{t+1}/\tilde{w}_{t}}\right]}^{\frac{\lambda_{w}(1+\sigma_{L})}{1-\lambda_{w}}}K_{w,t+1}$\n\n$F_{w,t}=h_{t}\frac{\lambda_{n,t}}{\lambda_{w}}+\beta\xi_{w}{\left[\frac{1}{\pi_{t+1}\tilde{w}_{t+1}/\tilde{w}_{t}}\right]}^{\frac{\lambda_{w}}{1-\lambda_{w}}}\frac{{\left(\pi_{t}^{1-\iota_{w}}\right)}^{\frac{1}{1-\lambda_{w}}}}{\pi_{t+1}}F_{w,t+1}$.
3.6. Central Bank's Monetary Policy\n\nThe central bank sets the short-term nominal interest rate following the Taylor rule:\n\n$R_{t}^{f}={\left(R_{t-1}^{f}\right)}^{\tilde{\rho}_{IR}}{\left[R^{f}{\left(\frac{E_{t}(\pi_{t+1})}{\bar{\pi}}\right)}^{\alpha_{\pi}}{\left(\frac{Y_{t}}{\bar{Y}}\right)}^{\alpha_{y}}\right]}^{1-\tilde{\rho}_{IR}}\epsilon_{t}^{MP}$\n\nHere, $\tilde{\rho}_{IR}$ represents interest rate smoothing; $\alpha_{\pi}$ and $\alpha_{y}$ are the weights assigned to expected inflation and the output gap, respectively; $\epsilon_{t}^{MP}$ is a white-noise monetary policy shock; and $\bar{\pi}$ and $\bar{Y}$ are the steady-state values of inflation and output, respectively.
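A one-step sketch of the Taylor rule above, written with gross rates; all parameter values are illustrative stand-ins rather than the paper's calibration:

```python
def taylor_rate(r_prev, pi_exp, y, *, r_ss=1.01, pi_ss=1.005, y_ss=1.0,
                rho=0.8, a_pi=1.5, a_y=0.125, eps_mp=1.0):
    """R_t = R_{t-1}^rho * [R_ss * (E pi / pi_ss)^a_pi * (Y / Y_ss)^a_y]^(1 - rho) * eps."""
    target = r_ss * (pi_exp / pi_ss) ** a_pi * (y / y_ss) ** a_y
    return r_prev ** rho * target ** (1.0 - rho) * eps_mp

# Expected inflation 1 pp above its steady state nudges the gross rate up,
# smoothed by the rho = 0.8 inertia term.
print(taylor_rate(1.01, pi_exp=1.015, y=1.0))   # -> about 1.013
```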
4. Calibration\n\nTo solve the steady-state solution more conveniently, the methods and parameter settings of Christiano et al. and Verona et al. were consulted. We set the capital return rate ($r_{t}^{k,H}$) of a high-risk entrepreneur as an exogenous variable with a value of 0.0504, in line with the value used by Christiano. The weight of labor disutility $\psi_{L}$ was set as an endogenous variable; its value could be obtained by calculating the steady-state solution. The parameters in the model were calibrated and their references are shown in Table 1.\n\nTable 1. Model parameters.\n\n5. Results and Analysis\n\nAccording to the model hypothesis and parameter assignment, we simulated a confidence disturbance. The results are shown in the following diagrams, which plot the responses of output, consumption, investment, inflation, the price of capital assets, wages, total net worth, total liability scale, and the total leverage ratio to the confidence shock.\n\nThe rise in confidence leads to a rise in output and investment, while consumption initially decreases and subsequently increases (Figure 1). The interpretation of this response is straightforward. Optimistic expectations make agents willing to save and invest more, resulting in a relatively high output growth rate and a temporary decrease in consumption. Capital and wage prices experience a period of growth with a certain degree of fluctuation, due to price stickiness and adjustment costs.\n\nFigure 1. Responses to confidence shock.\n\nBy contrast, inflation declines and remains at a low level for a long period. The net worth and debt accumulation of the whole economy increase significantly, and the leverage ratio also increases and remains above the steady-state level for a long period. These changes result from excessive speculation: a continuation of booming growth and low-cost credit services leads to a large accumulation of debt and an extremely high leverage ratio.\n\nThese findings are consistent with the real economy's performance during an economic boom. The overall economy prospers and output grows considerably.\n\nAlthough society is in a "hyperactive" state, there are evident differences between the shadow banking sector and the commercial banking sector.\n\nIn Figure 2, the top three diagrams represent the status of the shadow banking sector, and the bottom three diagrams show the status of the commercial banking sector. As confidence increases, the loan rate of the shadow banking sector drops sharply, and consequently, the scale of credit continues to expand. This effect is persistent, leading to leverage increases over a long period. Although the interest rates of commercial banks also decline, their credit scale is reduced, in contrast to the shadow banking sector. The leverage ratio of commercial banks also declines to a certain extent, indicating a substitution effect on commercial banks when the economy prospers. Once confidence is strengthened, the demand for the shadow banking sector's credit services grows faster than that for the traditional financial sector.\n\nFigure 2. Responses of financial intermediaries to confidence shock.\n\nThis is consistent with the situation in the United States during the 20th century: a large amount of credit was generated by shadow banks rather than commercial banks, and this led to profound changes in the country's financial structure. Behind this, persistently low interest rates and a broad easing of confidence expectations served as influential drivers of the United States' financial structure.\n\n6. Conclusions\n\nWe analyzed the effect of a confidence shock on an economy with a shadow banking system as a parallel financial intermediary. Starting from a DSGE model, we introduced high-risk and low-risk entrepreneurs supported by the commercial banking sector and the shadow banking sector, respectively. Therefore, different forms of behavioral logic were observed in the economy. This study focused on the effect of confidence shocks, which greatly influence financial intermediaries. We found that:\n\n1) Confidence is essential to economic development because it can affect the overall economy, thereby enabling the economy to grow greatly in terms of aspects such as output, consumption, and investment.\n\n2) In the presence of a shadow banking system, increased confidence exerts a great effect on shadow banks and the sector supported by them. This effect increases overall economic volatility, leaving the economy somewhat vulnerable.\n\n3) The shadow banking sector, driven by the confidence effect, squeezes the credit business of commercial banks. This substitution effect changes the overall economic structure.\n\nThese findings offer some profound policy implications and suggestions as well. When the market becomes optimistic, policymakers should pay more attention to the impact of shadow banking on the economy, since the expansion of debt comes mostly from this sector. Regulation of shadow banking should be strengthened. Also, the confidence effect is crucial to the growth of shadow banking as well as to changes in economic structure. This suggests that the confidence effect should be considered in policy planning and in the path of economic development. Multi-targeted monetary policy that takes the confidence effect into consideration may be effective and efficient.\n\nAlthough our model captures several features of shadow banking and reveals the substitution effect, regulation and multi-targeted monetary policy are not discussed in this paper. Therefore, a possible direction for future research is to add them into the analysis.\n\nConflicts of Interest\n\nThe authors declare no conflicts of interest regarding the publication of this paper.\n\nReferences\n\nPozsar, Z. (2008) The Rise and Fall of the Shadow Banking System. Regional Financial Review, 44, 1-13.\nAdrian, T. and Shin, H.S. (2009) The Shadow Banking System: Implications for Financial Regulation. Staff Report, Federal Reserve Bank of New York, No. 382. https://doi.org/10.2139/ssrn.1441324\nFSB (2013) Global Shadow Banking Monitoring Report 2013. http://www.fsb.org/wp-content/uploads/r_131114.pdf\nFSB (2011) Shadow Banking: Scoping the Issues.
Pozsar, Z. (2008) The Rise and Fall of the Shadow Banking System. Regional Financial Review, 44, 1-13.
Adrian, T. and Shin, H.S. (2009) The Shadow Banking System: Implications for Financial Regulation. Staff Report, Federal Reserve Bank of New York, No. 382. https://doi.org/10.2139/ssrn.1441324
FSB (2013) Global Shadow Banking Monitoring Report 2013. http://www.fsb.org/wp-content/uploads/r_131114.pdf
FSB (2011) Shadow Banking: Scoping the Issues. http://www.fsb.org/wp-content/uploads/r_110412a.pdf
Pozsar, Z., Adrian, T., Ashcraft, A.B. and Boesky, H. (2010) Shadow Banking. Staff Report, Federal Reserve Bank of New York, No. 458.
Adrian, T. and Ashcraft, A.B. (2012) Shadow Banking: A Review of the Literature. Staff Report, Federal Reserve Bank of New York, No. 580.
Bernanke, B.S., Gertler, M. and Gilchrist, S. (1999) The Financial Accelerator in a Quantitative Business Cycle Framework. Handbook of Macroeconomics, 1, 1341-1393. https://doi.org/10.1016/S1574-0048(99)10034-X
Christiano, L., Motto, R. and Rostagno, M. (2010) Financial Factors in Economic Fluctuations. Working Paper Series, European Central Bank, No. 1192. https://www.ecb.europa.eu/pub/pdf/scpwps/ecbwp1192.pdf?d5356f67e3559baae7b37a12d911262b
Smets, F. and Wouters, R. (2002) An Estimated Stochastic Dynamic General Equilibrium Model of the Euro Area. International Seminar on Macroeconomics, European Central Bank.
Verona, F., Martins, M.M.F. and Drumond, I. (2013) (Un)anticipated Monetary Policy in a DSGE Model with a Shadow Banking System. Bank of Finland Research Discussion Paper 4/2013.
Diamond, D.W. and Dybvig, P.H. (1983) Bank Runs, Deposit Insurance, and Liquidity. Journal of Political Economy, 91, 401-419. https://doi.org/10.1086/261155
Gorton, G., Metrick, A., Shleifer, A. and Tarullo, D.K. (2010) Regulating the Shadow Banking System [with Comments and Discussion]. Brookings Papers on Economic Activity, Brookings Institution Press, Washington DC, 261-312. https://doi.org/10.1353/eca.2010.0016
Ferrante, F. (2018) A Model of Endogenous Loan Quality and the Collapse of the Shadow Banking System. American Economic Journal: Macroeconomics, 10, 152-201. https://doi.org/10.1257/mac.20160118
Christiano, L.J., Eichenbaum, M. and Evans, C.L. (2005) Nominal Rigidities and the Dynamic Effects of a Shock to Monetary Policy. Journal of Political Economy, 113, 1-45. https://doi.org/10.1086/426038
Calvo, G.A. (1983) Staggered Prices in a Utility-Maximizing Framework. Journal of Monetary Economics, 12, 383-398. https://doi.org/10.1016/0304-3932(83)90060-0
Erceg, C.J., Henderson, D.W. and Levin, A.T. (2000) Optimal Monetary Policy with Staggered Wage and Price Contracts. Journal of Monetary Economics, 46, 281-313. https://doi.org/10.1016/S0304-3932(00)00028-3
Levin, A.T., Onatski, A., Williams, J.C. and Williams, N. (2005) Monetary Policy under Uncertainty in Micro-Founded Macroeconometric Models. NBER Macroeconomics Annual, 20, 229-287. https://doi.org/10.1086/ma.20.3585423
Chen, L., Lesmond, D.A. and Wei, J. (2007) Corporate Yield Spreads and Bond Liquidity. The Journal of Finance, 62, 119-149. https://doi.org/10.1111/j.1540-6261.2007.01203.x
[ null, "https://scirp.org/images/Twitter.svg", null, "https://scirp.org/images/fb.svg", null, "https://scirp.org/images/in.svg", null, "https://scirp.org/images/weibo.svg", null, "https://scirp.org/images/emailsrp.png", null, "https://scirp.org/images/whatsapplogo.jpg", null, "https://scirp.org/Images/qq25.jpg", null, "https://scirp.org/images/weixinlogo.jpg", null, "https://scirp.org/images/weixinsrp120.jpg", null, "https://scirp.org/Images/ccby.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.9147754,"math_prob":0.99843085,"size":22403,"snap":"2021-43-2021-49","text_gpt3_token_len":4251,"char_repetition_ratio":0.15947141,"word_repetition_ratio":0.01701878,"special_character_ratio":0.18292193,"punctuation_ratio":0.10637748,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9994673,"pos_list":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20],"im_url_duplicate_count":[null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-10-25T08:41:14Z\",\"WARC-Record-ID\":\"<urn:uuid:cf1b10da-b3d1-4b84-9c6f-9e3a23070b05>\",\"Content-Length\":\"330754\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:53237e4a-b706-4869-842c-8d985d1db0d8>\",\"WARC-Concurrent-To\":\"<urn:uuid:44da1cf5-8091-457e-b8ee-00baf7ebc3c2>\",\"WARC-IP-Address\":\"144.126.144.39\",\"WARC-Target-URI\":\"https://scirp.org/journal/paperinformation.aspx?paperid=88635\",\"WARC-Payload-Digest\":\"sha1:N7ANDQZ5E2WJV2O7GOAYVM6QWQBJ4CMW\",\"WARC-Block-Digest\":\"sha1:K4MWEN5CKYB7TJ4VUQMNQBZ32ZNG3NK7\",\"WARC-Identified-Payload-Type\":\"application/xhtml+xml\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-43/CC-MAIN-2021-43_segments_1634323587655.10_warc_CC-MAIN-20211025061300-20211025091300-00577.warc.gz\"}"}
http://trainelectronics.com/Articles/Wheel_CW-or-CCW/index.htm
[ "Wheel Direction of Rotation Detector\nd. bodnar  revised 04-04-2015\n\nIntroduction\nRecently a fellow garden railroader asked me if I could come up with a way for a passenger car to determine which way it was going so that appropriate lights could be illuminated.  The car is an observation car (from USA Trains, I believe) that is designed to illuminate exterior lights one way when it is moving forward and a different way when running backwards.\n\nThis is easily accomplished if the train operates from standard DC track power.  Unfortunately he was running either DCC or constant track power with radio control so that the polarity on the track always remains the same.  The same issue would arise when using battery power.\n\nHe had built a mechanism that threw a switch one way when running forward and the other way when going backwards but it put a good bit of drag on the car.  He was looking for a solution that produced little or no drag.  He had also considered a modification to the coupler so that a forward pull could be detected but that option was not practical with the couplers that he was using.\n\nAfter giving this some thought (over a few days and a few bike rides!) I came up with a method using three magnets and a reed switch.  If the magnets are mounted as shown below the sequence of pulses that the reed switch produced was different based on direction of travel.\n\nThis diagram shows how the magnets are mounted on the wheel.  Three magnets are placed.  Two are 60 degrees apart and the third is 120 degrees away from the second, leaving a 180 degree gap between the third and first magnet.\n\nWhen the wheel turns in a clockwise direction the pulses are short, long, medium, short, long medium.  When going counter clockwise they are short, medium, long, short, medium, long.  By determining which pulse is the long one and comparing that to the one that follows it the direction can be figured out.", null, "Test Rig\nThis mechanism was used for testing.  The small motor (upper left) turns the wheel either clockwise or counter clockwise.  The DPDT switch (circled in black) determines the direction or rotation.  The magnets (two of the three are circled in red) pulse the reed switch (circled in yellow).", null, "Circuit\nThe reed switch is mounted next to the magnets on the wheel.  There is a duplicate switch on the prototype board that can be used for testing.  The yellow LED lights as the wheel turns and detects magnets.  One red LED lights if the wheel turns in one direction and the other red LED lights if it turns the other way.  After a 5 second period without any magnet detection passes both LEDs are turned off.\n\nThe diagram shows the circuit with an Arduino Uno but just about any Arduino should work.  This is especially important if space is an issue.", null, "Software -1\nInitial testing used the example program that comes with the Arduino called \"Button\".  It simply blinks an LED when a button (or reed switch) closes.  I attached one wire from the reed switch to pin 2 on the Arduino.  A 10K resistor pulled that pin low by going to ground.  The other wire from the reed switch connected to + 5 volts.\n ```/* Button Turns on and off a light emitting diode(LED) connected to digital pin 13, when pressing a pushbutton attached to pin 2. The circuit: * LED attached from pin 13 to ground * pushbutton attached to pin 2 from +5V * 10K resistor attached to pin 2 from ground * Note: on most Arduinos there is already an LED on the board attached to pin 13. 
Software - 1
Initial testing used the example program that comes with the Arduino called "Button". It simply blinks an LED when a button (or reed switch) closes. I attached one wire from the reed switch to pin 2 on the Arduino. A 10K resistor pulled that pin low by going to ground. The other wire from the reed switch connected to +5 volts.

```c
/*
  Button
  Turns on and off a light emitting diode (LED) connected to digital pin 13,
  when pressing a pushbutton attached to pin 2.

  The circuit:
  * LED attached from pin 13 to ground
  * pushbutton attached to pin 2 from +5V
  * 10K resistor attached to pin 2 from ground
  * Note: on most Arduinos there is already an LED on the board attached to pin 13.

  created 2005 by DojoDave
  modified 30 Aug 2011 by Tom Igoe
  This example code is in the public domain.
  http://www.arduino.cc/en/Tutorial/Button
*/

// constants won't change. They're used here to set pin numbers:
const int buttonPin = 2;     // the number of the pushbutton pin
const int ledPin = 13;       // the number of the LED pin

// variables will change:
int buttonState = 0;         // variable for reading the pushbutton status

void setup() {
  // initialize the LED pin as an output:
  pinMode(ledPin, OUTPUT);
  // initialize the pushbutton pin as an input:
  pinMode(buttonPin, INPUT);
}

void loop() {
  // read the state of the pushbutton value:
  buttonState = digitalRead(buttonPin);
  // check if the pushbutton is pressed. if it is, the buttonState is HIGH:
  if (buttonState == HIGH) {
    digitalWrite(ledPin, HIGH);   // turn LED on
  } else {
    digitalWrite(ledPin, LOW);    // turn LED off
  }
}
```

Logic Analyzer Test
I connected my Saleae Logic Analyzer to the output LED on the Arduino and made the following observations.

Wheel clockwise rotation
Note that the large period (1) has a short one (3) to the right and a medium length one to the left (2).

Wheel counter-clockwise rotation
Note that the large period (1) has a medium one (2) to the right and a short length one to the left (3).

Note that the two pulse patterns are surely different. All that remains is to write a computer program for the Arduino to differentiate between them.

Software - 2
The first program that I wrote measures the length of each pulse gap and sends it to the serial terminal:

```c
/*
  Sample sketch to show pulses from a reed switch on a wheel, to determine
  if the wheel turns CW or CCW. Three magnets on the wheel at 1/6ths --
  two together and one two away.

  NOTE: this version gives a good result and definitely discriminates between
  forward and backward. Now to do the math and get it to report properly!
*/

const int buttonPin = 2;     // the reed switch pin
const int ledPin = 13;       // the LED pin

int buttonState = 0;         // variable for reading the reed switch status
int minusCount = 0;          // counts loop passes while the switch is open
int plusCount = 0;           // counts loop passes while the switch is closed
int firstMinusFlag = 0;
int countValue[6];           // stores the six measured gap lengths
int y = 0;

void setup() {
  Serial.begin(115200);
  pinMode(ledPin, OUTPUT);
  pinMode(buttonPin, INPUT);
  Serial.println("forward / backward wheel test - 3-29-15");
}

void loop() {
  buttonState = digitalRead(buttonPin);
  if (buttonState == HIGH) {          // magnet present: a gap just ended
    plusCount = plusCount + 1;
    firstMinusFlag = 0;
    if (minusCount != 0) {            // record the length of the gap
      Serial.print("cnt ");
      Serial.println(minusCount);
      countValue[y] = minusCount;
      y = y + 1;
    }
    minusCount = 0;
    digitalWrite(ledPin, HIGH);
  } else {                            // no magnet: keep measuring the gap
    minusCount = minusCount + 1;
    plusCount = 0;
    digitalWrite(ledPin, LOW);
  }
  if (y == 6) {                       // six gaps = two full revolutions
    int big = 0;
    for (int i = 0; i < 6; i++) {
      Serial.print(countValue[i]);
      Serial.print(" ");
      if (countValue[i] >= big) {
        big = countValue[i];          // remember the longest gap
      }
    }
    Serial.println("");
    Serial.print("big = ");
    Serial.println(big);
    big = 0;
    y = 0;
  }
}
```
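Before wiring the decision logic into the Arduino, the gap pattern can be checked in isolation. The short Python sketch below (an illustration only, not code for the microcontroller) simulates the gap sequence produced by the 60/120/180 degree magnet spacing at constant wheel speed and applies the same rule as the working sketch that follows: find the longest gap and compare it with the gap that comes right after it. Which label corresponds to which physical direction depends on how the magnets and sensor are mounted.

```python
# Simulation of the pulse-gap idea: the 60/120/180 degree magnet spacing gives
# the gap sequence short-long-medium in one direction of rotation and
# short-medium-long in the other. Gap lengths assume constant wheel speed.

CW_PATTERN = [60, 180, 120]    # short, long, medium (degrees per gap)
CCW_PATTERN = [60, 120, 180]   # short, medium, long

def classify(gaps):
    """Classify direction from three consecutive gaps (one full revolution)."""
    longest = gaps.index(max(gaps))
    follower = gaps[(longest + 1) % 3]     # gap immediately after the longest
    ratio = gaps[longest] / follower
    # long -> short (180/60 = 3.0) vs long -> medium (180/120 = 1.5);
    # the direction labels depend on how the magnets are mounted.
    return "CCW" if ratio >= 2 else "CW"

print(classify(CW_PATTERN))    # CW
print(classify(CCW_PATTERN))   # CCW
```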
Working Code - lights the red LED based on direction of travel. The wheel has to turn a bit more than one revolution to detect the direction of travel. Version Wheel_CWorCCW_v_2_0_Working:

```c
/*
  Sample sketch to show pulses from a reed switch on a wheel, to determine
  if the wheel turns CW or CCW. Three magnets on the wheel at 1/6ths --
  two together and one two away.
*/

const int buttonPin = 2;          // the reed switch pin
const int ledPin = 13;            // pulses as magnets pass
const int ledDirection = 8;       // red LED for one direction
const int ledDirection2 = 7;      // red LED for the other direction

int buttonState = 0;              // reading of the reed switch
long minusCount = 0;              // length of the current gap
int firstMinusFlag = 0;
long countValue[4];               // the three measured gaps plus a wrap slot
long maxCountValuePosition = 0;   // index of the longest gap
int direction = 0;
long big = 0;
int y = 0;
unsigned long time;

void setup() {
  Serial.begin(9600);
  pinMode(ledPin, OUTPUT);
  pinMode(ledDirection, OUTPUT);
  pinMode(ledDirection2, OUTPUT);
  pinMode(buttonPin, INPUT);
  Serial.println("forward / backward wheel test V1.4x - 3-30-15");
  time = millis();
}

void loop() {
  if (millis() - time >= 5000) {  // 5 seconds without a magnet: clear both LEDs
    digitalWrite(ledDirection, LOW);
    digitalWrite(ledDirection2, LOW);
  }
  buttonState = digitalRead(buttonPin);
  if (buttonState == HIGH) {
    firstMinusFlag = 0;
    if (minusCount != 0) {        // a gap just ended; record it
      Serial.print("cnt ");
      Serial.print(y);
      Serial.print(" = ");
      Serial.print(minusCount);
      Serial.print(" ");
      countValue[y] = minusCount;
      if (countValue[y] >= big) { // track the longest gap and its position
        big = countValue[y];
        maxCountValuePosition = y;
      }
      y = y + 1;
    }
    minusCount = 0;
    digitalWrite(ledPin, HIGH);
    time = millis();
  } else {
    minusCount = minusCount + 1;
    digitalWrite(ledPin, LOW);
  }
  if (y == 3) {                   // three gaps = one full revolution
    Serial.println("");
    for (int x = 0; x < 3; x++) {
      Serial.print(x);
      Serial.print(" ");
      Serial.println(countValue[x]);
    }
    Serial.println("");
    Serial.print("big = ");
    Serial.print(big);
    Serial.print(" pos = ");
    Serial.print(maxCountValuePosition);
    Serial.print(" ");
    big = 0;
    y = 0;
    countValue[3] = countValue[0];  // wrap: the gap after the last is the first
    direction = (countValue[maxCountValuePosition] / countValue[maxCountValuePosition + 1]);
    Serial.print(" Ratio = ");
    Serial.println(direction);
    if (direction >= 4) {         // longest gap followed by a short one
      digitalWrite(ledDirection, HIGH);
      digitalWrite(ledDirection2, LOW);
    } else {                      // longest gap followed by a medium one
      digitalWrite(ledDirection, LOW);
      digitalWrite(ledDirection2, HIGH);
    }
  }
}
```

O-Scale with Hall Sensor
The photo below shows a test rig for O-Scale. There are three small magnets (circled in yellow) that stimulate a small Hall Effect Sensor (circled in red).

Two adjustments to the software are needed; see the notes below. The first change is necessary as the Hall sensor is high when no magnet is sensed and low when a magnet is detected. The sensor's output goes to pin 2 and is pulled high with a 10K resistor. The sensor's other pins go to +5 volts and ground.

```c
buttonState = digitalRead(buttonPin);
if (buttonState == LOW) {   ///////////////////////// Change here for HALL
  firstMinusFlag = 0;
```

```c
Serial.print(" Ratio = ");
Serial.println(direction);
if (direction >= 2) {       ///////////////////////// Change here for HALL
  digitalWrite(ledDirection, HIGH);
  digitalWrite(ledDirection2, LOW);
```
PIC Schematic

PIC Basic Pro Code - version 1.1
I translated the Arduino code (written in "C") to Basic for the PIC 12F683. The code is very similar to that of the Arduino:

```basic
' d. bodnar 4-4-2015 Version 1.1
' Sample sketch to show pulses from reed switch on wheel
' to determine if the wheel turns CW or CCW

Include "modedefs.bas"
ansel = 0
cmcon0 = 7
Serial_out var gpio.0       'pin 7  Serial Out
LEDPulse var gpio.2         'pin 5  hpwm 1 - pulses when reed hit
LEDDirection1 var gpio.4    'pin 3  an3
LEDDirection2 var gpio.1    'pin 6  an1
ReedSW var gpio.5           'pin 2  reed switch
notUsed3 var gpio.3         'pin 4
Temp1 var word
Temp2 var word
b1 var byte
b2 var byte
'Serial does not work if next line is compiled
'''OSCCON = %00100000       ' Set to 8MHz
Serout serial_out,n9600,[13,10,13,10,13,10,"d. bodnar Ver 1.1 + Wheel Direction",13,10]
Serout serial_out,n9600,[13,10,"8 MhZ 4-04-15",13,10]
gpio = %00100000            '5 is only input - others outputs

minusCount var word
firstPlusFlag var byte
firstMinusFlag var byte
countValue var word[4]      'three measured gaps plus a wrap slot
maxCountValuePosition var word
direction var byte
big var word
y var byte
x var byte
SerPrintFlag var bit
SerPrintFlag = 0

Top:
if reedSW = 1 then
  firstMinusFlag = 0
  if (minusCount <> 0) then
    countValue[y] = minusCount
    if (countValue[y] > big) then   'track the longest gap and its position
      big = countValue[y]
      maxCountValuePosition = y
    endif
    y = y + 1
  endif
  minusCount = 0
  high ledpulse
else
  minusCount = minusCount + 1
  low ledpulse
endif

if y = 3 then                       'three gaps = one full revolution
  for x = 0 to 3
    if SerPrintFlag = 1 then Serout serial_out,n9600,[#countValue[x]," "]
  next x
  if SerPrintFlag = 1 then Serout serial_out,n9600,[" big= ",#big," pos = ",#maxCountValuePosition," "]
  big = 0
  y = 0
  countValue[3] = countValue[0]     'wrap: the gap after the last is the first
  direction = countValue[maxCountValuePosition] / countValue[maxCountValuePosition + 1]
  if SerPrintFlag = 1 then Serout serial_out,n9600,[" Ratio = ",#direction,13,10]
  if direction > 2 then
    high leddirection1
    low leddirection2
  else
    low leddirection1
    high leddirection2
  endif
endif

Goto Top
```

Two Hall Sensor Schematic

PIC Basic Pro - uses two Hall Sensors and one magnet:

```basic
' d. bodnar 4-4-2015 Version 1.3 WORKING
' Two Hall sensors and one magnet: whichever sensor fires first
' after a quiet period determines the direction of rotation

Include "modedefs.bas"
ansel = 0
cmcon0 = 7
Serial_out var gpio.0       'pin 7  Serial Out
LEDPulse var gpio.2         'pin 5  hpwm 1 - pulses when reed hit
LEDDirection1 var gpio.4    'pin 3  an3
LEDDirection2 var gpio.1    'pin 6  an1
Hall1 var gpio.5            'pin 2  first Hall sensor
Hall2 var gpio.3            'pin 4  second Hall sensor

Serout serial_out,n9600,[13,10,13,10,13,10,"d. bodnar Ver 1.3 + Wheel Direction",13,10]
Serout serial_out,n9600,[13,10,"8 MhZ 4-04-15",13,10]

gpio = %00101000            '5 and 3 are inputs - others outputs

FreeCount var word
low ledDirection1 : low leddirection2
FreeCount = 0

Top:
FreeCount = FreeCount + 1
if Hall1 = 0 and FreeCount > 5000 then   'Hall1 fired first after a quiet spell
  FreeCount = 0
  high LEDDirection1
  low LEDDirection2
endif
if Hall2 = 0 and FreeCount > 5000 then   'Hall2 fired first after a quiet spell
  FreeCount = 0
  high LEDDirection2
  low LEDDirection1
endif
goto Top
```
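The two-sensor version trades timing for ordering: with a single magnet, whichever Hall sensor fires first after a quiet spell reveals the direction. A minimal Python sketch of that logic, using a made-up event stream purely for illustration:

```python
# Two Hall sensors, one magnet: direction follows from which sensor the
# magnet passes first. The event stream below is hypothetical.

events = ["hall1", "hall2", "hall1", "hall2"]   # order of sensor triggers

def direction_from_events(events):
    if len(events) < 2 or events[0] == events[1]:
        return "unknown"
    # Which label maps to which physical direction depends on mounting.
    return "CW" if events[0] == "hall1" else "CCW"

print(direction_from_events(events))
```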
[ null, "http://trainelectronics.com/Articles/Wheel_CW-or-CCW/images/wheel-magnets.gif", null, "http://trainelectronics.com/Articles/Wheel_CW-or-CCW/images/rig.jpg", null, "http://trainelectronics.com/Articles/Wheel_CW-or-CCW/images/Circuit-graphic.jpg", null, "http://trainelectronics.com/Articles/Wheel_CW-or-CCW/images/Pulses-1.gif", null, "http://trainelectronics.com/Articles/Wheel_CW-or-CCW/images/Pulses-2.gif", null, "http://trainelectronics.com/Articles/Wheel_CW-or-CCW/images/O-scale.jpg", null, "http://trainelectronics.com/Articles/Wheel_CW-or-CCW/images/hall.jpg", null, "http://trainelectronics.com/Articles/Wheel_CW-or-CCW/images/PIC-schematic.gif", null, "http://trainelectronics.com/Articles/Wheel_CW-or-CCW/images/schematic-2HallSensors.gif", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.82409984,"math_prob":0.91790026,"size":6876,"snap":"2019-13-2019-22","text_gpt3_token_len":1783,"char_repetition_ratio":0.13271245,"word_repetition_ratio":0.122232065,"special_character_ratio":0.26556137,"punctuation_ratio":0.102796674,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9712073,"pos_list":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18],"im_url_duplicate_count":[null,4,null,4,null,4,null,4,null,4,null,4,null,4,null,4,null,4,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2019-03-22T00:16:20Z\",\"WARC-Record-ID\":\"<urn:uuid:a5b05b3f-7a8f-4868-9568-b80732ec1756>\",\"Content-Length\":\"18111\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:32eed50f-3f70-4edd-8903-a3debaea0eb9>\",\"WARC-Concurrent-To\":\"<urn:uuid:72825d85-5297-4c3d-b173-70aef277a794>\",\"WARC-IP-Address\":\"74.208.236.80\",\"WARC-Target-URI\":\"http://trainelectronics.com/Articles/Wheel_CW-or-CCW/index.htm\",\"WARC-Payload-Digest\":\"sha1:IJY2K26Y2N5R7VX5QNNO5PEE5DXQTFEI\",\"WARC-Block-Digest\":\"sha1:UYTU7BDXGP6M5GDBTO5RI4CLORMBRHDH\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2019/CC-MAIN-2019-13/CC-MAIN-2019-13_segments_1552912202588.97_warc_CC-MAIN-20190321234128-20190322020128-00226.warc.gz\"}"}
https://www.studyskymate.com/search/label/Logical%20Reasoning
[ "Showing posts with label Logical Reasoning. Show all posts\nShowing posts with label Logical Reasoning. Show all posts\n\n# Logical Reasoning\n\n1\nLook at this series: 2, 1, (1/2), (1/4), ... What number should come next?\n A. (1/3) B. (1/8) C. (2/8) D. (1/16)\nExplanation:\nThis is a simple division series; each number is one-half of the previous number.\nIn other terms to say, the number is divided by 2 successively to get the next result.\n```4/2 = 2\n2/2 = 1\n1/2 = 1/2\n(1/2)/2 = 1/4\n(1/4)/2 = 1/8 and so on.```\n\n```\n2.\nLook at this series: 7, 10, 8, 11, 9, 12, ... What number should come next?\n\nA.7\nB.10\nC.12\nD.13\n\nExplanation:\n\nThis is a simple alternating addition and subtraction series. In the first pattern, 3 is added; in the second, 2 is subtracted.\n\n3.\nLook at this series: 36, 34, 30, 28, 24, ... What number should come next?\n\nA.20\nB.22\nC.23\nD.26\n\nExplanation:\n\nThis is an alternating number subtraction series. First, 2 is subtracted, then 4, then 2, and so on.\n\n4. Look at this series: 22, 21, 23, 22, 24, 23, ... What number should come next?\n\nA.22\nB.24\nC.25\nD.26\n\nExplanation:\n\nIn this simple alternating subtraction and addition series; 1 is subtracted, then 2 is added, and so on.\n\n5.\nLook at this series: 53, 53, 40, 40, 27, 27, ... What number should come next?\n\nA.12\nB.14\nC.27\nD.53\n\nExplanation:\n\nIn this series, each number is repeated, then 13 is subtracted to arrive at the next number.\n\n```\n\n### Create a Digital Clock using HTML and JavaScript\n\nCreate a Digital Clock using HTML and JavaScript  <! DOCTYPE html> < html > < head > <...", null, "" ]
[ null, "https://1.bp.blogspot.com/-UMbLA_qd5cE/XV9QN3wCzhI/AAAAAAAAAMM/cflzLQBq3KEVwO5bYakYR6c_wU2q7xZQwCLcBGAs/s1600/clock.JPG", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.88552284,"math_prob":0.96069336,"size":1223,"snap":"2023-40-2023-50","text_gpt3_token_len":392,"char_repetition_ratio":0.15422477,"word_repetition_ratio":0.18942732,"special_character_ratio":0.3589534,"punctuation_ratio":0.28213167,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.99358493,"pos_list":[0,1,2],"im_url_duplicate_count":[null,4,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-12-04T03:34:50Z\",\"WARC-Record-ID\":\"<urn:uuid:4da75821-8437-4afa-a558-8c63be41d5f3>\",\"Content-Length\":\"106538\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:aba4a973-c1c3-4acd-a672-80a07ce005ce>\",\"WARC-Concurrent-To\":\"<urn:uuid:a800c4dc-9d3c-4743-8791-00b35edc298d>\",\"WARC-IP-Address\":\"142.251.167.121\",\"WARC-Target-URI\":\"https://www.studyskymate.com/search/label/Logical%20Reasoning\",\"WARC-Payload-Digest\":\"sha1:7WL7EB7QPT4UVW5EOAMMSQHH5BLSS67B\",\"WARC-Block-Digest\":\"sha1:HMDZNKTEE6QLLMFCJUVASBBXMQQURZ75\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-50/CC-MAIN-2023-50_segments_1700679100523.4_warc_CC-MAIN-20231204020432-20231204050432-00558.warc.gz\"}"}
https://psychology.wikia.org/wiki/Multiple_correlation
[ "34,716 Pages\n\nIn statistics, regression analysis is a method for explanation of phenomena and prediction of future events. In the regression analysis, a coefficient of correlation r between random variables X and Y is a quantitative index of association between these two variables. In its squared form, as a coefficient of determination r2, indicates the amount of variance in the criterion variable Y that is accounted for by the variation in the predictor variable X. In the multiple regression analysis, the set of predictor variables X1, X2, ... is used to explain variability of the criterion variable Y. A multivariate counterpart of the coefficient of determination r2 is the coefficient of multiple determination, R2. The square root of the coefficient of multiple determination is the coefficient of multiple correlation, R.\n\n## Conceptualization of multiple correlation\n\nAn intuitive approach to the multiple regression analysis is to sum the squared correlations between the predictor variables and the criterion variable to obtain an index of the over-all relationship between the predictor variables and the criterion variable. However, such a sum is often greater than one, suggesting that simple summation of the squared coefficients of correlations is not a correct procedure to employ. In fact, a simple summation of squared coefficients of correlations between the predictor variables and the criterion variable is the correct procedure, but only in the special case when the predictor variables are not correlated. If the predictors are related, their inter-correlations must be removed so that only the unique contributions of each predictor toward explanation of the criterion are included.\n\n## Fundamental equation of multiple regression analysis\n\nInitially, a matrix of correlations R is computed for all variables involved in the analysis. This matrix can be conceptualized as a supermatrix, consisting of the vector of cross-correlations between the predictor variables and the criterion variable c, its transpose c’ and the matrix of intercorrelations between predictor variables Rxx. The fundamental equation of the multiple regression analysis is\n\nR2 = c' Rxx−1 c.\n\nThe expression on the left side signifies the coefficient of multiple determination (squared coefficient of multiple correlation). The expressions on the right side are the transposed vector of cross-correlations c', the matrix of inter-correlations Rxx to be inverted (cf., matrix inversion), and the vector of cross-correlations, c. The premultiplication of the vector of cross-correlations by its transpose changes the coefficients of correlation into coefficients of determination. The inverted matrix of the inter-correlations removes the redundant variance from the of inter-correlations of the predictor set of variables. These not-redundant cross-correlations are summed to obtain the multiple coefficient of determination R2. The square root of this coefficient is the coefficient of multiple correlation R.\n\nCommunity content is available under CC-BY-SA unless otherwise noted." ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.830965,"math_prob":0.9915123,"size":4195,"snap":"2021-31-2021-39","text_gpt3_token_len":805,"char_repetition_ratio":0.19589597,"word_repetition_ratio":0.0461285,"special_character_ratio":0.19642432,"punctuation_ratio":0.10166919,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9990952,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-08-01T15:12:20Z\",\"WARC-Record-ID\":\"<urn:uuid:2c812afd-3eee-4d57-8894-5f1ae11ef4c7>\",\"Content-Length\":\"97543\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:4d70ab0d-f38a-4370-aeb8-10f268c789b9>\",\"WARC-Concurrent-To\":\"<urn:uuid:cfdf98bd-a223-4135-afdf-3f94d13d7cf6>\",\"WARC-IP-Address\":\"151.101.192.194\",\"WARC-Target-URI\":\"https://psychology.wikia.org/wiki/Multiple_correlation\",\"WARC-Payload-Digest\":\"sha1:MK4SWQQMEGEUVUDE4XV3JHBI3NYMAWSX\",\"WARC-Block-Digest\":\"sha1:ANLDB5ZCV43WJXYZC4YVO7YB6NJFEXPZ\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-31/CC-MAIN-2021-31_segments_1627046154214.36_warc_CC-MAIN-20210801123745-20210801153745-00357.warc.gz\"}"}
https://oxfordre.com/economics/oso/viewentry/10.1093$002facrefore$002f9780190625979.001.0001$002facrefore-9780190625979-e-495?print
[ "Show Summary Details\n\nPage of\n\ndate: 27 January 2020\n\n# Age-Period-Cohort Models\n\n## Summary and Keywords\n\nOutcomes of interest often depend on the age, period, or cohort of the individual observed, where cohort and age add up to period. An example is consumption: consumption patterns change over the lifecycle (age) but are also affected by the availability of products at different times (period) and by birth-cohort-specific habits and preferences (cohort). Age-period-cohort (APC) models are additive models where the predictor is a sum of three time effects, which are functions of age, period, and cohort, respectively. Variations of these models are available for data aggregated over age, period, and cohort, and for data drawn from repeated cross-sections, where the time effects can be combined with individual covariates.\n\nThe age, period, and cohort time effects are intertwined. Inclusion of an indicator variable for each level of age, period, and cohort results in perfect collinearity, which is referred to as “the age-period-cohort identification problem.” Estimation can be done by dropping some indicator variables. However, dropping indicators has adverse consequences such as the time effects are not individually interpretable and inference becomes complicated. These consequences are avoided by instead decomposing the time effects into linear and non-linear components and noting that the identification problem relates to the linear components, whereas the non-linear components are identifiable. Thus, confusion is avoided by keeping the identifiable non-linear components of the time effects and the unidentifiable linear components apart. A variety of hypotheses of practical interest can be expressed in terms of the non-linear components.\n\n# Introduction to Age-Period-Cohort Models\n\nAge-period-cohort (APC) models are commonly used when individuals or populations are followed over time. In economics the models are most frequently used in labor economics and analysis of savings and consumption, but they are also relevant to health economics, migration, political economy, industrial organization, and other subdisciplines. Elsewhere the models are used in cancer epidemiology, demography, sociology, political science, and actuarial science. The models involve three time scales for age, period, and cohort, which are linearly interlinked, since the calendar period is the sum of the cohort and the age.\n\nThe APC time scales are typically measured discretely but can also be measured continuously. They can have various interpretations. The cohort often refers to the calendar year that a person is born, but it could also refer to the year an individual enters university or the year that a financial contract is written. The age is then the follow-up time since birth, entry to university, or the signing of the contract. Period is the sum of the two effects (i.e., the point in calendar time at which follow-up occurs). Together the three APC time scales constitute two time dimensions that are tracked simultaneously.\n\nThere are many types of APC data. Data may be recorded at the individual level in repeated cross-sections, where age and time of recording (period) are known for each individual. It could be panel data, where for each individual age progresses with time (period). Data could be aggregated at the level of age, period, and cohort. The empirical illustration in this chapter is concerned with U.S. employment data aggregated by age and period; see Tables 2 and 3. 
For this data, questions about age would consider the unemployment rates across different age groups, while questions about period would relate to changes in the overall economy. A question about cohort effects might be whether workers entering the labor force during boom years face different unemployment rates throughout their careers than those entering during bust years.

APC models will have many different appearances depending on the data and the question at hand. At the core of the models is a linear predictor of the form

$$\mu_{age,coh} = \alpha_{age} + \beta_{per} + \gamma_{coh} + \delta. \tag{1}$$

This is a non-parametric model that is additively separable in the three time scales, $age$, $per$, and $coh$. Thus, the time effects, $\alpha_{age}$, $\beta_{per}$, and $\gamma_{coh}$, are functions of the respective time indices. The right-hand side of (1) has a well-known identification problem: linear trends can be added to the period effect and subtracted from the age and cohort effects without changing the left-hand side of (1). The time effects can be decomposed into linear and non-linear components. Due to the identification problem the linear parts from the three APC effects cannot be disentangled. However, the non-linear parts are identifiable. As an example, suppose the age effect is quadratic,

$$\alpha_{age} = \alpha_c + \alpha_\ell \times age + \alpha_q \times (age)^2; \tag{2}$$

then $\alpha_c + \alpha_\ell \times age$ is the non-identifiable linear part and $\alpha_q \times (age)^2$ is the identifiable non-linear part.

Note that the identification problem is concerned with the right-hand side of (1), in that different values of the time effects on the right-hand side result in the same predictor on the left-hand side. The premise for this feature is that the left-hand-side predictor is identifiable and estimable in reasonable statistical models. This highlights that the crucial aspect of working with APC models is to be clear about what can and cannot be identified.

In economics a common type of data is the repeated cross-section with a continuous outcome variable. Such data could be modeled as follows. Suppose the observations for each individual are a continuous dependent variable $Y_i$ and a vector of regressors $Z_i$, as well as $age_i$ and $coh_i$, for $i = 1, \dots, N$. A simple regression model has the form

$$Y_i = \mu_{age_i,coh_i} + \zeta' Z_i + \varepsilon_i, \tag{3}$$

where the APC predictor $\mu_{age_i,coh_i}$ is given in (1) and $\varepsilon_i$ is a least squares error term. The identification problem from (1) is embedded in regression (3). The appropriate solution to this problem depends on what the investigator is interested in.

If the primary interest is the parameter $\zeta$, the problem can simply be addressed by restricting four of the time effect parameters to be zero, such as

$$\alpha_1 = \beta_1 = \beta_2 = \gamma_1 = 0. \tag{4}$$

This restriction on the time effects is just-identifying, so the regression can be estimated and the partial effect $\zeta$ can be calculated. However, with respect to the time effects, the just-identified linear trends do not have any interpretation outside the context of the restriction (4). This makes it difficult to interpret results and draw inferences regarding the APC parameters. The issue, and the reason that (4) does not solve the problem, is that the investigator could just as well have imposed

$$\alpha_1 = \alpha_2 = \beta_1 = \gamma_1 = 0, \tag{5}$$

resulting in time effects with very different appearances (see Figures 2 and 3). Neither of these restrictions is testable. To appreciate the APC identification problem, one has to go back to the original formulation (1) and ask if any inference drawn would be different if imposing (5) instead of (4). If there is a difference, then one must exercise caution.
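The collinearity behind this can be seen directly by building the full dummy design for a small array and checking its rank. The sketch below (illustrative Python, with an arbitrarily chosen small array) shows the rank falling four short of the column count, matching the four parameters restricted in (4):

```python
import numpy as np

# Full APC dummy design on an age-cohort array: intercept plus one indicator
# per age, period, and cohort level. Its rank falls 4 short of the column
# count, matching the four just-identifying restrictions in (4).

A = C = 5                       # small age-cohort array, so P = A + C - 1 = 9
rows = []
for age in range(1, A + 1):
    for coh in range(1, C + 1):
        per = age + coh - 1     # the period index
        x = [1.0]                                            # intercept
        x += [1.0 if age == k else 0.0 for k in range(1, A + 1)]
        x += [1.0 if per == k else 0.0 for k in range(1, A + C)]
        x += [1.0 if coh == k else 0.0 for k in range(1, C + 1)]
        rows.append(x)

D = np.array(rows)
q = D.shape[1]                  # q = A + P + C + 1 = 20
print(q, np.linalg.matrix_rank(D))   # prints 20 16: rank deficiency of 4
```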
The identification problem has generated an enormous literature, where solutions fall into three broad categories. The traditional approach is to identify the time effects by introducing non-testable constraints on the linear parts of the time effects. Such restrictions are in principle akin to (4) or (5) (Hanoch & Honig, 1985). Bayesian approaches that achieve identification by imposing a prior that is not updated come under this first category. A second approach is to abandon the APC model and either use graphs of data to get an impression of time effects (Meghir & Whitehouse, 1996; Voas & Chaves, 2016) or replace the time effects in the model with other variables (Heckman & Robb, 1985). Finally, the third approach is to isolate the non-linear parts of the time effects and interpret only those. Holford (1983) and Clayton and Schifflers (1987b) were early proponents of this focus on second-order effects, while more recently Kuang et al. (2008a) presented a reparametrization of the APC model (1) in terms of invariant, non-linear parts of the time effects. The latter approach clarifies the inferences that can be drawn from APC models. Smith and Wakefield (2016) presented a Bayesian version of the latter approach.

It is possible to characterize precisely which questions can and cannot be addressed by APC models. Questions that can be addressed include any question relating to the linear predictor $\mu_{age,coh}$ on the left-hand side of (1). This is valuable in forecasting. For instance, if it is of interest to forecast the resources needed for schools, an APC model can be fitted to data for counts of school children at different ages, and then the predictor can be extrapolated into the future. Another use would be to compare how consumption changed from 2008 to 2009 with how it changed from 2007 to 2008: this is to measure the effect of the financial crisis. This question is concerned with differences-in-differences and is identifiable from the non-linear parts of the time effects. Note that a consequence of the model is that this change in consumption affects all cohorts in the same way. If one suspects that different cohorts are differently affected, an interaction term would be needed in model (1).

Conversely, the questions that cannot be addressed by APC models can also be characterized. These are questions that relate to levels or slopes of the time effects. In the context of the quadratic age example (2) the level and slope are $\alpha_c$ and $\alpha_\ell$, respectively.

There are a variety of applications in economics for which APC modeling can be useful. In any setting where the passage of time is an explanatory factor, there is a risk of confused interpretation due to the APC problem. This has been recognized in studies of labor market dynamics (Hanoch & Honig, 1985; Heckman & Robb, 1985; Krueger & Pischke, 1992; Fitzenberger et al., 2004), lifecycle saving and growth (Deaton & Paxson, 1994a), consumption (Attanasio, 1998; Deaton & Paxson, 2000; Browning et al., 2016), migration (Beenstock et al., 2010), inequality (Kalwij & Alessie, 2007), and structural analysis (Schulhofer-Wohl, 2018). Yang and Land (2013) and O’Brien (2015) describe examples in criminology, epidemiology, and sociology.

The risk of confusion due to the identification problem is avoidable. For example, McKenzie (2006) exploited the non-linear discontinuity in consumption with respect to period to evaluate the impact of the Mexican peso crisis.
Ejrnæs and Hochguertel (2013) are not directly interested in the time effects and so can use an ad hoc identified APC model to control for time in their investigation of the effect of unemployment insurance on the probability of becoming unemployed in Denmark.

However, where the research question involves the linear part of a time effect, any attempt to answer it directly must involve untestable restrictions on the linear parts of other time effects. In this context the risk of confounding between time effects cannot be mitigated. One solution is to reformulate the question in terms of the non-linear parts of the time effects. Certain difference-in-difference questions naturally take this form; see, for example, McKenzie’s (2006) analysis of the peso crisis. Otherwise, the researcher’s only option is to argue for untestable restrictions using economic theory. Such restrictions may be explicit, as in (4) or (5), or implicit, if time effects are replaced with a proxy variable (Krueger & Pischke, 1992; Deaton & Paxson, 1994b; Attanasio, 1998; Browning et al., 2016).

The risks of confounding inherent in models involving age, period, or cohort can be avoided by beginning with a general model that allows for any possible combination of time effects and then gradually reducing the model by imposing testable restrictions. There is substantial scope for such testable restrictions: exclusion and functional form restrictions on the non-linear parts of each of age, period, and cohort can be tested, as can the replacement of time effects by proxy variables.

The remainder of this chapter elaborates on these main points. The identification problem is explained in greater detail. Several approaches to resolve or avoid the identification problem are discussed, including variants of the traditional approach and the recent re-parametrization. Interpretation of the parameters of the APC model is discussed. The idea of submodels, which provide a systematic guide to testable reductions of the APC model, is introduced. There is some discussion of "hidden" identification problems, which can arise when the initial model is insufficiently general. This is followed by a section explaining the types of problems that the APC model is well equipped to address. The final section contains a more detailed discussion of statistical models for APC analysis and an empirical illustration.

# Preliminary Concepts in APC Analysis

Elements of the conceptual framework used in subsequent formalized discussions of APC models are introduced. In particular, the recording of time is discussed, the types of data structures for which APC models are used are described, and vector notation is defined.

## Time

Though time is continuous, it is recorded discretely in units such as years, days, or seconds. Throughout this discussion it is assumed that the time index is positive. The traditional calendar convention is adopted, whereby there is no year zero and time is rounded up to the nearest whole number of units, rather than the time stamp method, which has a year zero and where time is rounded down to the nearest whole number of units. Suppose a given sample has single-year units. Then $age = 1$ is assigned to the youngest person and $coh = 1$ is assigned to the earliest recorded birth year. This leads to the relation

$$per = age + coh - 1. \tag{6}$$

Typically only two of the three time scales, $age$, $per$, and $coh$, are recorded. Where all three are recorded, the above relation will appear inaccurate in some cases, depending on where in the year the birthday falls.
Where all three are recorded, the above relation will appear inaccurate in some cases depending on where in the year the birthday falls. Osmond and Gardner (1989) showed that it does not matter for the identification problem whether two or three time scales are recorded. Carstensen (2007) showed how to handle the additional information from a third recorded time scale.\n\n## Data Array\n\nA range of data structures appear in the literature. The main types are age-period (AP) arrays, a common format for repeated cross-sections; period-cohort (PC) arrays, used in prospective cohort studies; and age-cohort (AC) arrays. In 1875 Lexis referred to these arrays as the “principal sets of death” (Keiding, 1990). Another common data array is the age-cohort triangle used for reserving in general insurance (England & Verrall, 2002). The different data arrays can be unified by representing them in a common coordinate system. It is convenient to work with an age-cohort coordinate system due to their symmetric roles in the time relation (6). Figure 1 illustrates how an age-period array is represented in an age-cohort coordinate system. We use an age-cohort coordinate system throughout this article.", null, "Figure 1. An age-period array in age-period coordinates and in age-cohort coordinates. Here $L=A−1$ is an offset.\n\nThe notation used to describe the coordinate system derives from the fact that most common data array types are instances of generalized trapezoids (Kuang et al., 2008a). These are defined by the index set\n\n$Display mathematics$\n(7)\n\nwhere $L$ is a period offset. The age-period array has $L=A−1$ and $L+P=C$, while an age-cohort array has $L=0$ and $P=A+C−1$.\n\n## Vector Notation\n\nThe time effect equation (1) has the linear predictor $μage,coh$ on the left-hand side. It varies on a surface described by coordinates in age and cohort. The shape is given by the combination of the time effects, $αage$, $βper,$ and $γcoh$. Stacking the linear predictors as a vector gives\n\n$Display mathematics$\n(8)\n\nwhich has dimension $n$, so that $n=AC$ for an $AC$ array and $n=AP$ for an $AP$ array, and where $J$ refers to the index set of the form (7).\n\nCollecting the time effects on the right-hand side of (1) gives the vector\n\n$Display mathematics$\n(9)\n\nof dimension $q=A+P+C+1$. Thus, the model (1) implies that the $n$-vector $μ$ in (8) varies in a $q$-dimensional way as a surface in a three-dimensional space indexed by $age$ and $coh$. When $n$ is not too small the surface for $μ$ is estimable so that $μ$ can be identified up to sampling error. The APC identification problem is that the time effects are collinear, so that not all components in the $q$-vector $θ$ are identified.\n\n# Explanation of the Identification Problem\n\nThe identification problem arising in the linear parts of the time effects is formally defined and illustrated in a simplified linear model.\n\n## Formal Characterization\n\nIn equation (1) the predictor $μage,coh$ is identifiable from data, whereas the time effects on the right-hand side of equation (1) are only identifiable up to linear trends. Indeed, the equation can, for any a, b, c, d, be rewritten as\n\n$Display mathematics$\n(10)\n\nSince the four quantities a, b, c, d are arbitrary, only a $p=q−4$ dimensional version of $θ$ is estimable. The equation (10) also shows that the time effects, such as the age effect $αage$, are only discoverable up to an arbitrary linear trend. 
# Explanation of the Identification Problem

The identification problem arising in the linear parts of the time effects is formally defined and illustrated in a simplified linear model.

## Formal Characterization

In equation (1) the predictor $\mu_{age,coh}$ is identifiable from data, whereas the time effects on the right-hand side of equation (1) are only identifiable up to linear trends. Indeed, the equation can, for any $a, b, c, d$, be rewritten as

$$\mu_{age,coh} = (\alpha_{age} + a + d \times age) + (\beta_{per} + b - d \times per) + (\gamma_{coh} + c + d \times coh) + (\delta - a - b - c - d). \tag{10}$$

Since the four quantities $a, b, c, d$ are arbitrary, only a $p = q - 4$ dimensional version of $\theta$ is estimable. Equation (10) also shows that the time effects, such as the age effect $\alpha_{age}$, are only discoverable up to an arbitrary linear trend. It is therefore possible to learn about the non-linear part of the age effect only. The non-linearity captures the shape of the age effect, which can be expressed through second and higher derivatives. The unidentified linear parts of the time effects combine to form a shared identifiable linear plane, which is explored in the next subsection. The unidentifiability of the linear components has a number of consequences with respect to interpretation, count of degrees of freedom, plotting, inference, and forecasting.

## Illustration in a Simple Case: The Linear Plane Model

The linear plane model is the simplest model where the APC identification problem is present. It arises when all the time effects are assumed to be linear. For instance, the age effect is parametrized as $\alpha_{age} = \alpha_c + \alpha_\ell \times age$, where $\alpha_c$ is a constant level and $\alpha_\ell$ is a linear slope. Combining the three linear time effects results in

$$\mu_{age,coh} = (\alpha_c + \alpha_\ell \times age) + (\beta_c + \beta_\ell \times per) + (\gamma_c + \gamma_\ell \times coh) + \delta. \tag{11}$$

This model involves seven parameters, but only a three-dimensional combination is identified due to the transformations in (10).

It is tempting to impose constraints on the four intercepts in (11) and the three slopes to get a single intercept and two slopes. This will not change the range of the predictor on the left-hand side of (11), but it will change the interpretation of the unidentified time effects on the right-hand side. Two researchers choosing different constraints may end up drawing different inferences about the time effects.

Model (11) implies that the predictor varies on a linear plane. A linear plane can be parametrized in many ways. For instance, the plane could be parametrized in terms of age and cohort slopes anchored at $age = coh = 1$, as in

$$\mu_{age,coh} = \mu_{1,1} + (age - 1)(\mu_{2,1} - \mu_{1,1}) + (coh - 1)(\mu_{1,2} - \mu_{1,1}). \tag{12}$$

Equally, it could be parametrized in terms of age and period slopes using (6), as in

$$\mu_{age,coh} = \mu_{1,1} + (age - 1)(\mu_{2,1} - \mu_{1,2}) + (per - 1)(\mu_{1,2} - \mu_{1,1}). \tag{13}$$

The parametrizations (12) and (13) both identify the variation of the predictor on the left-hand side of (11). However, the slopes in (12) and (13) do not identify the slopes of the time effects. The age slopes in (12) and (13) are different and satisfy, within the linear plane model, $\mu_{2,1} - \mu_{1,1} = \alpha_\ell + \beta_\ell$ and $\mu_{2,1} - \mu_{1,2} = \alpha_\ell - \gamma_\ell$, respectively; evidently, neither is equal to $\alpha_\ell$.

Equation (12) parametrizes the linear plane without reference to time effects. Time effects can only be identified by imposing restrictions on these. The constraint (4) is equivalent to $\alpha_c = \beta_c = \gamma_c = \beta_\ell = 0$ in the linear plane model (11). With this constraint, identification is achieved in that $\mu_{2,1} - \mu_{1,1} = \alpha_\ell$ and $\mu_{2,1} - \mu_{1,2} = -\gamma_\ell$ and $\mu_{1,1} = \delta$. This identification gives a model in terms of age and cohort time effects. By imposing the constraint (5), a model in terms of period and cohort time effects could be obtained, and a similar set of constraints would result in a model in terms of age and period slopes. Each set of constraints appears to lead to information about the time effects, but clearly they cannot all be correct. In fact, it is not possible to establish whether any of these three sets of constraints leads to a correct impression of the unidentifiable time effects. Although the time effects cannot be identified, it is still possible to answer any question that relates to the predictor $\mu_{age,coh}$, such as forecasting future values or testing for change in $\mu_{age,coh}$.

As a numerical example of the identification issue, suppose the linear plane (12) is

$$\mu_{age,coh} = 1 + 3(age - 1) + (coh - 1), \tag{14}$$

over an AC array with $A = C = 10$. The linear plane (14) does not specify the time effects, and the over-parametrized time effect specification (11) cannot be identified.

Suppose it is not known that the data is generated by (14), but it is known that a model of the form (11) generated the data. Applying the constraints (4) and (5) to the model (11) in the context of the data-generating process (14) results in the slopes $\alpha_\ell = 3$, $\beta_\ell = 0$, $\gamma_\ell = 1$ and $\alpha_\ell = 0$, $\beta_\ell = 3$, $\gamma_\ell = -2$, respectively, as illustrated in Figures 2 and 3.

Figure 2. Time effect slopes under identification (4).

Figure 3. Time effect slopes under identification (5) for the same linear plane as in Figure 2.

Figures 2 and 3 have a rather different appearance despite generating exactly the same linear plane. Three features are important. First, the signs of the slopes are not identified. The cohort effect is upward sloping in Figure 2(c) and downward sloping in Figure 3(c). Second, the units of the time effects have no meaning. The period scale is not defined in Figure 2(b), whereas it is defined in Figure 3(b). Further, the units of the cohort scales are very different in Figures 2(c) and 3(c), which have slopes of 1 and -2, respectively, yet they are observationally equivalent. Third, a subtler feature is that within each figure the subplots are interlinked. For example, by setting the period slope to zero in Figure 2(b), the cohort slope in Figure 2(c) becomes upward sloping. But where the age slope is set to zero in Figure 3(a), the period is upward sloping in Figure 3(b), while the cohort is downward sloping in Figure 3(c). Thus, it is not possible to draw inferences from any subplot in isolation. This is a serious limitation in practice, as the eye tends to focus on one subplot at a time.
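The observational equivalence illustrated by Figures 2 and 3 is easy to verify numerically. The check below (illustrative Python; the intercepts are chosen so that both specifications reproduce the plane (14)) confirms that the slope sets (3, 0, 1) and (0, 3, -2) generate identical predictors:

```python
# Two sets of time-effect slopes that generate exactly the same linear plane.

def plane(age, coh, a_l, b_l, g_l, delta):
    per = age + coh - 1
    return delta + a_l * age + b_l * per + g_l * coh

# (alpha_l, beta_l, gamma_l) = (3, 0, 1) with delta = -3,
# versus   (0, 3, -2) with delta = 0; both give mu_{1,1} = 1 as in (14).
for age in range(1, 4):
    for coh in range(1, 4):
        v1 = plane(age, coh, 3, 0, 1, -3)
        v2 = plane(age, coh, 0, 3, -2, 0)
        assert v1 == v2            # identical predictors everywhere
print("the two slope sets are observationally equivalent")
```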
# Approaches to the Identification Problem

An overview is given of some of the most commonly encountered identification strategies in the APC literature. Each of three categories of solutions (identification by restriction, forgoing the formal APC model, and isolating the non-linear effects) is considered. This is prefaced by a discussion of the desirable features of an APC identification strategy.

## What to Look for in a Good Approach

There are many proposed solutions and identification strategies in the literature on APC modeling, across several disciplines. This section provides guidance on assessing such identification strategies.

### Invariance

It has long been recognized that it is useful to work with functions of the time effects that are invariant to the transformations in (10). Thus, there are some parallels to the theory of invariant reduction of statistical models (Lehman, 1986, section 6; Cox & Hinkley, 1974, section 5.3). In that vein Carstensen (2007) interpreted equation (10) as a group $g$ of transformations from the collection of time effects $\theta$ in (9) to the collection of predictors $\mu$ in (8). Functions of $\theta$, say $f(\theta)$, are invariant if $f\{g(\theta)\} = f(\theta)$.

Double differences of the time effects are invariant (Fienberg & Mason, 1979; Clayton & Schifflers, 1987b; McKenzie, 2006). To see this, consider the double-differenced age effect:

$$\Delta^2 \alpha_{age} = \Delta \alpha_{age} - \Delta \alpha_{age-1} = (\alpha_{age} - \alpha_{age-1}) - (\alpha_{age-1} - \alpha_{age-2}). \tag{15}$$

Equation (10) shows that for any non-zero $a, d$ the age effects $\alpha_{age}$ and $\alpha_{age} + a + d \times age$ are observationally equivalent but can differ substantially in value; this was demonstrated in Figures 2 and 3. Now, the double differences of $\alpha_{age}$ and $\alpha_{age} + a + d \times age$ are both $\Delta^2 \alpha_{age}$, which does not depend on $a, d$ and is therefore invariant to the transformations in (10). In the context of the quadratic example (2) it can be shown that $\Delta^2 \alpha_{age} = 2\alpha_q$. The double differences have an odds-ratio or difference-in-difference interpretation, which is further discussed in the section "Interpretation of the Estimated Effects."
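This invariance can be verified in a couple of lines. The check below (illustrative Python with arbitrary parameter values) adds a level and a linear trend to a quadratic age effect and confirms that the double differences are unchanged and equal $2\alpha_q$:

```python
# Delta^2 alpha_age is invariant to adding a + d*age, and equals 2*alpha_q
# for the quadratic age effect alpha_age = alpha_c + alpha_l*age + alpha_q*age^2.

alpha_c, alpha_l, alpha_q = 0.5, -1.2, 0.3   # arbitrary illustrative values
a, d = 7.0, 2.5                              # arbitrary level and trend

def alpha(age, a=0.0, d=0.0):
    return alpha_c + alpha_l * age + alpha_q * age ** 2 + a + d * age

def dd(f, age, **kw):
    """Double difference of f at age."""
    return f(age, **kw) - 2 * f(age - 1, **kw) + f(age - 2, **kw)

for age in range(3, 6):
    assert abs(dd(alpha, age) - dd(alpha, age, a=a, d=d)) < 1e-12
    assert abs(dd(alpha, age) - 2 * alpha_q) < 1e-12
print("double differences are invariant and equal 2*alpha_q =", 2 * alpha_q)
```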
The predictor $\mu_{age,coh}$ is also invariant (Schmid & Held, 2007; Kuang et al., 2008a). Indeed, equation (10) shows that any transformation of that form applied to the time effects on the right-hand side of (1) results in the same predictor. However, $\mu_{age,coh}$ alone may not be of great interest. The next step is therefore to represent the predictor $\mu$ exclusively in terms of invariant functions $\xi(\theta)$. That is, the desired outcome is to express $\mu$ as a bijective function of $\xi(\theta)$, where $\xi$ is invariant so that $\xi(\theta) = \xi(g(\theta))$. The function $\xi$ is then a maximal invariant and useful for parametrization of the model, as it carries as much of the intended information from the time effects as possible while being invariant to the identification problem.

In the context of exponential family models, such as the linear model in (3) or logit or Poisson regressions, the predictor $\mu_{age,coh}$ enters linearly in the log-likelihood. If the maximal invariant parameter $\xi$ is a linear function of the time effects, and varies freely in an open parameter space, then the exponential family model is regular with $\xi$ as canonical parameter (Barndorff-Nielsen, 1978, section 8). Such a canonical parameter is explicitly defined in equations (18) and (20).

### Stability Across Subsamples

An alternative way to think about invariance is subsample analysis. It is relevant in two ways. First, it can be used to check a claim that a particular identification strategy avoids the identification problem. Second, it can be used for specification testing in a practical analysis.

Suppose it is claimed that a proposed method for estimating the age effect or some structural parameter avoids the identification problem. In many cases it can be argued that the method should be, apart from estimation error, invariant to the choice of data array. Specifically, suppose a data array $\mathcal{J}$ of the form (7) is available. A subset $\mathcal{J}'$ can be formed in various ways, for instance, by considering those age groups younger than some threshold $A'$. The claim that the method avoids the identification problem is then substantiated if the method gives the same result when applied to the full data array $\mathcal{J}$ and to the subset data array $\mathcal{J}'$.

Whatever method is applied, the specification of an estimated model can be checked by recursive analysis, following common practice in time series analysis. The idea is to track the estimates of invariant parameters for different subsets $\mathcal{J}'$ with different choices of threshold $A'$ and to plot these against the threshold values, following Chow (1960). Investigators can check the specification of models by recursive modeling along the three time scales. For a well-specified model those estimates should not vary substantially with the threshold, apart from minor variation due to estimation error. Larger variation is indicative of structural breaks in the data-generating process and calls for a more flexible model than (1).
## Invariant Parametrization

In the section "Invariance" it was argued that the double-differenced time effects, such as $\Delta^2 \alpha_{age}$ introduced in (15), are invariant and that they represent the non-linear parts of the time effects. The predictors $\mu_{age,coh}$ are also invariant, and three of them can be combined to parametrize a linear plane. Taking this plane and the double-differenced time effects together, an invariant parametrization of the age-period-cohort model can be constructed. This circumvents the unsolvable identification problem and gives a representation from which an invariant parametrization can be constructed. The representation is

$$\mu_{age,coh} = \text{linear plane}_{age,coh} + \sum\sum \Delta^2\alpha_s + \sum\sum \Delta^2\beta_s + \sum\sum \Delta^2\gamma_s. \tag{16}$$

The exact specification of the linear plane and the summation indices for the double sums of double differences depend on the index array for the age, period, and cohort indices. Note that the linear terms are simply kept together as a linear plane, without attempting to disentangle them into APC components.

### Age-Cohort Index Arrays

Kuang et al. (2008a) considered AC index arrays and showed

$$\mu_{age,coh} = \mu_{1,1} + (age - 1)(\mu_{2,1} - \mu_{1,1}) + (coh - 1)(\mu_{1,2} - \mu_{1,1}) + \sum_{t=3}^{age} \sum_{s=3}^{t} \Delta^2\alpha_s + \sum_{t=3}^{per} \sum_{s=3}^{t} \Delta^2\beta_s + \sum_{t=3}^{coh} \sum_{s=3}^{t} \Delta^2\gamma_s, \tag{17}$$

with the convention that empty sums are zero. Here the linear plane has been parametrized as in (12). The plane is identified, as it is invariant to the transformations (10), but the time effect slopes remain unidentified, since the age, period, and cohort slopes remain interlinked; see (12), (13). A feature of the representation (17) is that the non-linear components are separated from the linear plane. The predictor in (17) can be summarized as $\mu_{age,coh} = \xi' x_{age,coh}$ where

$$\xi = (\mu_{1,1}, \ \mu_{2,1} - \mu_{1,1}, \ \mu_{1,2} - \mu_{1,1}, \ \Delta^2\alpha_3, \dots, \Delta^2\alpha_A, \ \Delta^2\beta_3, \dots, \Delta^2\beta_P, \ \Delta^2\gamma_3, \dots, \Delta^2\gamma_C)'. \tag{18}$$

The design vector $x_{age,coh}$ is defined in terms of a function $m(t,s) = \max(t - s + 1, 0)$ as

$$x_{age,coh} = \{1, \ age - 1, \ coh - 1, \ m(age,3), \dots, m(age,A), \ m(per,3), \dots, m(per,P), \ m(coh,3), \dots, m(coh,C)\}'. \tag{19}$$

Theorem 1 of Kuang et al. (2008a) shows that $\xi$ is a maximal invariant with respect to the transformations in (10), as it is composed of double differences and values of the predictor itself. The parameter $\xi$ will be canonical in the context of exponential family models such as normal, logistic/binomial, or log-linear/Poisson regressions.

### General Index Arrays Including Age-Period Arrays

The representation (17) for age-cohort arrays does not apply for general index arrays. The issue is that the point at which $age = coh = 1$ generally is outside the index array. This is, for instance, the case for age-period arrays, as shown in Figure 1. The choice of anchoring point is mainly a computational issue and can be done in various ways. Nielsen (2015) suggested anchoring in the middle of the first or second period diagonal. In this way the age-cohort symmetry in the time identity (6) is preserved. In that case, let $U$ be the integer value of $(L+3)/2$, where $L$ is the offset described in the section "Data Array." The anchoring point has $age = coh = U$, so that $per = 2U - 1$ by (6). For a zero offset, $L = 0$, as for an age-cohort array, $U = 1$ and the anchoring point is simply $age = coh = 1$, as in the representation (17).

The general representation is written as $\mu_{age,coh} = \xi' x_{age,coh}$ where

$$\xi = (\mu_{U,U}, \ \mu_{U+1,U} - \mu_{U,U}, \ \mu_{U,U+1} - \mu_{U,U}, \ \Delta^2\alpha_3, \dots, \Delta^2\alpha_A, \ \Delta^2\beta_{L+3}, \dots, \Delta^2\beta_{L+P}, \ \Delta^2\gamma_3, \dots, \Delta^2\gamma_C)'. \tag{20}$$

Note the similarities between (20) and (18); the difference lies in the introduction of $U$ and $L$.

The design vector is defined in terms of the function $m(t,s) = \max(t - s + 1, 0)$ as

$Display mathematics$
(21)

where the period part $x^\beta_{age,coh}$ depends on whether $L$ is even or odd:

$Display mathematics$
(22)

This parametrization captures all the identifiable variation in the predictor due to the time effects. The interpretation of the elements of $\xi$ is discussed in a subsequent section.
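For an age-cohort array the design vector (19) is simple to construct. The sketch below (illustrative Python; dimensions chosen arbitrarily) builds $x_{age,coh}$ from the function $m(t,s) = \max(t - s + 1, 0)$:

```python
# Design vector for the canonical parametrization on an age-cohort array,
# following the structure of (19): a linear-plane part plus m(t, s) weights
# on the double differences of each time effect.

def m(t, s):
    return max(t - s + 1, 0)

def design_vector(age, coh, A, C):
    per, P = age + coh - 1, A + C - 1
    x = [1, age - 1, coh - 1]
    x += [m(age, s) for s in range(3, A + 1)]   # weights on DD(alpha_3..alpha_A)
    x += [m(per, s) for s in range(3, P + 1)]   # weights on DD(beta_3..beta_P)
    x += [m(coh, s) for s in range(3, C + 1)]   # weights on DD(gamma_3..gamma_C)
    return x                                    # length p = A + P + C - 3

A = C = 4
print(design_vector(1, 1, A, C))   # reduces to the pure linear-plane part
print(design_vector(3, 2, A, C))   # per = 4: non-zero double-difference weights
```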
Such restrictions give a parametrization that is not invariant to the transformations in (10). This leads to the kind of issues highlighted with Figures 2 and 3. The purpose of the restrictions is essentially to extract some version of the linear parts of the time effects from the linear plane. The linear plane only has one level and two slopes, as seen in (12). There is no unique way to distribute these quantities on the three time effects. Various approaches have been suggested in the literature. Typically, these approaches have two steps, where the levels are identified first and then the linear slopes are identified. This makes a formal analysis complicated; see Nielsen and Nielsen (2014).

### Restrictions on Levels

There are two main approaches to identifying the level: restricting particular coordinates of the time effects or restricting the average level of the time effects. Neither approach is invariant to the transformations in (10).

Restricting coordinates of the time effects. A common restriction is to set individual coordinates of the time effects to zero as in (4) and (5). Ejrnæs and Hochguertel (2013) provided an example. In practice this works by first including a full set of APC dummies and then dropping the dummies where it is intended that time effects be set to zero. Such restrictions are not invariant to the transformations in (10). Indeed, the requirement $α1=0$ is violated when adding some non-zero number $a$ to $α1$. With this approach it is possible to ensure comparability between estimates for subsamples as long as exactly the same restriction is imposed.

Restricting the average levels. A common restriction is to set the average of the time effects to zero so that $(1/A)∑_{age=1}^{A} αage = (1/P)∑_{per=L+1}^{L+P} βper = (1/C)∑_{coh=1}^{C} γcoh = 0$. The level of the model is then picked up by the intercept $δ$ in (1). Examples are found in Deaton and Paxson (1994a) and Schulhofer-Wohl (2018). A feature of this type of restriction is that the unidentified levels and slopes are orthogonalized, but this comes at the cost of making the scale of the time effects dependent on the dimensions of the index array (7). The zero average restriction is not invariant to the transformations in (10). Indeed, increasing all age effects by some non-zero number $a$ violates the restriction.

Figures 4 and 5 apply this restriction to the plane (14) and demonstrate, through a subsample argument, that the restriction is specific to the index array. AC index arrays are chosen so that Figure 4 has $A=C=10$ while Figure 5 has $A=C=5$. In both figures the average level is set to zero while the period slope is set to zero, as in Deaton and Paxson (1994a). Note that the absolute ranges for age (29) and cohort (10) are the same as in Figure 2. The intercepts are very different, with $δ=19$ and $δ=9$, respectively. Further, the time effects are not comparable: for instance, $α5.5=0$ in Figure 4, whereas $α3=0$ in Figure 5. Arguing ad absurdum, the subsample analysis implies that, by varying the data array while keeping the zero level constraint, the time effects must be zero.

Figure 4. Time effect slopes under average level identification for an AC array with $A=C=10$.

Figure 5. Time effect slopes under average level identification for an AC array with $A=C=5$.

The APC slopes are the same in Figures 4 and 5. This is not a general feature of the zero average restriction but a consequence of working with a linear plane predictor of the form (14).
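The array dependence can be replicated in a few lines. The sketch below applies the zero-average, zero-period-slope identification to a pure linear plane with age slope 3 and cohort slope 1, the slopes shown in Figures 4 and 5; the exact form of (14) is not reproduced here, so the plane is an assumption of this sketch. The identified age effect crosses zero at age 5.5 on the 10×10 array and at age 3 on the 5×5 array, exactly as in the figures.

```python
import numpy as np

def identified_age_effect(A, C, b_age=3.0, b_coh=1.0):
    # Zero-average identification of the age effect on an A x C AC array,
    # with the level picked up by the intercept, for an assumed pure linear
    # plane mu = b_age * age + b_coh * coh.
    age = np.arange(1, A + 1)[:, None]
    coh = np.arange(1, C + 1)[None, :]
    mu = b_age * age + b_coh * coh
    return mu.mean(axis=1) - mu.mean()      # zero-mean age effect

print(identified_age_effect(10, 10))  # 3*(age - 5.5): crosses zero at age 5.5
print(identified_age_effect(5, 5))    # 3*(age - 3): crosses zero at age 3
```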
To illustrate this point, introduce a non-linear effect into (14) to get

$Display mathematics$
(23)

On the smaller AC array with $A=C=5$ this reduces to the linear plane in (14), so that for zero average levels and a zero period slope Figure 5 emerges. On the larger AC array with $A=C=10$ the non-linearity matters. Keeping the zero average level constraint and setting the period slope to zero through $∑_{per=1}^{19} per×βper=0$ results in Figure 6. Comparing Figures 5 and 6, it is seen that all slopes are different. The age slopes are 3 and 3.02, respectively, and the cohort slopes are 1 and 1.02, respectively. The period slopes for $per≤9$ are zero and –0.08, respectively.

Figure 6. Time effect slopes for (23) under average level identification and the slope constraint $∑_{per=1}^{19} per×βper=0$ for an AC array with $A=C=10$.

### Restrictions on Slopes

Once the level is attributed between the time effects and the intercept, the slopes have to be restricted. This approach necessarily binds the slopes of the three time effects together. Graphically, this can have dramatic consequences, as seen in Figures 2 and 3.

Restricting a pair of adjacent time effects. The slope can be identified by restricting a pair of adjacent time effects to be equal. An example would be to let $β1=β2$ as in (4). Fienberg and Mason (1979) proposed this method combined with a zero average restriction. This restriction is not invariant to (10). Indeed, adding a linear trend with non-zero slope $d$ to the age effect violates the restriction.

Orthogonalizing a time effect with respect to a time trend. Under this approach, one of the time effects is pinned down by orthogonalization with respect to a time trend so as to constrain the slope to be zero. An example would be to require that $∑_{per} per×βper=0$. Deaton and Paxson (1994a) applied this approach in conjunction with an average restriction on the level of the period effect and zero restrictions on the first coordinates of the age and cohort effects. The lack of invariance is commented upon in the section “Restrictions on Levels” with respect to Figure 6.

### The Intrinsic Estimator

The intrinsic estimator is a common but controversial estimator. It was proposed by Kupper et al. (1985) and is called the “intrinsic estimator” by Yang et al. (2004); see also the monographs of Yang and Land (2013) and Fu (2018).

The idea is that the identification problem can be thought of as a collinearity problem that can be addressed using generalized inverses. This is implemented as follows. First, a design matrix $D$ with a full set of APC dummies is created. Zero average constraints are imposed, implemented by dropping three columns of $D$ through a selection matrix $S$. This leaves the selected design matrix $DS$ with a rank deficiency of one. The time effects are then estimated by least squares while applying a Moore-Penrose generalized inverse for $S′D′DS$. The intrinsic estimator has been criticized by Holford (1985), O’Brien (2011), and Luo (2013). The identification is achieved by restriction through the choices of the level restriction, the selection matrix $S$, and the choice of generalized inverse; see Nielsen and Nielsen (2014, theorem 8) for further analysis.

### Sequential Restrictions

A common approach is to display sets of APC time effects identified by different restrictions in a single figure (Carstensen, 2007; Smith & Wakefield, 2016). Figure 7 illustrates this approach for the linear plane specified in (14).
In all cases the average level is restricted to zero. This gives an intercept of 19, which is not represented. The slopes are identified in three different ways, setting, respectively, the age, period, and cohort slope to zero. This is shown with different line types and colors. The figure illustrates how the time effects move together when applying identification by restriction. It is clear that time effects identified this way must be interpreted jointly. This is the same point made with Figures 2 and 3.

In the presence of non-linear effects, one can construct a plot similar to Figure 7 using a sequence of restrictions. Smith and Wakefield (2016) suggested using $C−1$ restrictions, setting $γcoh=γcoh+1$ for $coh=1,…,C−1$, and provide an empirical illustration in their Figure 3. Again, such a plot illustrates how the restricted time effects twist and turn together under different restrictions. Another approach is to impose a level and a slope restriction on each plot, thereby allowing separate interpretation of each plot. This is discussed in the section “Interpretation of Time Effects” and can be seen in Figure 10 in the context of the empirical illustration with employment data.

Figure 7. Time effect slopes for (14) with zero average level and different slope constraints: zero age slope (dash, blue), zero period slope (solid, black), zero cohort slope (dash-dot, red).

## Forgoing APC Models

Some researchers take the position that since formal modeling of the linear time effects is plagued by problems of identification, the attempt to construct a statistical model that allows for all three of age, period, and cohort effects should be abandoned. Two approaches are followed: either to use a combination of graphs and discipline-specific knowledge to build a story about the time effects, or to replace the time effects with other explanatory variables.

### Graphical Analysis

Most research involving APC effects will include some preliminary graphical analysis of the data by age, by period, and by cohort. For instance, Carstensen (2007) used an initial graphical analysis to determine whether an age-period or an age-cohort model is better suited to the data. Where there are parallel trends in line plots of log rates by age, connected within period, and of log rates by period within age, this is indicative of proportional rates between periods (i.e., an age-period model). If the parallel trends instead appear in plots of log rates by age within cohort and by cohort within age, an age-cohort model should be used.

Carstensen uses graphical analysis as a first step toward selection of an appropriate statistical model, but some researchers believe that due to the identification problem there is little to gain by going beyond the graphical analysis. Kupper et al. (1985) were early proponents of this view. A clear articulation of the position, and an illustration of how conclusions might be drawn from graphs, can be found in Voas and Chaves (2016). Their Figure 2 shows trends in religious affiliation against time, which can be read as age or period, for several British cohorts. The lines are broadly parallel and horizontal, with the line for each cohort successively lower than the next. They argue that such a graph could be generated by only two models: either a model containing only cohort effects, or a model with perfectly balanced age and period effects. Since the latter is implausible, they conclude that the data must have been generated by the former.
Meghir and Whitehouse (1996) also used this sort of graphical analysis in their study of wage trends.

The graphical approach can be helpful when the common features and the appropriate interpretation of them are clear, as they are in Voas and Chaves (2016). However, without parallel trends it is difficult to draw inferences, and of course there is no scope for formal testing.

### Alternative Variables

Another way of side-stepping the APC identification problem, advocated by Heckman and Robb (1985), is to reconceptualize the model. They argue that researchers are rarely interested in pure APC time effects; rather, these variables are “proxies” for the true “latent” variables of interest. Their solution is to replace one or all of age, period, and cohort with a latent variable. For example, they suggest using a physiological measure of aging in place of age and indicators reflecting macroeconomic conditions in place of period in a model for earnings.

An example of this approach is the model of life cycle demand for consumer durables in Browning et al. (2016). The idea is to retain age and cohort time effects but replace the period time effect with a measure of the user cost of durables. This gives a submodel of the APC model, which is analyzed in the section “Submodels” below. As such, it is a testable restriction on the APC model. The linear period effect remains unidentifiable but is present in part as an unidentified contributor to the linear plane generated by the age and cohort time effects and in part as the linear component of the observed period variable.

## Bayesian Methods

In terms of identification the issues are by and large the same for Bayesian methods as for frequentist methods. The Bayesian analysis can be done either using identification by restriction, as outlined in the section “Identification by Restriction,” or using an invariant parametrization, as outlined in the section “Invariant Parametrization.”

### Bayesian Identification by Restriction

The linear parts of the time effects can only be identified by restriction. Within the Bayesian framework this corresponds to forming priors on parameters that are not updated by the likelihood. Bayesian models are set up as follows. The likelihood is denoted $p(Y|θ)$, where $θ$ is the $q$-vector of time effects in (9) and $Y$ is the data. The prior is $p(θ)$. Decompose $θ=(ξ,λ)$, where $ξ$ is the $p$-dimensional invariant parameter in (18) or (20) and where $λ$ is of dimension $q−p=4$ and represents the unidentifiable part of $θ$. Thus, the likelihood satisfies $p(Y|θ)=p(Y|ξ)$. Now, decompose the prior as $p(ξ,λ)=p(ξ)p(λ|ξ)$, so that $p(ξ)$ is the prior for the identifiable parameter and $p(λ|ξ)$ is the conditional prior for the unidentified parameter given the identified parameter. Finally, the posterior distribution decomposes as $p(θ|Y)=p(ξ|Y)p(λ|ξ,Y)$ so, by Proposition 2 of Poirier (1998),

$p(λ|ξ,Y) = p(λ|ξ).$
(24)

This shows that the likelihood updates the invariant parameter $ξ$ but cannot update any prior information about the unidentified parameter $λ$ given $ξ$. Just as in the frequentist world, it is advisable to focus the analysis on the invariant parameter $ξ$. Including a prior on the unidentifiable $λ$ is, in principle, not a problem as long as one is aware of the fact that $p(λ|ξ)$ cannot be updated by the likelihood. However, confusion over what is learned from data and what is assumed easily arises when working with the posterior $p(θ|Y)$.
This avoidable problem becomes worse when forecasting, since forecasts, unlike in-sample predictors, tend to depend on the non-updatable prior $p(λ|ξ)$; see Nielsen and Nielsen (2014).

### The Bayesian Double-Difference Model

A popular Bayesian approach was suggested by Berzuini and Clayton (1994). The prior of this model assumes that the APC double differences are independent normal, while the APC levels and slopes are assumed to be uniform. That is, for an AC index array,

$Δ2αage ∼ N(0,σα2)$ for $3≤age≤A$, $Δ2βper ∼ N(0,σβ2)$ for $3≤per≤P$, $Δ2γcoh ∼ N(0,σγ2)$ for $3≤coh≤C$,
(25)

$α1, α2, β1, β2, γ1, γ2$ each uniformly distributed,
(26)

while $δ=0$. Here, $ψ=(σα2,σβ2,σγ2)$ are hyper-parameters that are assumed independent with $χ2$-type priors, while the ranges for the uniform distributions are non-random. All variables listed are independent. From the levels and slopes in (26) the identifiable plane is given in terms of $μ11=α1+β1+γ1$ and the slopes $μ21−μ11=α2−α1+β2−β1$ and $μ12−μ11=β2−β1+γ2−γ1$. The intercept $μ11$ and the two slopes $μ21−μ11$ and $μ12−μ11$ are identifiable. Together with the double differences in (25), they constitute the invariant parameter $ξ$. In other words, this identifies a three-dimensional combination of the six time effects in (26). This leaves a three-dimensional part of (26) that is unidentifiable. The unidentifiable part could be represented as, for instance, $α1,α2,β1$. Those three time effects, together with the hyper-parameters $ψ$, constitute the unidentifiable parameter $λ$ in the notation of (24). Here the conditional prior $p(λ|ξ)$ is rather complicated and not updated by the likelihood.

Berzuini and Clayton (1994) applied their model to a set of aggregate data for lung cancer mortality in Italian males. The data is an AP data set grouped in five-year intervals for those aged 15–79 and periods 1944–1988. The model is used to provide distribution forecasts for the periods 1989–1993 and 1994–1998. The forecast theory described above shows that the forecasts depend on the choice of the conditional prior $p(λ|ξ)$, which is not updated by the likelihood and is a rather complicated function of the above assumptions.

Further Bayesian models of this type have been explored in the epidemiological literature. Software implementations have been provided with the R packages BAMP (Schmid & Held, 2007) and BAPC (Riebler & Held, 2017). The assumption of independent normal double differences results in a cumulated random walk for the time effects and is denoted the RW2 model. Assuming instead that the first differences are independent normal gives a random walk model, denoted RW1. Smith and Wakefield (2016) gave a more detailed overview of these approaches.

### A Bayesian Double-Difference Model Using the Invariant Parametrization

Smith and Wakefield (2016) have addressed the lack of invariance in the Berzuini and Clayton (1994) model. The idea is to choose a prior where the double differences are independent normal as in (25), but only give uniform priors to three anchoring points such as $μ11,μ21,μ12$, rather than to the six level and slope effects in (26). Thus, the unidentifiable parameter is just the hyper-parameter, so that $λ=ψ$. The dependence structure is simpler in this model, and the problems stemming from the APC identification issues are addressed.

Some unresolved problems remain. As in any Bayesian model with hyper-parameters, the conditional prior $p(λ|ξ)$=$p(ψ|ξ)$ has a complicated expression and is not updated by the likelihood. Forecasts will depend on the choice of prior on the hyper-parameters.
Further, as remarked by Smith and Wakefield (2016), the anchoring points can be chosen in arbitrary ways, which would result in different priors. Finally, the prior depends on the choice of coordinate system, which is not ideal.

## Concluding Remarks on the Identification Problem

To summarize, the identification problem is that the linear parts of the time effects cannot be identified because of the transformations in (10). Instead, what can be identified are the non-linear parts of the time effects and a linear plane for the predictor that combines the linear parts of the time effects. In practice these non-linear and linear features must be kept apart. The approach of identification by restriction does not achieve this, as demonstrated in Figures 2 through 6. It creates problems with interpretation, formulation of hypotheses, and counts of degrees of freedom. In contrast, the canonical parametrization using $ξ$ keeps non-linear and linear features apart, and it is therefore suitable for estimation, formulation of hypotheses, and counts of degrees of freedom. The interpretation of the APC model and its elements is addressed under “Interpretation of Estimated Effects.”

## Interpretation of Estimated Effects

It is generally understood that to achieve meaningful interpretation of the time effects, the non-linear and linear features of the APC model must be kept apart. The canonical parametrization (20) combines the linear features in a single, common linear plane and records the non-linear features as double differences. The representation (20) is therefore well suited for estimation and statistical inference. In terms of interpretation two issues remain: how to interpret double differences of the time effects directly and whether any interpretation in terms of the original time effects in (1) is feasible.

## Interpretation of Double Differences of Time Effects

The double differences have an odds-ratio or difference-in-difference interpretation. A double difference in age is defined by

$Δ2αage = (μage,coh − μage−1,coh) − (μage−1,coh+1 − μage−2,coh+1).$
(27)

As a numerical example, let $age=18$ and $coh=2001$. Then the first two terms in (27) give the effect of aging from 17 to 18 for the 2001 cohort, while the last two terms give the effect of aging from 16 to 17 for the 2002 cohort. Both of these effects happen over the period 2017 to 2018, with the time convention in (6). Indeed, writing (27) in AP coordinates gives

$Δ2αage = (μage,per − μage−1,per−1) − (μage−1,per − μage−2,per−1).$
(28)

On the right-hand sides of (27) and (28) any pair of consecutive cohorts or periods, respectively, could be used. Thus $Δ2αage$ equals the average difference-in-difference effect over all cohorts or periods. For binary outcomes the double difference $Δ2αage$ has a log odds interpretation.

Figure 8. Illustration of double differences. Solid/open circles represent predictors taken with positive/negative sign.

In the same vein, the period and cohort double differences are interpretable through

$Δ2βper = (μage,coh − μage,coh−1) − (μage−1,coh − μage−1,coh−1)$, where $per = age + coh − 1$,
(29)

$Δ2γcoh = (μage,coh − μage+1,coh−1) − (μage,coh−1 − μage+1,coh−2).$
(30)

The equations (27), (29), (30) are illustrated with Figure 8, which is a modification of a figure in Martínez Miranda, Nielsen, and Nielsen (2015). A major advantage of the double differences is their invariance, as explored in the section “What to Look For in a Good Approach.” However, estimated double differences will inevitably be somewhat erratic. Therefore, it is often desirable for interpretation to generate a representation of the time effects by double cumulating the double differences.
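The mechanics of double differencing, double cumulating, and the start-and-end-in-zero detrending discussed below can be sketched in a few lines. The sketch assumes a quadratic age effect, so its double differences are constant at $2αq$, consistent with the constant-double-difference hypothesis (32) below; the function names are invented for this sketch.

```python
import numpy as np

def double_difference(effect):
    # DD(s) = effect[s] - 2*effect[s-1] + effect[s-2] for s = 3, ..., A.
    return effect[2:] - 2 * effect[1:-1] + effect[:-2]

def cumulate_and_detrend(dd):
    # Double cumulate the DDs, anchoring the first two values at zero, then
    # detrend so that the first and last values are zero (Nielsen, 2015):
    # detrended = cumulated - a - d*age with a = -d, d = cumulated[A]/(A - 1).
    cum = np.concatenate(([0.0, 0.0], np.cumsum(np.cumsum(dd))))
    n = len(cum)
    d = cum[-1] / (n - 1)
    return cum + d - d * np.arange(1, n + 1)

# A quadratic age effect with alpha_q = 0.2: DDs are constant at 2*alpha_q.
age = np.arange(1, 11)
alpha = 5.0 - 1.0 * age + 0.2 * age ** 2
dd = double_difference(alpha)
print(np.round(dd, 10))                     # all entries equal 0.4
detrended = cumulate_and_detrend(dd)
print(np.round(detrended[[0, -1]], 10))     # both endpoints anchored at zero
```

Note how the linear part of the effect is absorbed by the detrending, leaving only the non-linear shape.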
Plots of the double cumulated double differences could inspire the formulation of restrictions such as a quadratic or otherwise concave time effect, which in turn implies a smooth restriction on the double differences. Smoothing of the double differences can also be achieved by the Bayesian RW2 method; see Smith and Wakefield (2016, Figure 7).

## Interpretation of Time Effects

The original time effects are not fully identifiable and thus not fully interpretable. Yet, the APC model (1) is composed of the time effects, so it remains of interest to seek to interpret them as far as possible. Since the non-linear parts of the time effects are identifiable, the focus should be on illustrating these.

In the representations (17) and (20) the double differences are double cumulated with respect to the plane anchored at $μU,U$, $μU,U+1$, and $μU+1,U$, where $U=1$ for an AC array. This representation is useful for estimation as it immediately leads to design vectors as in (19) and (21). However, the cumulations of the double differences are not ideally suited for graphical representation of the non-linear time effects. On the one hand, it is easy to see that these double sums have the same degrees of freedom as the double differences and are disentangled, in contrast to the time effects identified by restriction. On the other hand, they will often be strongly trending in practice, which does not allow for an easy interpretation. The latter issue can be addressed through detrending.

The double sums of double differences can be detrended in various ways. One approach would be to orthogonalize each of the three sets of double sums with respect to an intercept and a time trend. This is in the spirit of the approach of Deaton and Paxson (1994a), with the difference that the orthogonalization is applied to each of the three double sums, so that the time trends are disentangled. A drawback of this approach is that it is no longer evident that the degrees of freedom are the same as for the double differences.

Another approach to detrending is to impose that the double sums start and end in zero (Nielsen, 2015). Writing $αageΣΣΔΔ$ for the double sums and defining $αagedetrend = αageΣΣΔΔ − a − d×age$, this entails the choices $a=−d$ and $d=αAΣΣΔΔ/(A−1)$, so that $α1detrend=αAdetrend=0$. With this approach it is apparent that the degrees of freedom are the same as for the double differences. The graph of $αagedetrend$ visually emphasizes the non-linearity, as the start and end points are anchored at zero. At the same time, the detrending clearly depends on the particular index array with its particular choice of minimal and maximal age. From the graph it may be possible to identify a U- or S-shaped curve, which can be tested for consistency with a quadratic or higher-order polynomial.

# Submodels

A common empirical question is whether all components of the APC model are needed. Such restrictions can typically be tested using likelihood ratio tests or deviance tests. For this purpose, a test statistic, a degrees of freedom calculation, and critical values are needed. The test statistic can be computed using identification by restriction or an invariant parametrization, as all approaches result in the same in-sample predictors. The calculation of degrees of freedom can sometimes be difficult when using the time effect formulation (1). Instead, the restrictions and the associated degrees of freedom are more easily appreciated when using the canonical parametrization and the associated canonical parameter $ξ$ in (20).
The calculation of critical values requires the formulation of a statistical model. In the following, the focus will be on the interpretation of the models and the calculation of degrees of freedom.

## Age-Cohort Models

The hypothesis of no period effect illustrates the identification issues very well. The hypothesis results in age-cohort (AC) models, which are commonly used in economics; see for instance Browning et al. (1985), Attanasio (1998), Deaton and Paxson (2000), and Browning et al. (2016). AC models can arise through reduction of the general APC model, or they may be postulated at the outset. From the perspective of the time effect formulation (1) the hypothesis is that $βL+1=⋯=βL+P=0$. This leaves the model (1) as an age-cohort model of the form

$μage,coh = αage + γcoh + δ.$
(31)

This formulation gives the impression of a P-dimensional restriction. However, it is in fact observationally equivalent to imposing a hypothesis of no non-linear effect in the period. Under the canonical parametrization this is $Δ2βL+3=⋯=Δ2βL+P=0$, which is a restriction of dimension $P−2$. Nielsen and Nielsen (2014, section 5.3) presented a formal algebraic analysis of the relation between restrictions on time effects and restrictions on double differences. The intuition is that because the period effect is only identified up to a linear trend, imposing the hypothesis $βL+1=⋯=βL+P=0$ in (1) does not actually restrict the common linear plane at all. Any linear effect of period will still be present in the restricted model (31).

The feature that the linear time effects are not identifiable from the AC model is perhaps best understood in the special case where all time effects are linear as in (11). It is explained in “Illustration in a Simple Case: The Linear Plane Model” that (11) can be written equivalently as a combination of APC, AC, AP, or PC effects. The model (31) is analogous to the model (12). At first glance it may appear natural to attribute the linear plane in (12) to age and cohort effects, but in fact the linear effect of period is not constrained. Rather, it is absorbed into the slopes in the age and cohort dimensions, with $μ21−μ11=Δα2+Δβ2$ and $μ12−μ11=Δβ2+Δγ2$.

## Linear Submodels

Apart from the AC model, there are many other submodels of the APC model. Table 1 gives a range of submodels that may be of interest. It is taken from Nielsen (2015), with similar tables appearing in Holford (1983) and Oh and Holford (2015). The first model, denoted APC, is the unrestricted APC model.

Restricting one set of double differences. The three models AP, AC, and PC each have one set of double differences, or non-linearities, eliminated: the cohort, period, and age double differences, respectively. The remarks pertaining to the AC model in the section “Age-Cohort Models” apply to any of the three models.

Restricting two sets of double differences. The three models Ad, Pd, and Cd are known as drift models. For instance, the age-drift model has both period and cohort double differences eliminated, so that $Δ2βL+3=⋯=Δ2βL+P=0$ and $Δ2γ3=⋯=Δ2γC=0$, while the linear plane is unrestricted. The identification problem remains, as pointed out by Clayton and Schifflers (1987b), because the linear plane can be parametrized either in terms of age and cohort linear trends or in terms of age and period linear trends.

Restricting two sets of double differences and the linear plane. The three models A, P, and C are the first to include restrictions on the linear plane.
For instance, in the A model the period and cohort double differences are eliminated, and the linear plane is restricted to just one slope, in age. Consequently, the A model can be written as $μage,coh=αage$.

Linear plane model. This model arises when all non-linear effects are absent. In this case $Δ2α3=⋯=Δ2αA=0$, $Δ2βL+3=⋯=Δ2βL+P=0$, and $Δ2γ3=⋯=Δ2γC=0$. This is the model seen in the section “Illustration in a Simple Case: The Linear Plane Model.”

Table 1. Submodels With Degrees of Freedom

| Model | Linear plane | $Δ2αage$ | $Δ2βper$ | $Δ2γcoh$ | Total |
| --- | --- | --- | --- | --- | --- |
| APC | 3 | A–2 | P–2 | C–2 | A+P+C–3 |
| AP | 3 | A–2 | P–2 | — | A+P–1 |
| AC | 3 | A–2 | — | C–2 | A+C–1 |
| PC | 3 | — | P–2 | C–2 | P+C–1 |
| A-drift | 3 | A–2 | — | — | A+1 |
| P-drift | 3 | — | P–2 | — | P+1 |
| C-drift | 3 | — | — | C–2 | C+1 |
| A | 2 | A–2 | — | — | A |
| P | 2 | — | P–2 | — | P |
| C | 2 | — | — | C–2 | C |
| Linear plane | 3 | — | — | — | 3 |

## Functional Form Submodels

Another set of submodels arises by imposing a specific functional form on the time effects.

Quadratic polynomials. The age effect, in particular, often has a concave or convex appearance. In that case the age effect may be described parsimoniously by a quadratic polynomial. The hypotheses of a quadratic age effect, $αage=αc+αℓ×age+αq×age2$ as in (2), and of constant double differences,

$Δ2α3 = ⋯ = Δ2αA = 2αq,$
(32)

are equivalent, since the linear trends are not identified. Thus, the hypothesis can be imposed as a linear restriction on the canonical parameter. The degrees of freedom are $A−3$. Similarly, restricting a time effect to be a polynomial of order $k$ is equivalent to restricting the corresponding double differences to be a polynomial of order $k−2$. For instance, a slightly skew concave or an S-shaped appearance could potentially be captured by a third-order polynomial in the time effects, or equivalently a first-order polynomial in the double differences.

A more elaborate quadratic model. Suppose now that all three time effects are quadratic, so that equation (1) becomes

$μage,coh = αc + αℓ×age + αq×age2 + βc + βℓ×per + βq×per2 + γc + γℓ×coh + γq×coh2 + δ.$
(33)

The identifiable non-linear parameters are $αq$, $βq$, $γq$, while the remaining parameters combine to a linear plane as in (11). A submodel is the quadratic AC model

$μage,coh = αc + αℓ×age + αq×age2 + γc + γℓ×coh + γq×coh2 + δ,$
(34)

which is a special case of (31). The linear parts $αc+αℓ×age$, $γc+γℓ×coh$, and $δ$ combine to a linear plane and the identification problem remains. Only the absence of $βq$ is an over-identifying constraint. Thus, a test of (34) against (33) would have one degree of freedom.

Replacing a time effect by an observed variable. It is often of interest to replace the period effect, in particular, with an observed time series, $Tper$ say. The time series $Tper$ decomposes into a linear part and a non-linear part. Thus, in the context of an APC model, imposing $βper=Tper$ for $1≤per≤P$ is equivalent to imposing $Δ2βper=Δ2Tper$ for $3≤per≤P$. This restriction has $P−3$ degrees of freedom. Since there is already a linear plane in the model, the linear effect of $Tper$ remains unidentified.
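Degrees of freedom for tests between nested submodels follow directly from Table 1 by differencing the parameter counts. The sketch below tabulates those counts and computes a chi-square critical value for a likelihood-ratio test; it is a plain illustration of the arithmetic, with dimensions matching the employment data used later, not code from any of the packages discussed below.

```python
from scipy import stats

def df_model(design, A, P, C):
    # Parameter counts from Table 1; "Ad" etc. denote the drift models.
    dd_a, dd_p, dd_c = A - 2, P - 2, C - 2
    counts = {
        "APC": 3 + dd_a + dd_p + dd_c,
        "AP": 3 + dd_a + dd_p, "AC": 3 + dd_a + dd_c, "PC": 3 + dd_p + dd_c,
        "Ad": 3 + dd_a, "Pd": 3 + dd_p, "Cd": 3 + dd_c,
        "A": 2 + dd_a, "P": 2 + dd_p, "C": 2 + dd_c,
        "plane": 3,
    }
    return counts[design]

# Likelihood-ratio test of AC against APC on an AP array with A = 11, P = 12
# (so C = 22, as in the employment data below): the test has P - 2 = 10
# degrees of freedom, and the deviance drop is compared to a chi-square.
A, P = 11, 12
C = A + P - 1
df_test = df_model("APC", A, P, C) - df_model("AC", A, P, C)
print(df_test, stats.chi2.ppf(0.95, df_test))
```

# When to Use APC Models

It is important to recognize that no APC identification strategy can “solve” the identification problem. The identification problem still limits the range of questions that can be answered using formal statistical analysis.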
The following sections explain the questions that can and cannot be answered with APC models, given that the non-linear parts of the time effects are identified but the linear parts are not.

## Questions That Can Be Answered

The questions that APC models can answer fall into the following categories: certain difference-in-difference questions; questions related to the non-linear effects of age, period, or cohort; exploratory analysis; forecasting; and questions where APC effects appear in the model as control variables.

Difference-in-difference analysis can be done using the APC model. For example, McKenzie (2006) used data from the Mexican ENIGH household survey, collected at two-year intervals, to investigate the effect of the 1995 peso crisis on consumption. He compares the change in consumption from 1994 to 1996 with the change in consumption from 1992 to 1994, and with that from 1996 to 1998. This is equivalent to tests on the parameters $Δ2β1996$ and $Δ2β1998$.

Non-linearities implied by economic theory can be investigated with APC models. For example, the life-cycle hypothesis of consumption implies decelerating saving in old age, which is a testable non-linearity in the age effect. An analysis could start by first estimating an APC model for the stock of savings and then isolating the age non-linearity from the linear plane and testing it for significance. If significant, the shape could be inspected for consistency with the life-cycle hypothesis either through visual inspection or through a formal test: for instance, for a concave, quadratic age effect as in (32).

Exploratory analysis. APC models are well suited to exploratory analysis. Diouf et al. (2010) conducted such an analysis of the dynamics of the obesity epidemic in France from 1997 to 2006. They found significant curvature in the cohort dimension, with deceleration among those who were children during World War II and acceleration post-1960s, but there was little evidence for non-linearities in either age or period. These findings correspond to a cohort-drift model (see Table 1) and are interpreted as evidence that early life conditions are important determinants of obesity.

Forecasting. APC models are effective forecasting tools. Suppose an APC model has been fitted to data with index set $J$ of the form (7). Forecasting for some index values $age,coh$ outside $J$ requires the evaluation of the linear predictor $μage,coh$, which in turn requires extrapolation of one or more of the estimated time effects. This extrapolation is often done using a time series model.

In general, forecasts will depend on the identification of the linear trends. This problem can be avoided by choosing extrapolation methods that carry linear trends forward in a linear way. Kuang et al. (2008b) characterized this problem and gave suggestions for invariant extrapolation methods. These include a linear trend model, a stationary autoregression with a linear trend, an autoregression for first differences with an intercept, and an autoregression for second differences. Following the theory for econometric forecasting of non-stationary time series (see Clements & Hendry, 1999), the methods based on models for first or second differences have an advantage when there are structural breaks at the end of the sample. An application to general insurance is given by Kuang et al. (2011).
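To see why linear extrapolation rules are invariant, note that the unidentified transformation in (10) adds $a+d×per$ to the period effect in-sample; a rule that continues linear trends linearly carries exactly the same adjustment into the forecast period, where it cancels in the combined predictor just as it does in-sample. The sketch below illustrates this for the simplest rule, extrapolating the double differences by zero; the numbers are invented for the illustration.

```python
import numpy as np

def forecast_linear(beta, h):
    # Continue beta linearly for h steps; equivalent to extrapolating its
    # double differences by zero, one of the invariant rules discussed in
    # Kuang et al. (2008b). A sketch, not their code.
    slope = beta[-1] - beta[-2]
    return beta[-1] + slope * np.arange(1, h + 1)

beta = np.array([0.1, 0.3, 0.2, 0.5, 0.9])   # period effect for per = 1..5
per = np.arange(1, 6)
# Apply the unidentified transformation beta -> beta + a + d*per of (10):
a, d = 2.0, 0.5
beta_star = beta + a + d * per
f, f_star = forecast_linear(beta, 3), forecast_linear(beta_star, 3)
# The forecast shifts by exactly a + d*per at the future periods 6, 7, 8,
# i.e., the transformation is carried forward linearly and so cancels in
# the combined predictor:
print(f_star - f)                             # [5.  5.5 6. ] = a + d*(6, 7, 8)
```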
Extrapolation can be avoided altogether if an AC model is adequate and forecasting is performed only for cohorts already present in the data. This is a possibility for AP data arrays. Mammen, Martínez Miranda, and Nielsen (2015) refer to this as in-sample forecasting. One example is the chain-ladder model used in general insurance (England & Verrall, 2002), with distribution forecasts by bootstrap (England, 2002) or by asymptotic theory (Harnau & Nielsen, 2017). Another example is the forecast of future rates of mesothelioma, a cancer resulting from exposure to asbestos, in Martínez Miranda et al. (2015, 2016).

Questions that do not involve time effects. Often, a researcher is interested in the effect of some policy intervention or treatment but is concerned about possible confounding with pure time effects; in this case, the APC model is included as a statistical control. For example, Ejrnæs and Hochguertel (2013) are interested in the effect of a change to unemployment insurance in Denmark on employment and use a model incorporating APC effects identified by restriction to ensure that their results are not contaminated by pure time effects.

There are many variations and extensions of these question types. One possibility is to include interactions with other covariates; for example, allowing for an interaction between age and level of education in a model for earnings. Another is to use two or more samples and test cross-sample restrictions: for instance, comparing estimated period non-linearities in savings between pairs of countries to assess macroeconomic interdependence. Some extensions are discussed further in the section “Using APC Models.”

## Questions That Cannot Be Answered

Any question relating to the linear parts of any of the time effects is unanswerable. This is true regardless of the nature of the dataset. If the data is a single slice in any one time dimension, it is not possible to separate the effects of the other two. For example, with a cross-section of adults in 2018 it is not possible to determine whether the old have higher savings because savings increase with age or because later cohorts exhibit declining financial responsibility.

Having a repeated cross-section containing data from 2008–2018 does not help. There is now a possible period trend to contend with: savings may be decreasing over the period range due to a rising gap between real wages and the cost of living. An APC model cannot separate these effects, except by imposing a substantive and untestable assumption. More subtly, it is not possible to identify the linear part of the effect in a single time dimension even if the other time dimensions are excluded from the model.

Given this, it is recommended that hypotheses in terms of the linear parts of any of the three time effects be avoided. Instead, it is advised to formulate hypotheses primarily in terms of the non-linear parts of the time effects.

# Using APC Models

This section introduces the reader to the practicalities of APC modeling. The different data contexts in which APC models have been used are described. Possible extensions of the APC models are discussed. Finally, a fully worked example of an APC analysis is provided.

## Data Types

APC models have primarily been used with aggregate or repeated cross-section data. The most commonly used models are least squares, log-linear/Poisson, and logistic/binomial regressions.
These are all examples of generalized linear models (GLMs); the GLM framework was developed by Nelder and Wedderburn (1972), and an introduction can be found in Dobson (1990).\n\n### Aggregate Data\n\nThe simplest form of APC data is a table where each age-cohort combination is a single cell. Information is aggregated over individuals within each cell. The APC literature using this form of data has focused on point estimation and point forecasting. The information recorded in each cell will take one of the following forms:\n\n• Counts of both exposure and outcomes. An example is the size of the labor force and the number of unemployed. This format is common in epidemiology, where exposure is the population size, and the outcome is the number of deaths from a particular disease, such as cancer. Clayton and Schifflers (1987b) provided an overview of the use of APC models for this form of epidemiological data. Such data are analyzed using logistic regression or by log-linear regression with the log exposure as an offset.\n\n• Rates can be calculated from counts of outcomes and exposure. The unemployment rate is a clear example. In demography, fertility and mortality rates are of substantial interest. Rates are often modeled by (log) least squares regression.\n\n• Counts of outcomes without a measure of exposure. While outcomes may be clearly defined, the exposure is sometimes ill-defined or poorly measured. Forecasts of the counts alone may be of interest in this situation. An example from epidemiology is the number of AIDS cases classified by time of diagnosis (cohort) and reporting delay (age), where only an unknown subset of the population is exposed (Davison & Hinkley, 1997, ex. 7.4). Another example is the number of mesothelioma deaths, caused by exposure to asbestos fibers, classified by age and year of death (period). Proxies for exposure may be constructed (Peto et al., 1995), or the counts can be modeled directly using Poisson regression with no offset (Martínez Miranda et al., 2015).\n\n• Values of outcomes without a measure of exposure. An example is the insurance reserving problem, where the data consists of the total value of payments from an insurance portfolio classified by insurance year (cohort) and reporting delay (age). The objective is to forecast unknown liabilities (i.e., incurred but not yet reported). A commonly used modeling approach is the chain ladder (England & Verrall, 2002), which is equivalent to a Poisson regression with an AC predictor.\n\n### Inference for Aggregate Data\n\nFor conducting inference, classical exact normal theory may be applied. Some thought is required concerning the repetitive structure. Two frameworks have been considered for asymptotic analysis: expanding array asymptotics and fixed array asymptotics.\n\nExpanding array asymptotics. Fu and Hall (2006) considered a least squares approach to modeling aggregate values of outcomes. The time effects are identified by restricting averages in each dimension to zero. Consistency is investigated with increasing period dimension. Fu (2016) gave further consistency results for the age effects for the same least squares model and for a Poisson regression with exposure.\n\nFixed array asymptotics. Where the time dimensions are fixed, asymptotic analysis of APC models can be related to the analysis of contingency tables (Agresti, 2013) with the difference that rows and columns are ordered by the APC structure. Tools for inference have been proposed for models without exposure. 
The framework resembles that for inference from contingency tables, where data are independent but not identically distributed because of the APC parametrization. Martínez Miranda et al. (2015) considered a Poisson model for counts. Harnau and Nielsen (2017) provided inference for an over-dispersed Poisson model for values of outcomes using a new central limit theorem for infinitely divisible distributions. The latter theory is aimed at reserving problems in insurance, where the over-dispersion can be large.

Specification tests. For aggregate, discrete data the model fit can be assessed by a deviance test against a saturated model where the cells have unrelated predictors $μage,coh$. Harnau (2018a) suggested a Bartlett test for constant over-dispersion in an over-dispersed Poisson model. Harnau (2018b) suggested an encompassing test comparing over-dispersed Poisson and log-normal specifications.

### Repeated Cross-Sections

Repeated surveys can be used to form repeated cross-section data. A basic regression model would be of the form (3). Ejrnæs and Hochguertel (2013) estimated a model of this form and addressed the identification problem by the restriction method. Yang and Land (2006) proposed a hierarchical APC model where age is quadratic and where cohort and period are treated as random effects. Fannon et al. (2018) proposed models involving the canonical parametrization. This includes a least squares regression as in (3) and a logistic regression of the form

$Display mathematics$
(35)

Asymptotic inference is conducted by allowing the number of individuals in the sample to increase while holding the array fixed. Likelihood ratio tests are used to assess restrictions imposed on the APC model. In both models the fit can be tested by saturating the data array with indicators for each age-cohort cell.

## Extensions

Several extensions to the basic age-period-cohort model have been considered in the literature. These include: models for continuous time data; models with unequal intervals, where the data on each time dimension is recorded at different intervals; a two-sample model; and subsample analysis, to compare estimates from non-overlapping subsamples or from a sequence of expanding subsamples.

### Continuous Time Data

There is a budding literature on non-parametric models for continuous time data. Ogata et al. (2000) developed an empirical Bayes model for the incidence of diabetes. Martínez Miranda et al. (2013) developed a continuous time version of the chain-ladder model. This is extended to in-sample density forecasting methods by Lee et al. (2015) and Mammen et al. (2015).

### Models With Unequal Intervals

The theoretical framework used in this chapter is primarily concerned with data where each time dimension is recorded in the same units. This is often not the case.

Regular intervals. It is common that data are recorded annually but age is grouped at a coarser level; this is seen in the empirical example in this chapter. There are two approaches when working with such data. The first and easy option is to coerce the data into a single-unit framework by grouping periods, either by taking averages or by dropping certain periods. This of course implies a loss of information. The second option is to construct a model allowing for different interval lengths. This may actually create more identification issues, as discussed by Holford (1998).
Holford proposed an approach based on finding the least common multiple of the interval lengths, using this least common multiple to split the data into blocks, and treating within-block micro trends separately from between-block macro trends. Riebler and Held (2010) provided a Bayesian approach to this problem.

Irregular intervals. This can arise with repeated survey data. In some cases, one is interested in an outcome variable that is irregularly recorded; for instance, a variable recorded in 1997, 1999, 2002, 2009, and annually thereafter. One solution is to use a subsample with a single frequency. An alternative possibility may be to use interpolation to regularize the intervals or to use continuous time scales.

### Two-Sample Model

A further extension involves combining data for two samples, for instance women and men, or data from two countries. The model (1) for the predictor then becomes

$μage,coh,s = αage,s + βper,s + γcoh,s + δs,$
(36)

where the index $s$ indicates the sample. Tests could then be performed for common parameters between the two samples, for instance a common period effect such that $βper,1=βper,2$. Riebler and Held (2010) presented a Bayesian estimation method. The identification is discussed further by Nielsen and Nielsen (2014).

### Subsample Analysis

The stability of models can be analyzed by comparing estimators from non-overlapping subsamples of the data array $J$ or from a sequence of expanding subsamples. This idea has been used informally by Martínez Miranda et al. (2015). Harnau (2018a) provided formal tests for common dispersion in subsamples for reserving models. Asymptotically, these tests resemble Bartlett's test.

## Software

Various software packages are available for APC analysis. For R these include Epi (Carstensen et al., 2018) and apc (Nielsen, 2018). BAMP (Schmid & Held, 2007) and BAPC (Riebler & Held, 2017) are available for Bayesian analysis in R. For Stata these include st0245 (Sasieni, 2012), apc (Schulhofer-Wohl & Yang, 2006), and apcd (Chauvel, 2012).

# Empirical Illustration Using U.S. Employment Data

Consider U.S. data for employment for 1960–2015, retrieved from the OECD's online database. Age is recorded in five-year intervals. Data from every fifth year is used to get an AP dataset with base unit five. There are 12 periods and 11 ages, thus 22 cohorts. Table 2 shows the size of the labor force in each age-cohort cell, while Table 3 shows the number of unemployed.

Various questions could be answered with this data. Expected non-linearities could be checked: for example, a U-shape in age, or discontinuities in period consistent with known periods of recession. Difference-in-difference hypotheses could be tested: for instance, was there a significant difference between the increase in unemployment from 2000 to 2005 and that from 2005 to 2010? This could indicate how quickly the effects of the financial crisis were felt in the labor market.
Table 2. U.S. Labor Force in 1000s

| Age | 1960 | 1965 | 1970 | 1975 | 1980 | 1985 | 1990 | 1995 | 2000 | 2005 | 2010 | 2015 |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| 15–19 | 5246 | 6350 | 7249 | 8870 | 9380 | 7901 | 7792 | 7765 | 8271 | 7164 | 5905 | 5700 |
| 20–24 | 7679 | 9301 | 10597 | 13750 | 15922 | 15717 | 14700 | 13687 | 14251 | 15127 | 15028 | 15523 |
| 25–29 | 7186 | 7582 | 9241 | 12698 | 15400 | 17265 | 17677 | 15913 | 15800 | 16049 | 17300 | 17494 |
| 30–34 | 7884 | 7407 | 7795 | 10165 | 13827 | 16285 | 18253 | 18285 | 16955 | 16291 | 16313 | 17153 |
| 35–39 | 8474 | 8341 | 7774 | 8560 | 11161 | 14371 | 16927 | 18633 | 18616 | 17124 | 16271 | 16267 |
| 40–44 | 8173 | 8887 | 8664 | 8343 | 9303 | 11702 | 15218 | 17118 | 18950 | 18905 | 17095 | 16337 |
| 45–49 | 8011 | 8326 | 8980 | 8675 | 8478 | 9270 | 11557 | 14667 | 16907 | 18562 | 18460 | 16640 |
| 50–54 | 6903 | 7520 | 7968 | 8409 | 8433 | 8052 | 8691 | 10555 | 14164 | 15841 | 17500 | 17262 |
| 55–59 | 5464 | 6138 | 6768 | 6866 | 7388 | 7240 | 6902 | 7423 | 9267 | 12289 | 14145 | 15394 |
| 60–64 | 3927 | 4217 | 4515 | 4480 | 4597 | 4751 | 4673 | 4437 | 5090 | 6691 | 9152 | 10559 |
| 65–69 | 1798 | 1794 | 1922 | 1757 | 1828 | 1719 | 2076 | 2123 | 2322 | 2846 | 3796 | 5125 |

Table 3. U.S. Unemployed in 1000s

| Age | 1960 | 1965 | 1970 | 1975 | 1980 | 1985 | 1990 | 1995 | 2000 | 2005 | 2010 | 2015 |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| 15–19 | 711 | 874 | 1105 | 1768 | 1668 | 1467 | 1211 | 1346 | 1082 | 1186 | 1527 | 966 |
| 20–24 | 583 | 557 | 866 | 1864 | 1836 | 1738 | 1299 | 1244 | 1022 | 1335 | 2329 | 1501 |
| 25–29 | 380 | 288 | 427 | 1091 | 1234 | 1299 | 1056 | 916 | 651 | 933 | 1883 | 1057 |
| 30–34 | 372 | 241 | 290 | 685 | 791 | 1043 | 938 | 925 | 556 | 728 | 1501 | 848 |
| 35–39 | 354 | 272 | 250 | 514 | 548 | 769 | 739 | 864 | 582 | 694 | 1320 | 708 |
| 40–44 | 317 | 275 | 265 | 437 | 392 | 572 | 589 | 686 | 550 | 705 | 1383 | 644 |
| 45–49 | 328 | 237 | 261 | 452 | 362 | 448 | 443 | 503 | 422 | 675 | 1441 | 616 |
| 50–54 | 286 | 199 | 214 | 440 | 313 | 364 | 279 | 342 | 340 | 520 | 1328 | 643 |
| 55–59 | 221 | 189 | 197 | 308 | 246 | 327 | 241 | 266 | 220 | 416 | 995 | 576 |
| 60–64 | 174 | 133 | 113 | 212 | 153 | 191 | 145 | 159 | 134 | 214 | 667 | 402 |
| 65–69 | 83 | 68 | 75 | 114 | 66 | 62 | 67 | 91 | 73 | 98 | 286 | 198 |

### Preliminaries

The package apc for R is used (Nielsen, 2015). The first step of the analysis is to visualize the data. Unemployment rates are found by dividing the unemployment numbers in Table 3 by the labor force numbers in Table 2. Line plots of within-period changes in unemployment with respect to age, or of within-cohort evolution of unemployment over time, can be informative; see Figure 9. To aid the visualization, the numbers are averaged over 10- or 20-year groups. The curves in panel (a) correspond to the columns in the AP table for unemployment rates. Panel (b) shows the same columns but plotted against cohort, which is period minus age. In panel (c) the curves correspond to the cohort diagonals in the AP table plotted against age. In panel (d) the rows of the AP table are plotted against cohort. Panel (e) shows these rows plotted against period, and panel (f) shows the cohort diagonals plotted against period.
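As a cross-check outside R, the rates underlying Figure 9 can be formed directly from the tables. The minimal sketch below does this for a small slice of Tables 2 and 3 (two age rows, four periods), purely as an illustration.

```python
import numpy as np

# A slice of Tables 2 and 3: ages 15-19 and 20-24, periods 1960-1975 (1000s).
labor = np.array([[5246., 6350., 7249., 8870.],
                  [7679., 9301., 10597., 13750.]])
unemp = np.array([[711., 874., 1105., 1768.],
                  [583., 557., 866., 1864.]])
rate = unemp / labor              # unemployment rate per age-period cell
print(np.round(rate, 3))          # e.g. 0.136 for ages 15-19 in 1960
```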
These plots were generated using apc.plot.data.within from apc, but similar plots could be generated using rateplot and Aplot from Epi. Applying Carstensen's graphical analysis framework to the plots presented, one can see that there are parallel trends in plot (a) and in plot (e). This suggests that an age-period model may be a good fit to this data.

Figure 9. Plots of unemployment data.

### Model Estimation

To answer the questions proposed above, an econometric model that isolates the identifiable non-linear parts of the time effects from the non-identifiable linear parts is required. A logit model is used where

$logit(πage,coh) = log{πage,coh/(1−πage,coh)} = μage,coh.$
(37)

Here $πage,coh$ is the probability of unemployment for a given age-cohort combination and $μage,coh=ξ′xage,coh$, where $ξ$ and $xage,coh$ are given in (20) and (21). Since the canonical parametrization is identified and embedded in a GLM framework, it can be estimated uniquely.

The individual double differences at this point have a difference-in-difference or log odds interpretation. Where it is of interest to study the general shape of the non-linearities in each time dimension, the double differences may be double cumulated and detrended, following the discussion in the earlier section on “Interpretation of Time Effects.” This fully separates the linear and non-linear parts of the time effects.
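In practice the estimation is a one-line GLM call once the design matrix is built. The sketch below outlines the binomial fit of (37); the design-matrix construction for the AP array with five-year units is not spelled out here (the AC sketch given earlier would need adapting), so `X`, `unemp`, and `labor` are assumed inputs.

```python
import numpy as np
import statsmodels.api as sm

def fit_logit_apc(X, unemp, labor):
    # Binomial GLM fit of (37): X stacks canonical design vectors, one per
    # age-period cell (cf. the AC sketch earlier; the adaptation to the AP
    # array is left hypothetical), while unemp and labor hold the counts of
    # Tables 3 and 2 flattened in the same cell order.
    endog = np.column_stack([unemp, labor - unemp])  # (successes, failures)
    res = sm.GLM(endog, X, family=sm.families.Binomial()).fit()
    return res.params, res.deviance              # xi-hat and model deviance

# Submodels are tested by dropping the corresponding columns of X and
# comparing the increase in deviance to the chi-square critical values
# computed in the degrees-of-freedom sketch above.
```

Figure 10 visualizes the estimated APC model for the U.S. unemployment data using the canonical parametrization and detrending. Panels (a)–(c) show the estimated double differences in each of age, period, and cohort. Panels (d)–(f) show the level and slopes of the linear plane, calculated after the detrending. Panels (g)–(i) show the non-linear parts of the time effects. These are found by double cumulating and detrending the double differences so that the first and last value in each plot is anchored at zero. There is evidence for a U-shaped relationship between age and unemployment. The non-linear parts of the period effect show the discontinuous effects of macroeconomic conditions, with accelerations in unemployment in the early 1970s and late 2000s. There is weak evidence for discontinuities in cohort, which may reflect hysteresis; the cohorts of the late 1950s (who came of age in the 1970s) are relatively underemployed compared to those before and after them.

Figure 10. APC model for U.S. unemployment data in terms of the canonical parametrization.

A Bayesian analysis was also performed using the BAMP package. Using RW1 priors for each of age, period, and cohort, the non-linear parts of the estimated effects were similar to those seen in plots (g) through (i). Intriguingly, the general shape of the results remained the same when the RW1 prior on either age or period was replaced with an RW2, and when both the age and period priors were changed to RW2. However, using RW2 priors on both period and cohort, or on all three series, resulted in over-smoothing.

# Closing Remarks on the Problem of Age-Period-Cohort Identification

The existence of an identification problem between age, period, and cohort is widely recognized by economists. Many papers have grappled with the problem, particularly in the contexts of consumption, savings, and labor market dynamics. The problem is not unique to economics; it is also discussed by sociologists, demographers, political scientists, actuaries, epidemiologists, and statisticians.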
A comprehensive account of the problem therefore requires a survey of a broad literature, much of it outside economics.

The APC identification problem arises due to the identity $age+coh=per+1$, which links the time scales. This article has focused exclusively on the linear APC model, but the problem also arises in the non-linear Lee-Carter model (Lee & Carter, 1992) and in extensions thereof such as Cairns et al. (2009). The main features of the APC identification problem are the following. First, it is a problem affecting the linear parts of the time effects only; the levels and slopes specific to each dimension cannot be identified, whereas higher-order effects can be. Second, a model including only one or two of the three time effects remains afflicted by the problem. Finally, the problem is fundamentally one in continuous time; changing the observation unit for the APC scales will not resolve it.

A range of identification strategies have been proposed to deal with the APC problem, some of which are outlined in this chapter. The key question to ask of any such strategy is: Would a different identification strategy lead to the same conclusions? This is a question of invariance to the transformations in (10). Of the parametrizations discussed in this chapter, only the canonical parametrization is invariant, as it does not attempt the impossible by seeking to separate the linear effects but rather focuses on the identifiable non-linear effects. This brings clarity to interpretation and inference.

# Acknowledgments

Funding was received from ESRC grant ES/J500112/1 (Fannon) and ERC grant 694262, DisCont (Fannon, Nielsen).

# Further Reading

Glenn, N. D. (2005). Cohort analysis (2nd ed.). Quantitative applications in the social sciences (Vol. 5). SAGE.

## Methodological Papers

Berzuini, C., & Clayton, D. (1994). Bayesian analysis of survival on multiple time scales. Statistics in Medicine, 13, 823–838.

Clayton, D., & Schifflers, E. (1987a). Models for temporal variation in cancer rates. I: Age-period and age-cohort models. Statistics in Medicine, 6, 449–467.

Clayton, D., & Schifflers, E. (1987b). Models for temporal variation in cancer rates. II: Age-period-cohort models. Statistics in Medicine, 6, 469–481.

Carstensen, B. (2007). Age-period-cohort models for the Lexis diagram. Statistics in Medicine, 26, 3018–3045.

Glenn, N. D. (1976). Cohort analysts' futile quest: Statistical attempts to separate age, period, and cohort effects. American Sociological Review, 41(5), 900–904.

Holford, T. R. (1983). The estimation of age, period and cohort effects for vital rates. Biometrics, 39, 311–324.

Holford, T. R. (1985). An alternative approach to statistical age-period-cohort analysis. Journal of Chronic Diseases, 38, 831–836.

Kuang, D., Nielsen, B., & Nielsen, J. P. (2008a). Identification of the age-period-cohort model and the extended chain ladder model. Biometrika, 95, 979–986.

Kupper, L. L., Janis, J. M., Karmous, A., & Greenberg, B. G. (1985). Statistical age-period-cohort analysis: A review and critique. Journal of Chronic Diseases, 38, 811–830.

Mason, K. O., Mason, W. M., Winsborough, H. H., & Poole, W. K. (1973). Some methodological issues in cohort analysis of archival data. American Sociological Review, 38, 242–258.

Nielsen, B. (2015). apc: An R package for age-period-cohort analysis. R Journal, 7, 52–64.
APC: An R package for age-period-cohort analysis. R Journal, 7, 52–64.

Oh, C., & Holford, T. R. (2015). Age-period-cohort approaches to back-calculation of cancer incidence rate. Statistics in Medicine, 34, 1953–1964.

Smith, T. R., & Wakefield, J. (2016). A review and comparison of age-period-cohort models for cancer incidence. Statistical Science, 31, 591–610.

## Applied Papers in Economics and Elsewhere

Attanasio, O. P. (1998). Cohort analysis of saving behaviour by U.S. households. Journal of Human Resources, 33, 575–609.

Diouf, I., Charles, M., Ducimetière, P., Basdevant, A., Eschwege, E., & Heude, B. (2010). Evolution of obesity prevalence in France: An age-period-cohort analysis. Epidemiology, 21, 360–365.

Ejrnæs, M., & Hochguertel, S. (2013). Is business failure due to lack of effort? Empirical evidence from a large administrative sample. Economic Journal, 123, 791–830.

Heckman, J., & Robb, R. (1985). Using longitudinal data to estimate age, period and cohort effects in earnings equations. In W. M. Mason & S. E. Fienberg (Eds.), Cohort analysis in social research (pp. 137–150). New York, NY: Springer.

McKenzie, D. J. (2006). Disentangling age, cohort and time effects in the additive model. Oxford Bulletin of Economics and Statistics, 68, 473–495.

Voas, D., & Chaves, M. (2016). Is the United States a counterexample to the secularization thesis? American Journal of Sociology, 121, 1517–1556.

## References

Agresti, A. (2013). Categorical data analysis (3rd ed.). Hoboken, NJ: John Wiley & Sons.

Attanasio, O. P. (1998). Cohort analysis of saving behaviour by U.S. households. Journal of Human Resources, 33, 575–609.

Barndorff-Nielsen, O. E. (1978). Information and exponential families. New York, NY: Wiley.

Beenstock, M., Chiswick, B. R., & Paltiel, A. (2010). Testing the immigrant assimilation hypothesis with longitudinal data. Review of Economics of the Household, 8, 7–27.

Berzuini, C., & Clayton, D. (1994). Bayesian analysis of survival on multiple time scales. Statistics in Medicine, 13, 823–838.

Browning, M., Crossley, T. F., & Lührmann, M. (2016). Durable purchases over the later life cycle. Oxford Bulletin of Economics and Statistics, 78, 145–169.

Browning, M., Deaton, A., & Irish, M. (1985). A profitable approach to labor supply and commodity demands over the life-cycle. Econometrica, 53, 503–544.

Cairns, A. J. G., Blake, D., Dowd, K., Coughlan, G. D., Epstein, D., Ong, A., . . . Balevich, I. (2009). A quantitative comparison of stochastic mortality models using data from England and Wales and the United States. North American Actuarial Journal, 13, 1–35.

Carstensen, B. (2007). Age-period-cohort models for the Lexis diagram. Statistics in Medicine, 26, 3018–3045.

Carstensen, B., Plummer, M., Laara, E., & Hills, M. (2018). Epi: A package for statistical analysis in epidemiology. R package version 2.32.

Chauvel, L. (2012). APCD: Stata module for estimating age-period-cohort effects with detrended coefficients. Statistical Software Components S457440. Boston, MA: Boston College Department of Economics.

Chow, G. C. (1960).
Tests of equality between sets of coefficients in two linear regressions. Econometrica, 28, 591–605.

Clayton, D., & Schifflers, E. (1987a). Models for temporal variation in cancer rates. I: Age-period and age-cohort models. Statistics in Medicine, 6, 449–467.

Clayton, D., & Schifflers, E. (1987b). Models for temporal variation in cancer rates. II: Age-period-cohort models. Statistics in Medicine, 6, 469–481.

Clements, M. P., & Hendry, D. F. (1999). Forecasting non-stationary time series. Cambridge, MA: MIT Press.

Cox, D. R., & Hinkley, D. V. (1974). Theoretical statistics. London: Chapman & Hall.

Davison, A. C., & Hinkley, D. V. (1997). Bootstrap methods and their applications. Cambridge, U.K.: Cambridge University Press.

Deaton, A. S., & Paxson, C. H. (1994a). Saving, growth, and aging in Taiwan. In D. A. Wise (Ed.), Studies in the economics of aging (pp. 331–361). Chicago, IL: Chicago University Press.

Deaton, A. S., & Paxson, C. H. (1994b). Intertemporal choice and inequality. Journal of Political Economy, 102, 437–467.

Deaton, A., & Paxson, C. (2000). Growth and saving among individuals and households. Review of Economics and Statistics, 82, 212–225.

Diouf, I., Charles, M., Ducimetière, P., Basdevant, A., Eschwege, E., & Heude, B. (2010). Evolution of obesity prevalence in France: An age-period-cohort analysis. Epidemiology, 21, 360–365.

Dobson, A. (1990). An introduction to generalized linear models. Boca Raton, FL: Chapman & Hall.

Ejrnæs, M., & Hochguertel, S. (2013). Is business failure due to lack of effort? Empirical evidence from a large administrative sample. Economic Journal, 123, 791–830.

England, P. D. (2002). Addendum to ‘Analytic and bootstrap estimates of prediction errors in claims reserving.’ Insurance: Mathematics and Economics, 31, 461–466.

England, P. D., & Verrall, R. J. (2002). Stochastic claims reserving in general insurance. British Actuarial Journal, 8, 519–544.

Fahrmeir, L., & Kaufmann, H. (1985). Consistency and asymptotic normality of the maximum likelihood estimator in generalized linear models. Annals of Statistics, 13, 342–368.

Fannon, Z., Monden, C., & Nielsen, B. (2018). Age-period-cohort modelling and covariates, with an application to obesity in England 2001–2014. Nuffield Discussion Paper 2018-W05.

Fienberg, S. E., & Mason, W. M. (1979). Identification and estimation of age-period-cohort models in the analysis of discrete archival data. Sociological Methodology, 10, 1–67.

Fitzenberger, B., Schnabel, R., & Wunderlich, G. (2004). The gender gap in labor market participation and employment: A cohort analysis for West Germany. Journal of Population Economics, 17, 83–116.

Fu, W. J. (2016). Constrained estimators and consistency of a regression model on a Lexis diagram. Journal of the American Statistical Association, 111, 180–199.

Fu, W. J. (2018). A practical guide to age-period-cohort analysis: The identification problem and beyond. Boca Raton, FL: CRC Press.

Fu, W. J., & Hall, P. (2006). Asymptotic properties of estimators in age-period-cohort analysis.
Statistics & Probability Letters, 76, 1925–1929.

Fu, W. J., Land, K. C., & Yang, Y. (2011). On the intrinsic estimators and constrained estimators in age-period-cohort models. Sociological Methods & Research, 40, 453–466.

Glenn, N. D. (1976). Cohort analysts’ futile quest: Statistical attempts to separate age, period, and cohort effects. American Sociological Review, 41(5), 900–904.

Glenn, N. D. (2005). Cohort analysis (2nd ed.). Quantitative applications in the social sciences (Vol. 5). SAGE.

Hanoch, G., & Honig, M. (1985). ‘True’ age profiles of earnings: Adjusting for censoring and for period and cohort effects. The Review of Economics and Statistics, 67, 384–394.

Harnau, J. (2018a). Misspecification tests for log-normal and over-dispersed Poisson chain-ladder models. Risks, 6(2), 25.

Harnau, J. (2018b). Log-normal or over-dispersed Poisson. Risks, 6(3), 70.

Harnau, J., & Nielsen, B. (2017). Over-dispersed age-period-cohort models. Journal of the American Statistical Association, 113(524), 1722–1732.

Heckman, J., & Robb, R. (1985). Using longitudinal data to estimate age, period and cohort effects in earnings equations. In W. M. Mason & S. E. Fienberg (Eds.), Cohort analysis in social research (pp. 137–150). New York, NY: Springer.

Holford, T. R. (1983). The estimation of age, period and cohort effects for vital rates. Biometrics, 39, 311–324.

Holford, T. R. (1985). An alternative approach to statistical age-period-cohort analysis. Journal of Chronic Diseases, 38, 831–836.

Holford, T. R. (1998). Age-period-cohort analysis. In P. Armitage & T. Colton (Eds.), Encyclopedia of biostatistics (pp. 82–99). Chichester: Wiley.

Holford, T. R. (2006). Approaches to fitting age-period-cohort models with unequal intervals. Statistics in Medicine, 25, 977–993.

Kalwij, A. S., & Alessie, R. (2007). Permanent and transitory wages of British men, 1975–2001: Year, age, and cohort effects. Journal of Applied Econometrics, 22, 1063–1093.

Keiding, N. (1990). Statistical inference in the Lexis diagram. Philosophical Transactions of the Royal Society of London, A332, 487–509.

Krueger, A. B., & Pischke, J. (1992). The effect of social security on labor supply: A cohort analysis of the notch generation. Journal of Labor Economics, 10, 412–437.

Kuang, D., Nielsen, B., & Nielsen, J. P. (2008a). Identification of the age-period-cohort model and the extended chain ladder model. Biometrika, 95, 979–986.

Kuang, D., Nielsen, B., & Nielsen, J. P. (2008b). Forecasting with the age-period-cohort model and the extended chain-ladder model. Biometrika, 95, 987–991.

Kuang, D., Nielsen, B., & Nielsen, J. P. (2011). Forecasting in an extended chain-ladder-type model. Journal of Risk and Insurance, 78, 345–359.

Kupper, L. L., Janis, J. M., Karmous, A., & Greenberg, B. G. (1985). Statistical age-period-cohort analysis: A review and critique. Journal of Chronic Diseases, 38, 811–830.

Lee, R. D., & Carter, L. R. (1992). Modeling and forecasting U.S. mortality. Journal of the American Statistical Association, 87, 659–671.

Lee, Y. K., Mammen, E., Nielsen, J. P., & Park, B. U.
(2015). Asymptotics for in-sample density forecasting. Annals of Statistics, 43, 620–651.

Lehman, E. L. (1986). Testing statistical hypotheses (2nd ed.). New York, NY: Springer.

Luo, L. (2013). Assessing validity and application scope of the intrinsic estimator approach to the age-period-cohort problem. Demography, 50, 1945–1967.

Mammen, E., Martínez Miranda, M. D., & Nielsen, J. P. (2015). In-sample forecasting applied to reserving and mesothelioma mortality. Insurance: Mathematics and Economics, 61, 76–86.

Martínez Miranda, M. D., Nielsen, B., & Nielsen, J. P. (2015). Inference and forecasting in the age-period-cohort model with unknown exposure with an application to mesothelioma mortality. Journal of the Royal Statistical Society, A178, 29–55.

Martínez Miranda, M. D., Nielsen, B., & Nielsen, J. P. (2016). A simple benchmark for mesothelioma projection for Great Britain. Occupational and Environmental Medicine, 73, 561–563.

Martínez Miranda, M. D., Nielsen, J. P., Sperlich, S., & Verrall, R. (2013). Continuous chain ladder: Reformulating and generalizing a classical insurance problem. Expert Systems with Applications, 40, 5588–5603.

Mason, K. O., Mason, W. M., Winsborough, H. H., & Poole, W. K. (1973). Some methodological issues in cohort analysis of archival data. American Sociological Review, 38, 242–258.

McKenzie, D. J. (2006). Disentangling age, cohort and time effects in the additive model. Oxford Bulletin of Economics and Statistics, 68, 473–495.

Meghir, C., & Whitehouse, E. (1996). The evolution of wages in the United Kingdom: Evidence from micro-data. Journal of Labor Economics, 14, 1–25.

Moffitt, R. (1993). Identification and estimation of dynamic models with a time series of repeated cross-sections. Journal of Econometrics, 59, 99–123.

Nelder, J. A., & Wedderburn, R. W. M. (1972). Generalized linear models. Journal of the Royal Statistical Society, Series A, 135, 370–384.

Nielsen, B., & Nielsen, J. P. (2014). Identification and forecasting in mortality models. The Scientific World Journal, 2014, 347043.

Nielsen, B. (2015). APC: An R package for age-period-cohort analysis. R Journal, 7, 52–64.

Nielsen, B. (2018). apc: Age-Period-Cohort Analysis. R package version 1.4.

O’Brien, R. M. (2011). Constrained estimators and age-period-cohort models (with discussion). Sociological Methods & Research, 40, 419–470.

O’Brien, R. M. (2015). Age-period-cohort models: Approaches and analyses with aggregate data. Boca Raton, FL: CRC Press.

OECD. (2018). Short-Term Labour Market Statistics. Paris, France: OECD.

Ogate, Y., Katsura, K., Keiding, N., Holst, C., & Green, A. (2000). Empirical Bayes age-period-cohort analysis of retrospective incidence data. Scandinavian Journal of Statistics, 27, 415–432.

Oh, C., & Holford, T. R. (2015). Age-period-cohort approaches to back-calculation of cancer incidence rate. Statistics in Medicine, 34, 1953–1964.

Osmond, C., & Gardner, M. J. (1982). Age, period and cohort models applied to cancer mortality rates. Statistics in Medicine, 1, 245–259.

Osmond, C., & Gardner, M. J. (1989).
Age, period, and cohort models: Non-overlapping cohorts don’t resolve the identification problem. American Journal of Epidemiology, 129, 31–35.

Peto, J., Hodgson, J. T., Matthews, F. E., & Jones, J. R. (1995). Continuing increase in mesothelioma mortality in Britain. Lancet, 345, 535–539.

Poirier, D. (1998). Revising belief in nonidentified models. Econometric Theory, 14, 483–509.

Riebler, A., & Held, L. (2010). The analysis of heterogeneous time trends in multivariate age-period-cohort models. Biostatistics, 11, 57–69.

Riebler, A., & Held, L. (2017). Projecting the future burden of cancer: Bayesian age-period-cohort analysis with integrated nested Laplace approximations. Biometrical Journal, 59, 531–549.

Schulhofer-Wohl, S., & Yang, Y. (2006). APC: Stata module for estimating age-period-cohort effects. Statistical Software Components S456754. Boston, MA: Boston College Department of Economics.

Schulhofer-Wohl, S. (2018). The age-time-cohort problem and the identification of structural parameters in life-cycle models. Quantitative Economics, 9, 643–658.

Schmid, V. J., & Held, L. (2007). Bayesian age-period-cohort modeling and prediction—BAMP. Journal of Statistical Software, 21(8), 1–15.

Smith, T. R., & Wakefield, J. (2016). A review and comparison of age-period-cohort models for cancer incidence. Statistical Science, 31, 591–610.

Stasieni, P. D. (2012). Age-period-cohort models in Stata. The Stata Journal, 12, 45–60.

Voas, D., & Chaves, M. (2016). Is the United States a counterexample to the secularization thesis? American Journal of Sociology, 121, 1517–1556.

Yang, Y., & Land, K. C. (2006). Age-period-cohort analysis of repeated cross-section surveys. Sociological Methodology, 36, 297–326.

Yang, Y., & Land, K. D. (2013). Age-period-cohort analysis: New models, methods and empirical applications. Boca Raton, FL: CRC Press.

Yang, Y., Fu, W. J., & Land, K. C. (2004). A methodological comparison of age-period-cohort models: The intrinsic estimator and conventional generalized linear models. Sociological Methodology, 34, 75–110." ]
[ null, "https://oxfordre.com/economics/doc/10.1093/acrefore/9780190625979.001.0001/acrefore-9780190625979-e-495-graphic-001-inline.gif", null, "https://oxfordre.com/economics/doc/10.1093/acrefore/9780190625979.001.0001/acrefore-9780190625979-e-495-graphic-002-inline.gif", null, "https://oxfordre.com/economics/doc/10.1093/acrefore/9780190625979.001.0001/acrefore-9780190625979-e-495-graphic-003-inline.gif", null, "https://oxfordre.com/economics/doc/10.1093/acrefore/9780190625979.001.0001/acrefore-9780190625979-e-495-graphic-004-inline.gif", null, "https://oxfordre.com/economics/doc/10.1093/acrefore/9780190625979.001.0001/acrefore-9780190625979-e-495-graphic-005-inline.gif", null, "https://oxfordre.com/economics/doc/10.1093/acrefore/9780190625979.001.0001/acrefore-9780190625979-e-495-graphic-006-inline.gif", null, "https://oxfordre.com/economics/doc/10.1093/acrefore/9780190625979.001.0001/acrefore-9780190625979-e-495-graphic-007-inline.gif", null, "https://oxfordre.com/economics/doc/10.1093/acrefore/9780190625979.001.0001/acrefore-9780190625979-e-495-graphic-008-inline.gif", null, "https://oxfordre.com/economics/doc/10.1093/acrefore/9780190625979.001.0001/acrefore-9780190625979-e-495-graphic-009-inline.gif", null, "https://oxfordre.com/economics/doc/10.1093/acrefore/9780190625979.001.0001/acrefore-9780190625979-e-495-graphic-010-inline.gif", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.89005584,"math_prob":0.9322858,"size":100699,"snap":"2019-51-2020-05","text_gpt3_token_len":23007,"char_repetition_ratio":0.18332589,"word_repetition_ratio":0.08748993,"special_character_ratio":0.24382566,"punctuation_ratio":0.14644636,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9916078,"pos_list":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20],"im_url_duplicate_count":[null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-01-28T00:54:10Z\",\"WARC-Record-ID\":\"<urn:uuid:a17a9b17-4de0-4f18-b686-a94954148075>\",\"Content-Length\":\"392332\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:368adf5c-1fbb-4cc6-baad-2dcd72900ed5>\",\"WARC-Concurrent-To\":\"<urn:uuid:5c5e8092-ef7f-4279-863b-1d0275b20e05>\",\"WARC-IP-Address\":\"69.63.133.205\",\"WARC-Target-URI\":\"https://oxfordre.com/economics/oso/viewentry/10.1093$002facrefore$002f9780190625979.001.0001$002facrefore-9780190625979-e-495?print\",\"WARC-Payload-Digest\":\"sha1:5XM2NEEPIUMKY4VKLMDMGXBNHWV6VHLP\",\"WARC-Block-Digest\":\"sha1:LBWB6DHKCHMPZD66EY6F6UXJVM5M77G7\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-05/CC-MAIN-2020-05_segments_1579251737572.61_warc_CC-MAIN-20200127235617-20200128025617-00182.warc.gz\"}"}
http://n3wt0n.com/blog/tag/interview/
[ "## FizzBuzz and a false coin\n\nI just had my co-worker visit me at home. We all work remotely so we don’t see each other on a daily basis, but he was passing through town. He brought up an interesting topic; a programming “test” called “FizzBuzz”.\n\nThe rules: Print out all values between 1 and 100. When a number is divisible by 3 print “FIZZ” and when divisible by 5 print “BUZZ”. When the number is divisible by both, print “FIZZ BUZZ”.\n\nMy simple solution to it (a Perl script) is as follows:\n\n```for (\\$i = 1; \\$i <= 100; \\$i++) {\nprint \"\\$i:\\t\";\nif (\\$i%3==0) { print \"FIZZ \"; }\nif (\\$i%5==0) { print \"BUZZ \"; }\nprint \"\\n\";\n}```\n\nThis meets the criteria, though I don’t check if a number is specifically divisible by both within a condition.\n\nThis brought back a memory of an “Algorithm Analysis” exam from University. I call it “The False Coin” problem, though it probably has many names.\n\nYou have a pile of coins. All of the coins are equal, except one, which is lighter than the other coins. This is the false coin. You also have a scale which allows you to compare two weights against each other. Using the scale, what is the most efficient way to find the false coin?\n\nThe other students insisted you split the pile of coins into two equal piles, weigh the two against each other and find the lighter pile, then split the lighter pile into two piles, and so on.\n\nClose. But wrong. There is a better way and it involves splitting the first pile into three equal piles. Compare two piles, identify which of the three is the lightest pile and split that into three more equal piles, and so on. With every weighing you get rid of 66% of the remaining coins, compared to only 50% when splitting the piles by half.\n\nSo keep that in mind before your next programming interview.\n\nStaypressed theme by Themocracy" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.94691926,"math_prob":0.96475345,"size":1756,"snap":"2019-35-2019-39","text_gpt3_token_len":433,"char_repetition_ratio":0.110159814,"word_repetition_ratio":0.0,"special_character_ratio":0.25626424,"punctuation_ratio":0.11859838,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9712759,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2019-08-20T08:26:05Z\",\"WARC-Record-ID\":\"<urn:uuid:cb1de887-d355-49a4-8884-f0c571d33284>\",\"Content-Length\":\"12769\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:b87a7b20-70b2-4d0d-883d-c69787b852f2>\",\"WARC-Concurrent-To\":\"<urn:uuid:b8249902-8aed-4dea-afba-691d51258128>\",\"WARC-IP-Address\":\"198.187.31.93\",\"WARC-Target-URI\":\"http://n3wt0n.com/blog/tag/interview/\",\"WARC-Payload-Digest\":\"sha1:7COBC5VB7TIWSH4YYTAAYDKWW3D3IYE2\",\"WARC-Block-Digest\":\"sha1:KYXXE5JAX47TEWTOGZGEOBQ2SMNOZJLV\",\"WARC-Identified-Payload-Type\":\"application/xhtml+xml\",\"warc_filename\":\"/cc_download/warc_2019/CC-MAIN-2019-35/CC-MAIN-2019-35_segments_1566027315258.34_warc_CC-MAIN-20190820070415-20190820092415-00277.warc.gz\"}"}
https://answers.everydaycalculation.com/gcf/1170-36
[ "Solutions by everydaycalculation.com\n\n## What is the GCF of 1170 and 36?\n\nThe GCF of 1170 and 36 is 18.\n\n#### Steps to find GCF\n\n1. Find the prime factorization of 1170\n1170 = 2 × 3 × 3 × 5 × 13\n2. Find the prime factorization of 36\n36 = 2 × 2 × 3 × 3\n3. To find the GCF, multiply all the prime factors common to both numbers:\n\nTherefore, GCF = 2 × 3 × 3\n4. GCF = 18\n\nMathStep (Works offline)", null, "Download our mobile app and learn how to find GCF of upto four numbers in your own time:" ]
[ null, "https://answers.everydaycalculation.com/mathstep-app-icon.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.77043325,"math_prob":0.99831545,"size":587,"snap":"2020-34-2020-40","text_gpt3_token_len":194,"char_repetition_ratio":0.12864494,"word_repetition_ratio":0.0,"special_character_ratio":0.4173765,"punctuation_ratio":0.07964602,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9963126,"pos_list":[0,1,2],"im_url_duplicate_count":[null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-09-27T06:20:28Z\",\"WARC-Record-ID\":\"<urn:uuid:42e64452-d5e1-4b11-84e7-c0d59b03c232>\",\"Content-Length\":\"6012\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:d2c62b96-9503-41ba-8913-2d955b92d1b8>\",\"WARC-Concurrent-To\":\"<urn:uuid:61800abf-0a5c-4ac4-b571-5e708db8070f>\",\"WARC-IP-Address\":\"96.126.107.130\",\"WARC-Target-URI\":\"https://answers.everydaycalculation.com/gcf/1170-36\",\"WARC-Payload-Digest\":\"sha1:FLXQUKKOFH7SIBFDQIQLJWRXNRVSU62A\",\"WARC-Block-Digest\":\"sha1:5SYTZ54GDJCWO2OJXQYDUXIPLVGG3OOW\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-40/CC-MAIN-2020-40_segments_1600400265461.58_warc_CC-MAIN-20200927054550-20200927084550-00021.warc.gz\"}"}
https://neuron.eng.wayne.edu/tarek/MITbook/chap5/5_1.html
[ "5.1 Learning Rule for Multilayer Feedforward Neural Networks\n\nConsider the two-layer feedforward architecture shown in Figure 5.1.1. This network receives a set of scalar signals {x0, x1, ... , xn}, where x0 is a bias signal equal to 1. This set of signals constitutes an input vector x, x Rn+1. The layer receiving the input signal is called the hidden layer. Figure 5.1.1 shows a hidden layer having J units. The output of the hidden layer is a (J+1)-dimensional real-valued vector z, z = [z0, z1, ..., zJ]T. Again, z0 = 1 represents a bias input and can be thought of as being generated by a \"dummy\" unit (with index zero) whose output z0 is clamped at 1. The vector z supplies the input for the output layer of L units. The output layer generates an L-dimensional vector y in response to the input x which, when the network is fully trained, should be identical (or very close) to a \"desired\" output vector d associated with x.", null, "Figure 5.1.1. A two layer fully interconnected feedforward neural network architecture. For clarity, only selected connections are drawn.\n\nThe activation function fh of the hidden units is assumed to be a differentiable nonlinear function (typically, fh is the logistic function defined by", null, ", or a hyperbolic tangent function fh(net) = tanh ( net), with values for and close to unity). Each unit of the output layer is assumed to have the same activation function, denoted fo; the functional form of fo is determined by the desired output signal/pattern representation or the type of application. For example, if the desired output is real-valued (as in some function approximation applications), then a linear activation fo (net) = net may be used. On the other hand, if the network implements a pattern classifier with binary outputs, then a saturating nonlinearity similar to fh may be used for fo. In this case, the components of the desired output vector d must be chosen within the range of fo. It is important to note that if fh is linear, then one can always collapse the net in Figure 5.1.1 to a single layer net and thus lose the universal approximation/mapping capabilities discussed in Chapter 2. Finally, we denote by wji the weight of the jth hidden unit associated with the input signal xi. Similarly, wlj is the weight of the lth output unit associated with the hidden signal zj.\n\nNext, consider a set of m input/output pairs {xk, dk}, where dk is an L-dimensional vector representing the desired network output upon the presentation of xk. The objective here is to adaptively adjust the J(+ 1) + L(+& nbsp;1) weights of this network such that the underlying function/mapping represented by the training set is approximated or learned. Since the learning here is supervised, i.e., target outputs are available, we may define an error function to measure the degree of approximation for any given setting of the network's weights. A commonly used error function is the SSE measure, but this is by no means the only possibility, and later in this chapter, several other error functions will be discussed. Once a suitable error function is formulated, learning can be viewed (as was done in Chapters 3 and 4) as an optimization process. That is, the error function serves as a criterion function, and the learning algorithm seeks to minimize the criterion function over the space of possible weight settings. For instance, if a differentiable criterion function is used, gradient descent on such a function will naturally lead to a learning rule. 
This idea has been invented independently by Bryson and Ho (1969), Amari (1967; 1968), Werbos (1974), and Parker (1985). Next, we illustrate the above idea by deriving a supervised learning rule for adjusting the weights wji and wlj such that the following error function is minimized (in a local sense) over the training set (Rumelhart et al., 1986b):

$$E(\mathbf{w}) = \frac{1}{2}\sum_{l=1}^{L}\left(d_l - y_l\right)^2$$
(5.1.1)

Here, w represents the set of all weights in the network. Note that Equation (5.1.1) is the \"instantaneous\" SSE criterion of Equation (3.1.32) generalized for a multiple output network.

5.1.1 Error Backpropagation Learning Rule

Since the targets for the output units are explicitly specified, one can directly use the delta rule, derived in Section 3.1.3, for updating the wlj weights. That is,

$$w_{lj}^{\text{new}} = w_{lj}^{\text{current}} + \rho_o\,(d_l - y_l)\,f_o'(net_l)\,z_j$$
(5.1.2)

where $net_l = \sum_{j=0}^{J} w_{lj} z_j$ is the weighted sum for the lth output unit, $f_o'(net_l)$ is the derivative of fo with respect to net, and $w_{lj}^{\text{new}}$ and $w_{lj}^{\text{current}}$ represent the updated (new) and current weight values, respectively. The zj's are computed by propagating the input vector x through the hidden layer according to:

$$z_j = f_h\!\left(\sum_{i=0}^{n} w_{ji} x_i\right), \qquad j = 1, 2, ..., J$$
(5.1.3)

The learning rule for the hidden layer weights wji is not as obvious as that for the output layer since we do not have available a set of target values (desired outputs) for hidden units. However, one may derive a learning rule for hidden units by attempting to minimize the output layer error. This amounts to propagating the output errors (dl − yl) back through the output layer towards the hidden units in an attempt to estimate \"dynamic\" targets for these units. Such a learning rule is termed error back-propagation or the backprop learning rule and may be viewed as an extension of the delta rule (Equation 5.1.2) used for updating the output layer. To complete the derivation of backprop for the hidden layer weights, and similar to the above derivation for the output layer weights, we perform gradient descent on the criterion function in Equation (5.1.1), but this time, the gradient is calculated with respect to the hidden weights:

$$\Delta w_{ji} = -\rho_h\,\frac{\partial E}{\partial w_{ji}}$$
(5.1.4)

where the partial derivative is to be evaluated at the current weight values. Using the chain rule for differentiation, one may express the partial derivative in Equation (5.1.4) as

$$\frac{\partial E}{\partial w_{ji}} = \frac{\partial E}{\partial z_j}\,\frac{\partial z_j}{\partial net_j}\,\frac{\partial net_j}{\partial w_{ji}}$$
(5.1.5)

with

$$\frac{\partial E}{\partial z_j} = -\sum_{l=1}^{L}(d_l - y_l)\,f_o'(net_l)\,w_{lj}$$
(5.1.6)

$$\frac{\partial z_j}{\partial net_j} = f_h'(net_j)$$
(5.1.7)

and

$$\frac{\partial net_j}{\partial w_{ji}} = x_i$$
(5.1.8)

Now, upon substituting Equations (5.1.6) through (5.1.8) into Equation (5.1.5) and using Equation (5.1.4), we arrive at the desired learning rule:

$$w_{ji}^{\text{new}} = w_{ji}^{\text{current}} + \rho_h \left[\sum_{l=1}^{L}(d_l - y_l)\,f_o'(net_l)\,w_{lj}\right] f_h'(net_j)\,x_i$$
(5.1.9)

By comparing Equation (5.1.9) to (5.1.2), one can immediately define an \"estimated target\" dj for the jth hidden unit implicitly in terms of the back-propagated error signal dj − zj as follows:

$$d_j - z_j = \sum_{l=1}^{L}(d_l - y_l)\,f_o'(net_l)\,w_{lj}$$
(5.1.10)

It is usually possible to express the derivatives of the activation functions in Equations (5.1.2) and (5.1.9) in terms of the activations themselves. For example, for the logistic activation function, we have

$$f_h'(net_j) = \lambda\,z_j(1 - z_j)$$
(5.1.11)

and for the hyperbolic tangent function, we have

$$f_h'(net_j) = \beta\,(1 - z_j^2)$$
(5.1.12)

The above learning equations may also be extended to feedforward nets with more than one hidden layer and/or nets with connections that jump over one or more layers (see Problems 5.1.2 and 5.1.3). The complete procedure for updating the weights in a feedforward neural net utilizing the above rules is summarized below for the two-layer architecture of Figure 5.1.1. We will refer to this learning procedure as incremental backprop or just backprop:
1. Initialize all weights and refer to them as \"current\" weights $w_{ji}^{\text{current}}$ and $w_{lj}^{\text{current}}$ (see Section 5.2.1 for details).

2. Set the learning rates $\rho_o$ and $\rho_h$ to small positive values (refer to Section 5.2.2 for additional details).

3. Select an input pattern xk from the training set (preferably at random) and propagate it through the network, thus generating hidden and output unit activities based on the current weight settings.

4. Use the desired target dk associated with xk and employ Equation (5.1.2) to compute the output layer weight changes $\Delta w_{lj}$.

5. Employ Equation (5.1.9) to compute the hidden layer weight changes $\Delta w_{ji}$. Normally, the current weights are used in these computations. In general, enhanced error correction may be achieved if one employs the updated output layer weights $w_{lj}^{\text{new}}$. However, this comes at the added cost of recomputing $y_l$ and $f_o'(net_l)$.

6. Update all weights according to $w_{lj}^{\text{new}} = w_{lj}^{\text{current}} + \Delta w_{lj}$ and $w_{ji}^{\text{new}} = w_{ji}^{\text{current}} + \Delta w_{ji}$ for the output and hidden layers, respectively.

7. Test for convergence. This is done by checking some preselected function of the output errors to see if its magnitude is below some preset threshold. If convergence is met, stop; otherwise, set $w_{lj}^{\text{current}} = w_{lj}^{\text{new}}$ and $w_{ji}^{\text{current}} = w_{ji}^{\text{new}}$ and go to step 3. It should be noted that backprop may fail to find a solution which passes the convergence test. In this case, one may try to reinitialize the search process, tune the learning parameters, and/or use more hidden units.

The above procedure is based on \"incremental\" learning, which means that the weights are updated after every presentation of an input pattern. Another alternative is to employ \"batch\" learning, where weight updating is performed only after all patterns (assuming a finite training set) have been presented. Batch learning is formally stated by summing the right-hand side of Equations (5.1.2) and (5.1.9) over all patterns xk. This amounts to gradient descent on the criterion function

$$E(\mathbf{w}) = \frac{1}{2}\sum_{k=1}^{m}\sum_{l=1}^{L}\left(d_l^{\,k} - y_l^{\,k}\right)^2$$
(5.1.13)

Even though batch updating moves the search point w in the direction of the true gradient at each update step, the \"approximate\" incremental updating is more desirable for two reasons: (1) it requires less storage, and (2) it makes the search path in the weight space stochastic (here, at each time step the input vector x is drawn at random), which allows for a wider exploration of the search space and, potentially, leads to better quality solutions. When backprop converges, it converges to a local minimum of the criterion function (McInerny et al., 1989). This fact is true of any gradient descent-based learning rule when the surface being searched is nonconvex (Amari, 1990); i.e., it admits local minima. Using stochastic approximation theory, Finnoff (1993; 1994) showed that for \"very small\" learning rates (approaching zero), incremental backprop approaches batch backprop and produces essentially the same results. However, for small constant learning rates there is a nonnegligible stochastic element in the training process which gives incremental backprop a \"quasiannealing\" character in which the cumulative gradient is continuously perturbed, allowing the search to escape local minima with small shallow basins of attraction. Thus, solutions generated by incremental backprop are often practical ones.
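As an illustration only (this code does not appear in the text), the following NumPy sketch implements the incremental procedure above for a small all-tanh network, with the derivatives expressed through the activations as in Equation (5.1.12). The 4-bit parity task, the 4-unit hidden layer, the random seed, the learning rates, and the epoch count are all choices of mine, loosely echoing the parity example discussed later in this section.

```python
import numpy as np

rng = np.random.default_rng(0)

# 4-bit parity: 16 input patterns, targets +/-1 for a tanh output unit.
X = np.array([[int(b) for b in f"{i:04b}"] for i in range(16)], dtype=float)
D = np.where(X.sum(axis=1) % 2 == 1, 1.0, -1.0).reshape(-1, 1)

n, J, L = 4, 4, 1            # inputs, hidden units, output units
rho_h = rho_o = 0.1          # learning rates (step 2 of the procedure)

# Each weight matrix carries a bias column for the dummy inputs x0 = z0 = 1.
W_h = rng.uniform(-0.5, 0.5, size=(J, n + 1))
W_o = rng.uniform(-0.5, 0.5, size=(L, J + 1))

def forward(x):
    x1 = np.append(1.0, x)                 # prepend bias input x0 = 1
    z = np.append(1.0, np.tanh(W_h @ x1))  # Eq. (5.1.3), plus bias z0 = 1
    y = np.tanh(W_o @ z)                   # output activations
    return x1, z, y

for _ in range(2000):                      # steps 3-7, repeated
    for k in rng.permutation(len(X)):      # incremental: one pattern at a time
        x1, z, y = forward(X[k])
        # Output layer, Eq. (5.1.2); for tanh, f'(net) = 1 - y**2
        delta_o = (D[k] - y) * (1.0 - y**2)
        # Hidden layer, Eq. (5.1.9): error backpropagated through W_o
        # (bias column excluded, since z0 is constant)
        delta_h = (W_o[:, 1:].T @ delta_o) * (1.0 - z[1:]**2)
        W_o += rho_o * np.outer(delta_o, z)
        W_h += rho_h * np.outer(delta_h, x1)

sse = 0.5 * sum(float(((D[k] - forward(X[k])[2])**2).sum()) for k in range(16))
print(f"final SSE over the training set: {sse:.4f}")
```

Whether a given run reaches a perfect (near-zero SSE) solution depends on the random initialization and the learning rates, exactly as the discussion above leads one to expect.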
The local minima problem can be further eased by heuristically adding random noise to the weights (von Lehman et al., 1988) or by adding noise to the input patterns (Sietsma and Dow, 1988). In both cases, some noise reduction schedule should be employed to dynamically reduce the added noise level towards zero as learning progresses.

Next, the incremental backprop learning procedure is applied to solve a two-dimensional, two-class pattern classification problem. This problem should help give a good feel for what is learned by the hidden units in a feedforward neural network, and how the various units work together to generate a desired solution.

Example 5.1.1: Consider the two-class problem shown in Figure 5.1.2. The points inside the shaded region belong to class B and all other points are in class A. A three-layer feedforward neural network with backprop training is employed, which is supposed to learn to distinguish between these two classes. The network consists of an 8-unit first hidden layer, followed by a second hidden layer with 4 units, followed by a 1-unit output layer. We will refer to such a network as having an 8-4-1 architecture. All units employ a hyperbolic tangent activation function. The output unit should encode the class of each input vector; a positive output indicates class B and a negative output indicates class A. Incremental backprop was used with learning rates set to 0.1. The training set consists of 500 randomly chosen points, 250 from region A and another 250 from region B. In this training set, points representing class B and class A were assigned desired output (target) values of +1 and −1, respectively. Training was performed for several hundred cycles over the training set.

Figure 5.1.2. Decision regions for the pattern classification problem in Example 5.1.1.

Figure 5.1.3 shows geometrical plots of all unit responses upon testing the network with a new set of 1000 uniformly randomly generated points inside the [−1, +1]² region. In generating each plot, a black dot was placed at the exact coordinates of the test point (input) in the input space if and only if the corresponding unit response is positive. The boundaries between the dotted and the white regions in the plots represent approximate decision boundaries learned by the various units in the network. Figure 5.1.3 (a)-(h) represent the decision boundaries learned by the eight units in the first hidden layer. Figure 5.1.3 (i)-(l) shows the decision boundaries learned by the four units of the second hidden layer. Figure 5.1.3 (m) shows the decision boundary realized by the output unit. Note the linear nature of the separating surface realized by the first hidden layer units, from which complex nonlinear separating surfaces are realized by the second hidden layer units and ultimately by the output layer unit. This example also illustrates how a single hidden layer feedforward net (counting only the first two layers) is capable of realizing convex, concave, as well as disjoint decision regions, as can be seen from Figure 5.1.3 (i)-(l). Here, we neglect the output unit and view the remaining net as one with an 8-4 architecture.

The present problem can also be solved with smaller networks (a smaller number of hidden units, or even a network with a single hidden layer). However, the training of such smaller networks with backprop may become more difficult. A smaller network with a 5-3-1 architecture utilizing a variant backprop learning procedure (Hassoun et al., 1990) is reported in Song (1992), which has a comparable separating surface to the one in Figure 5.1.3 (m).

Figure 5.1.3.
Separating surfaces generated by the various units in the 8-4-1 network of Example 5.1.1. (a)-(h): Separating surfaces realized by the units in the first hidden layer; (i)-(l): Separating surfaces realized by the units in the second hidden layer; and (m): Separating surface realized by the output unit.

Huang and Lippmann (1988) employed Monte Carlo simulations to investigate the capabilities of backprop in learning complex decision regions (see Figure 2.3.3). They reported no significant performance difference between two- and three-layer feedforward nets when forming complex decision regions using backprop. They also demonstrated that backprop's convergence time is excessive for complex decision regions and that the performance of such trained classifiers is similar to that obtained with the k-nearest neighbor classifier (Duda and Hart, 1973). Villiers and Barnard (1993) reported similar simulations, but on data sets which consisted of a \"distribution of distributions\" where a typical class is a set of clusters (distributions) in the feature space, each of which can be more or less spread out and which might involve some or all of the dimensions of the feature space; the distribution of distributions thus assigns a probability to each distribution in the data set. It was found, for networks of equal complexity (same number of weights), that there is no significant difference between the quality of \"best\" solutions generated by two- and three-layer backprop-trained feedforward networks; actually, the two-layer nets demonstrated better performance, on the average. As for the speed of convergence, three-layer nets converged faster if the numbers of units in the two hidden layers were roughly equal.

Gradient descent search may be eliminated altogether in favor of a stochastic global search procedure that guarantees convergence to a global solution with high probability; genetic algorithms and simulated annealing are examples of such procedures and are considered in Chapter 8. However, the assured (in probability) optimality of these global search procedures comes at the expense of slow convergence. Next, a deterministic search procedure termed global descent is presented which helps backprop reach globally optimal solutions.

Here, we describe a learning method in which the gradient descent rule in batch backprop is replaced with a \"global descent\" rule (Cetin et al., 1993a). This methodology is based on a global optimization scheme, acronymed TRUST: terminal repeller unconstrained subenergy tunneling, which formulates optimization in terms of the flow of a special deterministic dynamical system (Cetin et al., 1993b).

Global descent is a gradient descent on a special criterion function C(w, w*) given by

[Equation (5.1.14), the TRUST criterion; rendered only as an image in the source]

where w*, with component values $w_i^*$, is a fixed weight vector which can be a local minimum of E(w) or an initial weight state w0; the remaining quantities in (5.1.14) are the unit step function, a shifting parameter (typically set to 2), and a small positive constant k. The first term on the right-hand side in Equation (5.1.14) is a monotonic transformation of the original criterion function (e.g., the SSE criterion may be used) which preserves all critical points of E(w) and has the same relative ordering of the local and global minima of E(w). It also flattens the portion of E(w) above E(w*) with minimal distortion elsewhere. On the other hand, the second term is a \"repeller term\" which gives rise to a convex surface with a unique minimum located at w = w*.
The overall effect of this energy transformation is schematically represented for a one-dimensional criterion function in Figure 5.1.4.

Performing gradient descent on C(w, w*) leads to the \"global descent\" update rule

[Equation (5.1.15), the global descent update rule; rendered only as an image in the source]

The first term on the right-hand side of Equation (5.1.15) is a \"subenergy gradient\", while the second term is a \"non-Lipschitzian\" terminal repeller (Zak, 1989). Upon replacing the gradient descent in Equations (5.1.2) and (5.1.4) by Equation (5.1.15), where wi represents an arbitrary hidden unit or output unit weight, the modified backprop procedure may escape local minima of the original criterion function E(w) given in Equation (5.1.13). Here, batch training is required since Equation (5.1.15) necessitates a unique error surface for all patterns.

Figure 5.1.4. A plot of a one-dimensional criterion function E(w) with local minimum at w*. The function E(w) − E(w*) is plotted below, as well as the global descent criterion function C(w, w*).

The update rule in Equation (5.1.15) automatically switches between two phases: a tunneling phase and a gradient descent phase. The tunneling phase is characterized by E(w) ≥ E(w*). Since for this condition the subenergy gradient term is nearly zero in the vicinity of the local minimum w*, the terminal repeller term in Equation (5.1.15) dominates, leading to the dynamical system

[Equation (5.1.16), the terminal repeller dynamics; rendered only as an image in the source]

This system has an unstable repeller equilibrium point at w = w*; i.e., at the local minimum of E(w). The \"power\" of this repeller is determined by the constant k. Thus, the dynamical system given by Equation (5.1.15), when initialized with a small perturbation from w*, is repelled from this local minimum until it reaches a lower energy region with E(w) < E(w*); i.e., tunneling through portions of E(w) where E(w) ≥ E(w*) is accomplished. The second phase is a gradient descent minimization phase, characterized by E(w) < E(w*). Here, the repeller term is identically zero. Thus, Equation (5.1.15) becomes

$$\Delta w_i = -\eta(\mathbf{w})\,\frac{\partial E(\mathbf{w})}{\partial w_i}$$
(5.1.17)

where η(w) is a dynamic learning rate (step size) defined through the subenergy transformation in (5.1.14); it is approximately constant when E(w*) is larger than E(w) plus the shifting parameter.

Initially, w* is chosen as one corner of a domain in the form of a hyperparallelepiped of dimension J(n + 1) + L(J + 1), which is the dimension of w in the architecture of Figure 5.1.1. A slightly perturbed version of w*, namely w* + Δw, is taken as the initial state of the dynamical system in Equation (5.1.15). Here Δw is a small perturbation which drives the system into the domain of interest. If E(w* + Δw) < E(w*), the system immediately enters a gradient descent phase which equilibrates at a local minimum. Every time a new equilibrium is reached, w* is set equal to this equilibrium and Equation (5.1.15) is reinitialized with the perturbed state w* + Δw, which assures a necessary consistency in the search flow direction. Since w* is now a local minimum, E(w) ≥ E(w*) holds in the neighborhood of w*. Thus, the system enters a repelling (tunneling) phase, and the repeller at w* repels the system until it reaches a lower basin of attraction where E(w) < E(w*). As the dynamical system enters the next basin, the system automatically switches to gradient descent and equilibrates at the next lower local minimum. We then set w* equal to this new minimum and repeat the process. If, on the other hand, E(w* + Δw) ≥ E(w*) at the onset of training, then the system is initially in a tunneling phase.
The tunneling will proceed to a lower basin, at which point it enters the minimization phase and follows the behavior discussed above. Training can be stopped when a minimum w* with E(w*) = 0 (i.e., zero mapping error) is reached, or when E(w*) becomes smaller than a preset threshold.

The global descent method is guaranteed to find the global minimum for functions of one variable, but not for multivariate functions. However, in the multidimensional case, the algorithm will always escape from one local minimum to another with a lower or equal functional value. Figure 5.1.5 compares the learning curve for the global descent-based backprop to that of batch backprop for the four-bit parity problem in a feedforward net with four hidden units and a single output unit. The same initial random weights are used in both cases. The figure depicts one tunneling phase for the global descent algorithm before convergence to a (perfect) global minimum solution. In performing this simulation, it is found that the direction of the perturbation vector Δw is very critical in regard to successfully reaching a global minimum. On the other hand, batch backprop converges to the first local minimum it reaches. This local solution represents a partial solution to the 4-bit parity problem (i.e., mapping error is present). Simulations using incremental backprop with the same initial weights as in the above simulations are also performed, but are not shown in the figure. Incremental backprop was able to produce both of the solutions shown in Figure 5.1.5; very small learning rates ($\rho_o$ and $\rho_h$) often lead to imperfect local solutions, while relatively larger learning rates may lead to a perfect solution.

Figure 5.1.5. Learning curves for global descent- and gradient descent-based batch backprop for the 4-bit parity problem." ]
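Because Equations (5.1.14)–(5.1.17) survive here only as images, the following one-dimensional Python sketch is a qualitative illustration of the two-phase behavior rather than the published TRUST algorithm: the toy criterion E, the step sizes, and the cube-root repeller are assumptions of mine; only the switching condition E(w) ≥ E(w*) and the repeller anchored at w* follow the text.

```python
import numpy as np

def E(w):                      # toy criterion with a local and a global basin
    return 0.1 * w**4 - 0.5 * w**2 + 0.2 * w + 0.5

def dE(w):
    return 0.4 * w**3 - w + 0.2

# Plain gradient descent from w = 1 settles in the shallower (local) basin.
w_star = 1.0
for _ in range(5000):
    w_star -= 0.01 * dE(w_star)

# Two-phase descent started from a small perturbation of w*. The sign of
# the perturbation matters, as the simulation described above also found.
w = w_star - 1e-3
for _ in range(20000):
    if E(w) >= E(w_star):
        # Tunneling phase: repelled away from w*; the 1/3 power is an
        # assumed stand-in for the non-Lipschitzian terminal repeller.
        w += 0.01 * 0.5 * np.sign(w - w_star) * abs(w - w_star) ** (1 / 3)
    else:
        # Minimization phase: ordinary gradient descent on E.
        w -= 0.01 * dE(w)

print(f"local minimum:  w = {w_star:.3f}, E = {E(w_star):.3f}")
print(f"after descent:  w = {w:.3f}, E = {E(w):.3f}")  # settles lower
```

Once the trajectory crosses into a region where E falls below E(w*), the repeller switches off for good and ordinary descent takes over, which is exactly the phase-switching behavior the section describes.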
[ null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00001.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00002.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00003.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00004.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00005.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00006.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00007.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00008.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00009.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00010.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00011.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00012.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00013.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00014.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00015.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00016.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00017.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00018.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00019.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00020.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00021.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00022.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00023.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00024.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00025.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00026.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00027.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00028.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00029.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00030.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00031.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00032.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00033.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00034.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00035.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00036.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00037.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00038.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00039.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00040.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00041.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00042.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00043.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00044.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00045.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00046.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00047.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00048.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00049.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/chap5/img00050.gif", 
null, "https://neuron.eng.wayne.edu/tarek/MITbook/barrow.gif", null, "https://neuron.eng.wayne.edu/tarek/MITbook/barrow.gif", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.8518121,"math_prob":0.95936066,"size":46490,"snap":"2020-34-2020-40","text_gpt3_token_len":11217,"char_repetition_ratio":0.13780493,"word_repetition_ratio":0.90865576,"special_character_ratio":0.223661,"punctuation_ratio":0.12348934,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9933983,"pos_list":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104],"im_url_duplicate_count":[null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,null,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-09-23T22:31:02Z\",\"WARC-Record-ID\":\"<urn:uuid:ca022af2-9b40-4663-ad48-3eb69c061146>\",\"Content-Length\":\"35572\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:d460f329-b7e6-4daa-bb48-fa7a44069da4>\",\"WARC-Concurrent-To\":\"<urn:uuid:cbcf41f8-2ecc-4aa3-88a4-be8e49897f2c>\",\"WARC-IP-Address\":\"141.217.43.236\",\"WARC-Target-URI\":\"https://neuron.eng.wayne.edu/tarek/MITbook/chap5/5_1.html\",\"WARC-Payload-Digest\":\"sha1:IM3ADNVHZEW2VDN7I4E3VSMFFKVHTXEG\",\"WARC-Block-Digest\":\"sha1:NBM5N7FBKEE4QWHT3TPJ3KQUMCCZCKC6\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-40/CC-MAIN-2020-40_segments_1600400212959.12_warc_CC-MAIN-20200923211300-20200924001300-00718.warc.gz\"}"}
https://cracku.in/9-a-circle-is-inscribed-in-a-rhombus-with-diagonals--x-cat-2020-slot-1-quant
[ "Question 9\n\n# A circle is inscribed in a rhombus with diagonals 12 cm and 16 cm. The ratio of the area of circle to the area of rhombus is\n\nSolution", null, "Let the length of radius be 'r'.\n\nFrom the above diagram,\n\n$$x^2+r^2=6^2\\$$....(i)\n\n$$\\left(10-x\\right)^2+r^2=8^2\\$$----(ii)\n\nSubtracting (i) from (ii), we get:\n\nx=3.6 => $$r^2=36-\\left(3.6\\right)^2$$ ==> $$r^2=36-\\left(3.6\\right)^2\\ =23.04$$.\n\nArea of circle = $$\\pi\\ r^2=23.04\\pi\\$$\n\nArea of rhombus= 1/2*d1*d2=1/2*12*16=96.\n\n.'. Ratio of areas = 23.04$$\\pi\\$$/96=$$\\frac{6\\pi}{25}$$\n\n### View Video Solution", null, "" ]
[ null, "https://cracku.in/media/uploads/image_yBmetC1.png", null, "https://cracku.in/media/Question_348335_video_qtGUZqr.jpeg", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.71726835,"math_prob":0.9999894,"size":902,"snap":"2023-40-2023-50","text_gpt3_token_len":331,"char_repetition_ratio":0.116926506,"word_repetition_ratio":0.0,"special_character_ratio":0.38026607,"punctuation_ratio":0.120192304,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":1.000009,"pos_list":[0,1,2,3,4],"im_url_duplicate_count":[null,1,null,1,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-11-28T10:36:44Z\",\"WARC-Record-ID\":\"<urn:uuid:fa7c7b28-d877-48e6-9977-55966a911ba3>\",\"Content-Length\":\"124603\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:abb53d68-3d1b-4dd2-aab5-46e351694239>\",\"WARC-Concurrent-To\":\"<urn:uuid:6d94f326-15ca-4fc0-9f00-8cd04ca4f5db>\",\"WARC-IP-Address\":\"174.138.120.144\",\"WARC-Target-URI\":\"https://cracku.in/9-a-circle-is-inscribed-in-a-rhombus-with-diagonals--x-cat-2020-slot-1-quant\",\"WARC-Payload-Digest\":\"sha1:O2MO5ENRFQS5XDKYQ5N4DFNPXZTYUJWR\",\"WARC-Block-Digest\":\"sha1:JRFZHPDH3NHUCBFPIGJJXSXEM6OMJRYV\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-50/CC-MAIN-2023-50_segments_1700679099281.67_warc_CC-MAIN-20231128083443-20231128113443-00186.warc.gz\"}"}
https://anchoragegroup.com.au/variable-and-a-constant-spring-support/
[ "", null, "# Variable and Constant spring support\n\n### Anchorage Group explains the difference between variable and constant spring supports\n\nIn a variable load support, action forces and reactive forces on the spring varies during the pipe travel and, therefore, it has a zero moment about its line of action. In a constant load support, however, the fixed applied load is constant during its travel while the moment around its pivot point varies.", null, "Variable Spring Support\nA variable spring is fundamentally a spring in a vessel. When the load “w” is applied onto the system, the spring compresses by a distance W/k (where k is given by the spring rate), while the spring’s reactive force is also “w” under the condition of equilibrium. Deflection (∆L) is developed by pipe movement due to thermal expansion, causing a differential load (∆W=k ● ∆L) to act on the spring. The direction of the move will result in a change of load (∆W), which will either increase or decrease the applied load “w” converting it into our final operating load (w1). To minimize stress variations, the differential load (∆W) for a particular variable spring support is to be limited to a maximum of 25% of the operating load (w1).", null, "Constant Spring Support\nA constant spring is an assembly comprised by a spring and a primary cam mechanism. In this system, the external load remains uniform while the moment around its fixed pivot point varies during displacement (due to changes in moment arm length). An equilibrium is maintained by balancing the external force moment and the internal moment, which is produced by the spring’s compression/decompression around the pivot during pipe travel.\nA resisting force, nearly independent of position during its travel, can be provided by selecting proper moment arms, as developed by the cam geometry and spring properties\nAt each travel location of the external load, the moment resulted by the applied load is in equilibrium with the counter moment caused by the compressed/decompressed spring force with the suitable moment arm.\nTypically, the disparity of active and reactive forces is minimal (with a maximum deviation of 6%) it can be taken as a constant force while moving either upward or downward." ]
[ null, "https://anchoragegroup.com.au/wp-content/uploads/elementor/thumbs/constants-web-3-od9wc2nehc8b0ifnkjbzzy195r2ht293496z8ezseg.jpg", null, "https://anchoragegroup.com.au/wp-content/uploads/2018/11/variables1.jpg", null, "https://anchoragegroup.com.au/wp-content/uploads/2018/11/constants-2.jpg", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.9307395,"math_prob":0.9401776,"size":2249,"snap":"2019-51-2020-05","text_gpt3_token_len":446,"char_repetition_ratio":0.15055679,"word_repetition_ratio":0.005479452,"special_character_ratio":0.1956425,"punctuation_ratio":0.0719603,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9529114,"pos_list":[0,1,2,3,4,5,6],"im_url_duplicate_count":[null,1,null,1,null,1,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-01-19T13:15:56Z\",\"WARC-Record-ID\":\"<urn:uuid:5bf6597a-6741-48f1-a6d1-29e5aefbf9b4>\",\"Content-Length\":\"144832\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:48a48261-c45f-4708-a167-492213577616>\",\"WARC-Concurrent-To\":\"<urn:uuid:3f4423a1-524c-439b-9c11-735d874d1ae1>\",\"WARC-IP-Address\":\"27.50.93.31\",\"WARC-Target-URI\":\"https://anchoragegroup.com.au/variable-and-a-constant-spring-support/\",\"WARC-Payload-Digest\":\"sha1:NQ24LV7T5A2452JZCUAMGLTMJFLMGWOT\",\"WARC-Block-Digest\":\"sha1:EI2ZIVHFTGFUKH5NAJ5XDSQDZ4L6QMAQ\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-05/CC-MAIN-2020-05_segments_1579250594603.8_warc_CC-MAIN-20200119122744-20200119150744-00405.warc.gz\"}"}
https://itprospt.com/num/6205302/4-find-the-dimension-of-the-subspace-h-of-r2-spanned-by
[ "5\n\n# -4 Find the dimension of the subspace H of R2 spanned by \"H -5 20 dim H=...\n\n## Question\n\n###### -4 Find the dimension of the subspace H of R2 spanned by \"H -5 20 dim H=\n\n-4 Find the dimension of the subspace H of R2 spanned by \"H -5 20 dim H=", null, "", null, "#### Similar Solved Questions\n\n##### 5-Hydroxypentanal reacting with ethanol under acidic catalysis forms two enantiomersand Show full mechanism of products formation. (6 points) Label each step of the reaction as protonation_ nucleophilic addition with cyclization, elimination_ nucleophilic addition, or deprotonation. (2 points) (iii) Circle the oxonium ion intermediate. ( 1 points) (iiii) Make one sentence statement why racemic mixture will be the product of the reaction. points)\n5-Hydroxypentanal reacting with ethanol under acidic catalysis forms two enantiomers and Show full mechanism of products formation. (6 points) Label each step of the reaction as protonation_ nucleophilic addition with cyclization, elimination_ nucleophilic addition, or deprotonation. (2 points) (i...\n##### Ccs cnnkte mese senlenosHonts demoitsbatc VOn UIrJe TSaEN UFwt US4 Wlecal puur5alds dagging the conec teIE; into !EanFecal buleplants hnee beon approved UeacmnM[nect Mthahuud t0#ulnt (unan budThi\": OcL;GIln hiclenum and Glse , neeIdIOL; Git: inechon otinemcchotnTwpical Lqeabrie[E, lor Uni nluctori Ficludlu ttu USt ol potent anbbolics hoxtel m UO okin [email protected]/ reM; Itd CuI2 (aleFocalhalt Malls Ilnolve ityin-ttation rlEIn Mn elan t0 (ecIne noltI unlenlatalMtmy corta nnig ant alIncJai(tidoCoUr\nCcs cnnkte mese senlenos Honts demoitsbatc VOn UIrJe TSaEN UFwt US4 Wlecal puur5alds dagging the conec teIE; into ! Ean Fecal buleplants hnee beon approved Ueacmn M[nect Mthahuud t0 #ulnt ( unan bud Thi\": OcL;GIln hiclenum and Glse , neeIdIOL; Git: inechon otine mcchotn Twpical Lqeabrie[E, lor ...\n##### Question 6The cell indicated by the arrow is inmetaphaseprophaseanaphasetelophase\nQuestion 6 The cell indicated by the arrow is in metaphase prophase anaphase telophase...\n##### 4unitsDacaunmanutucurery CotkCi calletaIantRu cmeoC(s0) 750 , R(Sm)MC(S0i=MR(Su)19; HnJt0590l 4SuX?proouctionIncrensnd Ircm~Wlto Su| unitapuroulinatelymuch docs Protit phanaetcnanac\n4units Dacaun manutucurery CotkCi calleta Iant Ru cmeo C(s0) 750 , R(Sm) MC(S0i= MR(Su) 19; HnJt 0590l 4 SuX? proouction Incrensnd Ircm~Wlto Su| unit apuroulinately much docs Protit phanaet cnanac...\n##### If 2r? + 3 + zy = 4and y(4) ~10. find y (4) by implicit differentiation.y (4)Thus an equation of the cangent line to the graph at the poinc (4,~10) is\nIf 2r? + 3 + zy = 4and y(4) ~10. find y (4) by implicit differentiation. y (4) Thus an equation of the cangent line to the graph at the poinc (4,~10) is...\n##### Compute5L5 K < (6x + 9y) dydx 55 (Round your answer to the nearest integer)Submit AnswerTries 0/10\nCompute 5 L5 K < (6x + 9y) dydx 55 (Round your answer to the nearest integer) Submit Answer Tries 0/10...\n##### Intercept3.3130.47049.6200.000RED0.8110.7431.1890.2750.4450.1041.908RED_A0.9830.5473.2370.0722.6730.9167.802POINTS0.0350.00914.5900.0001.0361.0171.055POINTS~0.0350.00916.1600.0000.9660.9500.982HTGD1.6180.143 127.2250.0005.0453.8086.683TOTAL_H_P0.0100.0047.2270.0071.0101.0031.018TOTAL A P-0.0150.00413.7880.0000.9850.9780.993[FGS-0] [FGs-1]3.3200.41364.5550.0000.0360.0160.081-2.4730.43033.0800.0000.0840.0360.196[FGs-2]The relerence category is 0. 
This parameler is set t0 zero because it is redunda\nIntercept 3.313 0.470 49.620 0.000 RED 0.811 0.743 1.189 0.275 0.445 0.104 1.908 RED_A 0.983 0.547 3.237 0.072 2.673 0.916 7.802 POINTS 0.035 0.009 14.590 0.000 1.036 1.017 1.055 POINTS ~0.035 0.009 16.160 0.000 0.966 0.950 0.982 HTGD 1.618 0.143 127.225 0.000 5.045 3.808 6.683 TOTAL_H_P 0.010 0.004...\n##### Engnaer Jexcned & Valve that Will regulate Din pountIsquJr watct pressurc C automobile cnginc. nouldysoneante theelarted that the valve performs ahwe eobile fcgtons Thhenaineer desigried the Katve such that #t wouild produce AS um + vallante Known The valae was tested rricjn Dr cssur- Jeuma nlacer be U JenlicanT engines und tric Mejn Dfessur De ued cus 4 Find the value of the Iest Statistic. Round your unsrcr to Enter the vale of the test stalrtehoul untntrTablas5]Kexpad\nengnaer Jexcned & Valve that Will regulate Din pountIsquJr watct pressurc C automobile cnginc. nouldysoneante theelarted that the valve performs ahwe eobile fcgtons Thhenaineer desigried the Katve such that #t wouild produce AS um + vallante Known The valae was tested rricjn Dr cssur- Jeuma nlac...\n##### Given the graph of y = f(x), evaluate each of the following: (4 marks)(a) lim flx)(b) lim f(x)(c) lim f(x)(d) fl4)\nGiven the graph of y = f(x), evaluate each of the following: (4 marks) (a) lim flx) (b) lim f(x) (c) lim f(x) (d) fl4)...\n##### Use the randm variables X and from the previous probler and consider two new random variables W and where W 2+4X and VCompute EJW] and EJV] Compute Var[W] and VarlV]. Compute Cov(W; V) and Corr(W;V) Compute EIVIW 26]:\nUse the randm variables X and from the previous probler and consider two new random variables W and where W 2+4X and V Compute EJW] and EJV] Compute Var[W] and VarlV]. Compute Cov(W; V) and Corr(W;V) Compute EIVIW 26]:...\n##### 19IntegraileS2+ 2J 3 {+1Mz+2)dx (z\n19 Integraile S2+ 2 J 3 {+1Mz+2) dx (z...\n##### Question 51 pGiven the premise set C {p, 4.r,s} and $= (p V 4) ^ r; what is the weighted upper bound of the uncertainty of the conclusion$ if the probability of each element of [ is 0.9? Provide your answer with 3 decimal cases (for instance; write 0.082 if you have obtained the value 0.082441,or 0.083 if you have obtained 0.082689).No new data t0 save Last checked at 12 24pm Submia Ai 7A Pr\nQuestion 5 1 p Given the premise set C {p, 4.r,s} and $= (p V 4) ^ r; what is the weighted upper bound of the uncertainty of the conclusion$ if the probability of each element of [ is 0.9? Provide your answer with 3 decimal cases (for instance; write 0.082 if you have obtained the value 0.082441,o...\n##### The root-mean-square speed of the molecules of an ideal gas at25∘∘C and a pressure of 761 mmHg is 341 m/s.What is the density of this gas? You must show all the calculationsfor credit meaning just writing answers with out formulas andcalculations will not be graded.\nThe root-mean-square speed of the molecules of an ideal gas at 25∘∘C and a pressure of 761 mmHg is 341 m/s. What is the density of this gas? You must show all the calculations for credit meaning just writing answers with out formulas and calculations will not be graded....\n##### 3*-5 2x+1What is Invers fiunction for equation y~X-5 2x-3y 1\n3*-5 2x+1 What is Invers fiunction for equation y ~X-5 2x-3 y 1...\n##### 11 (in Quadrant-I), find 5If tan(r)sin(21)(Please enter answer accurate to decimal places.\n11 (in Quadrant-I), find 5 If tan(r) sin(21) (Please enter answer accurate to decimal places...." ]
[ null, "https://cdn.numerade.com/ask_images/5ebd314fd4cd48079896263c44df8f45.jpg ", null, "https://cdn.numerade.com/previews/a850c658-79ec-4a76-aaae-7559ac3b476b_large.jpg", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.6695023,"math_prob":0.95962864,"size":10259,"snap":"2022-40-2023-06","text_gpt3_token_len":3801,"char_repetition_ratio":0.1003413,"word_repetition_ratio":0.46627218,"special_character_ratio":0.36465544,"punctuation_ratio":0.14442493,"nsfw_num_words":1,"has_unicode_error":false,"math_prob_llama3":0.9501979,"pos_list":[0,1,2,3,4],"im_url_duplicate_count":[null,1,null,1,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2022-09-28T10:16:51Z\",\"WARC-Record-ID\":\"<urn:uuid:b3ccb3f3-3bc0-4aec-8874-c3a9cbfe38da>\",\"Content-Length\":\"72827\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:45afe47b-1b2f-4cb0-ab13-f93a1edcb742>\",\"WARC-Concurrent-To\":\"<urn:uuid:75b02438-57e9-4f92-ac3d-6128555e3f64>\",\"WARC-IP-Address\":\"172.67.73.211\",\"WARC-Target-URI\":\"https://itprospt.com/num/6205302/4-find-the-dimension-of-the-subspace-h-of-r2-spanned-by\",\"WARC-Payload-Digest\":\"sha1:WYB2IKDF3AOBSL2LIUNZ7O5PXZSS3CAO\",\"WARC-Block-Digest\":\"sha1:MWV6PB6SFJ36FFIGLG63RJYPXMTDLKF5\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2022/CC-MAIN-2022-40/CC-MAIN-2022-40_segments_1664030335190.45_warc_CC-MAIN-20220928082743-20220928112743-00533.warc.gz\"}"}
https://www.colorhexa.com/35ff47
[ "# #35ff47 Color Information\n\nIn a RGB color space, hex #35ff47 is composed of 20.8% red, 100% green and 27.8% blue. Whereas in a CMYK color space, it is composed of 79.2% cyan, 0% magenta, 72.2% yellow and 0% black. It has a hue angle of 125.3 degrees, a saturation of 100% and a lightness of 60.4%. #35ff47 color hex could be obtained by blending #6aff8e with #00ff00. Closest websafe color is: #33ff33.\n\n• R 21\n• G 100\n• B 28\nRGB color chart\n• C 79\n• M 0\n• Y 72\n• K 0\nCMYK color chart\n\n#35ff47 color description : Light lime green.\n\n# #35ff47 Color Conversion\n\nThe hexadecimal color #35ff47 has RGB values of R:53, G:255, B:71 and CMYK values of C:0.79, M:0, Y:0.72, K:0. Its decimal value is 3538759.\n\nHex triplet RGB Decimal 35ff47 `#35ff47` 53, 255, 71 `rgb(53,255,71)` 20.8, 100, 27.8 `rgb(20.8%,100%,27.8%)` 79, 0, 72, 0 125.3°, 100, 60.4 `hsl(125.3,100%,60.4%)` 125.3°, 79.2, 100 33ff33 `#33ff33`\nCIE-LAB 88.318, -80.133, 70.139 38.363, 72.728, 17.977 0.297, 0.563, 72.728 88.318, 106.493, 138.805 88.318, -78.242, 97.432 85.281, -68.943, 47.198 00110101, 11111111, 01000111\n\n# Color Schemes with #35ff47\n\n• #35ff47\n``#35ff47` `rgb(53,255,71)``\n• #ff35ed\n``#ff35ed` `rgb(255,53,237)``\nComplementary Color\n• #88ff35\n``#88ff35` `rgb(136,255,53)``\n• #35ff47\n``#35ff47` `rgb(53,255,71)``\n• #35ffac\n``#35ffac` `rgb(53,255,172)``\nAnalogous Color\n• #ff3588\n``#ff3588` `rgb(255,53,136)``\n• #35ff47\n``#35ff47` `rgb(53,255,71)``\n• #ac35ff\n``#ac35ff` `rgb(172,53,255)``\nSplit Complementary Color\n• #ff4735\n``#ff4735` `rgb(255,71,53)``\n• #35ff47\n``#35ff47` `rgb(53,255,71)``\n• #4735ff\n``#4735ff` `rgb(71,53,255)``\n• #edff35\n``#edff35` `rgb(237,255,53)``\n• #35ff47\n``#35ff47` `rgb(53,255,71)``\n• #4735ff\n``#4735ff` `rgb(71,53,255)``\n• #ff35ed\n``#ff35ed` `rgb(255,53,237)``\n• #00e815\n``#00e815` `rgb(0,232,21)``\n• #02ff19\n``#02ff19` `rgb(2,255,25)``\n• #1cff30\n``#1cff30` `rgb(28,255,48)``\n• #35ff47\n``#35ff47` `rgb(53,255,71)``\n• #4fff5e\n``#4fff5e` `rgb(79,255,94)``\n• #68ff75\n``#68ff75` `rgb(104,255,117)``\n• #82ff8d\n``#82ff8d` `rgb(130,255,141)``\nMonochromatic Color\n\n# Alternatives to #35ff47\n\nBelow, you can see some colors close to #35ff47. Having a set of related colors can be useful if you need an inspirational alternative to your original color choice.\n\n• #55ff35\n``#55ff35` `rgb(85,255,53)``\n• #45ff35\n``#45ff35` `rgb(69,255,53)``\n• #35ff36\n``#35ff36` `rgb(53,255,54)``\n• #35ff47\n``#35ff47` `rgb(53,255,71)``\n• #35ff58\n``#35ff58` `rgb(53,255,88)``\n• #35ff69\n``#35ff69` `rgb(53,255,105)``\n• #35ff7a\n``#35ff7a` `rgb(53,255,122)``\nSimilar Colors\n\n# #35ff47 Preview\n\nThis text has a font color of #35ff47.\n\n``<span style=\"color:#35ff47;\">Text here</span>``\n#35ff47 background color\n\nThis paragraph has a background color of #35ff47.\n\n``<p style=\"background-color:#35ff47;\">Content here</p>``\n#35ff47 border color\n\nThis element has a border color of #35ff47.\n\n``<div style=\"border:1px solid #35ff47;\">Content here</div>``\nCSS codes\n``.text {color:#35ff47;}``\n``.background {background-color:#35ff47;}``\n``.border {border:1px solid #35ff47;}``\n\n# Shades and Tints of #35ff47\n\nA shade is achieved by adding black to any pure hue, while a tint is created by mixing white to any pure color. 
In this example, #000e01 is the darkest color, while #f9fffa is the lightest one.\n\n• #000e01\n``#000e01` `rgb(0,14,1)``\n• #002103\n``#002103` `rgb(0,33,3)``\n• #003505\n``#003505` `rgb(0,53,5)``\n• #004906\n``#004906` `rgb(0,73,6)``\n• #005c08\n``#005c08` `rgb(0,92,8)``\n• #00700a\n``#00700a` `rgb(0,112,10)``\n• #00830c\n``#00830c` `rgb(0,131,12)``\n• #00970d\n``#00970d` `rgb(0,151,13)``\n• #00ab0f\n``#00ab0f` `rgb(0,171,15)``\n• #00be11\n``#00be11` `rgb(0,190,17)``\n• #00d213\n``#00d213` `rgb(0,210,19)``\n• #00e614\n``#00e614` `rgb(0,230,20)``\n• #00f916\n``#00f916` `rgb(0,249,22)``\n• #0eff23\n``#0eff23` `rgb(14,255,35)``\n• #21ff35\n``#21ff35` `rgb(33,255,53)``\n• #35ff47\n``#35ff47` `rgb(53,255,71)``\n• #49ff59\n``#49ff59` `rgb(73,255,89)``\n• #5cff6b\n``#5cff6b` `rgb(92,255,107)``\n• #70ff7d\n``#70ff7d` `rgb(112,255,125)``\n• #83ff8e\n``#83ff8e` `rgb(131,255,142)``\n• #97ffa0\n``#97ffa0` `rgb(151,255,160)``\n• #abffb2\n``#abffb2` `rgb(171,255,178)``\n• #beffc4\n``#beffc4` `rgb(190,255,196)``\n• #d2ffd6\n``#d2ffd6` `rgb(210,255,214)``\n• #e6ffe8\n``#e6ffe8` `rgb(230,255,232)``\n• #f9fffa\n``#f9fffa` `rgb(249,255,250)``\nTint Color Variation\n\n# Tones of #35ff47\n\nA tone is produced by adding gray to any pure hue. In this case, #92a294 is the less saturated color, while #35ff47 is the most saturated one.\n\n• #92a294\n``#92a294` `rgb(146,162,148)``\n• #8aaa8d\n``#8aaa8d` `rgb(138,170,141)``\n• #83b187\n``#83b187` `rgb(131,177,135)``\n• #7bb980\n``#7bb980` `rgb(123,185,128)``\n• #73c17a\n``#73c17a` `rgb(115,193,122)``\n• #6bc974\n``#6bc974` `rgb(107,201,116)``\n• #64d06d\n``#64d06d` `rgb(100,208,109)``\n• #5cd867\n``#5cd867` `rgb(92,216,103)``\n• #54e061\n``#54e061` `rgb(84,224,97)``\n• #4ce85a\n``#4ce85a` `rgb(76,232,90)``\n• #45ef54\n``#45ef54` `rgb(69,239,84)``\n• #3df74d\n``#3df74d` `rgb(61,247,77)``\n• #35ff47\n``#35ff47` `rgb(53,255,71)``\nTone Color Variation\n\n# Color Blindness Simulator\n\nBelow, you can see how #35ff47 is perceived by people affected by a color vision deficiency. This can be useful if you need to ensure your color combinations are accessible to color-blind users.\n\nMonochromacy\n• Achromatopsia 0.005% of the population\n• Atypical Achromatopsia 0.001% of the population\nDichromacy\n• Protanopia 1% of men\n• Deuteranopia 1% of men\n• Tritanopia 0.001% of the population\nTrichromacy\n• Protanomaly 1% of men, 0.01% of women\n• Deuteranomaly 6% of men, 0.4% of women\n• Tritanomaly 0.01% of the population" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.5167103,"math_prob":0.879798,"size":3682,"snap":"2019-26-2019-30","text_gpt3_token_len":1597,"char_repetition_ratio":0.13322458,"word_repetition_ratio":0.011090573,"special_character_ratio":0.55377513,"punctuation_ratio":0.23250565,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9879402,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2019-07-23T16:10:31Z\",\"WARC-Record-ID\":\"<urn:uuid:586301b1-7a47-45cb-86bd-e06134d01506>\",\"Content-Length\":\"36247\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:5ed856c0-dca8-40be-a2f7-a034b41d07df>\",\"WARC-Concurrent-To\":\"<urn:uuid:e00174d1-cb91-463a-9eae-beb344cb9598>\",\"WARC-IP-Address\":\"178.32.117.56\",\"WARC-Target-URI\":\"https://www.colorhexa.com/35ff47\",\"WARC-Payload-Digest\":\"sha1:WPW2LHD4SL3V7NXTI7YZ5BLH5GSH7Y2Q\",\"WARC-Block-Digest\":\"sha1:XLAWCTBMKGMLYXB7UO7LUUNNOTCWEL2M\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2019/CC-MAIN-2019-30/CC-MAIN-2019-30_segments_1563195529480.89_warc_CC-MAIN-20190723151547-20190723173547-00319.warc.gz\"}"}
https://bashasys.info/document-assembly/expression-model-absolute-valuenum/
[ "# Expression Model: ABSOLUTE VALUE(NUM)\n\nThis tip covers the instruction model: ABSOLUTE VALUE(NUM). This model returns the positive (or absolute) value of a number variable.  In some accounting formulas, the result of the formula will be a negative number.  You may want to not the value as a negative, but still be able to treat and format the number based on its positive value\n\n### What are the elements?\n\n• ABSOLUTE VALUE: The function\n\n• NUM: A number value, positive or negative\n\n### How do you use it?\n\nUse it in a fillpoint or computation for a variable entered in the system\n\n` ABSOLUTE VALUE(Profits NU) `\n\nUse it to test the result of a calculation\n\n` ABSOLUTE VALUE(Gross Revenue NU - Expenses NU)`\n\n### Posts by Category & Tag\n\nCategories Document Assembly Tags" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.7883223,"math_prob":0.9080692,"size":645,"snap":"2023-40-2023-50","text_gpt3_token_len":145,"char_repetition_ratio":0.13728549,"word_repetition_ratio":0.0,"special_character_ratio":0.20930232,"punctuation_ratio":0.088,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9531881,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-09-24T11:09:03Z\",\"WARC-Record-ID\":\"<urn:uuid:9173b0ab-acbe-4d1a-9134-1776e4d27bbb>\",\"Content-Length\":\"19779\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:544e76aa-e3ca-4fed-a06b-68d7120b53ce>\",\"WARC-Concurrent-To\":\"<urn:uuid:cb4b3b1c-ea19-4a96-91f7-3311e1fa60a5>\",\"WARC-IP-Address\":\"104.199.123.142\",\"WARC-Target-URI\":\"https://bashasys.info/document-assembly/expression-model-absolute-valuenum/\",\"WARC-Payload-Digest\":\"sha1:X23OQ7Z43ORHAC2WO45UQB6VKI4AR7MY\",\"WARC-Block-Digest\":\"sha1:YPFAVJHQW7ZA33D6QYOPTW6M2JZF3O7M\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-40/CC-MAIN-2023-40_segments_1695233506632.31_warc_CC-MAIN-20230924091344-20230924121344-00271.warc.gz\"}"}
https://www.cut-the-knot.org/triangle/AntiparallelAndCircumradius.shtml
[ "### What Might This Be About?\n\n26 January 2016, Created with GeoGebra\n\n### Problem\n\nIn $\\Delta ABC\\;$ $MN\\;$ is an antiparallel, $M\\in AC,\\;$ $N\\in BC;\\;$ $O\\;$ the circumcenter.", null, "Then $MN\\perp OC.$\n\n### Proof\n\nLet $B'\\;$ be the midpoint of $AC,;$ $D$ the intersection of $MN\\;$ and $OC.\\;$", null, "Then triangles $CDM\\;$ and $CB'O\\;$ are similar. Indeed, they share an angle at $C.\\;$ Also, since $AOC$ is a central angle in $(ABC),\\;$ $\\angle AOC=2\\angle ABC,\\;$ implying $\\angle B'OC=\\angle ABC=\\angle CDM,\\;$ the latter because $MN\\;$ is an antiparallel.\n\nSo the triangles are similar. $\\angle CB'O=90^{\\circ},\\;$ hence, $\\angle CDM=90^{\\circ}.$\n\n### Remarks\n\nGrégoire Nicollier has observed that the above result is a direct consequence of the fact that circumcenter and orthocenter are isogonal conjugates: when you reflect $\\Delta ABC\\;$ in the angle bisector of $C,\\;$ the altitude through $C\\;$ is mapped to the circumdiameter through $C.\\;$ In addition, he noted that in a $\\Delta ABC,\\;$ the bisector of angle $A\\;$ and the perpendicular bisector of side $BC\\;$ meet on the circumcircle (an immediate consequence of the inscribed angle theorem.)\n\n•", null, "" ]
[ null, "https://www.cut-the-knot.org/triangle/AntiparallelAndCircumradiusP.jpg", null, "https://www.cut-the-knot.org/triangle/AntiparallelAndCircumradiusS.jpg", null, "https://www.cut-the-knot.org/gifs/tbow_sh.gif", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.7286245,"math_prob":0.9999068,"size":1283,"snap":"2021-43-2021-49","text_gpt3_token_len":385,"char_repetition_ratio":0.12900704,"word_repetition_ratio":0.02631579,"special_character_ratio":0.31410757,"punctuation_ratio":0.20647773,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9999403,"pos_list":[0,1,2,3,4,5,6],"im_url_duplicate_count":[null,5,null,5,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-10-20T06:02:26Z\",\"WARC-Record-ID\":\"<urn:uuid:fc431b13-d9c5-4629-ae23-4bec5a585f06>\",\"Content-Length\":\"17883\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:92093e1e-4665-450f-a810-78be00d92c1f>\",\"WARC-Concurrent-To\":\"<urn:uuid:bc8fd991-d0fc-4846-8382-1fb198062c2e>\",\"WARC-IP-Address\":\"107.180.50.227\",\"WARC-Target-URI\":\"https://www.cut-the-knot.org/triangle/AntiparallelAndCircumradius.shtml\",\"WARC-Payload-Digest\":\"sha1:I24SAP7O5RDJ7JUOIHBKK4XAXLL7VJY6\",\"WARC-Block-Digest\":\"sha1:ML2H25K7KU5L6V7LKYSOCV232Y5G474X\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-43/CC-MAIN-2021-43_segments_1634323585302.91_warc_CC-MAIN-20211020055136-20211020085136-00219.warc.gz\"}"}
https://mcc.lip6.fr/index.php?CONTENT=results/cmp_Surprise_All_GreatSPN_LTLCardinality.html&TITLE=GreatSPN%20compared%20to%20other%20tools%20(%EF%BF%BD%EF%BF%BDSurprise%EF%BF%BD%EF%BF%BD%20models,%20LTLCardinality)
[ "", null, "Model Checking Contest 2021\n11th edition, Paris, France, June 23, 2021\nGreatSPN compared to other tools (��Surprise�� models, LTLCardinality)\nLast Updated\nJun 28, 2021", null, "# Introduction\n\nThis page presents how GreatSPN do cope efficiently with the LTLCardinality examination face to the other participating tools. In this page, we consider «Surprise» models.\n\nThe next sections will show chart comparing performances in terms of both memory and execution time.The x-axis corresponds to the challenging tool where the y-axes represents GreatSPN' performances. Thus, points below the diagonal of a chart denote comparisons favorables to the tool while others corresponds to situations where the challenging tool performs better.\n\nYou might also find plots out of the range that denote the case were at least one tool could not answer appropriately (error, time-out, could not compute or did not competed).\n\n# GreatSPN versus ITS-Tools\n\nSome statistics are displayed below, based on 364 runs (182 for GreatSPN and 182 for ITS-Tools, so there are 182 plots on each of the two charts). Each execution was allowed 1 hour and 16 GByte of memory. Then performance charts comparing GreatSPN to ITS-Tools are shown (you may click on one graph to enlarge it).\n\n Statistics on the executions GreatSPN ITS-Tools Both tools GreatSPN ITS-Tools All computed OK 14 100 47 Smallest Memory Footprint GreatSPN = ITS-Tools — — 0 Times tool wins 41 138 GreatSPN > ITS-Tools — — 0 Shortest Execution Time GreatSPN < ITS-Tools — — 18 Times tool wins 43 136 Do not compete 0 0 0 Error detected 16 0 0 Cannot Compute + Time-out 87 17 0\n\nOn the chart below,", null, "denote cases where the two tools did computed all results without error,", null, "denote cases where the two tool did computed the same number of values (but not al values in the examination),", null, "denote cases where GreatSPN computed more values than ITS-Tools,", null, "denote cases where GreatSPN computed less values than ITS-Tools,", null, "denote the cases where at least one tool did not competed,", null, "denote the cases where at least one tool computed a bad value and", null, "denote the cases where at least one tool stated it could not compute a result or timed-out.\n\nGreatSPN wins when points are below the diagonal, ITS-Tools wins when points are above the diagonal.\n\n# GreatSPN versus LoLA\n\nSome statistics are displayed below, based on 364 runs (182 for GreatSPN and 182 for LoLA, so there are 182 plots on each of the two charts). Each execution was allowed 1 hour and 16 GByte of memory. 
Then performance charts comparing GreatSPN to LoLA are shown (you may click on one graph to enlarge it).\n\n Statistics on the executions GreatSPN LoLA Both tools GreatSPN LoLA All computed OK 14 97 26 Smallest Memory Footprint GreatSPN = LoLA — — 4 Times tool wins 28 148 GreatSPN > LoLA — — 25 Shortest Execution Time GreatSPN < LoLA — — 10 Times tool wins 43 133 Do not compete 0 0 0 Error detected 16 0 0 Cannot Compute + Time-out 84 17 3\n\nOn the chart below,", null, "denote cases where the two tools did computed all results without error,", null, "denote cases where the two tool did computed the same number of values (but not al values in the examination),", null, "denote cases where GreatSPN computed more values than LoLA,", null, "denote cases where GreatSPN computed less values than LoLA,", null, "denote the cases where at least one tool did not competed,", null, "denote the cases where at least one tool computed a bad value and", null, "denote the cases where at least one tool stated it could not compute a result or timed-out.\n\nGreatSPN wins when points are below the diagonal, LoLA wins when points are above the diagonal.\n\n# GreatSPN versus Tapaal\n\nSome statistics are displayed below, based on 364 runs (182 for GreatSPN and 182 for Tapaal, so there are 182 plots on each of the two charts). Each execution was allowed 1 hour and 16 GByte of memory. Then performance charts comparing GreatSPN to Tapaal are shown (you may click on one graph to enlarge it).\n\n Statistics on the executions GreatSPN Tapaal Both tools GreatSPN Tapaal All computed OK 14 100 44 Smallest Memory Footprint GreatSPN = Tapaal — — 2 Times tool wins 27 152 GreatSPN > Tapaal — — 4 Shortest Execution Time GreatSPN < Tapaal — — 15 Times tool wins 36 143 Do not compete 0 0 0 Error detected 16 0 0 Cannot Compute + Time-out 87 17 0\n\nOn the chart below,", null, "denote cases where the two tools did computed all results without error,", null, "denote cases where the two tool did computed the same number of values (but not al values in the examination),", null, "denote cases where GreatSPN computed more values than Tapaal,", null, "denote cases where GreatSPN computed less values than Tapaal,", null, "denote the cases where at least one tool did not competed,", null, "denote the cases where at least one tool computed a bad value and", null, "denote the cases where at least one tool stated it could not compute a result or timed-out.\n\nGreatSPN wins when points are below the diagonal, Tapaal wins when points are above the diagonal.\n\n# GreatSPN versus enPAC\n\nSome statistics are displayed below, based on 364 runs (182 for GreatSPN and 182 for enPAC, so there are 182 plots on each of the two charts). Each execution was allowed 1 hour and 16 GByte of memory. 
Then performance charts comparing GreatSPN to enPAC are shown (you may click on one graph to enlarge it).\n\n Statistics on the executions GreatSPN enPAC Both tools GreatSPN enPAC All computed OK 14 91 27 Smallest Memory Footprint GreatSPN = enPAC — — 1 Times tool wins 48 122 GreatSPN > enPAC — — 27 Shortest Execution Time GreatSPN < enPAC — — 10 Times tool wins 52 118 Do not compete 0 0 0 Error detected 16 0 0 Cannot Compute + Time-out 78 17 9\n\nOn the chart below,", null, "denote cases where the two tools did computed all results without error,", null, "denote cases where the two tool did computed the same number of values (but not al values in the examination),", null, "denote cases where GreatSPN computed more values than enPAC,", null, "denote cases where GreatSPN computed less values than enPAC,", null, "denote the cases where at least one tool did not competed,", null, "denote the cases where at least one tool computed a bad value and", null, "denote the cases where at least one tool stated it could not compute a result or timed-out.\n\nGreatSPN wins when points are below the diagonal, enPAC wins when points are above the diagonal.\n\n# GreatSPN versus 2020-gold\n\nSome statistics are displayed below, based on 364 runs (182 for GreatSPN and 182 for 2020-gold, so there are 182 plots on each of the two charts). Each execution was allowed 1 hour and 16 GByte of memory. Then performance charts comparing GreatSPN to 2020-gold are shown (you may click on one graph to enlarge it).\n\n Statistics on the executions GreatSPN 2020-gold Both tools GreatSPN 2020-gold All computed OK 28 103 32 Smallest Memory Footprint GreatSPN = 2020-gold — — 1 Times tool wins 62 120 GreatSPN > 2020-gold — — 5 Shortest Execution Time GreatSPN < 2020-gold — — 13 Times tool wins 68 114 Do not compete 0 0 0 Error detected 16 28 0 Cannot Compute + Time-out 87 0 0\n\nOn the chart below,", null, "denote cases where the two tools did computed all results without error,", null, "denote cases where the two tool did computed the same number of values (but not al values in the examination),", null, "denote cases where GreatSPN computed more values than 2020-gold,", null, "denote cases where GreatSPN computed less values than 2020-gold,", null, "denote the cases where at least one tool did not competed,", null, "denote the cases where at least one tool computed a bad value and", null, "denote the cases where at least one tool stated it could not compute a result or timed-out.\n\nGreatSPN wins when points are below the diagonal, 2020-gold wins when points are above the diagonal.\n\n# GreatSPN versus BVT-2021\n\nSome statistics are displayed below, based on 364 runs (182 for GreatSPN and 182 for BVT-2021, so there are 182 plots on each of the two charts). Each execution was allowed 1 hour and 16 GByte of memory. Then performance charts comparing GreatSPN to BVT-2021 are shown (you may click on one graph to enlarge it).\n\nImportant: here, GreatSPN is compared to BVT-2021. It is a good way to check how GreatSPN compete in terms of resource consomption with the best tools (even virtual). 
When GreatSPN is best, the corresponding plots are on the diagonal of the scatter plots chart.\n\n Statistics on the executions GreatSPN BVT-2021 Both tools GreatSPN BVT-2021 All computed OK 2 103 58 Smallest Memory Footprint GreatSPN = BVT-2021 — — 1 Times tool wins 2 180 GreatSPN > BVT-2021 — — 0 Shortest Execution Time GreatSPN < BVT-2021 — — 18 Times tool wins 2 180 Do not compete 0 0 0 Error detected 16 2 0 Cannot Compute + Time-out 87 0 0\n\nOn the chart below,", null, "denote cases where the two tools did computed all results without error,", null, "denote cases where the two tool did computed the same number of values (but not al values in the examination),", null, "denote cases where GreatSPN computed more values than BVT-2021,", null, "denote cases where GreatSPN computed less values than BVT-2021,", null, "denote the cases where at least one tool did not competed,", null, "denote the cases where at least one tool computed a bad value and", null, "denote the cases where at least one tool stated it could not compute a result or timed-out.\n\nGreatSPN wins when points are below the diagonal, BVT-2021 wins when points are above the diagonal." ]
[ null, "https://mcc.lip6.fr/images/background.jpg", null, "https://mcc.lip6.fr/images/bandeau-titre.png", null, "https://mcc.lip6.fr/images/cmp-charts-computed.png", null, "https://mcc.lip6.fr/images/cmp-charts-eqp.png", null, "https://mcc.lip6.fr/images/cmp-charts-gtp.png", null, "https://mcc.lip6.fr/images/cmp-charts-ltp.png", null, "https://mcc.lip6.fr/images/cmp-charts-dnc.png", null, "https://mcc.lip6.fr/images/cmp-charts-error.png", null, "https://mcc.lip6.fr/images/cmp-charts-cc-timeout.png", null, "https://mcc.lip6.fr/images/cmp-charts-computed.png", null, "https://mcc.lip6.fr/images/cmp-charts-eqp.png", null, "https://mcc.lip6.fr/images/cmp-charts-gtp.png", null, "https://mcc.lip6.fr/images/cmp-charts-ltp.png", null, "https://mcc.lip6.fr/images/cmp-charts-dnc.png", null, "https://mcc.lip6.fr/images/cmp-charts-error.png", null, "https://mcc.lip6.fr/images/cmp-charts-cc-timeout.png", null, "https://mcc.lip6.fr/images/cmp-charts-computed.png", null, "https://mcc.lip6.fr/images/cmp-charts-eqp.png", null, "https://mcc.lip6.fr/images/cmp-charts-gtp.png", null, "https://mcc.lip6.fr/images/cmp-charts-ltp.png", null, "https://mcc.lip6.fr/images/cmp-charts-dnc.png", null, "https://mcc.lip6.fr/images/cmp-charts-error.png", null, "https://mcc.lip6.fr/images/cmp-charts-cc-timeout.png", null, "https://mcc.lip6.fr/images/cmp-charts-computed.png", null, "https://mcc.lip6.fr/images/cmp-charts-eqp.png", null, "https://mcc.lip6.fr/images/cmp-charts-gtp.png", null, "https://mcc.lip6.fr/images/cmp-charts-ltp.png", null, "https://mcc.lip6.fr/images/cmp-charts-dnc.png", null, "https://mcc.lip6.fr/images/cmp-charts-error.png", null, "https://mcc.lip6.fr/images/cmp-charts-cc-timeout.png", null, "https://mcc.lip6.fr/images/cmp-charts-computed.png", null, "https://mcc.lip6.fr/images/cmp-charts-eqp.png", null, "https://mcc.lip6.fr/images/cmp-charts-gtp.png", null, "https://mcc.lip6.fr/images/cmp-charts-ltp.png", null, "https://mcc.lip6.fr/images/cmp-charts-dnc.png", null, "https://mcc.lip6.fr/images/cmp-charts-error.png", null, "https://mcc.lip6.fr/images/cmp-charts-cc-timeout.png", null, "https://mcc.lip6.fr/images/cmp-charts-computed.png", null, "https://mcc.lip6.fr/images/cmp-charts-eqp.png", null, "https://mcc.lip6.fr/images/cmp-charts-gtp.png", null, "https://mcc.lip6.fr/images/cmp-charts-ltp.png", null, "https://mcc.lip6.fr/images/cmp-charts-dnc.png", null, "https://mcc.lip6.fr/images/cmp-charts-error.png", null, "https://mcc.lip6.fr/images/cmp-charts-cc-timeout.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.7867829,"math_prob":0.89708567,"size":9206,"snap":"2021-43-2021-49","text_gpt3_token_len":2422,"char_repetition_ratio":0.20082591,"word_repetition_ratio":0.608365,"special_character_ratio":0.26732567,"punctuation_ratio":0.059808612,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9830282,"pos_list":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88],"im_url_duplicate_count":[null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-10-26T02:43:59Z\",\"WARC-Record-ID\":\"<urn:uuid:43150ccb-9883-4ece-b4bf-aab106709d89>\",\"Content-Length\":\"33556\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:e9a96fb6-e8d9-4ef5-b7dd-a463ed199b76>\",\"WARC-Concurrent-To\":\"<urn:uuid:6526eecb-785a-4ff9-a6d9-62237df2e835>\",\"WARC-IP-Address\":\"132.227.104.37\",\"WARC-Target-URI\":\"https://mcc.lip6.fr/index.php?CONTENT=results/cmp_Surprise_All_GreatSPN_LTLCardinality.html&TITLE=GreatSPN%20compared%20to%20other%20tools%20(%EF%BF%BD%EF%BF%BDSurprise%EF%BF%BD%EF%BF%BD%20models,%20LTLCardinality)\",\"WARC-Payload-Digest\":\"sha1:FHZHPSYT7WDLK7JKI73E5YEGQCTZWUNH\",\"WARC-Block-Digest\":\"sha1:GSF4SR3U3PEOPWCMSVNO2M3MAFTVS5ZW\",\"WARC-Identified-Payload-Type\":\"application/xhtml+xml\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-43/CC-MAIN-2021-43_segments_1634323587794.19_warc_CC-MAIN-20211026011138-20211026041138-00083.warc.gz\"}"}
https://www.colorhexa.com/42e657
[ "# #42e657 Color Information\n\nIn a RGB color space, hex #42e657 is composed of 25.9% red, 90.2% green and 34.1% blue. Whereas in a CMYK color space, it is composed of 71.3% cyan, 0% magenta, 62.2% yellow and 9.8% black. It has a hue angle of 127.7 degrees, a saturation of 76.6% and a lightness of 58%. #42e657 color hex could be obtained by blending #84ffae with #00cd00. Closest websafe color is: #33ff66.\n\n• R 26\n• G 90\n• B 34\nRGB color chart\n• C 71\n• M 0\n• Y 62\n• K 10\nCMYK color chart\n\n#42e657 color description : Bright lime green.\n\n# #42e657 Color Conversion\n\nThe hexadecimal color #42e657 has RGB values of R:66, G:230, B:87 and CMYK values of C:0.71, M:0, Y:0.62, K:0.1. Its decimal value is 4384343.\n\nHex triplet RGB Decimal 42e657 `#42e657` 66, 230, 87 `rgb(66,230,87)` 25.9, 90.2, 34.1 `rgb(25.9%,90.2%,34.1%)` 71, 0, 62, 10 127.7°, 76.6, 58 `hsl(127.7,76.6%,58%)` 127.7°, 71.3, 90.2 33ff66 `#33ff66`\nCIE-LAB 80.981, -69.24, 56.245 32.262, 58.437, 18.595 0.295, 0.535, 58.437 80.981, 89.206, 140.912 80.981, -67.435, 80.952 76.444, -58.444, 39.088 01000010, 11100110, 01010111\n\n# Color Schemes with #42e657\n\n• #42e657\n``#42e657` `rgb(66,230,87)``\n• #e642d1\n``#e642d1` `rgb(230,66,209)``\nComplementary Color\n• #7fe642\n``#7fe642` `rgb(127,230,66)``\n• #42e657\n``#42e657` `rgb(66,230,87)``\n• #42e6a9\n``#42e6a9` `rgb(66,230,169)``\nAnalogous Color\n• #e6427f\n``#e6427f` `rgb(230,66,127)``\n• #42e657\n``#42e657` `rgb(66,230,87)``\n• #a942e6\n``#a942e6` `rgb(169,66,230)``\nSplit Complementary Color\n• #e65742\n``#e65742` `rgb(230,87,66)``\n• #42e657\n``#42e657` `rgb(66,230,87)``\n• #5742e6\n``#5742e6` `rgb(87,66,230)``\n• #d1e642\n``#d1e642` `rgb(209,230,66)``\n• #42e657\n``#42e657` `rgb(66,230,87)``\n• #5742e6\n``#5742e6` `rgb(87,66,230)``\n• #e642d1\n``#e642d1` `rgb(230,66,209)``\n• #1ac22f\n``#1ac22f` `rgb(26,194,47)``\n• #1dd835\n``#1dd835` `rgb(29,216,53)``\n• #2be343\n``#2be343` `rgb(43,227,67)``\n• #42e657\n``#42e657` `rgb(66,230,87)``\n• #59e96b\n``#59e96b` `rgb(89,233,107)``\n• #6fec7f\n``#6fec7f` `rgb(111,236,127)``\n• #86ef93\n``#86ef93` `rgb(134,239,147)``\nMonochromatic Color\n\n# Alternatives to #42e657\n\nBelow, you can see some colors close to #42e657. Having a set of related colors can be useful if you need an inspirational alternative to your original color choice.\n\n• #56e642\n``#56e642` `rgb(86,230,66)``\n• #48e642\n``#48e642` `rgb(72,230,66)``\n• #42e649\n``#42e649` `rgb(66,230,73)``\n• #42e657\n``#42e657` `rgb(66,230,87)``\n• #42e665\n``#42e665` `rgb(66,230,101)``\n• #42e672\n``#42e672` `rgb(66,230,114)``\n• #42e680\n``#42e680` `rgb(66,230,128)``\nSimilar Colors\n\n# #42e657 Preview\n\nThis text has a font color of #42e657.\n\n``<span style=\"color:#42e657;\">Text here</span>``\n#42e657 background color\n\nThis paragraph has a background color of #42e657.\n\n``<p style=\"background-color:#42e657;\">Content here</p>``\n#42e657 border color\n\nThis element has a border color of #42e657.\n\n``<div style=\"border:1px solid #42e657;\">Content here</div>``\nCSS codes\n``.text {color:#42e657;}``\n``.background {background-color:#42e657;}``\n``.border {border:1px solid #42e657;}``\n\n# Shades and Tints of #42e657\n\nA shade is achieved by adding black to any pure hue, while a tint is created by mixing white to any pure color. 
In this example, #000200 is the darkest color, while #effdf1 is the lightest one.\n\n• #000200\n``#000200` `rgb(0,2,0)``\n• #021305\n``#021305` `rgb(2,19,5)``\n• #052409\n``#052409` `rgb(5,36,9)``\n• #07360d\n``#07360d` `rgb(7,54,13)``\n• #094711\n``#094711` `rgb(9,71,17)``\n• #0c5815\n``#0c5815` `rgb(12,88,21)``\n• #0e6a1a\n``#0e6a1a` `rgb(14,106,26)``\n• #107b1e\n``#107b1e` `rgb(16,123,30)``\n• #138c22\n``#138c22` `rgb(19,140,34)``\n• #159d26\n``#159d26` `rgb(21,157,38)``\n• #17af2b\n``#17af2b` `rgb(23,175,43)``\n• #19c02f\n``#19c02f` `rgb(25,192,47)``\n• #1cd133\n``#1cd133` `rgb(28,209,51)``\n• #1fe138\n``#1fe138` `rgb(31,225,56)``\n• #31e448\n``#31e448` `rgb(49,228,72)``\n• #42e657\n``#42e657` `rgb(66,230,87)``\n• #53e866\n``#53e866` `rgb(83,232,102)``\n• #65eb76\n``#65eb76` `rgb(101,235,118)``\n• #76ed85\n``#76ed85` `rgb(118,237,133)``\n• #87ef95\n``#87ef95` `rgb(135,239,149)``\n• #99f1a4\n``#99f1a4` `rgb(153,241,164)``\n• #aaf4b3\n``#aaf4b3` `rgb(170,244,179)``\n• #bbf6c3\n``#bbf6c3` `rgb(187,246,195)``\n• #cdf8d2\n``#cdf8d2` `rgb(205,248,210)``\n• #defbe2\n``#defbe2` `rgb(222,251,226)``\n• #effdf1\n``#effdf1` `rgb(239,253,241)``\nTint Color Variation\n\n# Tones of #42e657\n\nA tone is produced by adding gray to any pure hue. In this case, #8c9c8e is the less saturated color, while #29ff45 is the most saturated one.\n\n• #8c9c8e\n``#8c9c8e` `rgb(140,156,142)``\n• #84a488\n``#84a488` `rgb(132,164,136)``\n• #7cac82\n``#7cac82` `rgb(124,172,130)``\n• #73b57c\n``#73b57c` `rgb(115,181,124)``\n• #6bbd76\n``#6bbd76` `rgb(107,189,118)``\n• #63c56f\n``#63c56f` `rgb(99,197,111)``\n• #5bcd69\n``#5bcd69` `rgb(91,205,105)``\n• #52d663\n``#52d663` `rgb(82,214,99)``\n``#4ade5d` `rgb(74,222,93)``\n• #42e657\n``#42e657` `rgb(66,230,87)``\n• #3aee51\n``#3aee51` `rgb(58,238,81)``\n• #32f64b\n``#32f64b` `rgb(50,246,75)``\n• #29ff45\n``#29ff45` `rgb(41,255,69)``\nTone Color Variation\n\n# Color Blindness Simulator\n\nBelow, you can see how #42e657 is perceived by people affected by a color vision deficiency. This can be useful if you need to ensure your color combinations are accessible to color-blind users.\n\nMonochromacy\n• Achromatopsia 0.005% of the population\n• Atypical Achromatopsia 0.001% of the population\nDichromacy\n• Protanopia 1% of men\n• Deuteranopia 1% of men\n• Tritanopia 0.001% of the population\nTrichromacy\n• Protanomaly 1% of men, 0.01% of women\n• Deuteranomaly 6% of men, 0.4% of women\n• Tritanomaly 0.01% of the population" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.5183959,"math_prob":0.70651615,"size":3694,"snap":"2020-10-2020-16","text_gpt3_token_len":1629,"char_repetition_ratio":0.121409215,"word_repetition_ratio":0.011090573,"special_character_ratio":0.56334597,"punctuation_ratio":0.23608018,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.99181145,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-04-06T17:59:19Z\",\"WARC-Record-ID\":\"<urn:uuid:53187891-3543-432d-97c0-b450054841f9>\",\"Content-Length\":\"36286\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:4981fa22-cb46-445b-a7b8-54379ef7dc8a>\",\"WARC-Concurrent-To\":\"<urn:uuid:e262074f-deae-42e6-a905-20f9bf72f77e>\",\"WARC-IP-Address\":\"178.32.117.56\",\"WARC-Target-URI\":\"https://www.colorhexa.com/42e657\",\"WARC-Payload-Digest\":\"sha1:YXMYM5GPRQQ4ZMYFBRFFNI7BAQNNQHA4\",\"WARC-Block-Digest\":\"sha1:CIF3JTIN6AWMX3ZIJWXVTESVFQPQE4X3\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-16/CC-MAIN-2020-16_segments_1585371656216.67_warc_CC-MAIN-20200406164846-20200406195346-00059.warc.gz\"}"}
https://mathematica.stackexchange.com/questions/31922/logplot-of-the-calculated-data-in-for-loop-in-mathematica
[ "# LogPlot of the calculated data in For Loop in Mathematica\n\nI have the following loop, for calculating a data for different values of inputs. How I can plot the result at the end of For loop? Its not matter if the plot is inside or outside the loop, I just want to plot the results.\n\nClear[\"Global*\"];\nb = 10^4;\ns = b/2;\ntx = ((2*RandomInteger[1, s])-1) + (I*((2*RandomInteger[1, s]) - 1));\nFor[i = 0, i <= 20, i++, snrdb = i;\nsnr = 10^(0.1*snrdb);\nsd = 1/Sqrt[snr/2];\nnoise = 1/Sqrt*(RandomReal[NormalDistribution[], {s, 1}] + I*RandomReal[NormalDistribution[], {s, 1}]);\ny = tx + (sd*noise);\nrx = Flatten[Sign[Re[y]]+I*Sign[Im[y]]];\nerror = Unitize[tx-rx];\nber = Total[error]/b;\nPrint[\"Total number of errors: \",Total[error]];\nPrint[\"Error rate: \",N[ber]];\n]\nLogPlot[{ber, 10^-4, 10^0}, {snrdb, 0, 20}]\n\n\nThanks All, Both techniques was useful. Sorry b is the same as bits, I made a mistake I corrected later. I need the continous line not discrete. how to change the y axis to be from 10^-4 to 10^1?\n\nNote: for the purpose of accuracy I increased b to 10^6.\n\nClear[\"Global*\"];\nb = 10^6;\nsym = b/2; tx = ((2*RandomInteger[1, sym]) -\n1) + (I*((2*RandomInteger[1, sym]) - 1));\nbervals = Reap[\nFor[i = 0, i < 21, i++, snrdb = i;\nsnr = 10^(0.1*snrdb);\nsd = 1/Sqrt[snr/2];\nnoise = 1/\nSqrt*(RandomReal[NormalDistribution[], {sym, 1}] +\nI*RandomReal[NormalDistribution[], {sym, 1}]);\ny = tx + (sd*noise); rx = Flatten[Sign[Re[y]] + I*Sign[Im[y]]];\nerror = Unitize[tx - rx];\nSow[ber = Total[error]/b];\n]][[2, 1]];\nListLogPlot[bervals, Joined -> True]\n\n• You can change the y-axis using the PlotRange option – Timothy Wofford Sep 9 '13 at 5:52\n• why the curve is not connected to x and y axis, I mean there is a gap , I want the curve to be reached both axises. – sky-light Sep 9 '13 at 9:33\n• Hi Timothy Wofford, your code will result range y-axis from (1 to 1000), actually it should be from 10^-4 to 1 – sky-light Sep 9 '13 at 10:04\n• I got it, it should be like this: bert = Append[bert, Total[error]/bits]; – sky-light Sep 9 '13 at 10:06\n• when trying to plot I faced the following message error: How I can fix that? General::obspkg: PlotLegends is now obsolete. The legacy version being loaded may conflict with current Mathematica functionality. See the Compatibility Guide for updating information. PlotLegendsShadowBox is not a Graphics primitive or directive. << PlotLegends; ListLogPlot[{ber, bersim}, PlotRange -> {{0, 15}, {10^-4, 10^0}}, Joined -> True, AxesLabel -> {SNR, BER}, LabelStyle -> Directive[Blue, Bold], PlotLegend -> {Simulated, Analytical}, LegendPosition -> Automatic] – sky-light Sep 9 '13 at 11:43\n\nThis seems like a good application for Reap and Sow.\n\nIf you have a value you would like to accumulate in a List you can Sow the value during execution of a loop:\n\nvals = Reap[\nFor[i = 0, i <= 10, i++,\nSow[i]\n]\n]\nvals[[2, 1]]\n\n(*Output*)\n(*{Null, {{0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100}}}*)\n(*{0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100}*)\n\n\nYou then capture the Sow-ed results with Reap, outside of the loop. We must use Part to then extract the list of relevant values from val. Note: This technique is very useful in many other applications other than loops. The Reap and Sow combination can be used inside almost any evaluation to collect data.\n\nBefore we can apply this technique to your code, there are a few changes that need to be made. First, the symbol bits is never assigned a value. 
This is problematic if you wish to plot the values of ber. Let us assume bits has a value of 1 for now. Second, LogPlot is used to plot (continuous) functions, not discrete data. Here we will want to use ListLogPlot instead. Making these changes and introducing Reap and Sow:\n\nb = 10^4;\ns = b/2;\nbits=1.0;\ntx = ((2*RandomInteger[1, s]) - 1) + (I*((2*RandomInteger[1, s]) - 1));\nbervals = Reap[\nFor[i = 0, i <= 20, i++, snrdb = i;\nsnr = 10^(0.1*snrdb);\nsd = 1/Sqrt[snr/2];\nnoise =\n1/Sqrt[2]*(RandomReal[NormalDistribution[], {s, 1}] +\nI*RandomReal[NormalDistribution[], {s, 1}]);\ny = tx + (sd*noise);\nrx = Flatten[Sign[Re[y]] + I*Sign[Im[y]]];\nerror = Unitize[tx - rx];\nSow[ber = Total[error]/bits];\n]][[2, 1]];\nListLogPlot[bervals]\n\n\nThis should generate a plot of the values of ber from inside the loop.\n\nEDIT: To answer a question in the comments about using Sow multiple times, here is a simple example that plots y = x^2.\n\nBlock[{x, y, pts},\nvals = Reap[Do[\nSow[x = i];\nSow[y = i^2];\n, {i, 1, 10}]][[2, 1]];\npts = Partition[vals, 2];\nListPlot[pts, Joined -> True]\n]\n\nNotice the use of Partition at the end. We wouldn't have needed to do this had we used Sow on the list {x,y} instead. I only did it this way to show the use of multiple Sows.\n\n• Can I use Reap and Sow more than one time? – sky-light Sep 9 '13 at 11:55\n• @barznjy Yes, you can absolutely use Sow more than once. It's quite common to Sow multiple values and use them later. I'll edit the post to give an example. – leibs Sep 9 '13 at 19:32\n\nLogPlot plots functions, ListLogPlot is used for plotting data points. You seem to be calculating a bunch of data points, and then immediately forgetting/overwriting them in the line I left below. You should add a couple of lines for remembering these values in a list.\n\nbert = {};\nFor[i = 0, i <= 20, i++,\n...\nber = Total[error]/bits;\n...\nbert = Append[bert, Total[error]];\n]\nListLogPlot[bert, PlotRange -> {{0, 20}, Automatic}]\n\n• Thanks all, both techniques were useful. Sorry, b is the same as bits; I made a mistake which I corrected later. I need the continuous line, not discrete points. How do I change the y-axis to go from 10^-4 to 10^1? Note: for the purpose of accuracy I increased b to 10^6. – sky-light Sep 9 '13 at 0:33" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.6847926,"math_prob":0.98657024,"size":1811,"snap":"2019-35-2019-39","text_gpt3_token_len":643,"char_repetition_ratio":0.08799115,"word_repetition_ratio":0.09386282,"special_character_ratio":0.39591387,"punctuation_ratio":0.2169576,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9981,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2019-09-16T03:49:59Z\",\"WARC-Record-ID\":\"<urn:uuid:40b2b773-f496-48c1-81e8-5e0479287b74>\",\"Content-Length\":\"153729\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:18f2393c-0f2f-473a-87f7-724e9c642b4b>\",\"WARC-Concurrent-To\":\"<urn:uuid:7a34ddf7-b2c7-4b0b-9bc6-a33596bdc2ea>\",\"WARC-IP-Address\":\"151.101.129.69\",\"WARC-Target-URI\":\"https://mathematica.stackexchange.com/questions/31922/logplot-of-the-calculated-data-in-for-loop-in-mathematica\",\"WARC-Payload-Digest\":\"sha1:5QRMAEWQTP5KK4CPMFL2AEMQC4OZBLRK\",\"WARC-Block-Digest\":\"sha1:YVIEOOMFKFCDQLRHOJTQRPR4J3CFXRTE\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2019/CC-MAIN-2019-39/CC-MAIN-2019-39_segments_1568514572471.35_warc_CC-MAIN-20190916015552-20190916041552-00502.warc.gz\"}"}
https://numbermatics.com/n/4791168/
[ "# 4791168\n\n## 4,791,168 is an even composite number composed of three prime numbers multiplied together.\n\nWhat does the number 4791168 look like?\n\nThis visualization shows the relationship between its 3 prime factors (large circles) and 48 divisors.\n\n4791168 is an even composite number. It is composed of three distinct prime numbers multiplied together. It has a total of forty-eight divisors.\n\n## Prime factorization of 4791168:\n\n### 27 × 32 × 4159\n\n(2 × 2 × 2 × 2 × 2 × 2 × 2 × 3 × 3 × 4159)\n\nSee below for interesting mathematical facts about the number 4791168 from the Numbermatics database.\n\n### Names of 4791168\n\n• Cardinal: 4791168 can be written as Four million, seven hundred ninety-one thousand, one hundred sixty-eight.\n\n### Scientific notation\n\n• Scientific notation: 4.791168 × 106\n\n### Factors of 4791168\n\n• Number of distinct prime factors ω(n): 3\n• Total number of prime factors Ω(n): 10\n• Sum of prime factors: 4164\n\n### Divisors of 4791168\n\n• Number of divisors d(n): 48\n• Complete list of divisors:\n• Sum of all divisors σ(n): 13790400\n• Sum of proper divisors (its aliquot sum) s(n): 8999232\n• 4791168 is an abundant number, because the sum of its proper divisors (8999232) is greater than itself. Its abundance is 4208064\n\n### Bases of 4791168\n\n• Binary: 100100100011011100000002\n• Base-36: 2UOW0\n\n### Squares and roots of 4791168\n\n• 4791168 squared (47911682) is 22955290804224\n• 4791168 cubed (47911683) is 109982654731892293632\n• The square root of 4791168 is 2188.8736829703\n• The cube root of 4791168 is 168.5830084657\n\n### Scales and comparisons\n\nHow big is 4791168?\n• 4,791,168 seconds is equal to 7 weeks, 6 days, 10 hours, 52 minutes, 48 seconds.\n• To count from 1 to 4,791,168 would take you about eleven weeks!\n\nThis is a very rough estimate, based on a speaking rate of half a second every third order of magnitude. If you speak quickly, you could probably say any randomly-chosen number between one and a thousand in around half a second. Very big numbers obviously take longer to say, so we add half a second for every extra x1000. (We do not count involuntary pauses, bathroom breaks or the necessity of sleep in our calculation!)\n\n• A cube with a volume of 4791168 cubic inches would be around 14 feet tall.\n\n### Recreational maths with 4791168\n\n• 4791168 backwards is 8611974\n• 4791168 is a Harshad number.\n• The number of decimal digits it has is: 7\n• The sum of 4791168's digits is 36\n• More coming soon!\n\n#### Copy this link to share with anyone:\n\nMLA style:\n\"Number 4791168 - Facts about the integer\". Numbermatics.com. 2023. Web. 5 December 2023.\n\nAPA style:\nNumbermatics. (2023). Number 4791168 - Facts about the integer. Retrieved 5 December 2023, from https://numbermatics.com/n/4791168/\n\nChicago style:\nNumbermatics. 2023. \"Number 4791168 - Facts about the integer\". https://numbermatics.com/n/4791168/\n\nThe information we have on file for 4791168 includes mathematical data and numerical statistics calculated using standard algorithms and methods. We are adding more all the time. If there are any features you would like to see, please contact us. 
Information provided for educational use, intellectual curiosity and fun!" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.8541812,"math_prob":0.9175102,"size":2891,"snap":"2023-40-2023-50","text_gpt3_token_len":794,"char_repetition_ratio":0.13301004,"word_repetition_ratio":0.06167401,"special_character_ratio":0.34244207,"punctuation_ratio":0.16757742,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9799975,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-12-05T18:07:23Z\",\"WARC-Record-ID\":\"<urn:uuid:411c101f-2cd8-47fa-bdb6-6e5007b0bc0d>\",\"Content-Length\":\"22532\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:50c6e66c-8781-49ca-ac2d-8132e1715ddf>\",\"WARC-Concurrent-To\":\"<urn:uuid:1b081d40-babd-468c-a954-46cdb1175a07>\",\"WARC-IP-Address\":\"72.44.94.106\",\"WARC-Target-URI\":\"https://numbermatics.com/n/4791168/\",\"WARC-Payload-Digest\":\"sha1:B2DLCFQ6TYTBWKCSXSGUJO35MV67ENO2\",\"WARC-Block-Digest\":\"sha1:VXTVFNHOTZKLOAKYVTWHNKD6IRKKBDL2\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-50/CC-MAIN-2023-50_segments_1700679100555.27_warc_CC-MAIN-20231205172745-20231205202745-00842.warc.gz\"}"}
https://answers.everydaycalculation.com/compare-fractions/40-24-and-1-10
[ "# Answers\n\nSolutions by everydaycalculation.com\n\n## Compare 40/24 and 1/10\n\n1st number: 1 16/24, 2nd number: 1/10\n\n40/24 is greater than 1/10\n\n#### Steps for comparing fractions\n\n1. Find the least common denominator or LCM of the two denominators:\nLCM of 24 and 10 is 120\n2. For the 1st fraction, since 24 × 5 = 120,\n40/24 = 40 × 5/24 × 5 = 200/120\n3. Likewise, for the 2nd fraction, since 10 × 12 = 120,\n1/10 = 1 × 12/10 × 12 = 12/120\n4. Since the denominators are now the same, the fraction with the bigger numerator is the greater fraction\n5. 200/120 > 12/120 or 40/24 > 1/10\n\n#### Compare Fractions Calculator\n\nand\n\nUse fraction calculator with our all-in-one calculator app: Download for Android, Download for iOS\n\n© everydaycalculation.com" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.7991472,"math_prob":0.99059904,"size":398,"snap":"2019-35-2019-39","text_gpt3_token_len":163,"char_repetition_ratio":0.28680202,"word_repetition_ratio":0.0,"special_character_ratio":0.4849246,"punctuation_ratio":0.08,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9949183,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2019-08-25T16:51:45Z\",\"WARC-Record-ID\":\"<urn:uuid:2131db31-8129-449d-8c64-252f62c71890>\",\"Content-Length\":\"8042\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:4b7aee78-cc39-4b6b-8315-370e53b77c86>\",\"WARC-Concurrent-To\":\"<urn:uuid:cd5e15fd-79a2-4f9f-9d10-4f9ab3cef51f>\",\"WARC-IP-Address\":\"96.126.107.130\",\"WARC-Target-URI\":\"https://answers.everydaycalculation.com/compare-fractions/40-24-and-1-10\",\"WARC-Payload-Digest\":\"sha1:KY4GQBZSD6DUNDLGLQYNKXNZZH5YS34D\",\"WARC-Block-Digest\":\"sha1:CJNGKFDE7HWT2IR5QCW67KKVAH4YJDP6\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2019/CC-MAIN-2019-35/CC-MAIN-2019-35_segments_1566027330750.45_warc_CC-MAIN-20190825151521-20190825173521-00142.warc.gz\"}"}
https://harrykhachatrian.com/engineering/the-c-programming-language/calculations/?shared=email&msg=fail
[ "# Calculations\n\nAn important tool when writing any computer program is making calculations. In the C programming language we use different operators to perform calculations.\n\nUnary\nA unary operator is one that acts on a single operand. This is an operation we perform on a single variable. This includes inversion, increment and decrement, etc.\n\n```int x;\nint y;\n\nx = 4;\ny = !x; // This assigns y to be the inverse of x. ie. y = -4\n\nx+=1; //This is identical to writing x = x+1 ie. x = 5\n```\n\nPost Increment vs Pre Increment\n\n```int a = 3;\nint b0 = 1;\nint b1 = 1;\n\nint x = a + b0++;\nint y = a + ++b1;\n```\n\nNote that despite having similar syntax the post increment, a++ and pre increment ++a, they perform in different order, and should be used with caution to avoid unexpected results.\nYou can see that the line:\n\n``` int x = a + b0++;\n```\n\nResults in x = 3 + 1 = 4, then increments b0 to be equal to 2\nHowever the next line:\n\n```int y = a + ++b1;\n```\n\nResults in x = 3 + (1+1), b1 is first incremented to 2 then added to a.\n\nThe decrement operator: —\nWorks in the exact same way except decrements the operand opposed to incrementing it by 1.\n\nBinary Operators\nBinary means 2. Accordingly, a binary operators is one that involves 2 operands.\n\nArithmetic\nBinary arithmetic operators are anything from math, this includes addition(+), subtraction(-), division(/), assignment(=), etc.\n\n```int a = 2 + 1; //assigns a to the sum of 2 and 1, ie. 3\nint x = 3; //The \"=\" symbol is called the assignment operator\nint y = x * 7; //assigns y to the product of x and 7, ie. 21\n```\n\nAnother very useful binary arithmetic operator is called the modulus operator, %.\nThis operator returns the remainder of a / b.\nNote that if b is greater than a: The result of a % b will be a.\nThis operator is very useful because of its versatility. For example, say you wish to check if a number is even,\nif a number is even than the result of the following must be 0:\n\n```int x = 4;\nint isEven = x % 2;\n```\n\nAny number mod 2 is 0 if it’s even.\nAnother useful feature of the modulus operator is it’s ability to dissect a number.\nSay we want to write a program that accepts input from a user and prints off the last digit.\nTo get the last digit, we can take the number and mod it by 10.\n\n```int x;\nint lastDig;\n\nprintf(\"Enter a digit greater than 0: \");\nscanf(\"%d\", &x);\n\nlastDig = x % 10;\n\nprintf(\"The last digit of your number is: %d\", lastDign\");\n```" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.87071544,"math_prob":0.99824667,"size":2335,"snap":"2022-27-2022-33","text_gpt3_token_len":629,"char_repetition_ratio":0.12870012,"word_repetition_ratio":0.025974026,"special_character_ratio":0.29336187,"punctuation_ratio":0.1484375,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.99828386,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2022-06-26T16:49:09Z\",\"WARC-Record-ID\":\"<urn:uuid:7e8866a6-1065-4382-b584-0c741fee5006>\",\"Content-Length\":\"70936\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:d615bdfc-a9fa-40f6-b107-1168c4ede90a>\",\"WARC-Concurrent-To\":\"<urn:uuid:40720653-ecfd-459d-9791-c550a25d830a>\",\"WARC-IP-Address\":\"192.0.78.24\",\"WARC-Target-URI\":\"https://harrykhachatrian.com/engineering/the-c-programming-language/calculations/?shared=email&msg=fail\",\"WARC-Payload-Digest\":\"sha1:FBMFD3UTS7JJEI2SWKYAGOLV4GX6NFRF\",\"WARC-Block-Digest\":\"sha1:2HJQ3GWEI7BDBIEYBPW3DSX2AHFIP2TW\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2022/CC-MAIN-2022-27/CC-MAIN-2022-27_segments_1656103271763.15_warc_CC-MAIN-20220626161834-20220626191834-00397.warc.gz\"}"}
https://slidetodoc.com/working-with-the-stack-stack-concept-stack-is/
[ "", null, "# Working with the Stack Stack concept Stack is\n\n• Slides: 9", null, "Working with the Stack", null, "Stack concept • Stack is a special memory area used to store data temporarily when calling a subprogram and nested loops. Stack mechanism works in style “Last In First Out” LIFO. The following figure shows the stack mechanism.", null, "Stack mechanism", null, "PUSH & POP instructions • push REG/MEM 16 -bits only • push ax Store AX in the top of stack memory area then decrement SP register. • pop REG/MEM 16 -bits only • pop bx Increment SP register then copy the value in the top of stack to BX.", null, "e. g. using the stack to print an array in reverse way. model small. data x db 0, 1, 2, 3, 4, 5, 6, 7, 8, 9. code mov ax, @data mov ds, ax lea si, x mov cx, 10 lop 1: mov ah, 0 mov al, [si] push ax inc si loop lop 1 mov lop 2: mov pop add int cx, 10 ah, 02 h dx dl, '0' 21 h loop lop 2", null, "Procedures. model small. data num db 4 str 1 db ‘hello’, ’\\$’. code main proc mov ax, @data mov ds, ax call printme mov ah, 4 ch int 21 h endp main printme proc lea dx, str 1 mov ah, 09 h int 21 h ret endp printme", null, "Using the stack to print out 4 digits decimal number. model small. data number dw 2153. code mov ax, @data mov ds, ax mov cl, 4 loop 1: mov ax, number mov bl, 10 div bl mov dl, ah mov dh, 0 push dx mov ah, 0 mov number, ax dec cl cmp cl, 0 jg loop 1 mov cl, 4 loop 2: pop dx add dl, 30 h mov ah, 2 int 21 h dec cl cmp cl, 0 jg loop 2", null, "Using the stack to print out 4 digits hexadecimal number. model small. data number dw 2 A 5 Ch. code mov ax, @data mov ds, ax mov cl, 4 loop 1: mov ax, number and ax, 000 Fh push ax shr number, 4 dec cl cmp cl, 0 jg loop 1 mov cl, 4 loop 2: pop dx cmp dl, 9 jg ABCDEF add dl, 30 h mov ah, 2 int 21 h jmp update ABCDEF: add dl, 37 h mov ah, 2 int 21 h update: dec cl cmp cl, 0 jg loop 2", null, "Question For the following assembly language program, find the content of the register AL after the execution of each underlined instruction: . MODEL SMALL. DATA M DB 8, 7, 8, 3, 2, 9. CODE MOV AX, @data MOV DS, AX LEA BX, M MOV AL, [BX] SHR AL, 2 SHL AL, 1 OR AL, 0 INC BX ADD AL, [BX] ADD BX, 3 SUB AL, [BX] MOV DX, 12 PUSH DX MOV DL, 3 MOV AH, 0 DIV DL MOV DL, 2 MUL DL POP AX MOV AL, '0' MOV AH, 4 ch INT 21 h AL = 8 2 4 4 AL = 11 AL = 9 AL = 4 AL = 8 AL = 12 AL = 30 h" ]
[ null, "https://mc.yandex.ru/watch/64202359", null, "data:image/svg+xml,%3Csvg%20xmlns=%22http://www.w3.org/2000/svg%22%20viewBox=%220%200%20415%20289%22%3E%3C/svg%3E", null, "data:image/svg+xml,%3Csvg%20xmlns=%22http://www.w3.org/2000/svg%22%20viewBox=%220%200%20415%20289%22%3E%3C/svg%3E", null, "data:image/svg+xml,%3Csvg%20xmlns=%22http://www.w3.org/2000/svg%22%20viewBox=%220%200%20415%20289%22%3E%3C/svg%3E", null, "data:image/svg+xml,%3Csvg%20xmlns=%22http://www.w3.org/2000/svg%22%20viewBox=%220%200%20415%20289%22%3E%3C/svg%3E", null, "data:image/svg+xml,%3Csvg%20xmlns=%22http://www.w3.org/2000/svg%22%20viewBox=%220%200%20415%20289%22%3E%3C/svg%3E", null, "data:image/svg+xml,%3Csvg%20xmlns=%22http://www.w3.org/2000/svg%22%20viewBox=%220%200%20415%20289%22%3E%3C/svg%3E", null, "data:image/svg+xml,%3Csvg%20xmlns=%22http://www.w3.org/2000/svg%22%20viewBox=%220%200%20415%20289%22%3E%3C/svg%3E", null, "data:image/svg+xml,%3Csvg%20xmlns=%22http://www.w3.org/2000/svg%22%20viewBox=%220%200%20415%20289%22%3E%3C/svg%3E", null, "data:image/svg+xml,%3Csvg%20xmlns=%22http://www.w3.org/2000/svg%22%20viewBox=%220%200%20415%20289%22%3E%3C/svg%3E", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.5393712,"math_prob":0.97373754,"size":2254,"snap":"2023-40-2023-50","text_gpt3_token_len":786,"char_repetition_ratio":0.13244444,"word_repetition_ratio":0.17624521,"special_character_ratio":0.35137534,"punctuation_ratio":0.17152104,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9665477,"pos_list":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20],"im_url_duplicate_count":[null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-11-30T07:33:42Z\",\"WARC-Record-ID\":\"<urn:uuid:087ac5d1-7550-4f15-a447-a664ed5b1f96>\",\"Content-Length\":\"54680\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:168fe583-0b21-4d87-bd97-39c750029811>\",\"WARC-Concurrent-To\":\"<urn:uuid:cc88f103-b71a-4d8e-a846-1ade38b24748>\",\"WARC-IP-Address\":\"172.67.221.241\",\"WARC-Target-URI\":\"https://slidetodoc.com/working-with-the-stack-stack-concept-stack-is/\",\"WARC-Payload-Digest\":\"sha1:PP7JQVDKBWSPWCLIEK44WVD7XLWVPZWD\",\"WARC-Block-Digest\":\"sha1:7GEWY4EWNLEMVAEJX62CLZBBMHSNMDPP\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-50/CC-MAIN-2023-50_segments_1700679100172.28_warc_CC-MAIN-20231130062948-20231130092948-00238.warc.gz\"}"}
https://html.scirp.org/file/_2-6901916_1.htm
[ "Model (factor) χ2 P CFI χ2/df SRMR RMSEA Model A1 (Single factor) χ2(2, N = 243) = 17.59 <0.001 0.96 8.79 0.04 0.18 (CI90% 0.11 - 0.26) Model A2 (Two-factors: one consisted of subtests II and IV & one of subtests III and V + interrelation between factors) χ2(1, N = 243) = 13.68 <0.001 0.97 13.68 0.03 0.23 (CI90% 0.13 - 0.34) Model A3 (Two-factors: one consisted of subtests II and III & one of subtests IV and V + interrelation between factors) χ2(1, N = 243) = 0.04 =0.83 1.00 0.04 0.00 0.00 (CI90% 0.00 - 0.10)" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.76265687,"math_prob":0.99698746,"size":570,"snap":"2020-10-2020-16","text_gpt3_token_len":268,"char_repetition_ratio":0.1466431,"word_repetition_ratio":0.17886178,"special_character_ratio":0.5754386,"punctuation_ratio":0.2,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.98615605,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-04-08T16:05:45Z\",\"WARC-Record-ID\":\"<urn:uuid:03910666-6d1c-4e4e-bcbf-79e24a5d1a5b>\",\"Content-Length\":\"12939\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:105f8203-a39c-44f4-9a9a-3b11d365323a>\",\"WARC-Concurrent-To\":\"<urn:uuid:1c343bdf-eb4d-4c69-8629-f69f4f719c3c>\",\"WARC-IP-Address\":\"161.117.81.245\",\"WARC-Target-URI\":\"https://html.scirp.org/file/_2-6901916_1.htm\",\"WARC-Payload-Digest\":\"sha1:XLPZLTTUMYA6CMDVO5XTOZ6MFZIYLCGB\",\"WARC-Block-Digest\":\"sha1:EZJOEXC2WURXTXONTSVG6X4GHQYFQ5Q3\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-16/CC-MAIN-2020-16_segments_1585371818008.97_warc_CC-MAIN-20200408135412-20200408165912-00355.warc.gz\"}"}
https://stats.arabpsychology.com/st/fix-in-r-argument-is-of-length-zero/
[ "# Fix in R: argument is of length zero\n\nOne error message you may encounter when using R is:\n\n```Error in if (x < 10) { : argument is of length zero\n```\n\nThis error usually occurs when you attempt to make some logical comparison within an if statement in R, but the variable that you’re using in the comparison is of length zero.\n\nTwo examples of variables with length zero are numeric() or character(0).\n\nThe following example shows how to resolve this error in practice.\n\n### How to Reproduce the Error\n\nSuppose we create the following numeric variable in R with a length of zero:\n\n```#create numeric variable with length of zero\nx <- numeric()\n```\n\nNow suppose we attempt to use this variable in an if statement:\n\n```#if x is less than 10, print x to console\nif(x < 10) {\nx\n}\n\nError in if (x < 10) { : argument is of length zero\n```\n\nWe receive an error because the variable that we defined has a length of zero.\n\nIf we simply created a numeric variable with an actual value, we would never receive this error when using the if statement:\n\n```#create numeric variable\ny <- 5\n\n#if y is less than 10, print y to console\nif(y < 10) {\ny\n}\n\n 5\n```\n\n### How to Avoid the Error\n\nTo avoid the argument is of length zero error, we must include an isTRUE function, which uses the following logic:\n\n`is.logical(x) && length(x) == 1 && !is.na(x) && x`\n\nIf we use this function in the if statement, we won’t receive an error when comparing our variable to some value:\n\n```if(isTRUE(x) && x < 10) {\nx\n}\n```\n\nInstead of receiving an error, we simply receive no output because the isTRUE(x) function evaluates to FALSE, which means the value of x is never printed." ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.7902148,"math_prob":0.9817826,"size":1638,"snap":"2023-40-2023-50","text_gpt3_token_len":393,"char_repetition_ratio":0.13280293,"word_repetition_ratio":0.0751634,"special_character_ratio":0.25335777,"punctuation_ratio":0.0795107,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.99884576,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-12-03T21:18:08Z\",\"WARC-Record-ID\":\"<urn:uuid:5034300e-c3d1-4dfe-b8b0-2d625ca705c9>\",\"Content-Length\":\"111726\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:873d6c9a-4d09-4fa1-9f0d-1906526112b4>\",\"WARC-Concurrent-To\":\"<urn:uuid:75ec8cef-1f54-4879-91d7-3e457c91640e>\",\"WARC-IP-Address\":\"104.21.24.82\",\"WARC-Target-URI\":\"https://stats.arabpsychology.com/st/fix-in-r-argument-is-of-length-zero/\",\"WARC-Payload-Digest\":\"sha1:C5W3UHWYAYXHAMZJJBAQDBAYQTMTJHSK\",\"WARC-Block-Digest\":\"sha1:LDW7NFEJKUP32HAGSSLNVXMTXRPTNVN4\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-50/CC-MAIN-2023-50_segments_1700679100508.53_warc_CC-MAIN-20231203193127-20231203223127-00236.warc.gz\"}"}
https://www.koofers.com/files/notes-eal92qog0i/
[ "# Lecture Notes for CVEN 489 - SPTP: APPL AUTOCAD CEDESIGN at Texas A&M (A&M)\n\n## Notes Information\n\n Material Type: Class Note Professor: Staff Class: CVEN 489 - SPTP: APPL AUTOCAD CEDESIGN Subject: CIVIL ENGINEERING University: Texas A&M University Term: -- Keywords: Reaction ModelParticularlyConcentration DistributionConcentrationImmediatelyWater QualityAssumptionsSubstitutingLongitudinalApproximations", null, "", null, "", null, "", null, "", null, "", null, "## Sample Document Text\n\n7. Water Quality Modeling Until now we have derived governing equations for and sought solutions to idealized cases where analytical solutions could be found. Many problems in the natural world, however, are complex enough that simplified analytical solutions are inadequate to predict the transport and mixing behavior. In these situations, approximations of the governing transport equations (such as finite difference) must be made so that numerical solutions can be found. These approximations can be simple or complex, but often result in a large number of equations that must be solved to predict the concentration distribution. Hence, computer algorithms are used to make the numerical solutions tractable. In this chapter, we introduce the field of water quality modeling based on computerized (nu- merical or digital) tools. This chapter begins by outlining how to select an appropriate numerical tool. The next two section describe common computer approximations. First, simple numerical models based o...\n\n## Related Documents", null, "Environmental Justice Notes", null, "Marketspace Notes", null, "Underpayment Inequity Notes", null, "Particularly Notes", null, "Geropsychology Notes", null, "Bidder's List Notes", null, "Competitive Market Structure Notes", null, "Third-Variable Problem Quiz", null, "Particularly Exam", null, "Participants Notes", null, "Problem Statements Notes", null, "Expressive Style Notes", null, "Particularly Notes", null, "Invitation for Bids Notes", null, "Emotional Contagion Notes", null, "Formula Sheet Notes" ]
[ null, "https://koofer-files.s3.amazonaws.com/converted/EaL92Qog0I_Page_01_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/EaL92Qog0I_Page_02_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/EaL92Qog0I_Page_03_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/EaL92Qog0I_Page_04_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/EaL92Qog0I_Page_05_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/EaL92Qog0I_Page_06_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/3zJkiUMtJL_Page_01_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/2BbddsFVBy_Page_1_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/cJK65Sd9FS_Page_1_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/IVwNnfwJjy_Page_01_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/sZzwWcWnxA_Page_01_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/WnxdMESe7F_Page_1_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/265gixkPtu_Page_01_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/R9apyQMM2m_Page_1_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/pnf7SIJwjh_Page_1_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/7pXBTLUSzq_Page_01_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/nnW6SCs5Mc_Page_1_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/KpqlINQnEV_Page_01_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/USTkAXRQ74_Page_1_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/772LHbzgsD_Page_01_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/NtRx1W2YXQ_Page_01_medium.jpg", null, "https://koofer-files.s3.amazonaws.com/converted/RN6HHmAa7W_Page_01_medium.jpg", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.8430193,"math_prob":0.7307061,"size":2161,"snap":"2019-43-2019-47","text_gpt3_token_len":420,"char_repetition_ratio":0.116828926,"word_repetition_ratio":0.0,"special_character_ratio":0.16705228,"punctuation_ratio":0.11815562,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.95853764,"pos_list":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44],"im_url_duplicate_count":[null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,1,null,3,null,1,null,2,null,1,null,1,null,1,null,1,null,1,null,1,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2019-11-13T22:55:37Z\",\"WARC-Record-ID\":\"<urn:uuid:9593b4f4-d1cd-4cf0-83ba-1c0cae90af82>\",\"Content-Length\":\"43450\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:50d0f67b-0772-4931-8087-7c8f5c2ddd19>\",\"WARC-Concurrent-To\":\"<urn:uuid:7a25659e-6bd3-47fc-898e-1f431f356365>\",\"WARC-IP-Address\":\"3.232.230.212\",\"WARC-Target-URI\":\"https://www.koofers.com/files/notes-eal92qog0i/\",\"WARC-Payload-Digest\":\"sha1:EMYVADVAEMR4W5PNJ6UIITSENIKTK7NV\",\"WARC-Block-Digest\":\"sha1:IZMOTBLEO4PTXHLX4CCAHXWZXRZK5I6X\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2019/CC-MAIN-2019-47/CC-MAIN-2019-47_segments_1573496667442.36_warc_CC-MAIN-20191113215021-20191114003021-00026.warc.gz\"}"}
http://www.cs.technion.ac.il/he/events/2012/1569/
[ "# Theory Seminar: On Identity Testing of Tensors, Low-rank Recovery and Compressed Sensing\n\nדובר:\nמיכאל פורבס, MIT\nתאריך:\nיום רביעי, 6.6.2012, 12:30\nמקום:\nטאוב 201\n\nLet M be a low-rank matrix. For vectors x,y, define the bilinear form f(x,y)=x^t M y. We study the question of reconstructing M from evaluations to f. Much of previous work allowed randomized evaluations, or a stronger query model (or both). We show how to, in an optimal number of 4nr measurements, efficiently reconstruct M from deterministically chosen queries to f. This can be seen as a (noiseless) generalization of compressed sensing, and we make this connection formal by reducing (in the noiseless case) the task of recovering a low-rank matrix to the task of recovering a sparse vector.\n\nWe also generalize the above ideas to higher dimensional matrices, known as tensors. This generalization can be seen as a question in black-box polynomial identity testing (PIT), which is the task of (deterministically) determining if a given algebraic circuit computes the zero polynomial. For a certain model of circuits (known as depth-3 set-multilinear circuits), we give the first quasipolynomial algorithm for the black-box PIT question.\n\nJoint with Amir Shpilka, appeared at STOC 2012\n\nבחזרה לאינדקס האירועים" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.8349014,"math_prob":0.9339914,"size":1260,"snap":"2020-24-2020-29","text_gpt3_token_len":327,"char_repetition_ratio":0.083598725,"word_repetition_ratio":0.02020202,"special_character_ratio":0.22222222,"punctuation_ratio":0.12809917,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9880505,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-05-31T07:25:49Z\",\"WARC-Record-ID\":\"<urn:uuid:a269b021-17e7-4f0c-8fd6-42bd96b26ce8>\",\"Content-Length\":\"14273\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:ee5ae399-16b7-4464-bcf8-862752412466>\",\"WARC-Concurrent-To\":\"<urn:uuid:e4afceef-718c-44a3-b2c5-437ba8979685>\",\"WARC-IP-Address\":\"132.68.32.15\",\"WARC-Target-URI\":\"http://www.cs.technion.ac.il/he/events/2012/1569/\",\"WARC-Payload-Digest\":\"sha1:JBALCNDNKTKVF7XUTSGABBG65GWQGJDM\",\"WARC-Block-Digest\":\"sha1:HRJNTZP4ZY5F7G4XE6YJXU6NHF4AOJSH\",\"WARC-Identified-Payload-Type\":\"application/xhtml+xml\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-24/CC-MAIN-2020-24_segments_1590347411862.59_warc_CC-MAIN-20200531053947-20200531083947-00316.warc.gz\"}"}
https://support.minitab.com/en-us/minitab/20/help-and-how-to/statistics/basic-statistics/how-to/1-sample-z/methods-and-formulas/methods-and-formulas/
[ "# Methods and formulas for 1-Sample Z\n\nSelect the method or formula of your choice.\n\n## Confidence interval\n\n### Notation\n\nTermDescription", null, "sample mean\nzα/2 inverse cumulative probability of the standard normal distribution at 1- α /2; α = 1 - confidence level/100\nσ population standard deviation (assumed known)\nn sample size\n\n## Z-value\n\n### Notation\n\nTermDescription", null, "sample mean\nμ0 hypothesized population mean\nσ population standard deviation (assumed known)\nn sample size\n\n## P-value\n\nThe calculation for the p-value depends on the alternative hypothesis.\n\nAlternative Hypothesis P-value\n\n### Notation\n\nTermDescription\nμ population mean\nμ0 hypothesized population mean\nzz-value of the sample data\nZ a random variable from the standard normal distribution\nBy using this site you agree to the use of cookies for analytics and personalized content.  Read our policy" ]
[ null, "https://support.minitab.com/en-us/minitab/20/png/confidence_interval_one_sample_z_mf.dita_ID73155B22AB424CFF883C203552492C5D_mtbreference_1.png", null, "https://support.minitab.com/en-us/minitab/20/png/Test_one_sample_z_mf.dita_ID152E091682264778BB06624753F249BF_mtbreference_1.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.7681808,"math_prob":0.89358664,"size":612,"snap":"2021-31-2021-39","text_gpt3_token_len":144,"char_repetition_ratio":0.15789473,"word_repetition_ratio":0.14634146,"special_character_ratio":0.22712418,"punctuation_ratio":0.021978023,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9780458,"pos_list":[0,1,2,3,4],"im_url_duplicate_count":[null,2,null,2,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-08-03T05:12:49Z\",\"WARC-Record-ID\":\"<urn:uuid:21feffd6-4dc1-4156-aded-f2fa854ef407>\",\"Content-Length\":\"13190\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:bd67b8de-2903-4dfc-aad3-fa078a500366>\",\"WARC-Concurrent-To\":\"<urn:uuid:c43bc5eb-32d0-44aa-8559-b5083c28653c>\",\"WARC-IP-Address\":\"23.96.207.177\",\"WARC-Target-URI\":\"https://support.minitab.com/en-us/minitab/20/help-and-how-to/statistics/basic-statistics/how-to/1-sample-z/methods-and-formulas/methods-and-formulas/\",\"WARC-Payload-Digest\":\"sha1:2RFQCI6I6V3OFD3C32SSW44SNEL2IV2F\",\"WARC-Block-Digest\":\"sha1:YRW4DXPZY2KC5M2PA6EF3FUYAYQK2RBW\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-31/CC-MAIN-2021-31_segments_1627046154420.77_warc_CC-MAIN-20210803030201-20210803060201-00177.warc.gz\"}"}
http://freescience.info/books.php?id=4
[ "", null, "", null, "Language/Lingua", null, "", null, "", null, "", null, "Books of Mathematics", null, "· Book News · Most clicked · Least clicked · Books Index · Search on Amazon\n\nSearch for a Book", null, "Mathematics\n\n Books -> Mathematics Search on Amazon\n\n Algebra Analysis Applied Mathematics Arithmetic Calculus Catastrophe theory Control Theory Discrete Mathematics Dynamical Systems Fractals Game Theory General Mathematics Geometry Group Theory Information elaboration systems Information Theory Mathematical Physics Measure Theory Misc. Books Number Theory Optimization Probability and Statistics Queueing Theory Topology", null, "Book News: Mathematics", null, "Game Theory (Open Access textbook with 165 solved exercises) Category :Game Theory  Language:", null, "clicks: 142This is an Open Access textbook on non-cooperative Game Theory with 165 solved exercises. Stochastic Differential Equations: Models and Numerics Category :Stochastic differential equation  Language:", null, "clicks: 57The goal of this course is to give useful understanding for solving problems formulated by stochastic differential equations models in science, engineering and mathematical finance. Typically, these . . . . . Around the boundary of complex dynamics Category :Dynamical Systems  Language:", null, "clicks: 23We introduce the exciting field of complex dynamics at an undergraduate level while reviewing, reinforcing, and extending the ideas learned in an typical first course on complex analysis. Julia sets a . . . . . Lectures on singular stochastic PDEs Category :Probability Theory  Language:", null, "clicks: 53 These are the notes for a course at the 18th Brazilian School of Probability held from August 3rd to 9th, 2014 in Mambucaba. The aim of the course is to introduce the basic problems of non--linear PD . . . . . Metric and Topological Spaces Category :Topology  Language:", null, "clicks: 43The syllabus for the course is defined by the Faculty Board Schedules (which are minimal for lecturing and maximal for examining). What is presented here contains some results which it would not, in . . . . . Combinatorics and algebra of tensor calculus Category :Calculus  Language:", null, "clicks: 40 In this paper, motivated by the theory of operads and PROPs we reveal the combinatorial nature of tensor calculus for strict tensor categories and show that there exists a monad which is descr . . . . .\n\n```Home | Authors | About | Contact Us | Email" ]
[ null, "http://freescience.info/images/punto.gif", null, "http://freescience.info/images/freescience.gif", null, "http://freescience.info/flags/it.png", null, "http://freescience.info/flags/en.png", null, "http://freescience.info/flags/fr.png", null, "http://freescience.info/images/punto.gif", null, "http://freescience.info/images/punto.gif", null, "http://freescience.info/images/punto.gif", null, "http://freescience.info/images/new.gif", null, "http://freescience.info/images/new.gif", null, "http://freescience.info/flags/en.png", null, "http://freescience.info/flags/en.png", null, "http://freescience.info/flags/en.png", null, "http://freescience.info/flags/en.png", null, "http://freescience.info/flags/en.png", null, "http://freescience.info/flags/en.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.9326348,"math_prob":0.80124545,"size":1041,"snap":"2020-45-2020-50","text_gpt3_token_len":223,"char_repetition_ratio":0.12150434,"word_repetition_ratio":0.027472528,"special_character_ratio":0.22382325,"punctuation_ratio":0.18181819,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9869785,"pos_list":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32],"im_url_duplicate_count":[null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,6,null,6,null,null,null,null,null,null,null,null,null,null,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-11-24T15:48:02Z\",\"WARC-Record-ID\":\"<urn:uuid:8cb8bd81-9238-447c-90b6-f16af883ab76>\",\"Content-Length\":\"17845\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:bd3c4210-b473-4b9d-8195-72d683425cf3>\",\"WARC-Concurrent-To\":\"<urn:uuid:e1151dcf-88dd-457d-867a-649cfa384a55>\",\"WARC-IP-Address\":\"93.191.242.19\",\"WARC-Target-URI\":\"http://freescience.info/books.php?id=4\",\"WARC-Payload-Digest\":\"sha1:FRGOA7J4MLQJ3OG2IVQUO6PVAAD7ZF7D\",\"WARC-Block-Digest\":\"sha1:MXFEQAE4LLAQYEAQF6OO2DZZKAHQGIKT\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-50/CC-MAIN-2020-50_segments_1606141176864.5_warc_CC-MAIN-20201124140942-20201124170942-00199.warc.gz\"}"}
https://www.bankersadda.com/2018/11/quantitative-aptitude-quiz-for-ibps-clerk-28.html
[ "Dear Aspirants,\n\nNumerical Ability or Quantitative Aptitude Section has given heebie-jeebies to the aspirants when they appear for a banking examination. As the level of every other section is only getting complex and convoluted, there is no doubt that this section, too, makes your blood run cold. The questions asked in this section are calculative and very time-consuming. But once dealt with proper strategy, speed, and accuracy, this section can get you the maximum marks in the examination. Following is the Quantitative Aptitude quiz to help you practice with the best of latest pattern questions.\n\nWatch the Video Solutions\n\nQ1. The compound interest on a sum of money for 2 years is Rs. 832 and the simple interest on the same sum for the same period is Rs. 800. The difference between the compound interest and the simple interest for 3 years at the same rate will be:\nRs. 48\nRs. 66.56\nRs. 98.56\nRs. 106.56\nRs. 96\nQ2. A person took a loan of Rs. 6000 for 3 years, at 5% per annum compound interest. He repaid Rs. 2100 in each of the first 2 years. The amount he should pay at the end of 3rd to clear all his debts is:\nRs. 2425.50\nRs. 2552.50\nRs. 2635.50\nRs. 2745.50\nNone of these\nQ3. Nikita bought 30 kg of wheat at the rate of Rs. 9.50 per kg and 40 kg of wheat at the rate of Rs. 8.50 per kg and mixed them. She sold the mixture at the rate of Rs. 8.90 per kg. Her total profit or loss in the transaction was:\nRs. 7 loss\nRs. 5 profit\nRs. 2 loss\nRs. 7 profit\nRs. 9 loss\nSolution:\nTotal CP of 70 kg of wheat\n= (30 × 9.5 + 40 × 8.5\n= 285 + 340\n= Rs. 625\nTotal SP of 70 kg of wheat\n= 8.90 × 70\n= Rs. 623\n∴ Loss = 625 - 623 = Rs. 2\n\nQ4. A trader marks-up his goods by 60% and gives discount of 25%. Besides it he weighs 20% less amount while selling his goods. What is the net profit of trader?\n50%\n35%\n45%\n55%\n60%\nQ5. The marked price of an electric iron is Rs. 690. The shopkeeper allows a discount of 10% and gains 8%. If no discount is allowed, his gain per cent would be\n20%\n24%\n25%\n28%\n36%\nDirections (6-10): Study the following graph carefully to answer the questions that follow:\n\nCost of three different vegetables (in rupees per kg.) in five different cities.\n\nQ6. In which city is the difference between the cost of one kg. of mushroom and cost of one kg. of lady finger second lowest?\nLudhiyana\nHissar or Chitrakut\nAurangabad\nRaigarh\nNone of these\nSolution:\nFrom the graph it is clear that 2nd lowest differences are in Aurangabad (Rs. 40 per kg)\n\nQ7. Cost of one kg. of lady finger in Ludhiyana is approximately what percent of the cost of two kg of cauliflower in Chitrakut?\n66\n24\n28\n33\n58\nQ8. What will be the difference between average cost of all vegetables in Chitrakut and average cost of all vegetables in Ludhiyana?\nRs 50\nRs 40\nRs 20\nRs 60\nRs 30\nQ9. Ravinder had to purchase 45 kg of cauliflower from Hissar. Shopkeeper gave him discount of 4% per kg. What amount did he pay to the shopkeeper after the discount?\nRs 8,208\nRs 8,104\nRs 8340\nRs 8,550\nRs 8,410\nQ10. What is the respective ratio between the cost of 5/2 kg. of cauliflower from Raigarh to the cost of 3/2 kg. of mushroom from Ludhiyana?\n3 : 2\n2 : 3\n5 : 11\n4 : 9\n5 : 3\nDirections (11-15): What will come in place of question mark (?) in the following questions?\n\nQ11. 112, 225, 345, 479, 634, ?\n803\n817\n825\n793\n786\nQ12. 516, 514, 502, 472, 416, ?\n298\n334\n371\n326\n352\nQ13. 
12, 11, 18, 45, 164, ?\n825\n845\n795\n784\n640\nSolution:\n12 × 1 – 1² = 11\n11 × 2 – 2² = 18\n18 × 3 – 3² = 45\n45 × 4 – 4² = 164\n164 × 5 – 5² = 795\n\nQ14. 28, 15, 17, 28.5, 61, ?\n157.5\n185.5\n172\n158\n155.5\nSolution:\n28 × 0.5 + 1 = 15\n15 × 1 + 2 = 17\n17 × 1.5 + 3 = 28.5\n28.5 × 2 + 4 = 61\n61 × 2.5 + 5 = 157.5\n\nQ15. 71, 91, 117, 155, 217, ?\n292\n338\n286\n327\n317" ]
[ null, "https://2.bp.blogspot.com/-JWspNW2-k_w/WYLB46ntaHI/AAAAAAAAK_o/I65py0PDbQMUURtCsnCWgCZbKxqZqM5xwCLcBGAs/s320/Download%2BThis%2BQuiz.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.8987624,"math_prob":0.97907543,"size":3573,"snap":"2019-13-2019-22","text_gpt3_token_len":1322,"char_repetition_ratio":0.10311011,"word_repetition_ratio":0.5585586,"special_character_ratio":0.44164568,"punctuation_ratio":0.17480136,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.97797644,"pos_list":[0,1,2],"im_url_duplicate_count":[null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2019-05-27T14:05:27Z\",\"WARC-Record-ID\":\"<urn:uuid:d82f3d78-6fa2-4553-96ff-8d6b14865042>\",\"Content-Length\":\"274843\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:e60f2aac-a3f9-4cd0-a1bb-f596c749bff1>\",\"WARC-Concurrent-To\":\"<urn:uuid:786b22f6-208c-4107-9bf2-bd669e77deff>\",\"WARC-IP-Address\":\"172.217.7.147\",\"WARC-Target-URI\":\"https://www.bankersadda.com/2018/11/quantitative-aptitude-quiz-for-ibps-clerk-28.html\",\"WARC-Payload-Digest\":\"sha1:J6RBGDDFO2EYKSMWGSKPDN6KUZ5GVLJL\",\"WARC-Block-Digest\":\"sha1:QKXXZ76COHPVRA72RF4TIXFTRTHYTFMJ\",\"WARC-Identified-Payload-Type\":\"application/xhtml+xml\",\"warc_filename\":\"/cc_download/warc_2019/CC-MAIN-2019-22/CC-MAIN-2019-22_segments_1558232262600.90_warc_CC-MAIN-20190527125825-20190527151825-00231.warc.gz\"}"}
https://www.keithforbes.org/sailing-a-caper-cat/
[ "# Sailing a Caper Cat\n\nA Caper Cat is a 14 feet long catamaran designed and built in the 1960’s or 1970’s. I believe that it was designed in Queensland (my home state in Australia) and I would like to hear from someone who knows more – especially the designer if possible.\n\nOne of the important features of the Caper Cat is its storage capacity in each hull. This makes it ideal for carrying all sorts of things which many similar catamarans cannot do. It is especially useful for camping on the beach and has a history of being used for this purpose.\n\nhttp://users.tpg.com.au/kkmiller/jessemartin/jesse_martin1.html\n\nIn spite of it being very suitable for cruising and laying no claim to being a high-performance machine, I don’t think it has anything to be ashamed of in this department.\n\nFewer similar catamarans are now being made and, sadly, there seems to be less interest in such sailing (this has not been assisted by local councils who seem to have an agenda of stopping people getting sailing boats onto the beach by putting up obstacles). The age of these boats does not, however, seem to degrade them. It is probably fortunate that fibre glass, aluminium, and good sail cloth were available 50 years ago.\n\nThis discussion may also be useful for sailors of other models of catamarans. There are many more Hobie Cats in the world than there are Caper Cats.\n\nSome of the measurements of the Caper Cat follow:\n\n• Length 4.3 metres (14 ft)\n• Width 2.3 m (7’6”)\n• Weight 115kg (250 lbs)\n• Mast Height 6.7m (22 ft) Mast Weight 15 kg (33 lbs)\n• Boom length 2.4m (8 ft)\n• Mainsail area 10 sq meters (107 square feet) Weight 5.4kg (12 lbs)\n• Jib area 3.35 m ² (36 ft2)\n• Rudders each 0.9 m (3 ft) deep by 0.25 m (10”) wide Weight of rudders and tiller 13kg\n\nHere are some estimates of mine of the distance from the bow of various points on the longitudinal axis.\n\n• Main spar and mast 1.5 m (5 ft)\n• Rear spar 3.4 m (11 ft)\n• Stern and rudders 4.3m (14 ft)\n• Centre of Mass (or CG) 2.3 m (7’6”) (boat only – no crew)\n• Centre Of Buoyancy when level (CB) 2.3m (7’6”)\n\n### Theoretical Sailing Performance\n\nI have tried to use the physics of Fluid Mechanics and Sailing Theory to predict the boat’s performance.\n\nSome references are: Marchaj C A, Sailing Theory and Practice, Adlard Coles, 3rd edn. 1973\n\nFox R W & McDonald A T, Introduction to Fluid Mechanics, Wiley\n\nRoberson J A and Crowe C T, Engineering Fluid Mechanics, Wiley, 5th edn. 1993\n\nSNAME Transactions, Vol 101, 1993, pp337-397\n\nMuch of that which I have learned has come from the book by Marchaj. I have owned it for over 30 years and can still reread parts of it and learn something. Hence, I don’t claim that it is easy or an “Introduction” – don’t you hate it when text books start with that word and drive you crazy trying to follow them? – but it is well worth adding to your library and reading.\n\nFigures 80 and 224 in my edition (or Fig. 
1 in Oossanen) are very important to show which features we need to know about to truly understand sailing.\n\nThese include:\n\n• Apparent wind strength and direction,\n• Strength and direction of the aerodynamic force on the sails; the breaking up of this force into Driving force and Heeling force,\n• Strength and direction of the Hydrodynamic force on the hulls and rudders; the breaking up of these forces into Resistance and Side force,\n• Leeway angle and its effect on the Hydrodynamic forces;\n• Center of Effort (CE) on the sails and Center of Lateral Resistance (CLR) on hull and rudders.\n\n#### Mainsail\n\nI have assumed a sail similar to the sail in Fig. 83 of p.130 of Marchaj. It has a camber of 1/10. This sail is equivalent to sail II in Fig. 46 on p.72.\n\nThe characteristics of the sail are given below. More information on how the forces are calculated will be given when discussing the boat performance.\n\nIts maximum force coefficient occurs at an angle of incidence of the apparent wind to the chord (roughly, the boom) of 15 degrees, α, and is Ct = 1.55. Another important angle is epsilon, ε, where tan(ε) = Drag/Lift. α – ε is normally positive and is the angle by which the direction of the cross-wind force lies forward of the perpendicular to the chord of the sail. This angle is forward because the front part of the sail produces the greatest force and the sail is more curved there, just after the luff. This is advantageous because it throws the direction of the force more forward, which is the whole point in driving the boat. I will assume that the main sheet adjusts the sail so that the angle of incidence is always 15 deg. whenever possible. It will not be possible when the wind is coming from abaft, because the shroud places a limit on how far the sail can be let out. I assume that the boom cannot be let out beyond 60 deg. It is also not possible, and the sheet has to be eased, if the boat would otherwise capsize, and also if the boat is pointing very close to the wind.\n\nIn most cases, I assume that the CE of the sail is about 75 cm (2’6”) behind the luff and 2.6 metres (8’6”) above the boom, which puts it about 3.5 m (11’6”) above the water line.\n\nSail Characteristics: Sail Fig. 83 and Fig. 46\n\n#### Jib\n\nThe jib has a third of the area of the mainsail. There is a school of thought that the foresail is more efficient than the mainsail. In addition, the combination of the two sails increases the effectiveness.\n\nHowever, the jib is lower than the main and thus encounters lighter winds. I will therefore assume equal efficiency. Most of the time, only the combination needs to be considered, so I use the characteristics of the mainsail and the area of the combination.\n\nI assume that its CE is 6.5 ft above the waterline.\n\nOf course, the jib shifts the combined CE of the sails forward and I assume the new CE is 1’6” to the rear of the mast in most cases (i.e. close hauled or on a close reach or reach).\n\nIf the jib supplies one third of the force of the main, the combined height of the CE above the waterline becomes 10.25 ft.\n\n#### Hulls\n\nEach hull is divided vertically into 2 sections which I will call the hull (immersed) and the superstructure (normally out of the water). I estimate each hull is 0.194 cubic metres (7 cubic feet) and each superstructure is 0.222 m³ (8 ft³). 
This gives a buoyancy in each hull of about 200 kg (450 lbs) and extra buoyancy in each superstructure of 235 kg (520 lbs).\n\nTo estimate the wetted area of the hulls, I assume the hulls are triangular in cross-section with an angle of 60 degrees at the bottom and calculate using the total weight (boat + crew).\n\nWith the total weight W in lbs, the immersed cross-sectional area of each hull is\n\nAx = (W/64)/(2*LWL) in ft²\n\nand this is d²*tan(theta), where d is the vertical depth immersed and theta is the half-angle (30 deg) at the base.\n\nThe immersed distance is then s = d/Cos(theta) and the wetted area is 4*LWL*s.\n\nThis gives wetted area, Aw = sqrt(W*LWL/2/tan(theta))/(2*Cos(theta))\n\nWith 2 crewmen making a total weight of 620 lbs (280 kg), this gives Aw = 50 ft² (4.6 m²).\n\nThe sideways planform area of the hull, Ap (needed for calculating the sideways force on the hull), is half the wetted area (25 ft² with the above values).\n\nFor calculating the “lift” on the hull (i.e. the sideways force generated by a certain leeway angle, required to balance the boat by counteracting the sideways force from the sails), we need to know the Effective Aspect Ratio. This is pitifully small and I estimate AR as 1 foot immersed divided by 12 feet lengthwise, i.e. 1/12. The effective AR can be taken as twice this value – see e.g. Marchaj, pp 276-7. In calculating the coefficient of lift later, it will be seen that this small AR could be a problem.\n\n#### Rudders\n\nThe rudders, of course, steer the boat but also supply much-needed sideways force to augment that from the hull (see Marchaj, pp 350-354). Each rudder has an immersed planform area of about 1.25 ft² and an AR of more than 2. This presents a far superior lifting surface to that of the hull, as will be seen. The tiller gives a mechanical advantage of about 11 when the rudders are fully down. However, if the rudders are raised to almost horizontal to travel over shallow water, this drops to about 3 and steering the boat becomes very tiring. Some Caper Cats have squatter rudders to make running up on the beach easier. These rudders do not need to be released, which can be convenient because having to do so while so much else is happening can be difficult. However, given that the lift coefficient of the hulls is so small, it is probably better to have the deeper rudders.\n\n### Performance\n\nYacht performance is discussed in Marchaj, Part III, Section 2, p293 and in Section 4 of Oossanen.\n\nI have tackled this problem by creating an Excel spreadsheet. Velocity Performance Programs (VPP) start with reasonable estimates of velocity and other factors and use successive iteration to improve the parameters until the boat is balanced. In Excel, there is a feature called Goal Seek which effectively solves an equation using this method.\n\n#### Input\n\n1. The various parameters of the boat;\n2. Hydrodynamic and aerodynamic constants;\n3. Speed and angle of the wind to the course of the boat;\n4. An initial estimate of the boat speed.\n\nThese consist of many of the constants discussed above. The most complex entry is the characteristics of the sail. Refer to Fig 46 and Fig 83 (sail 2) in Marchaj. For each angle of incidence from 5 deg to 35 deg, in increments of 1 deg, are entered:\n\nCt (the coefficient of total force);\n\nthe angle Epsilon, ε (the direction of the force forward of the perpendicular to the chord).\n\nHow these are used is discussed below.\n\nFor the skin friction coefficient, Cf, I am using Cf = 0.0040. 
This is based on a Reynold’s number of 1.7×10^7 and a reasonably smooth hull (see Marchaj, fig. 148).\n\nFor the air, I use 0.00119, which is ½·ρa, where ρa is the mass density of the air (Marchaj, p45).\n\nKinematic viscosity for sea water, ν = 0.0000111 (Marchaj, p235)\n\nMass density of water, ρw = 1.990\n\n#### Calculations\n\nForces on the sail\n\nIt is necessary to estimate the strength and direction of the apparent wind. This is discussed qualitatively in Marchaj, Chapter 6. It can be calculated numerically fairly simply by using the cosine rule and this is done, with the results called Va and Beta_a. (Beta, β, is the angle of the true wind to the direction of travel of the boat and Beta_a (βA) is the corresponding angle of the apparent wind.)\n\n[ For those who are interested:\n\nVa² = Vwind² + v² + 2*Vwind*v*Cos(β), and then Cos(βA) = (v + Vwind*Cos(β))/Va, where Vwind = true wind velocity, v = velocity of boat, and β and βA are described above. (Note: Marchaj uses γ for β and β for βA. See Marchaj, Fig. 72.) These calculations are done in cells C25, C26, and C27 in the Excel spreadsheet which can be downloaded (see “Using the VPP” below). ]\n\nIt is this wind which impinges on the sails and gives the aerodynamic forces on the boat.\n\nThe table of values for the sail is then used to choose the optimum angle of incidence (15 deg for the most common up-wind directions).\n\nRefer to Marchaj, Fig. 49A, where\n\nL = cross-wind force = Ft*Cos(ε)\n\nD = Drag = Ft*Sin(ε) and then\n\nFR = L*Sin(βA) – D*Cos(βA) = Ft*Sin(βA – ε)\n\nFH = L*Cos(βA) + D*Sin(βA) = Ft*Cos(βA – ε)\n\nFR is the driving force and needs to be maximised.\n\nSo Ct*Sin(βA – ε) and Ct*Cos(βA – ε) are calculated for each angle of attack (angle of incidence) on the sail and the angle α for which Ct*Sin(βA – ε) is a maximum is chosen.\n\nThe driving force from the sail is then\n\nFR = 0.5*ρa*Sa*Ct*Sin(βA – ε)*Va², where Sa is the sail area.\n\nThe heeling force, FH, is similarly calculated:\n\nFH = 0.5*ρa*Sa*Ct*Cos(βA – ε)*Va²\n\nIt is then necessary to calculate the hydrodynamic resistance of the hull (Marchaj, Part III, Ch 1).\n\nThis consists of skin-friction resistance, wave-making resistance and resistance due to leeway (induced resistance). For yachts and dinghies, resistance due to heel is also important but the catamaran has little or no heel so it is ignored.\n\nSkin Friction\n\nThis is given by Rf = Cf*(ρw/2)*Aw*v², where Cf and ρw are given above, Aw is the wetted area and v is the speed of the boat. Note that I am using imperial units (ft/lb/s).\n\nAt low speeds and points of sailing other than close-hauled, this will be the predominant resistance. At higher speeds, wave-making resistance becomes very important. I am using Cf = 0.0040. For Aw being 50 ft², this gives Rf = 0.199*v² (v in ft/s and Rf in lbs).\n\nWave-Making Resistance\n\nRefer to Marchaj, pp 242-270.\n\nThis is the most complex and difficult-to-estimate resistance (especially for a catamaran). Most of the forces encountered in sailing are proportional to the second power of the velocity (e.g. force on the sail and skin friction) but wave-making resistance involves a higher power (Marchaj, p254: “it can rise to 3rd, 4th, 5th, or even 6th power of the velocity, especially for heavy-displacement keel boats”).\n\nCall this Cw·v^κ; for the international canoe of Marchaj’s example, Cw = 0.001226 and κ = 4. 
Note that when Rf is similarly converted to ft/s it becomes Rf = 0.098*v² and, for the quoted wetted area of 39 ft², this implies that Cf = 0.0025. The previous graph (Fig. 148) in Marchaj shows that to attain such a low value of Cf requires a very smooth surface. I am being conservative for the Caper Cat and using Cf = 0.0040.\n\nIt is interesting to see at what speed Rf = Rw and it can be seen that, using the above coefficients for the international canoe, this occurs at 5.3 knots. At higher speeds than this, Rw predominates – at least until full planing occurs for some dinghies.\n\nCatamarans have narrow hulls and so the wave-making resistance is fairly small. I am using a power of 4 (called Kappa, κ, so that it can be varied if desired). In Fig. 155, Marchaj graphs the resistances for the heavy-displacement yacht “New York 32” and inspection of the graph shows that Rw = 0.0118*v^5 gives a good fit. So κ is about 5 here and 4 for the canoe. If v is in ft/s, the coefficient becomes 0.000856, which for Displacement (Delta, Δ) of 11.38 tons and length (LWL) of 32.26 feet is 0.44*Δ/LWL^(5/2). For the Caper Cat, I am using 0.75*Δ/LWL^(4/2), i.e. 0.75*Δ/14², since I am using κ = 4. This is 0.00106 when the total weight, with 2 crew, is 620 lbs (Δ = 0.277 tons). With these values, Rf = Rw at v = 13.7 ft/s (8.1 knots).\n\nInduced Drag\n\nThis is also known as Drag due to Leeway. In order to balance the boat laterally, the hydrodynamic lift on the hulls and rudders has to be equal and opposite in magnitude, direction, and point of application, to the force generated by the sails. This force is created by the water hitting the hulls at a small angle λ, the leeway angle, and the rudders at an angle i (see Marchaj, Fig 224, shown above).\n\nRoberson & Crowe (ref iii), p520 shows that the coefficient of lift, CL, for an infinite aspect ratio airfoil or hydrofoil is given by\n\nCL = 2πα, where α is the angle of attack measured in radians. For α measured in degrees, this becomes CL = (π²/90)·α ≈ 0.11·α (here α is equivalent to our angle λ).\n\nHence, the lifting force FL is given by\n\nFL = CL * S * ρw * v²/2\n\nUnfortunately, our hydrofoils are far from having infinite Aspect Ratio (AR) and CL is greatly reduced. Ref (iv), in Section 4.3.3, gives an expression for the multiplying factor which allows for finite AR. When ignoring certain less-important effects such as viscosity, this factor becomes AR/[2 + √(4 + AR²)]. (Marchaj covers this qualitatively in Fig. 166, as does Fig. 11-23 in Ref. (iii). It is useful to draw in the straight line of slope 0.11, for infinite AR, in these figures to better appreciate the effect of finite AR.)\n\nThe Effective AR of the hulls is twice the AR (Marchaj, p276) and I have taken this to be 0.2 and, with minor adjustments, I have taken CL to be 0.007·λ = a·λ. The AR of the rudders is about 2.5; this gives a multiplying factor of 0.48 and CL ≈ 0.05·λ. Fortunately, not all this side force has to be supplied by the hulls, since the rudders can supply a substantial proportion of it (see Marchaj, Fig. 224 and p 350). If we take the CLR of the hulls to be 5 ft from the bow, the CE of the sails to be 6.5 ft and the rudders to be 14 ft from the bow, 1/5 of the total side force is supplied by the rudders and 4/5 by the hulls.\n\nThe induced resistance coefficient CDi is given by\n\nCDi = CL²/(π*AR) (ref. 
iii, (11-19) on p522).\n\nFor the hulls, this gives CDi = (0.007²/0.6)·λ² = 0.0000817·λ² = adi·λ².\n\nWe can now calculate the lift and the induced drag in terms of the (as yet unknown) leeway angle λ.\n\nThis allows us to calculate the total drag (skin friction + wave-making resistance + induced drag due to leeway) and hence the ratio of lift to drag. This has to equal the ratio of lift to drag supplied by the sails and so we get an equation in which the only unknown (for a given velocity of the boat) is λ:\n\n(0.8·FH/FR)*(½ ρw Aw)*Cf*v² + (0.8·FH/FR)*Cw·v^κ + (0.8·FH/FR)*adi*(½ ρw Ap)*v²*λ² – a*(½ ρw Ap)*v²*λ = 0\n\nThis is a quadratic equation (QE) in λ and, with the philosophy that no QE should go unsolved, that has been done. It is probably unnecessary since Excel’s Goal Seek solves iteratively for v later, but it is instructive and gives a good starting value for the iteration.\n\nThe QE is solved in cells E26 to E29 in the spreadsheet.\n\nHaving found λ, then FL = a*(½ ρw Ap)*λ*v² and\n\nFdrag = (½ ρw Aw)*Cf*v² + adi*(½ ρw Ap)*λ²*v² + Cw·v^κ\n\n#### Velocity Performance Program (VPP)\n\nVelocity and direction of the true wind are input (in cells E13 and A14 respectively), as is an estimate of the boat speed (in cell C21). It may also be necessary to change the crew weight (in cell F9).\n\nFrom this are calculated the speed and direction of the apparent wind and the strength and direction of the force on the sails. Then the total drag Fdrag is calculated as above.\n\nFdrag is subtracted from the driving force of the sail, FR. Call this Fresultant.\n\nThen the Goal Seek function of Excel (What-if Analysis > Goal Seek) is used to set this value to 0 by varying the speed of the boat. This is first done by: Set E36 to 0 by changing C21.\n\nNormally, this will calculate the boat speed (in C21 and C22) which gives a resultant force of 0.\n\nSometimes, however, maintaining this boat speed would require the crew to be in an impossible position (well outside the boat) in order to balance the boat. In this case, the word “HELP” will appear in cell F44. It is then necessary to use Goal Seek again to set cell D51 to 0 by changing C21. This is explained below in “Capsizing Laterally”.\n\n#### Capsizing Laterally\n\nOne complication is if the crew are unable to stop the boat capsizing laterally (sideways). To check for this, the VPP calculates the capsize moment caused by the force on the sails. The combined CE of the sails is taken as 10.25 ft above the water and this provides the heeling arm. The heeling moment is therefore FH * heeling arm. The restoring moment, to hopefully prevent capsize, is given by the weight of the boat (270 lbs) times half the width (3.25 ft) plus the weight of the crew times their distance from the leeward hull. The empty boat’s restoring moment is thus 270*3.25 = 877.5 ft lbs and the crew have to be capable of supplying the remainder of the restoring moment. Thus, the crew have to be at a distance of\n\n(heeling moment – 877.5)/Crew weight. If this distance is greater than 7 ft, I assume that the sheets need to be eased to bring it back to 7 ft.\n\nRestoring moment = 877.5 + crew weight * 7 ft lbs\n\nPermissible sideways force on the sail, FH, is Restoring moment / 10.25 lbs.\n\nBut FH = 0.5*ρa*Sa*Ct*Cos(βA – ε)*Va² and this determines Ct*Cos(βA – ε), which determines the value of α – smaller than the optimum – which is required to produce the maximum sideways force, FH, permissible to prevent the capsize.\n\nThen FR = 0.5*ρa*Sa*Ct*Sin(βA – ε)*Va². 
A new Goal Seek then has to be performed using this reduced value of the driving force.\n\n#### Capsizing Longitudinally\n\nWhen reaching or running in high winds, FR can be large and cause a large forwards pitching moment. This value is FR*10.25 ft lbs. For example, if FR is 300 lbs, the moment is 3075 ft lbs.\n\nThe restoring moment for this situation is provided by the total weight of boat and crew multiplied by the distance between the boat’s CG (centre of gravity) and CB (centre of buoyancy). The crew can move CG back by moving back and the boat will move its CB forward by burying the nose. Just how far forward the CB can move before disaster strikes and the boat pitch-poles is not known. If the crew has a CG 4’ behind the mast (which seems to be about the maximum if there are 2 crew), then the overall CG of boat and crew becomes\n\n(Weight of boat * 7.5 + Weight of crew * 9.0)/Total weight of boat and crew, e.g.\n\n(250*7.5 + 350*9.0)/600 = 8.375 ft from the bow.\n\nTo supply a restoring moment of 3075 ft lbs, the distance between CG and CB has to be\n\n3075/600 = 5.125 ft. This means CB has to be at 8.375 – 5.125 = 3.25 ft from the bow. It is doubtful if this could occur without the boat nosediving.\n\nThe VPP estimates this position of CB.\n\nThe above section may be very relevant for Hobie Cat sailors.\n\n#### Pinching\n\nA value of α smaller than the “optimum” may also be used when close-hauled because of a small value of βA. This is because values of α smaller than 15 deg will then produce the maximum value of Ct*Sin(βA – ε). This is the value of α used to find the driving force.\n\n#### Off The Wind\n\nAnother complication is when the direction of the apparent wind is greater than 75 deg from the bow. The boom is then out at its maximum angle (60 deg) and the angle of attack on the sail is βA – 60. This value is then used for finding the sail force, instead of the optimum of 15 deg which is used for lesser values of βA.\n\n#### Using the VPP\n\nHere is the VPP in the form of the spreadsheet (SS). (A small standalone sketch of the apparent-wind and sail-force calculation follows this record.)" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.91236466,"math_prob":0.9648247,"size":23376,"snap":"2021-43-2021-49","text_gpt3_token_len":6433,"char_repetition_ratio":0.1361886,"word_repetition_ratio":0.013495277,"special_character_ratio":0.28884324,"punctuation_ratio":0.11198274,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9842301,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-11-27T20:29:15Z\",\"WARC-Record-ID\":\"<urn:uuid:27c02ca7-2eaf-41a4-be2e-a1b50c568bdd>\",\"Content-Length\":\"51210\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:b167c0f1-35e2-44fa-92cd-2e7d2fb5255e>\",\"WARC-Concurrent-To\":\"<urn:uuid:7a4c6d36-8641-4209-a7f2-5675b4b50706>\",\"WARC-IP-Address\":\"45.124.55.220\",\"WARC-Target-URI\":\"https://www.keithforbes.org/sailing-a-caper-cat/\",\"WARC-Payload-Digest\":\"sha1:5EP2XT5S5KUZZJWSG6LBUMEBQ4JMFLK2\",\"WARC-Block-Digest\":\"sha1:NRSWRJSJIRVV27PXEEDV5UCKX3X3SOQW\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-49/CC-MAIN-2021-49_segments_1637964358233.7_warc_CC-MAIN-20211127193525-20211127223525-00165.warc.gz\"}"}
https://www.shaalaa.com/question-bank-solutions/in-triangle-abc-if-ab-ac-ab-produced-d-such-that-bd-bc-find-acd-adc-properties-triangle_63014
[ "# In a Triangle ABC, if AB = AC and AB is Produced to D Such that BD = BC, Find ∠ACD : ∠ADC - Mathematics

Answer in Brief

In a triangle ABC, if AB = AC and AB is produced to D such that BD = BC, find ∠ACD : ∠ADC.

#### Solution

In the given ΔABC, AB = AC, and AB is produced to D such that BD = BC.

We need to find ∠ACD : ∠ADC.", null, "Label ∠BDC = ∠1, ∠BCD = ∠2, ∠ACB = ∠4 and ∠ABC = ∠6, as in the figure.

Now, using the property, “angles opposite to equal sides are equal”:

As AB = AC,

∠6 = ∠4          ........(1)

Similarly, as BD = BC,

∠1 = ∠2          ........(2)

Also, using the property, “an exterior angle of a triangle is equal to the sum of the two opposite interior angles”, note that in ΔBDC the angle ∠ABC is the exterior angle at B, so

ext. ∠6 = ∠1 + ∠2

ext. ∠6 = ∠1 + ∠1 (Using 2)

ext. ∠6 = 2∠1

From (1), we get

∠4 = 2∠1          ........(3)

Now, we need to find ∠ACD : ∠ADC. Since ∠ACD = ∠4 + ∠2 and ∠ADC = ∠1, that is,

(∠4 + ∠2) : ∠1

(2∠1 + ∠2) : ∠1 (Using 3)

(2∠1 + ∠1) : ∠1 (Using 2)

3∠1 : ∠1

Eliminating ∠1 from both sides, we get 3 : 1.

Thus, the ratio ∠ACD : ∠ADC is 3 : 1.

#### APPEARS IN

RD Sharma Mathematics for Class 9, Chapter 11, Triangle and its Angles, Q 12, Page 24" ]
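As a quick numeric check of the 3 : 1 result (not part of the original solution), here is a minimal Python sketch; the starting angle of 40° is an arbitrary assumption and any positive value gives the same ratio.

```python
# Assume a value for angle 1 (= angle BDC = angle ADC); any value works.
angle_1 = 40.0

angle_2 = angle_1              # BD = BC, so the base angles of triangle BDC are equal
angle_6 = angle_1 + angle_2    # exterior angle of triangle BDC at B (= angle ABC)
angle_4 = angle_6              # AB = AC, so angle ABC = angle ACB
angle_ACD = angle_4 + angle_2  # angle ACD = angle ACB + angle BCD

print(angle_ACD / angle_1)     # -> 3.0, i.e. angle ACD : angle ADC = 3 : 1
```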
[ null, "https://www.shaalaa.com/images/_4:382d8df6130c4a6b9fdb2c94d758d1f0.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.7862043,"math_prob":0.9830518,"size":759,"snap":"2021-04-2021-17","text_gpt3_token_len":323,"char_repetition_ratio":0.12582782,"word_repetition_ratio":0.09638554,"special_character_ratio":0.39789197,"punctuation_ratio":0.24637681,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9654767,"pos_list":[0,1,2],"im_url_duplicate_count":[null,1,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-04-13T10:15:47Z\",\"WARC-Record-ID\":\"<urn:uuid:1b09513d-d917-4ef0-ab47-a1f6d093f79d>\",\"Content-Length\":\"42777\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:92351c95-4774-4d69-83e9-5add1c4cfdff>\",\"WARC-Concurrent-To\":\"<urn:uuid:55307adf-380d-4329-88d1-2b7a4581af1b>\",\"WARC-IP-Address\":\"172.105.37.75\",\"WARC-Target-URI\":\"https://www.shaalaa.com/question-bank-solutions/in-triangle-abc-if-ab-ac-ab-produced-d-such-that-bd-bc-find-acd-adc-properties-triangle_63014\",\"WARC-Payload-Digest\":\"sha1:LIURS5RQSYNELOBNHEUZLPRIINEIF3D2\",\"WARC-Block-Digest\":\"sha1:XUAPJC2PC6CJRISFW77Y6XZM5AUADMKN\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-17/CC-MAIN-2021-17_segments_1618038072180.33_warc_CC-MAIN-20210413092418-20210413122418-00400.warc.gz\"}"}
https://it.mathworks.com/matlabcentral/answers/521056-how-to-find-the-are-between-two-graphs
[ "# How to find the area between two graphs?

3 views (last 30 days)
Angelavtc on 27 Apr 2020
Answered: Angelavtc on 29 Apr 2020
Hello to all!
How can I find the area in purple between the two graphs in green and blue? Is there a File Exchange submission that can do this for me? Consider y limits of [-3000, 3000].", null, "I attach the points data (x1, y1 defines one curve and x2, y2 the other).
KSSV on 27 Apr 2020

Angelavtc on 29 Apr 2020
%% Part 1: area between the line y = 2500 and the demand curve, up to the intersection point
t = 25;
x1 = Array_jan_13{t}.Volume;
y1 = Array_jan_13{t}.Price;
line1 = 2500*ones(1, length(x1));
x_inter = intersectPoints_jan_13{t}.x0;
y_inter = intersectPoints_jan_13{t}.y0;
hFig = figure;
plot(x1, y1, 'r-', 'LineWidth', 2)
hold on
plot(x1, line1, 'b', 'LineWidth', 2);
plot(x_inter, y_inter, 'r.', 'markersize', 18)
% Interpolate onto a fine grid between the first data point and the intersection
n = 50000;
x1_new = linspace(x1(1), x_inter, n);
y1_new = interp1(x1, y1, x1_new);
line_fine1 = 2500*ones(1, length(x1_new));
hFig = figure;
plot(x1_new, y1_new, 'r-', 'LineWidth', 2)
hold on
plot(x1_new, line_fine1, 'b', 'LineWidth', 2);
plot(x_inter, y_inter, 'r.', 'markersize', 18)
% Position of the first element of y1_new that falls below the 2500 line
indexLeft = find(y1_new < 2500, 1, 'first');
% Vertical lines marking the interval over which the area is calculated
xline(x1_new(indexLeft), 'Color', 'm', 'LineWidth', 2);
xline(x1_new(end), 'Color', 'y', 'LineWidth', 2);
% Area between the curve and the flat blue line at 2500
x_area = x1_new(indexLeft : end);
y_area = line_fine1(indexLeft : end) - y1_new(indexLeft : end);
area_Loss_up = trapz(x_area, y_area)
% Double check: compute the area by summation as well (should be fairly close)
area_Loss2_up = sum(y_area) * abs(x1_new(2) - x1_new(1))

%% Part 2: area between the line y = 2500 and the supply curve, up to the intersection point
t = 25;
x2 = Array_jan_13_s{t}.Volume_s;
y2 = Array_jan_13_s{t}.Price_s;
line2 = 2500*ones(1, length(x2));
x_inter = intersectPoints_jan_13{t}.x0;
y_inter = intersectPoints_jan_13{t}.y0;
hFig = figure;
plot(x2, y2, 'r-', 'LineWidth', 2)
hold on
plot(x2, line2, 'b', 'LineWidth', 2);
plot(x_inter, y_inter, 'r.', 'markersize', 18)
% Interpolate onto a fine grid between the intersection and the last data point
n = 50000;
x2_new = linspace(x_inter, x2(end), n);
y2_new = interp1(x2, y2, x2_new);
line_fine2 = 2500*ones(1, length(x2_new));
hFig = figure;
plot(x2_new, y2_new, 'r-', 'LineWidth', 2)
hold on
plot(x2_new, line_fine2, 'b', 'LineWidth', 2);
plot(x_inter, y_inter, 'r.', 'markersize', 18)
% Position of the last element of y2_new that falls below the 2500 line
indexLeft = find(y2_new < 2500, 1, 'last');
% Vertical lines marking the interval over which the area is calculated
xline(x2_new(indexLeft), 'Color', 'm', 'LineWidth', 2);
xline(x2_new(1), 'Color', 'y', 'LineWidth', 2);
% Area between the curve and the flat blue line at 2500
x_area_s = x2_new(1 : indexLeft);
y_area_s = line_fine2(1 : indexLeft) - y2_new(1 : indexLeft);
area_Loss_s_up = trapz(x_area_s, y_area_s)
% Double check: compute the area by summation as well
area_Loss2_s_up = sum(y_area_s) * abs(x2_new(2) - x2_new(1))
% Total area between the two curves above their intersection point
Total_area_up = area_Loss_up + area_Loss_s_up
Total_area2_up = area_Loss2_up + area_Loss2_s_up" ]
[ null, "https://www.mathworks.com/matlabcentral/answers/uploaded_files/287968/image.jpeg", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.91848207,"math_prob":0.99893177,"size":550,"snap":"2022-27-2022-33","text_gpt3_token_len":139,"char_repetition_ratio":0.1007326,"word_repetition_ratio":0.0,"special_character_ratio":0.26,"punctuation_ratio":0.104347825,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9785923,"pos_list":[0,1,2],"im_url_duplicate_count":[null,1,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2022-07-06T23:14:09Z\",\"WARC-Record-ID\":\"<urn:uuid:b39767f8-4879-4f93-bc3b-59b923644ee0>\",\"Content-Length\":\"144868\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:bf7008fc-ca2c-45f4-8785-cdb6f7e5996b>\",\"WARC-Concurrent-To\":\"<urn:uuid:803da331-e203-4f10-b2ae-d5c02c3c7555>\",\"WARC-IP-Address\":\"23.218.145.211\",\"WARC-Target-URI\":\"https://it.mathworks.com/matlabcentral/answers/521056-how-to-find-the-are-between-two-graphs\",\"WARC-Payload-Digest\":\"sha1:SLKAIL4WCX3RHMY2ZZUW55G6SEGYAX3K\",\"WARC-Block-Digest\":\"sha1:XROG2LRWMTJSP77KTK4UEK4YDVADF5TU\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2022/CC-MAIN-2022-27/CC-MAIN-2022-27_segments_1656104678225.97_warc_CC-MAIN-20220706212428-20220707002428-00025.warc.gz\"}"}
https://community.plotly.com/t/data-upload-and-callback/36588
[ "Hello, I am using Dash for the first time.

The goal is that the user uploads files; the program must then process those files with my functions (co-clustering) and then display results and plots.

I don't know how to process the files that the user is going to upload (I already have the function which does this).

Actually, I am trying to create an interface for co-clustering. I already have the co-clustering algorithm and I don't know how to combine it with Dash.

Any help!

Hello,
for the document upload, check dcc.Upload. Basically you will add another function to process the file after it has been uploaded; this function will be called from the callback function, and the result of your processing should be the data that will be plotted and displayed for the user.

``````
# The upload id and the Input/State wiring below follow the standard
# dcc.Upload pattern from the Dash docs; they were missing from the
# original snippet and are restored here as an assumption.
from dash.dependencies import Input, Output, State

app.layout = html.Div([
    dcc.Graph(id='MyGraph', animate=True),
    dcc.Upload(
        id='upload-data',
        children=html.Div([
            'Drag and Drop or ',
            html.A('Select Files')
        ]),
        multiple=True  # matches the list_of_* names below
    )
])

def parse_contents(contents, filename, date):
    # processing
    ...

@app.callback(Output('MyGraph', 'figure'),
              [Input('upload-data', 'contents')],
              [State('upload-data', 'filename'),
               State('upload-data', 'last_modified')])
def update_output(list_of_contents, list_of_names, list_of_dates):
    if list_of_contents is not None:
        # Do processing using the parse_contents() function
        return figure  # return graph content to be shown
``````

Check here to get an idea how to link the callback function's return value with your graph.

then we must use this function to process the uploaded files (I'm stuck here)

``````
if data_name == "CLASSIC4":
    # Set data file path
    filename = "C:/Users/Amira/Desktop/CLASSIC4/docbyterm.txt"
    # Open the file and read the content
    # (the read line was lost in the original post; restored here)
    myfile = open(filename, "rb")
    content = myfile.read().decode()
    myfile.close()
    # Split the content to build a matrix
    # NOTE: the list indices below (meta[0], meta[1], meta[2], shape[0])
    # were lost in the original post and are restored from context.
    content = content.split("\n")
    meta = content[0].split(" ")
    doc_term_counts = np.zeros((int(meta[0]) - 1, int(meta[1])))

    for i in range(1, len(content) - 1):
        meta = content[i].split(" ")
        if len(meta) == 3:
            row = int(meta[0])
            if row >= 1553:
                row -= 1
            doc_term_counts[row - 1, int(meta[1]) - 1] = int(meta[2])

    filename = "C:/Users/Amira/Desktop/CLASSIC4/documents.txt"
    labels_df = pd.read_csv(filename, usecols=[0], delim_whitespace=True, header=None)
    # there is a header
    labels = labels_df.values.flatten()
    labels = labels[:len(labels) - 1]
    # Permutation
    tmp_perm = np.random.RandomState(seed=42).permutation(doc_term_counts.shape[0])
    np.take(doc_term_counts, tmp_perm, axis=0, out=doc_term_counts)
    labels = [labels[i] for i in tmp_perm.tolist()]

    filename = "C:/Users/Amira/Desktop/CLASSIC4/terms.txt"
    # dicbyterm2
    myfile = open(filename, "rb")
``````" ]
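One concrete way to get from the upload to your own preprocessing: dcc.Upload delivers contents as a base64 data-URL string, so it must be decoded before the parsing code above can run on it. A minimal sketch (the .csv branch and the whitespace fallback are assumptions about your file types):

``````
import base64
import io

import pandas as pd

def parse_contents(contents, filename, date):
    # dcc.Upload provides contents as "data:<mime type>;base64,<payload>"
    content_type, content_string = contents.split(',')
    decoded = base64.b64decode(content_string)
    if filename.endswith('.csv'):
        # decoded bytes -> text -> DataFrame
        return pd.read_csv(io.StringIO(decoded.decode('utf-8')))
    # e.g. for whitespace-separated files like docbyterm.txt
    return decoded.decode('utf-8').split('\n')
``````

The list of lines returned here can then feed the same content.split(...) logic used above for CLASSIC4, instead of reading from a hard-coded local path.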
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.64989555,"math_prob":0.52036995,"size":991,"snap":"2023-14-2023-23","text_gpt3_token_len":215,"char_repetition_ratio":0.14488348,"word_repetition_ratio":0.0,"special_character_ratio":0.24217962,"punctuation_ratio":0.1875,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9563766,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-03-21T05:39:44Z\",\"WARC-Record-ID\":\"<urn:uuid:4b7f6baa-9e40-43f9-b63c-e7c3337391e9>\",\"Content-Length\":\"28590\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:46d58d4f-fb7a-45be-ad21-a4ce983c9952>\",\"WARC-Concurrent-To\":\"<urn:uuid:9e03a4e7-20a3-400a-bc73-45535dc75db2>\",\"WARC-IP-Address\":\"184.105.176.45\",\"WARC-Target-URI\":\"https://community.plotly.com/t/data-upload-and-callback/36588\",\"WARC-Payload-Digest\":\"sha1:5PSUYY6XZLHLCTT3UG7LZUATSM7KNW65\",\"WARC-Block-Digest\":\"sha1:JMEFNLGXCNVW6DS55YXNR7Z5U7GRFN5J\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-14/CC-MAIN-2023-14_segments_1679296943625.81_warc_CC-MAIN-20230321033306-20230321063306-00219.warc.gz\"}"}
https://dev.mysql.com/doc/dev/mysql-server/latest/structhypergraph_1_1Hypergraph-members.html
[ "", null, "MySQL 8.2.0 Source Code Documentation

hypergraph::Hypergraph Member List

This is the complete list of members for hypergraph::Hypergraph, including all inherited members.

- AddEdge(NodeMap left, NodeMap right)
- AddNode()
- AttachEdgeToNodes(size_t left_first_idx, size_t right_first_idx, NodeMap left, NodeMap right) [private]
- edges
- Hypergraph(MEM_ROOT *mem_root) [inline, explicit]
- ModifyEdge(unsigned edge_idx, NodeMap new_left, NodeMap new_right)
- nodes

All of the listed members are defined in hypergraph::Hypergraph." ]
[ null, "https://dev.mysql.com/doc/dev/mysql-server/latest/logo-mysql-110x55.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.62250555,"math_prob":0.88753986,"size":588,"snap":"2023-40-2023-50","text_gpt3_token_len":160,"char_repetition_ratio":0.31164384,"word_repetition_ratio":0.0,"special_character_ratio":0.20068027,"punctuation_ratio":0.2888889,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9767328,"pos_list":[0,1,2],"im_url_duplicate_count":[null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-12-11T09:42:52Z\",\"WARC-Record-ID\":\"<urn:uuid:293d974a-1230-4a66-99c0-b049853adb64>\",\"Content-Length\":\"11571\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:a5cc018d-7092-4d8b-a411-f939f16419cf>\",\"WARC-Concurrent-To\":\"<urn:uuid:a9264af7-a0e5-4d21-92dc-d14a5f46b16c>\",\"WARC-IP-Address\":\"23.49.176.249\",\"WARC-Target-URI\":\"https://dev.mysql.com/doc/dev/mysql-server/latest/structhypergraph_1_1Hypergraph-members.html\",\"WARC-Payload-Digest\":\"sha1:4DM7WKZKUIFAZ3AM37WGANBALC57WWVE\",\"WARC-Block-Digest\":\"sha1:VNOLH4RK7LVVGNMFAQRSKTSBORE47XA3\",\"WARC-Identified-Payload-Type\":\"application/xhtml+xml\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-50/CC-MAIN-2023-50_segments_1700679103810.88_warc_CC-MAIN-20231211080606-20231211110606-00633.warc.gz\"}"}
http://accessphysiotherapy.mhmedical.com/content.aspx?bookid=2433&sectionid=191511590
[ "## INTRODUCTION

After completing this chapter, you will be able to:

• Define torque, quantify resultant torques, and identify the factors that affect resultant joint torques.

• Identify the mechanical advantages associated with the different classes of levers and explain the concept of leverage within the human body.

• Solve basic quantitative problems using the equations of static equilibrium.

• Define center of gravity and explain the significance of center of gravity location in the human body.

• Explain how mechanical factors affect a body's stability.

CONNECT RESOURCES

• Online Lab Manual

• Chapter lecture PowerPoint presentation

• Chapter quizzes

• Web links for study and exploration of chapter-related topics

Why do long jumpers and high jumpers lower their centers of gravity before takeoff? What mechanical factors enable a wheelchair to remain stationary on a graded ramp, or a sumo wrestler to resist the attack of his opponent? A body's mechanical stability is based on its resistance to both linear and angular motion. This chapter introduces the kinetics of angular motion, along with the factors that affect mechanical stability.

Many athletic skills require mechanical stability. ©Susan Hall", null, "## EQUILIBRIUM

### Torque

As discussed in Chapter 3, the rotary effect created by an applied force is known as torque, or moment of force. Torque, which may be thought of as rotary force, is the angular equivalent of linear force. Algebraically, torque is the product of force and the force's moment arm, that is, the perpendicular distance from the force's line of action to the axis of rotation:

torque: the rotary effect of a force about an axis of rotation, measured as the product of the force and the perpendicular distance between the force's line of action and the axis

moment arm: the shortest (perpendicular) distance between a force's line of action and an axis of rotation

T = Fd

Thus, the magnitude of a force and the length of its moment arm equally affect the amount of torque generated (Figure 13-1). The moment arm is also sometimes referred to as the force arm or lever arm.

As may be observed in Figure 13-2, the moment arm is the shortest distance between the force's line of action and the axis of rotation. A force directed through an axis of rotation produces no torque, because the force's moment arm is zero.

###### FIGURE 13-1

Which position of force application is best for opening the swinging door? Experience should verify that position C is best." ]
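To make the T = Fd relation concrete, here is a minimal numeric sketch in Python; the force, distance, and angle values are arbitrary assumptions for illustration and are not taken from the chapter.

```python
import math

F = 50.0                 # applied force, N (assumed)
r = 0.75                 # distance from the axis to the point of force application, m (assumed)
phi = math.radians(60)   # angle between r and the force's line of action (assumed)

# The moment arm is the perpendicular distance from the force's line of
# action to the axis; a force directed through the axis has d = 0.
d = r * math.sin(phi)
T = F * d                # torque (moment of force), N*m

print(f"moment arm d = {d:.3f} m, torque T = {T:.1f} N*m")
```

Doubling either F or d doubles T, which is why position C on the door in Figure 13-1 opens it most easily.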
[ null, "https://mgh.silverchair-cdn.com/mgh/content_public/book/2433/hall8_ch13_uf001-1.png", null, "https://mgh.silverchair-cdn.com/mgh/content_public/book/2433/hall8_ch13_f001-1.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.9094047,"math_prob":0.9120091,"size":2545,"snap":"2020-34-2020-40","text_gpt3_token_len":511,"char_repetition_ratio":0.12947658,"word_repetition_ratio":0.033333335,"special_character_ratio":0.20628683,"punctuation_ratio":0.07727273,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.977119,"pos_list":[0,1,2,3,4],"im_url_duplicate_count":[null,4,null,4,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-08-11T07:50:34Z\",\"WARC-Record-ID\":\"<urn:uuid:df848db9-cceb-421d-9eb3-4edc559b7a30>\",\"Content-Length\":\"159556\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:46b227bc-3823-428f-a284-985377674b90>\",\"WARC-Concurrent-To\":\"<urn:uuid:9d424adc-d5ab-4880-bd2b-f5e768cdcc63>\",\"WARC-IP-Address\":\"209.135.208.79\",\"WARC-Target-URI\":\"http://accessphysiotherapy.mhmedical.com/content.aspx?bookid=2433&sectionid=191511590\",\"WARC-Payload-Digest\":\"sha1:RK7XFROM5OK5V44MDGRKLLMUFR26NUAK\",\"WARC-Block-Digest\":\"sha1:BYVYIMWZ5VAYYQMYWCWIGS6OIS26F5HH\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-34/CC-MAIN-2020-34_segments_1596439738735.44_warc_CC-MAIN-20200811055449-20200811085449-00304.warc.gz\"}"}
https://www.codecogs.com/library/maths/geometry/spherical/latlong.php
[ "# Lat Long

Converts latitude expressed in degrees, minutes and seconds into degrees.
Controller: CodeCogs", null, "C++

## Latitude

double latitude(int degrees, int minutes = 0, float seconds = 0, char compassDirection = 'N')

Converts a geographical location in degrees, minutes and seconds into a single numeric angle from the center of the earth, expressed in degrees.

There are a total of 180 degrees from the South pole to the North pole. By convention there are 60 minutes in one degree, and naturally 60 seconds in each minute.

### Example 1

```
#include <codecogs/maths/geometry/spherical/latlong.h>
#include <iostream>
int main()
{
    std::cout << "New York is at a Latitude of: " << latitude(40,47,0,'N') << " degrees" << std::endl;
    std::cout << "London is at a Latitude of: " << latitude(51,32,0,'N') << " degrees" << std::endl;
    return 0;
}
```

Output:

```
New York is at a Latitude of: 40.7833 degrees
London is at a Latitude of: 51.5333 degrees
```

### Parameters

degrees: Degrees from -90 to 90.
minutes: Minutes from 0 to 60.
seconds: Seconds from 0 to 60.
compassDirection: The key direction, either 'N' for North or 'S' for South.

## Longitude

double longitude(int degrees, int minutes = 0, float seconds = 0, char compassDirection = 'N')

Returns a geographical location in degrees, minutes and seconds as a single numeric angle from the center of the earth, expressed in degrees. There are a total of 360 degrees around the world. By convention there are 60 minutes in one degree, and naturally 60 seconds in each minute.

### Example 2

```
#include <codecogs/maths/geometry/spherical/latlong.h>
#include <iostream>
int main()
{
    std::cout << "New York is at a Longitude of: " << longitude(73,58,0,'W') << " degrees" << std::endl;
    std::cout << "London is at a Longitude of: " << longitude(0,5,0,'W') << " degrees" << std::endl;
    return 0;
}
```

Output:

```
New York is at a Longitude of: -73.9667 degrees
London is at a Longitude of: -0.0833333 degrees
```

### Parameters

degrees: Degrees from -180 to 180; if you need to go further than 180 degrees east or west, describe that location from the opposite direction.
minutes: Minutes from 0 to 60.
seconds: Seconds from 0 to 60.
compassDirection: The key direction, either 'E' for East or 'W' for West, as in the examples above." ]
[ null, "https://www.codecogs.com/images/openID/facebookW.png", null, "https://www.codecogs.com/images/openID/googleW.png", null, "https://www.codecogs.com/images/openID/yahooW.png", null, "https://www.codecogs.com/images/cart/getgpl_grey.gif", null, "https://www.codecogs.com/images/cart/thisunit.gif", null, "https://www.codecogs.com/images/cart/subunits.gif", null, "https://www.codecogs.com/images/cart/addv3.gif", null, "https://www.codecogs.com/images/spacer.gif", null, "https://www.codecogs.com/images/browser/getgpl.gif", null, "https://www.codecogs.com/images/browser/addtocart.gif", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.7148921,"math_prob":0.8044627,"size":1982,"snap":"2019-43-2019-47","text_gpt3_token_len":556,"char_repetition_ratio":0.13549039,"word_repetition_ratio":0.64375,"special_character_ratio":0.30978808,"punctuation_ratio":0.18686868,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.98105985,"pos_list":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20],"im_url_duplicate_count":[null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2019-11-17T04:45:52Z\",\"WARC-Record-ID\":\"<urn:uuid:745ed0ea-9016-43b9-ace1-6f0057d1d0af>\",\"Content-Length\":\"29589\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:8d4d5fb7-7edb-496c-b14a-6cad9af20ed9>\",\"WARC-Concurrent-To\":\"<urn:uuid:00519ef8-9f8b-4a6c-9bee-04ca9813c1c8>\",\"WARC-IP-Address\":\"192.155.228.11\",\"WARC-Target-URI\":\"https://www.codecogs.com/library/maths/geometry/spherical/latlong.php\",\"WARC-Payload-Digest\":\"sha1:7ICLRA5R57MOKYKXP576OL4OLWSJXNZT\",\"WARC-Block-Digest\":\"sha1:ELLRA3ZREFAWQJHP4CITLLDZKW7DK2KJ\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2019/CC-MAIN-2019-47/CC-MAIN-2019-47_segments_1573496668787.19_warc_CC-MAIN-20191117041351-20191117065351-00558.warc.gz\"}"}
https://www.boost.org/doc/libs/1_78_0/libs/multiprecision/doc/html/boost_multiprecision/tut/floats/fp_eg/aos.html
[ "#", null, "Boost C++ Libraries

...one of the most highly regarded and expertly designed C++ library projects in the world.

##### Area of Circle

Generic numeric programming employs templates to use the same code for different floating-point types and functions. Consider the area of a circle a of radius r, given by

a = π * r²

The area of a circle can be computed in generic programming using Boost.Math for the constant π as shown below:

```
#include <boost/math/constants/constants.hpp>

template<typename T>
inline T area_of_a_circle(T r)
{
   using boost::math::constants::pi;
   return pi<T>() * r * r;
}
```

It is possible to use area_of_a_circle() with built-in floating-point types as well as floating-point types from Boost.Multiprecision. In particular, consider a system with a 4-byte single-precision float, an 8-byte double-precision double, and also the cpp_dec_float_50 data type from Boost.Multiprecision with 50 decimal digits of precision.

We can compute and print the approximate area of a circle with radius 123/100 for float, double and cpp_dec_float_50 with the program below (see the next section for choosing 123/100 instead of 1.23).

```
#include <iostream>
#include <iomanip>
#include <boost/multiprecision/cpp_dec_float.hpp>

using boost::multiprecision::cpp_dec_float_50;

int main(int, char**)
{
   const float r_f(float(123) / 100);
   const float a_f = area_of_a_circle(r_f);

   const double r_d(double(123) / 100);
   const double a_d = area_of_a_circle(r_d);

   const cpp_dec_float_50 r_mp(cpp_dec_float_50(123) / 100);
   const cpp_dec_float_50 a_mp = area_of_a_circle(r_mp);

   // 4.75292
   std::cout
      << std::setprecision(std::numeric_limits<float>::digits10)
      << a_f
      << std::endl;

   // 4.752915525616
   std::cout
      << std::setprecision(std::numeric_limits<double>::digits10)
      << a_d
      << std::endl;

   // 4.7529155256159981904701331745635599135018975843146
   std::cout
      << std::setprecision(std::numeric_limits<cpp_dec_float_50>::digits10)
      << a_mp
      << std::endl;
}
```

In later examples we'll look at calling both standard library and Boost.Math functions from within generic code. We'll also show how to cope with template arguments which are expression-templates rather than number types.

But first, some warnings about how multiprecision types are slightly but significantly different from fundamental (built-in) types." ]
[ null, "https://www.boost.org/gfx/space.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.55471575,"math_prob":0.9289052,"size":2284,"snap":"2022-05-2022-21","text_gpt3_token_len":574,"char_repetition_ratio":0.13333334,"word_repetition_ratio":0.0,"special_character_ratio":0.29422066,"punctuation_ratio":0.19607843,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9978748,"pos_list":[0,1,2],"im_url_duplicate_count":[null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2022-05-28T19:48:48Z\",\"WARC-Record-ID\":\"<urn:uuid:0253bf9b-1cd2-4362-b789-d231ba288177>\",\"Content-Length\":\"13847\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:831fac8a-aedf-4511-ba09-11fdebc759c9>\",\"WARC-Concurrent-To\":\"<urn:uuid:59321e73-db3e-4790-8b5b-59eb2720dcf3>\",\"WARC-IP-Address\":\"146.20.110.251\",\"WARC-Target-URI\":\"https://www.boost.org/doc/libs/1_78_0/libs/multiprecision/doc/html/boost_multiprecision/tut/floats/fp_eg/aos.html\",\"WARC-Payload-Digest\":\"sha1:GFVUHCA3V7TUXF47ANBDSOWF2WFUM64D\",\"WARC-Block-Digest\":\"sha1:SVEBI6OWAUK6ZNEEQYJIIAA3JQDQG5GR\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2022/CC-MAIN-2022-21/CC-MAIN-2022-21_segments_1652663019783.90_warc_CC-MAIN-20220528185151-20220528215151-00060.warc.gz\"}"}
https://www.scirp.org/journal/paperinformation.aspx?paperid=109813
[ "Droplet Characterization Based on the Simulated Secondary Rainbows

Abstract

The droplet size, size distribution, refractive index, and temperature can be measured simultaneously by the rainbow technique. In the present work, the rainbow scattering diagram for a spherical droplet in the secondary rainbow region is simulated by the use of the generalized Lorenz-Mie theory. For achieving high spatial resolution in denser droplet sprays, a focused Gaussian beam is used. For droplet characterization, different inversion algorithms are investigated, which include the trough-trough (θmin1 and θmin2) method and the inflection-inflection (θinf1 and θinf2) method. For the trough-trough algorithm, the absolute error of the refractive index is between −6.4 × 10−4 and 1.7 × 10−4, and the error of the droplet radius is only between −0.55% and 1.77%. For the inflection-inflection algorithm, the maximum absolute error of the inverted refractive index is below 1.1 × 10−3 in magnitude. The error of the droplet radius is between −0.75% and 5.67%.

Share and Cite:

Wang, W., Wang, J. and Zhang, Y. (2021) Droplet Characterization Based on the Simulated Secondary Rainbows. Optics and Photonics Journal, 11, 133-139. doi: 10.4236/opj.2021.116011.

1. Introduction

Liquid atomization and sprays play an important role in industrial production, fuel atomization and combustion, spray cooling, spray drying, and related processes. It is of great significance to accurately measure the size, refractive index (temperature), speed, and other parameters of the droplets during the atomization process. Because of their inherent non-intrusiveness, optical techniques are used for particle/droplet characterization, including phase Doppler techniques, particle image/tracking velocimetry, laser digital holography, interferometric particle imaging, the time-shift technique, and the rainbow technique (also called rainbow refractometry).

Roth first proposed the rainbow method, which is called the standard rainbow technique (SRT). The rainbow pattern was used to measure the refractive index and size of a single spherical droplet. Han measured the small change in the diameter of a liquid column based on the rainbow technique in 1998, assuming that the refractive index is constant. Van Beeck generalized the SRT to global rainbow technology (GRT) to measure the size distribution and temperature of the droplet cloud in a spray. Van Beeck and his colleagues also studied the inversion algorithm of GRT. GRT can be used to measure the mixing ratio of liquid-liquid suspension systems, droplets in large containment vessels, and multiphase flows. In order to achieve a higher spatial resolution, Gaussian beams are used as the light source. The rainbow pattern of droplets illuminated by Gaussian beams has been simulated and used for droplet characterization, which makes it possible to measure a single tiny droplet under high-density spray conditions. However, only the angles of the first two peaks were used for droplet inversion in those studies. This study discusses the feasibility of other inversion algorithms for measuring droplets based on the intensity of scattered light in the secondary rainbow region when Gaussian beams are used as the light source.

The structure of this paper is as follows: In Section 2, inversion schemes for the characterization of droplets are given. In Section 3, based on the secondary rainbow, two inversion schemes are used to compute the refractive index and size of the spherical droplet.
Section 4 is devoted to the conclusions.

2. Inversion Algorithm of Rainbow Technology

Based on geometrical optics, the deflection angle θp for a ray emerging from a spherical droplet is given as:

$\theta_p(\theta_i, m, p) = (p - 1)\pi + 2\theta_i - 2p\arcsin(\sin\theta_i/m), \quad p = 0, 1, 2, \ldots$ (1)

Here θi is the incidence angle and m is the relative refractive index of the droplet. The physical meaning of p is as follows: p = 0 is the ray directly reflected by the spherical droplet, p = 1 is the ray directly refracted through the droplet, and p ≥ 2 is the ray refracted out after p − 1 internal surface reflections. Therefore, the deflection angle of the secondary rainbow (p = 3) is:

$\theta_p = 2\pi + 2\theta_i - 6\arcsin(\sin\theta_i/m)$ (2)

In the above formula, there is an incidence angle that minimizes the deflection angle. The minimum deflection angle is called the geometrical-optics rainbow angle, given by:

$\theta_{rg} = 2\pi + 2\arcsin\sqrt{\dfrac{9 - m^2}{8}} - 6\arcsin\sqrt{\dfrac{9 - m^2}{8m^2}}$ (3)

In Airy theory, the light intensity distribution in the rainbow region is expressed by the Airy integral F(z):

$F(z) = \int_0^{\infty} \cos\left[\pi(zt - t^3)/2\right]\,\mathrm{d}t$ (4)

where z is a dimensionless parameter, defined as:

$z = (-q)\left[12/(h\pi^2)\right]^{1/2}\alpha^{2/3}(\theta - \theta_{rg})$ (5)

where

$\alpha = 2\pi a/\lambda, \qquad h = \dfrac{(p^2 - 1)^2 (p^2 - m^2)^{1/2}}{p^2 (m^2 - 1)^{3/2}}$ (6)

Here the value of q is −1 or +1, depending on the angle of the emitted light relative to the incident light. From the Airy function, the z values corresponding to the maxima, minima and inflection points of the curve can be obtained. The normalized light intensity distribution shown in Figure 1 is calculated using the Debye series for a spherical water droplet with a radius of 50 μm and a relative refractive index of 1.333. Here, the droplet is illuminated by a Gaussian beam with beam waist radius ω0 = 100 μm and wavelength 0.6328 μm.
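As a numerical sanity check (not part of the original paper), the rainbow angle of Equation (3) can be recovered by minimizing the p = 3 deflection angle of Equation (2) over the incidence angle. A minimal Python sketch for m = 1.333:

```python
import numpy as np
from scipy.optimize import minimize_scalar

m = 1.333  # relative refractive index of water, as in the text

def deflection(theta_i, p=3):
    # Eq. (1): deflection angle of a p-ray through a spherical droplet
    return (p - 1) * np.pi + 2.0 * theta_i - 2.0 * p * np.arcsin(np.sin(theta_i) / m)

res = minimize_scalar(deflection, bounds=(0.0, np.pi / 2.0), method='bounded')
print(np.degrees(res.x), np.degrees(res.fun))
# the minimum deflection (about 231 degrees) reproduces theta_rg from Eq. (3)
```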
The eigenvalues of the light intensity distribution in the rainbow region (such as θmax1, θmax2) correspond to particular values of the dimensionless parameter z.

Performing algebraic operations on the above formulas, the geometrical-optics rainbow angle can be obtained from the first two troughs (θmin1 and θmin2):

$\theta_{rg} = \dfrac{\theta_{\min 1} - C_1\,\theta_{\min 2}}{1 - C_1}$ (7)

The refractive index can then be obtained from Equations (3) and (7), and the droplet radius can be calculated:

$a = \dfrac{2\lambda}{3\sqrt{3}}\left[\dfrac{(9 - m^2)^{1/2}}{(m^2 - 1)^{3/2}}\right]^{1/2}\left(\dfrac{z_{\min 2} - z_{\min 1}}{|\theta_{\min 1} - \theta_{\min 2}|}\right)^{3/2}$ (8)

where

$z_{\min 1} = 2.4956, \quad z_{\min 2} = 4.3632, \quad C_1 = 0.5719$ (9)

Figure 1. Normalized intensity distribution of the secondary rainbow region.
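A compact implementation of this trough-trough inversion, i.e. Equations (3), (7) and (8), might look as follows. It is a sketch under the stated constants: the measured trough angles are assumed to be in radians and in the same deflection-angle convention as Equation (3), and the refractive-index bracket [1.05, 2.0] is an assumption.

```python
import numpy as np
from scipy.optimize import brentq

Z_MIN1, Z_MIN2, C1 = 2.4956, 4.3632, 0.5719   # Eq. (9)
LAM = 0.6328e-6                                # wavelength, m

def theta_rg(m):
    # Eq. (3): geometrical-optics rainbow angle of the secondary rainbow (p = 3)
    s_i = np.sqrt((9.0 - m**2) / 8.0)           # sin(theta_i) at minimum deflection
    s_t = np.sqrt((9.0 - m**2) / (8.0 * m**2))  # sin(theta_t) = sin(theta_i)/m
    return 2.0 * np.pi + 2.0 * np.arcsin(s_i) - 6.0 * np.arcsin(s_t)

def invert_troughs(theta_min1, theta_min2):
    # Eq. (7): rainbow angle from the first two troughs
    t_rg = (theta_min1 - C1 * theta_min2) / (1.0 - C1)
    # refractive index: solve theta_rg(m) = t_rg (theta_rg is monotone in m here)
    m = brentq(lambda mm: theta_rg(mm) - t_rg, 1.05, 2.0)
    # Eq. (8): droplet radius
    geom = np.sqrt(9.0 - m**2) / (m**2 - 1.0)**1.5
    a = (2.0 * LAM / (3.0 * np.sqrt(3.0))) * np.sqrt(geom) \
        * ((Z_MIN2 - Z_MIN1) / abs(theta_min1 - theta_min2))**1.5
    return m, a
```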
The cut-off frequency of the low-pass filter is determined based on the elimination of the number of redundant inflection points after roughly estimating the position of the second trough.\n\nThe condition of central incidence is studied, where the center of the Gaussian beam is located on the Déscartes ray . According to the inversion algorithm given in Section 2, the calculated refractive index and its absolute error are shown in Figure 4. The calculated radius and its relative error are shown in Figure 5.\n\nFor the trough-trough (θmin1 and θmin2) algorithm, the absolute error of the refractive index is between −6.4 × 10−4 and 1.7 × 10−4 (see Figure 4(b), and the error of the droplet radius is only between −0.55% and 1.77% (see Figure 5(b)). For inflection-inflection (θinf1 and θinf2) algorithm, the inverted refractive index is less than −1.1 × 10−3 (see Figure 4(b)) and error of the droplet radius is between −0.75% and 5.67% (see Figure 5(b)). Obviously, both algorithms can be used for measuring droplet parameters, but the first algorithm is more suitable for calculating the droplet size than the second algorithm.\n\nFigure 2. The influence of Gaussian beam incidence with different beam waist radius on the peak position of the secondary rainbow light intensity distribution of the droplet: (a) for the first peak; (b) for the first trough.\n\nFigure 3. Normalized light intensity distribution for the secondary rainbow of a spherical droplet with r = 50 μm illuminated by Gaussian beam.\n\nFigure 4. (a) The comparison between the refractive index of the droplet extracted from the secondary rainbow pattern and the true value; (b) The absolute error of the calculated refractive index.\n\nFigure 5. (a) Comparison of the droplet radius extracted from the second-order rainbow pattern with the true value; (b) The relative error of the calculated droplet radius.\n\n4. Summary\n\nThe generalized Lorenz-Mie theory is used to simulate the rainbow scattering pattern of the droplets in the secondary rainbow region. The low-pass filter passes the simulated rainbow pattern to obtain the characteristic angles. Two different inversion algorithms are studied to invert the droplet parameters. The results show that different inversion algorithms are also feasible to measure droplet information. In particular, for the first algorithm, the error of the droplet radius is even less than 1.77%. However, in this study, the selection of the cut-off frequency of the digital low-pass filter requires artificial evaluation of the angular position of the rainbow diagram. Therefore, we need to develop a better method to select the cutoff frequency or find a better filter, which we need to study further.\n\nConflicts of Interest\n\nThe authors declare no conflicts of interest regarding the publication of this paper.\n\n Tropea, C. (2011) Optical Particle Characterization in Flows. Annual Review of Fluid Mechanics, 43, 399-426. https://doi.org/10.1146/annurev-fluid-122109-160721 Sazhin, S. (2014) Droplets and Sprays. Springer, London. https://doi.org/10.1007/978-1-4471-6386-2 Dames, P., Gleich, B., Flemmer, A., Hajeket, K., Seidl, N., Wiekhorst, F., Eberbeck, D., Bittmann, I., Bergemann, C., Weyh, T., Trahms, L., Rosenecker, J. and Rudolphal, C. (2007) Targeted Delivery of Magnetic Aerosol Droplets to the Lung. Nature Nanotechnology, 2, 495-499. https://doi.org/10.1038/nnano.2007.217 Nascimento, L.F., Saldarriaga, C.V., Vanhavere, F., D’Agostino, E., Defraene, G. and Deene, Y.D. 
(2013) Characterization of OSL Al2O3:C Droplets for Medical Dosimetry. Radiation Measurements, 56, 200-204. https://doi.org/10.1016/j.radmeas.2013.01.048 Albrecht, H.E., Borys, M., Damaschke, N. and Tropea, C. (2003) Laser Doppler and Phase Doppler Measurement Techniques. Springer-Verlag, Heidelberg. https://doi.org/10.1007/978-3-662-05165-8 Adrian R.J. and Westerweel, J. (2010) Particle Image Velocimetry. Cambridge University Press, Cambridge UK. Glover, A.R., Skippon, S.M. and Boyle, R.D. (1995) Interferometric Laser Imaging for Droplet Sizing: A Method for Droplet-Size Measurement in Sparse Spray Systems. Applied Optics, 34, 8409-8421. https://doi.org/10.1364/AO.34.008409 Wu, X.C., Lin, X.D., Yao, L.C., Wu, Y.C., Wu, C.Y., Chen, L.H. and Cen, K.F. (2019) Primary Fragmentation Behavior Investigation in Pulverized Coal Combustion with High-Speed Digital Inline Holography. Energy Fuels, 33, 8126-8134. https://doi.org/10.1021/acs.energyfuels.9b01521 Schäfer, W. and Tropea, C. (2014) Time-Shift Technique for Simultaneous Measurement of Size, Velocity, and Relative Refractive Index of Transparent Droplets or Particles in a flow. Applied Optics, 53, 588-597. https://doi.org/10.1364/AO.53.000588 van Beeck, J.P.A.J. and Riethmuller, M.L. (1995) Nonintrusive Measurements of Temperature and Size of Single Falling Raindrops. Applied Optics, 34, 1633-1639. https://doi.org/10.1364/AO.34.001633 Han, X.E., Ren, K.F., Wu, Z.S., Corbin, F., Gouesbet, G. and Gréhan, G. (1998) Characterization of Initial Disturbances in a Liquid Jet by Rainbow Sizing. Applied Optics, 37, 8498-8503. https://doi.org/10.1364/AO.37.008498 van Beeck, J.P.A.J., Giannoulis, D. and Zimmer, L. (1999) Global Rainbow Thermometry for Droplet-Temperature Measurement. Optics Letters, 24, 1696-1698. https://doi.org/10.1364/OL.24.001696 van Beeck, J.P.A.J., Zimmer, L. and Riethmuller, M.L. (2001) Global Rainbow Thermometry for Mean Temperature and Size Measurement of Spray Droplets. Particle & Particle Systems Characterization, 18, 196-204. https://doi.org/10.1002/1521-4117(200112)18:4<196::AID-PPSC196>3.0.CO;2-H Vetrano, M.R., van Beeck, J.P.A.J. and Riethmuller, M.L. (2004) Global Rainbow Thermometry: Improvements in the Data Inversion Algorithm and Validation Technique in Liquid-Liquid Suspension. Applied Optics, 43, 3600-3607. https://doi.org/10.1364/AO.43.003600 Lemaitre, P., Porcheron, E., Gréhan, G. and Bouilloux, L. (2006) Development of a Global Rainbow Refractometry Technique to Measure the Temperature of Spray Droplets in a Large Containment Vessel. Measurement Science and Technology, 17, 1299-306. https://doi.org/10.1088/0957-0233/17/6/002 Wu, Y.C., Li, C., Cao, J.Z., Wu, X.C., Saengkaew, S., Chen, L.H., Gréhan, G. and Cen, K.F. (2018) Mixing Ratio Measurement in Multiple Sprays with Global Rainbow Refractometry. Experimental Thermal and Fluid Science, 98, 309-316. https://doi.org/10.1016/j.expthermflusci.2018.06.004 Yu, H.T., Sun, H. and Shen, J.Q. (2018) Measurements of Refractive Index and Size of a Spherical Drop from Gaussian Beam Scattering in the Primary Rainbow Region. Journal of Quantitative Spectroscopy and Radiative Transfer, 207, 83-88. https://doi.org/10.1016/j.jqsrt.2017.12.028 Cao, Y.Y., Wang, W.T., Yu, H.T., Shen, J.Q. and Tropea, C. (2020) Characterization of Refractive Index and Size of a Spherical Drop by Using Gaussian Beam Scattering in the Secondary Rainbow Region. Journal of Quantitative Spectroscopy and Radiative Transfer, 242, Article ID: 106785. 
https://doi.org/10.1016/j.jqsrt.2019.106785 Hulst, H.C.V.D. (1957) Light Scattering by Small Particles. Physics Today, 10, 28-30. https://doi.org/10.1063/1.3060205 Wang, R.T. and Hulst, H.C.V.D. (1991) Rainbows: Mie Computations and the Airy Approximation. Applied Optics, 30, 106-117. https://doi.org/10.1364/AO.30.000106", null, "", null, "", null, "", null, "", null, "", null, "", null, "", null, "", null, "", null, "" ]
[ null, "https://www.scirp.org/images/Twitter.svg", null, "https://www.scirp.org/images/fb.svg", null, "https://www.scirp.org/images/in.svg", null, "https://www.scirp.org/images/weibo.svg", null, "https://www.scirp.org/images/email.svg", null, "https://www.scirp.org/images/WhatsApp.svg", null, "https://www.scirp.org/images/qq.svg", null, "https://www.scirp.org/images/weixinsrp120.jpg", null, "https://www.scirp.org/images/weixin.svg", null, "https://www.scirp.org/Images/ccby.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.88562536,"math_prob":0.975275,"size":9036,"snap":"2021-31-2021-39","text_gpt3_token_len":2017,"char_repetition_ratio":0.16275465,"word_repetition_ratio":0.049655173,"special_character_ratio":0.21823816,"punctuation_ratio":0.10194175,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.99272317,"pos_list":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20],"im_url_duplicate_count":[null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-07-23T19:04:13Z\",\"WARC-Record-ID\":\"<urn:uuid:7c53a8bc-acf1-41dd-8449-dab044c92132>\",\"Content-Length\":\"130156\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:3df6e56f-9a03-42cb-b727-2032efef8e2e>\",\"WARC-Concurrent-To\":\"<urn:uuid:003af6e2-9985-4414-9ece-885dda0705fa>\",\"WARC-IP-Address\":\"144.126.144.39\",\"WARC-Target-URI\":\"https://www.scirp.org/journal/paperinformation.aspx?paperid=109813\",\"WARC-Payload-Digest\":\"sha1:BZRLAUYDD7D4NETHE77OVMJJVAFHK4ZP\",\"WARC-Block-Digest\":\"sha1:ILIZPIZNH4AZ3TKFER2EGASMYN6FBCLC\",\"WARC-Identified-Payload-Type\":\"application/xhtml+xml\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-31/CC-MAIN-2021-31_segments_1627046150000.59_warc_CC-MAIN-20210723175111-20210723205111-00526.warc.gz\"}"}
https://johnbaras.com/publication-conferen/an-optimization-problem-from-linear-filtering-with-quantum-measurements/
[ "# An Optimization Problem from Linear Filtering with Quantum Measurements

Title: An Optimization Problem from Linear Filtering with Quantum Measurements
Authors: Baras, John S.
Published in: Journal of Applied Mathematics and Optimization, Vol. 18, pp. 191-214

We consider the problem of optimal (in the sense of minimum error variance) linear filtering of a vector discrete-time signal process, which influences a quantum mechanical field, utilizing quantum mechanical measurements. The nonclassical characteristic of the problem is the joint optimization over the measurement process and the linear signal processing scheme. The problem is formulated as an optimization problem of a functional over a set of operator-valued measures and matrices. We prove the existence of optimal linear filters and provide necessary and sufficient conditions for optimality." ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.8493682,"math_prob":0.9038154,"size":886,"snap":"2021-31-2021-39","text_gpt3_token_len":154,"char_repetition_ratio":0.14058957,"word_repetition_ratio":0.08130081,"special_character_ratio":0.1738149,"punctuation_ratio":0.10638298,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.96967775,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-09-22T23:55:10Z\",\"WARC-Record-ID\":\"<urn:uuid:decb1227-fd3c-48c4-85ab-3238f276f2a5>\",\"Content-Length\":\"49689\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:9f11b6c4-ee9e-4bcf-aa2a-e626c0664335>\",\"WARC-Concurrent-To\":\"<urn:uuid:6a3217f2-09f7-4210-9208-cd9930551eaf>\",\"WARC-IP-Address\":\"192.0.78.218\",\"WARC-Target-URI\":\"https://johnbaras.com/publication-conferen/an-optimization-problem-from-linear-filtering-with-quantum-measurements/\",\"WARC-Payload-Digest\":\"sha1:PEBTLEL2IWHPCH7RARNTZMM4A57ZTYL4\",\"WARC-Block-Digest\":\"sha1:HPP4WXRR32EQQPWBQIQV5CB3SO66DWZ5\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-39/CC-MAIN-2021-39_segments_1631780057403.84_warc_CC-MAIN-20210922223752-20210923013752-00631.warc.gz\"}"}
https://madmimi.com/s/ec18a01
[ "HRC Joins Laz on #GVRAT1000K: Great Virtual Race Across Tennessee 1000K (runsignup.com). 5 miles per day, walk or run, for the Summer!",
"Click for the Online Trail-Surfer Shop!",
[ null, "https://d1lggihq2bt4jo.cloudfront.net/assets/responsive_divider-003cda7043b1bbd93c29436541bdc9f7503eb3bbb2fb9b9323bc9b29c83a9fe6.png", null, "https://d1lggihq2bt4jo.cloudfront.net/assets/responsive_divider-003cda7043b1bbd93c29436541bdc9f7503eb3bbb2fb9b9323bc9b29c83a9fe6.png", null, "https://d1lggihq2bt4jo.cloudfront.net/assets/responsive_divider-003cda7043b1bbd93c29436541bdc9f7503eb3bbb2fb9b9323bc9b29c83a9fe6.png", null, "https://cascade.madmimi.com/promotion_images/1547/1325/original/94737774_2574698466082113_7475446022420824064_n.jpg", null, "https://cascade.madmimi.com/promotion_images/1547/1324/original/95175428_2574698762748750_7369737711806054400_n.jpg", null, "https://cascade.madmimi.com/promotion_images/1547/1323/original/94976178_2574698792748747_5626481585248272384_n.jpg", null, "https://cascade.madmimi.com/promotion_images/1547/1322/original/94802989_2574698916082068_2400121832033222656_n.jpg", null, "https://d1lggihq2bt4jo.cloudfront.net/assets/responsive_divider-003cda7043b1bbd93c29436541bdc9f7503eb3bbb2fb9b9323bc9b29c83a9fe6.png", null, "https://d1lggihq2bt4jo.cloudfront.net/assets/responsive_divider-003cda7043b1bbd93c29436541bdc9f7503eb3bbb2fb9b9323bc9b29c83a9fe6.png", null, "https://cascade.madmimi.com/promotion_images/1552/7184/original/mcqueen.jpg", null, "https://d1lggihq2bt4jo.cloudfront.net/assets/responsive_divider-003cda7043b1bbd93c29436541bdc9f7503eb3bbb2fb9b9323bc9b29c83a9fe6.png", null, "https://cascade.madmimi.com/promotion_images/1552/7186/original/Stacy-Powell.jpg", null, "https://d1lggihq2bt4jo.cloudfront.net/assets/responsive_divider-003cda7043b1bbd93c29436541bdc9f7503eb3bbb2fb9b9323bc9b29c83a9fe6.png", null, "https://d1lggihq2bt4jo.cloudfront.net/assets/responsive_divider-003cda7043b1bbd93c29436541bdc9f7503eb3bbb2fb9b9323bc9b29c83a9fe6.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.6519061,"math_prob":0.9577559,"size":616,"snap":"2021-43-2021-49","text_gpt3_token_len":166,"char_repetition_ratio":0.119281046,"word_repetition_ratio":0.38709676,"special_character_ratio":0.26136363,"punctuation_ratio":0.14782609,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.991131,"pos_list":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28],"im_url_duplicate_count":[null,null,null,null,null,null,null,4,null,4,null,4,null,4,null,null,null,null,null,2,null,null,null,2,null,null,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-11-28T20:26:27Z\",\"WARC-Record-ID\":\"<urn:uuid:1ca9a4bf-1ded-4110-9fb3-1cc38e7924ce>\",\"Content-Length\":\"97665\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:3fa14d63-e924-404d-88b6-051beb440d1b>\",\"WARC-Concurrent-To\":\"<urn:uuid:9976d259-5662-4e34-a30f-d93b7427ad00>\",\"WARC-IP-Address\":\"198.71.248.151\",\"WARC-Target-URI\":\"https://madmimi.com/s/ec18a01\",\"WARC-Payload-Digest\":\"sha1:7S5IPCLDDJSWTJQ5T457O6DVF5GU33A4\",\"WARC-Block-Digest\":\"sha1:MX3HSZKUTUPPLCRIQQVHM4HRJMPOJXIY\",\"WARC-Identified-Payload-Type\":\"application/xhtml+xml\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-49/CC-MAIN-2021-49_segments_1637964358591.95_warc_CC-MAIN-20211128194436-20211128224436-00477.warc.gz\"}"}
https://chothuemasteri.info/hamrock-elementos-de-maquinas-57/
[ "# HAMROCK ELEMENTOS DE MAQUINAS PDF\n\nDownload Elementos de Maquinas Bernard k. Elementos De Máquinas Autor: Bernard J. Hamrock, Bo Jacobson, Steven R. Schmid. Análisis crítico de los problemas que se presentan en el vaciado de. : ELEMENTOS DE MAQUINAS () and a great selection of 1. Elemento de maquinas. Hamrock. Published by MC GRAW HILL .", null, "Author: Dukasa Branris Country: Samoa Language: English (Spanish) Genre: Life Published (Last): 6 April 2007 Pages: 372 PDF File Size: 6.58 Mb ePub File Size: 12.82 Mb ISBN: 136-6-64849-274-9 Downloads: 14096 Price: Free* [*Free Regsitration Required] Uploader: Tojatilar", null, "The material should be recyclable or be able to be burned leaving harmlesscombustion products. The skiers weight is mag, and theslope has a normal force as a reaction and afriction force which namrock tangent to theslope.\n\nDraw the shear force and moment along thelength of the bar and give tabular elmentos. The firstboundary condition gives: Material properties are obtained from Table A.\n\nThis solution uses singularity functions, although it could be solved through the othermethods in Chapter 2. Referring to the sketch in Figure 6. Although the diameter is notspecified at this location, it is reasonable to approximate it as 1in. Therefore, the moments of inertia for the inner and outer circles are: The three dimensional Mohrs circle canbe drawn using these stresses as shown below: Note from Figure 2.\n\nThe maximum deflection occurs at the free end of the cantilever, but the maximum momentlocation is unknown; it is obtained by taking the derivative of the moment equation.\n\nThis solution makessure that the steel does not plastically deform before catastrophic crack propagation occurs. Principio de Saint Venant: The eccentricity is given by Equation 4. The diagramsare as follows: Symmetry should be used to transform the problem to a 1. Calculate the safety factor. Therefore, the total strain experienced by the composite is constant, as is thestrain seen by the fibers. In this problem, the shear and momentdiagrams are obtained through direct integration, as suggested by Equations 2.\n\nIBM WHEELWRITER 3500 MANUAL PDF\n\nNote that large negativebending stresses are still objectionable. Note that Kca and Kcbare functions of only d, since the other variables needed for their definition are fixed. Load, Stress and Strain. Therefore, Kc is just under 2. Strain Gage Rosette Figure 2.\n\n## SOLU Elementos de Maquinas – Hamrock, Bernard J. Jacobson, Bo Schmid, Steven R.\n\nTherefore, oneonly needs to consider the left link. Therefore, the circle can be drawn as follows: The hamroco properties as a function of temper temperature is obtained from Table 6.\n\nToughness is defined on page as the ability to absorb energy up to fracture. Therefore, a one gallon container is preferably madeof a plastic with an integral handle. Bothspheres have a diameter of 0. Page Note that the radii of the circles are easily calculated to give the principal shear stresses seeEquation 2. Assume that the bracket is made of AISI steel and use thefollowing values: The area is obtained from Equation 4.\n\n### PPT – BIBLIOGRAFIA de referencia PowerPoint Presentation – ID\n\nThe shaft can be consideredweightless. This problem can be easily solved through the principal of superposition. Determine the shear force and bending moment inthe bar.", null, "Elements in Bending Figure 4. 
The forces in each member are obtained from statics, but in order to obtain the verticaldeflection at point A, include a load Q in the analysis as shown. Elementks three dimensional equilibrium, Equations 2.\n\nLUDOVICO EINAUDI LE ONDE SHEET MUSIC PDF", null, "Furthermore, it is an example of a fail-safe design, because if thetowbar fails, the towed vehicle will follow the car and will not swerve into other lanes and collidewith other vehicles. Theweight of the skier and equipment is kg. This beam, which carries a balcony onthe wall of a house, is welded together with a beam in the house structure.\n\nTecnologia meccanica by Serope Kalpakjian Book 3 editions published between and in Italian and held by 15 WorldCat member libraries worldwide. Using a point on the circle of MPa, 30MPa ,the circle is drawn as follows: Assume the friction force is the viscosity times the surface area timesvelocity of the moving surfaces and divided by the lubricant film thickness.\n\nBending Stress Distribution Figure 4.", null, "The two alternatives are sketched above. Also, itshould be noted that for the moments of inertia to be evaluated about the x-y axes, Ixr needs to betaken for the base of the rectangle and Iyr needs to be taken from the centroid. It may be helpful dee think of thecutouts as negative areas in the application of Equations 4. Find the stress at theinnermost and outermost radii." ]
[ null, "https://imgv2-1-f.scribdassets.com/img/document/371393573/original/40f77a230e/1536485269", null, "https://chothuemasteri.info/download_pdf.png", null, "https://3.bp.blogspot.com/-vHAOTAQOXHo/WHUn-GM9qMI/AAAAAAAAJqE/hemumXzmLN4o3EvRNmC-58MyjdGLinqGQCLcB/s1600/hamcrock.jpg", null, "https://image.slidesharecdn.com/hamrockelementosdemquinas-160614002151/95/hamrock-elementos-de-mquinas-1-638.jpg", null, "https://imgv2-1-f.scribdassets.com/img/document/371393573/149x198/b3b4d427a7/1518492529", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.8899937,"math_prob":0.85749143,"size":4649,"snap":"2020-45-2020-50","text_gpt3_token_len":1086,"char_repetition_ratio":0.097308934,"word_repetition_ratio":0.0,"special_character_ratio":0.20585072,"punctuation_ratio":0.12891985,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9587451,"pos_list":[0,1,2,3,4,5,6,7,8,9,10],"im_url_duplicate_count":[null,6,null,4,null,6,null,8,null,4,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-12-03T19:07:42Z\",\"WARC-Record-ID\":\"<urn:uuid:44126e8e-414f-4a86-9154-3353bd4d9121>\",\"Content-Length\":\"40489\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:3e8ee80b-c8bb-4d71-ad0c-840dc1205dd7>\",\"WARC-Concurrent-To\":\"<urn:uuid:b703bfce-6d3a-4959-9ede-f255fbc2ee51>\",\"WARC-IP-Address\":\"104.18.39.69\",\"WARC-Target-URI\":\"https://chothuemasteri.info/hamrock-elementos-de-maquinas-57/\",\"WARC-Payload-Digest\":\"sha1:7VB63RZNPCCKWKIISL3T34JLRUQUSJ4E\",\"WARC-Block-Digest\":\"sha1:FPYZCV2OA6JXFXTAUWSOTPOJOXFE4Z24\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-50/CC-MAIN-2020-50_segments_1606141732696.67_warc_CC-MAIN-20201203190021-20201203220021-00558.warc.gz\"}"}
https://www.rroij.com/open-access/2d-cross-correlation-multimodal-image-recognition.php?aid=38212
[ "All submissions of the EM system will be redirected to Online Manuscript Submission System. Authors are requested to submit articles directly to Online Manuscript Submission System of respective journal.\n\n# 2D CROSS CORRELATION MULTI-MODAL IMAGE RECOGNITION\n\n C Yuganya1, A V Khirthana2, and Udayakumara pandian3 The Computer Software Department, Bharath University, Chennai, TamilNadu, India The Computer Software Department, Bharath University, Chennai, TamilNadu, India The Computer Science Department, Bharath University, Chennai, TamilNadu,India Corresponding Author: C Yuganya, E-mail: [email protected] Related article at Pubmed, Scholar Google\n\nVisit for more related articles at Journal of Global Research in Computer Sciences.\n\n## Abstract\n\nThe bio-metric is ultra secure and more than one form of biometric identification is required. To use a combination of different biometric recognition we are using multi-modal biometric recognition. In this paper, the multi-level wavelet transform algorithm that combines information from palm, face, iris, and digital signature images and recognition of signature that makes use of biometric traits to recognize individuals. Multimodal biometric systems technology uses more than one biometric identifier to compare the identity of the image..A multi modal biometric system of iris and palm print based on Wavelet Packet Analysis is described. The visible texture of a person's face, palm, iris and signature is encoded into a compact sequence of 2-D wavelet packet coefficients, which generate a \"feature vector code\". The multi-resolution approach based on Wavelet Packet Transform (WPT) for texture analysis and recognition of face, iris, palm, and signature images. WPT sub images coefficients are quantized into 1, 0 or -1 as multi-resolution. The input of the iris,palm,face and signature is matched and stores as a scrumbled image usin DWT algorithm,then reconstruct the method and then gives the result to get access into the data.\n\n### Index terms\n\nWavelet Packet, Face, Palm, Iris, digital signature, recognition, bio-metric, multi level.\n\n### INTRODUCTION\n\nA major motivation for using biometrics is the ability to authenticate the true identity of an individual. Sometimes the problem is associated with noisy data that collapse and loose datas.All the process left to time consumption. To overcome these problems, multi-modal biometrics relies on more than one form of biometric data. The images can be separated into three types they are, 1.RGB (its value is 0-255), 2.Gray Scale (its value is 0-255), 3.Binary (its value is either 0 or 1). Image creates an image graphics object by interpreting each element in a matrix as an index into the figure's color map or directly as RGB values. once the preprocessing and gray conversion of the image is completed it is set to resize the image in short it is said to be as imresize.Later combine the preprocessed image, and to get the fussed image we apply IDWT(Inverse Discrete Wavelet Transform).The data-base process of DWT(Discrete Wavelet Transform) is stored as fused image. At last the recognize process by giving the individuals user name and the password which will reconstruct the image given by the user previously and match them and gives the output. The advantages of the proposed approach are reduction in memory size, increase in recognition accuracy due to the use of multi-modal biometric. 
The proposed system provides a very accurate outcome and more secure access to the database.\n\n### RELATED WORKS\n\nA successful implementation of an iris recognition system was proposed in 1993. Although published more than 29 years ago, it remains valuable because it provides solutions for each part of the problem, and it is important that most of the data is secure. The methods are based on Gabor wavelet analysis, in order to extract iris, palm, face, and signature features, and consist in the convolution of the image with complex Gabor filters. In this approach the iris image is preprocessed for contrast enhancement. After preprocessing, a ring mask is created and moved through the entire image to obtain the iris data. Using these data, the iris and pupil are reconstructed from the original picture. Using the iris center and its radius, the iris is cropped out of the reconstructed image. The iris data (the iris donut shape) is transformed into a rectangular shape, and a scrambled, fused iris pattern is matched. In the method followed by Jie Wang, the iris texture extraction is performed by applying the wavelet packet transform (WPT), together with the DWT and IDWT algorithms.\nThe iris image is decomposed into sub-images by applying the WPT; suitable sub-images are selected and their WPT coefficients are encoded. The iris feature extraction method is designed so that iris imaging captures a minimum of 70 pixels in iris radius; an iris radius of 100 to 140 pixels has been more typical. CCD monochrome cameras of (480 x 640) have been used, because NIR illumination in the 700nm - 900nm band is required for the imaging to be invisible to human view. A wide-angle camera is used to detect the eyes in faces and acquire higher-resolution images of the eyes. Many alternative methods for finding the iris image are in use. An iris recognition system can be described briefly as an iris detector for the detection and location of the iris image, a feature extractor to extract the features, and a pattern matching module for matching the given input image; the iris is extracted from the given image of the entire eye. To perform iris pattern matching, the iris is localized and extracted from the acquired image. To carry out this process we use a wavelet-packet transform algorithm. Here, the standard discrete wavelet transform (DWT) is a powerful tool used successfully to solve various problems in image processing.\nThe DWT divides an image into four sub-sampled images: one that has been high-passed in both the horizontal and vertical directions (HH), one that has been low-passed in the vertical and high-passed in the horizontal direction (LH), one that has been high-passed in the vertical and low-passed in the horizontal direction (HL), and one that has been low-passed in both directions (LL). H and L denote the high-pass and low-pass filters, respectively. HH, where the high-pass filter is applied in both directions, represents the diagonal features of the image; HL captures features in the horizontal direction, LH captures features in the vertical direction, and LL is used for further processing. The Wavelet Packet Transform (WPT) is a wavelet transform used for image analysis. With the WPT it is possible to zoom into any desired frequency channel for further extraction. Compared with the WT, the WPT gives a much finer extraction. 
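To make the sub-band decomposition just described concrete, here is a minimal MATLAB sketch (not taken from the paper; it assumes the Image Processing and Wavelet Toolboxes, and 'face.jpg' is a hypothetical file name) that splits an image into the four sub-bands with dwt2 and verifies reconstruction with idwt2:

```matlab
% Minimal sketch (not from the paper): one-level 2-D DWT and reconstruction.
% Assumes MATLAB's Wavelet Toolbox; 'face.jpg' is a hypothetical input file.
img = imread('face.jpg');
if size(img, 3) == 3          % convert RGB input to grayscale
    img = rgb2gray(img);
end
img = im2double(img);

% dwt2 returns the approximation (LL role) and the horizontal, vertical
% and diagonal detail sub-bands described in the text.
[cA, cH, cV, cD] = dwt2(img, 'haar');

subplot(2, 2, 1), imshow(cA, []), title('LL (approximation)')
subplot(2, 2, 2), imshow(cH, []), title('horizontal detail')
subplot(2, 2, 3), imshow(cV, []), title('vertical detail')
subplot(2, 2, 4), imshow(cD, []), title('diagonal detail')

% idwt2 inverts the transform; cropping guards against odd image sizes.
rec = idwt2(cA, cH, cV, cD, 'haar');
rec = rec(1:size(img, 1), 1:size(img, 2));
fprintf('max reconstruction error: %g\n', max(abs(rec(:) - img(:))));
```

Repeating dwt2 on the approximation sub-band gives the deeper multi-level and wavelet-packet decompositions that the paper builds on. 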
An algorithm for progressive texture classification with the WPT gives better performance, because the iris image frequencies are located in the low and middle frequency channels.\nA digital signature is a term used to describe a data string which associates a digital message with an assigned person's image only. Authentication, data integrity, and non-repudiation have large applications in data security. One of the most significant advances in digital signature technology was the development of the first practical cryptographic scheme, called RSA, which still remains one of the most practical and versatile digital signature techniques available today.\nWhen a face recognition system is successfully deployed, it is fully automatic. A fully automatic system detects and identifies/verifies a face in an image without human intervention. Fully automatic face recognition systems generally have two components, recognition and detection. The recognition component identifies or verifies the face. Usually, a recognition module requires that the face be in a standard position. A new image of a face is transformed into image components; this image is then compared with the given input image, extracted using the DWT algorithm, scrambled, and stored. After this process completes, the matching step reconstructs and matches both the input image and the original secure image. Images differ in their matrix values. The given input face image goes through a fusion process, and then the face is extracted and reconstructed. The inputs are matched against the corresponding iris, palm, face, and digital signature images. This can be used when setting the user name and password for personal data, where your face, iris, palm, and digital signature are taken as the password to your personal data. It can be applied to various datasets to maintain and secure projects, with faces, iris, palm, and signature as the data identity. This helps keep data maintained and thread safe. Every person has a unique iris, palm, face, and signature as their identity. The basic input for face recognition is very important. All of the recognition is implemented in MATLAB using its image processing applications and toolbox.\nFor the palm print, feature extraction works on the acquired raw image. The image matching process is identified by the variation and similarity with the given image, in both performance and image verification modes. The proposed image analysis has limited advantages for matching the process. The palm line features and the wrinkles are determined using techniques such as datum point determination, which finds and locates the endpoints of each palm line. Each line is located inside the palm in parallel, with the endpoints closer to the fingers and wrist. Digital palm print images are used for clear and accurate matching for database protection, and the wavelet-based DWT transform is used. To recognize the palm print image, a Gabor filter is used, with the palm print represented as a Line Edge Map (LEM) and the Hausdorff distance algorithm used for distance matching. The interlaminar interactions of the neural network execute in two layers, one with fixed weights and the other with adjustable weights, giving the coefficients of 2-D Gabor transforms without the related conditions. 
Even when the image transform is not complete, the coefficients may be interpreted as signifying the presence of certain features in the image; the network finds optimal coefficients in the sense of minimal mean-squared error in representing the image. The entropy of a normal image drops from 7.57 in the pixel representation to 2.55 in the complete 2-D Gabor transform. In "wavelet" expansions based on a WPT and DWT of a 2-D Gabor wavelet template, image compression is achieved with ratios up to 20:1. Image segmentation is demonstrated based on the clustering of coefficients in the complete 2-D Gabor transform.\n\n### PROPOSED SYSTEM\n\nAn image processing recognition system is proposed to safeguard the data stored by a particular user. The first step is to collect different persons' face, iris, palm, and signature images; each person has their own identifying face, iris, palm, and signature images. When the input is given, the process starts by the matrix method. Three steps are to be accomplished: preprocessing, database processing, and recognition.\n\n### PREPROCESSING\n\nThe person's images are already given and stored; the given image may be a JPG or any other type. When the person gives the face, palm, iris, and signature inputs, the conversion process starts: first the gray conversion occurs, in which the given image is converted into a gray-shaded image and the four input images are subplotted, and then the image is resized.\nThe inputs of the face, palm, iris, and signature images are given. A normal image has three variations with their value ranges: RGB (0-255), Grayscale (0-255), and Binary (0 or 1). Using these values we change the image colors; in the same way we change the values of the given input image and convert it into gray. If we give the binary value as 0, the image will be black; if we give 1, the image will be white. After conversion, the image is read with a = imread('water.jpg'), and shown with figure, imshow(a). To convert the RGB image into gray we include as = rgb2gray(a); when this line runs, the image is converted and shown.\nThe subplot syntax is h = subplot(m,n,p) or subplot(mnp). The subplot function divides the current figure into rectangular panes that are numbered row-wise; every row and column position is computed by the matrix method, and subsequent plots are output to the current pane. The syntax subplot(m,n,p) or subplot(mnp) breaks the figure window into an m-by-n matrix of small axes, selects the p-th axes object for the subplot, and returns its handle. The positions are counted along the top row of the figure window, then the second row, etc. For example,\nsubplot(2,1,1), plot(income)\nsubplot(2,1,2), plot(outgo)\nplots income on the top half of the window and outgo on the bottom half. If a panel handle is passed, the panel is used as the parent for the subplot instead of the current figure. The new axes object becomes the current axes. With the replace syntax subplot(m,n,p,'replace'), if the axes already exist, they are deleted and new axes are created.\nA resize method works on the matrix: by using the scale method the image gets resized. 
A normal image has a value of 600×800×3 uint8; after resizing with a scale of 0.3 we get 180×240×3 uint8. The imresize syntax is given below:\nB = imresize(A, scale)\nB = imresize(A, [mrows ncols])\n[Y newmap] = imresize(X, map, scale)\nB = imresize(A, scale) returns an image B that is scale times the size of A. The given input matrix image A can be a grayscale, RGB, or binary image. If scale is between 0 and 1.0, B is smaller than A. If scale is greater than 1.0, B is larger than A. B = imresize(A, [mrows ncols]) returns the image B with the number of rows and columns specified by [mrows ncols]. Either NUMROWS or NUMCOLS may be NaN, in which case imresize computes the number of rows or columns automatically to preserve the image aspect ratio.\n[Y newmap] = imresize(X, map, scale) resizes the indexed image X. The scale argument can either be a numeric scale factor or a size for the output image ([numrows numcols]). By default, imresize returns a new, optimized color map (newmap) with the resized image. To return a color map that is the same as the original color map, use the 'Colormap' parameter.\n\n### DATA BASE PROCESS\n\nIn this process the converted images are combined, and the IDWT (Inverse Discrete Wavelet Transform) algorithm produces a fused image; the whole result is then reconstructed. The converted images are combined, scrambled, and reconstructed using the DWT (Discrete Wavelet Transform) algorithm and stored as a fused image. The DWT block (which computes the discrete wavelet transform of its input) computes the DWT of each column of the matrix image. The output is a reconstructed, fused, and scrambled matrix image with the same identity as the input; each column of the output is the DWT of the corresponding input column. The Wavelet Toolbox must be installed for the block to automatically design wavelet-based filters to compute the DWT. Otherwise, you must specify your own low-pass and high-pass FIR filters by setting the Filter parameter to User defined. For the same input, the DWT block and the Wavelet Toolbox function do not produce the same results: the blockset is designed for real-time implementation while the toolbox is designed for analysis, so they handle the data differently. To make the output of the dwt function and the DWT block match, complete the following steps:\na. For the dwt function, set the boundary condition to zero-padding by typing dwtmode('zpd') at the MATLAB® command prompt.\nb. To match the latency of the DWT block, which is implemented using FIR filters, add zeros to the input of the dwt function. The number of zeros you add must be equal to half the filter length.\n\n### RECOGNIZE\n\nThis process reconstructs the stored template and matches it with the user's input, and then recognizes the user. If any one of the images is mismatched, it won't recognize until all of them match. 
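Putting the three stages together, the following is a minimal, hypothetical MATLAB sketch of the fusion and recognition flow described above. It is not the authors' code: dwt2/idwt2 from the Wavelet Toolbox stand in for the DWT/IDWT blocks, the file names are placeholders, and simple normalized 2-D cross-correlation (corr2) with an arbitrary threshold stands in for the match step:

```matlab
% Hypothetical sketch of the preprocessing -> fusion -> matching pipeline.
% File names are placeholders; requires Image Processing + Wavelet Toolboxes.
files = {'face.jpg', 'palm.jpg', 'iris.jpg', 'signature.jpg'};
imgs  = cell(1, 4);
for k = 1:4                                % preprocessing stage
    a = imread(files{k});
    if size(a, 3) == 3
        a = rgb2gray(a);                   % gray conversion
    end
    imgs{k} = im2double(imresize(a, [128 128]));   % common size
end

% Fusion stage: average the DWT coefficients of the four traits and
% rebuild one "fused" template with the inverse transform.
[cA, cH, cV, cD] = dwt2(imgs{1}, 'haar');
for k = 2:4
    [a2, h2, v2, d2] = dwt2(imgs{k}, 'haar');
    cA = cA + a2;  cH = cH + h2;  cV = cV + v2;  cD = cD + d2;
end
template = idwt2(cA/4, cH/4, cV/4, cD/4, 'haar');   % stored template

% Matching stage: fuse a probe the same way, then compare with corr2.
probe = template;                          % stand-in for a new capture
score = corr2(template, probe);            % normalized cross-correlation
if score > 0.95                            % threshold chosen arbitrarily
    disp('Access granted');
else
    disp('Access denied');
end
```
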
The idwt algorithm performs a single-level one-dimensional wavelet reconstruction of the signal with respect to either a particular wavelet (wname) or particular wavelet reconstruction filters (Lo_R and Hi_R) that you specify.\nThe syntax is given as:\nX = idwt(cA,cD,'wname')\nX = idwt(cA,cD,Lo_R,Hi_R)\nX = idwt(cA,cD,'wname',L)\nX = idwt(cA,cD,Lo_R,Hi_R,L)\nX = idwt(...,'mode',MODE)\nX = idwt(cA,cD,'wname') returns the single-level reconstructed approximation coefficients vector X based on the approximation and detail coefficients vectors cA and cD, using the wavelet 'wname'. X = idwt(cA,cD,Lo_R,Hi_R) reconstructs as above using the filters that you specify.\n* Lo_R is the reconstruction low-pass filter.\n* Hi_R is the reconstruction high-pass filter.\nLo_R and Hi_R must be the same length. Let la be the length of cA (which also equals the length of cD) and lf the length of the filters Lo_R and Hi_R; then length(X) = LX, where LX = 2*la if the DWT extension mode is set to periodization. For the other extension modes, LX = 2*la-lf+2.\nX = idwt(cA,cD,'wname',L) or X = idwt(cA,cD,Lo_R,Hi_R,L) returns the length-L central portion of the result obtained using idwt(cA,cD,'wname'). L must be less than LX. X = idwt(...,'mode',MODE) computes the wavelet reconstruction using the specified extension mode MODE. X = idwt(cA,[],...) returns the single-level reconstructed approximation coefficients vector X based on the approximation coefficients vector cA. X = idwt([],cD,...) returns the single-level reconstructed detail coefficients vector X based on the detail coefficients vector cD.\n\n### SYSTEM ARCHITECTURE", null, "", null, "", null, "### RESULT\n\nThe experimental results clearly demonstrate the candidate's biometric image processing. By matching the iris, palm, face, and signature images, they are fused and scrambled into a distorted image using the IDWT algorithm; at last the image is reconstructed, matched against the given input, and the result is reported as either a match or a mismatch. The overall recognition of the data is improved. On the other hand, the storage for all these data requires very little memory in the database.\n\n### CONCLUSION\n\nStatistical experiments on 2D cross-correlation were conducted with multi-modal biometric images in which the user was looking directly at the imaging device. The purpose of approaching the issue of multi-modal pattern extraction without the assumption that patterns are circular is to allow for the extraction of fused and scrambled biometric images. This approach is applicable to non-orthogonal biometric images: when the image is rotated away from the normal to the imaging device, current commercial systems develop complications in extracting and authenticating a single biometric pattern. For this purpose a multimodal biometric recognition system is proposed, based on a 2D wavelet packet transform method.\n\n#### References\n\n1. John Daugman, \"Complete Discrete 2-D Gabor Transforms by Neural Networks for Image Analysis and Compression\", IEEE Transactions on Acoustics, Speech and Signal Processing, VOL. 36, No. 7, July 1988.\n2. John Daugman, \"High confidence visual recognition of persons by a test of statistical independence\", IEEE Transactions on Pattern Analysis and Machine Intelligence, VOL. 15, No. 11, November 1993.\n3. John Daugman, \"How iris recognition works\", IEEE Transactions on Circuits and Systems for Video Technology, VOL. 14, No. 1, January 2004.\n4. Lye Wil Liam, Ali Chekima, Liau Chung Fan, \"Iris recognition using self-organizing neural network\", IEEE 2002.\n5. Taekyoung Kwon and Jae-il Lee, \"Practical Digital Signature Generation using Biometrics\".\n6. P. Jonathon Phillips, R. Michael McCabe, \"Biometric Face Recognition and Image Processing\".\n7. Bradford Bonney, Robert Ives, Delores Etter, \"Iris pattern extraction using bit planes and standard deviations\", IEEE 2004.\n8. Lu Chenghong, Lu Zhaoyang, \"Efficient iris recognition by computing discriminable textons\", IEEE 2005.\n9. Jie Wang, Xie Mei, \"Iris feature extraction based on wavelet packet analysis\", IEEE 2006.\n10. K. Grabowski, W. Sankowski, \"Iris recognition algorithm optimized for hardware implementation\", IEEE 2006.\n11. Ajay Kumar, Helen C. Shen, \"Palm print Identification using Palm Codes\", Proceedings of the Third International Conference on Image and Graphics, 2004.\n12. Sree Rama Murthy Kora, Praveen Verma, Yashwant Kashyap, \"Palm print recognition and digital extraction\".\n13. Fang Li, Maylor K. H. Leung, Xiaozhou You, \"Palmprint Identification Using Hausdorff Distance\", 2004 IEEE International Workshop on Biomedical Circuits & Systems, 2004.\n14. www.mathworks.com", null, "" ]
[ null, "http://www.rroij.com/articles-images/GRCS-01-g001.gif", null, "http://www.rroij.com/articles-images/GRCS-01-g002.gif", null, "http://www.rroij.com/articles-images/GRCS-01-g003.gif", null, "https://www.vizagtechsummit.com/images/bellicon.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.8992695,"math_prob":0.9381154,"size":18844,"snap":"2023-14-2023-23","text_gpt3_token_len":4113,"char_repetition_ratio":0.14660297,"word_repetition_ratio":0.012135127,"special_character_ratio":0.20866059,"punctuation_ratio":0.10448593,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9570143,"pos_list":[0,1,2,3,4,5,6,7,8],"im_url_duplicate_count":[null,2,null,2,null,2,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-03-31T13:06:04Z\",\"WARC-Record-ID\":\"<urn:uuid:6fa3ed5a-7f13-49fa-b4f5-4d024c893c1f>\",\"Content-Length\":\"44450\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:bcc9815a-8377-49e2-80ea-42027ca68de1>\",\"WARC-Concurrent-To\":\"<urn:uuid:7ebf37e3-de09-46a0-8e68-c4040327f7d3>\",\"WARC-IP-Address\":\"172.67.70.248\",\"WARC-Target-URI\":\"https://www.rroij.com/open-access/2d-cross-correlation-multimodal-image-recognition.php?aid=38212\",\"WARC-Payload-Digest\":\"sha1:JNOZMFG6WNRXH2AVUIPS6FBLK2MLW6BI\",\"WARC-Block-Digest\":\"sha1:4BV667RT7BTHEMVYXRMKAM3DEJTGNLJS\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-14/CC-MAIN-2023-14_segments_1679296949642.35_warc_CC-MAIN-20230331113819-20230331143819-00668.warc.gz\"}"}
https://ww2.mathworks.cn/matlabcentral/answers/1459449-how-to-use-parpool-for-independent-expressions
[ "How to use parpool for independent expressions\n\n1 次查看(过去 30 天)\nOle2021-9-23\n\nHow to evaluate independent expressions in local parpool on laptop.\nWhen I try the code below, the parpool is idle.\nHow to force matlab to evaluate the expressions in parallel.\nx = 1; y =2; z=3;\nparpool('local',4)\ns = x + y + z;\na = 2*x + z;\nw = 3*y + 2*z;\nf = 3*x + 2*z;\n1 个评论显示隐藏 无\nRaymond Norris 2021-9-24\nLet me ask you this. Using a pool of workers (i.e. processes) cancels out any implicit threadedness. So will there be any value in doing as your requesting?\nTo see the impact, try the following:\ntic\n< run code >\ntoc\ntic\n< run code >\ntoc\nNext, run your code using a parallel pool of workers and parfeval (as Mohammad has suggested) and see how close it gets to your timings with maxNumCompThreads set to 'auto'. You might see it's a wash.\n\n采纳的回答\n\nYou can use the parfeval function to make these calculations on a worker thread.\nx = 1; y =2; z=3;\np=parpool('local',4)\nFs = parfeval(p,@(x,y,z) x + y + z,1,x,y,z);\nFa = parfeval(p,@(x,z)2*x + z,1,x,z);\nFw = parfeval(p,@(y,z)3*y + 2*z,1,y,z);\nFf = parfeval(p,@(x,z)3*x + 2*z,1,x,z);\ns = Fs.fetchOutputs;\na = Fa.fetchOutputs;\nw = Fw.fetchOutputs;\nf = Ff.fetchOutputs;\nTo avoid copying of data to workers from main thread if you have large datasets, you can use thread based parpool.\n\nR2021a\n\nCommunity Treasure Hunt\n\nFind the treasures in MATLAB Central and discover how the community can help you!\n\nStart Hunting!" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.7054345,"math_prob":0.9856642,"size":1562,"snap":"2022-05-2022-21","text_gpt3_token_len":531,"char_repetition_ratio":0.10012837,"word_repetition_ratio":0.02962963,"special_character_ratio":0.3175416,"punctuation_ratio":0.17819148,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.989833,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2022-01-19T16:56:09Z\",\"WARC-Record-ID\":\"<urn:uuid:22abcbc8-ba16-476c-82a5-1aa8372a8adf>\",\"Content-Length\":\"115165\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:a72673d2-0626-4731-be5b-a1db229c3950>\",\"WARC-Concurrent-To\":\"<urn:uuid:87a8eae3-bfef-4215-a49f-7dda73e5da4c>\",\"WARC-IP-Address\":\"104.108.97.133\",\"WARC-Target-URI\":\"https://ww2.mathworks.cn/matlabcentral/answers/1459449-how-to-use-parpool-for-independent-expressions\",\"WARC-Payload-Digest\":\"sha1:T5LTZES2PMHMCZF5SYOO3ZYZ7VVZT2AE\",\"WARC-Block-Digest\":\"sha1:UTW66ILO2QZRJRYMARZCXAVGZMK767QZ\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2022/CC-MAIN-2022-05/CC-MAIN-2022-05_segments_1642320301475.82_warc_CC-MAIN-20220119155216-20220119185216-00665.warc.gz\"}"}
http://support.sas.com/documentation/cdl/en/imlsstat/64253/HTML/default/imlsstat_statr_sect002.htm
[ "# Submit R Statements\n\nSubmitting R statements is similar to submitting SAS statements. You use a SUBMIT statement, but add the R option: SUBMIT / R. All statements in the program prior to the next ENDSUBMIT statement are sent to R for execution.\n\nThe simplest program that calls R is one that does not transfer any data between the two environments. In the following program, SAS/IML is used to compute the product of a matrix and a vector. The result is printed. Then the SUBMIT statement with the R option is used to send an equivalent set of statements to R.\n\n```/* Comparison of matrix operations in IML and R */\nprint \"---------- SAS/IML Results -----------------\";\nx = 1:3; /* vector of sequence 1,2,3 */\nm = {1 2 3, 4 5 6, 7 8 9}; /* 3x3 matrix */\nq = m * t(x); /* matrix multiplication */\nprint q;\n\nprint \"------------- R Results --------------------\";\nsubmit / R;\nrx <- matrix( 1:3, nrow=1) # vector of sequence 1,2,3\nrm <- matrix( 1:9, nrow=3, byrow=TRUE) # 3x3 matrix\nrq <- rm %*% t(rx) # matrix multiplication\nprint(rq)\nendsubmit;\n```\n\nFigure 11.1 Output from SAS/IML and R\n```---------- SAS/IML Results -----------------\n\nq\n\n14\n32\n50\n\n------------- R Results --------------------\n[,1]\n[1,] 14\n[2,] 32\n[3,] 50\n```\n\nThe printed output from R is automatically routed to the SAS/IML Studio output window, as shown in Figure 11.1. As expected, the result of the computation is the same in R as in SAS/IML." ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.7874956,"math_prob":0.9288663,"size":1331,"snap":"2019-51-2020-05","text_gpt3_token_len":367,"char_repetition_ratio":0.19442351,"word_repetition_ratio":0.0,"special_character_ratio":0.3809166,"punctuation_ratio":0.14661655,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.98498875,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2019-12-08T02:15:38Z\",\"WARC-Record-ID\":\"<urn:uuid:5f8a08d5-79ad-4c7b-8186-eee0a271a10e>\",\"Content-Length\":\"10243\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:360649f7-4785-4b7a-9a33-bcd6383e406f>\",\"WARC-Concurrent-To\":\"<urn:uuid:dd5a520c-1185-4ea0-add8-4f9d84ea6e79>\",\"WARC-IP-Address\":\"149.173.160.38\",\"WARC-Target-URI\":\"http://support.sas.com/documentation/cdl/en/imlsstat/64253/HTML/default/imlsstat_statr_sect002.htm\",\"WARC-Payload-Digest\":\"sha1:FFSLWCJN5RVG2IPSC4KLR2FR5UW7GINL\",\"WARC-Block-Digest\":\"sha1:UPI6LSGV5L2XCCW2AGAJKZVSR44VIPSV\",\"WARC-Identified-Payload-Type\":\"application/xhtml+xml\",\"warc_filename\":\"/cc_download/warc_2019/CC-MAIN-2019-51/CC-MAIN-2019-51_segments_1575540504338.31_warc_CC-MAIN-20191208021121-20191208045121-00380.warc.gz\"}"}
https://www.liwei8090.com/16251.html
[ "# 85个常用WordPress快捷键\n\n2019年5月17日15:38:24 2 253 views", null, "## WordPress标准快捷键\n\nWordPress是一个流行的建站系统和博客平台,附带一个强大的内容编辑器。\n\nWordPress快捷键跟计算机、Microsoft Word等快捷键编辑内容的功能和方式相同,实用WordPress快捷键可以轻轻松松访问编辑器上的格式和样式按钮。\n\nWindows用户的WordPress快捷键:\n\n• Ctrl + c =复制\n• Ctrl + v =粘贴\n• Ctrl + b =粗体\n• Ctrl + i =斜体\n• Ctrl + x =剪切\n• Ctrl + a =全选\n• Ctrl + z =撤消\n• Ctrl + s =保存更改\n• Ctrl + p =打印\n• Ctrl + u =为所选文本加下划线\n• Ctrl + k =将所选文本转换为链接\n• Alt + Shift + x =以等宽字体显示所选文本\n• Alt + Shift + h =显示键盘快捷键(显示此帮助)\n\nMac用户的WordPress快捷键:\n\n• Command + c =复制\n• Command + v =粘贴\n• Command + b = Bold\n• Command + i = Italic\n• Command + x =剪切\n• Command + a =全选\n• Command + z =撤消\n• Command + s =保存您的更改\n• Command + p =打印\n• Command + u =为所选文本加下划线\n• Command + k =将所选文本转换为链接\n• Option + Control + x =以等宽字体显示所选文本\n• Option + Control + h =显示键盘快捷键(显示此帮助)\n\n## 用于WordPress块编辑器Gutenberg的快捷键\n\n• Enter=添加新块\n• Ctrl + Shift + d =复制选定的块\n• Alt + Shift + z =删除选定的块\n• Ctrl + Alt + t =在所选块之前插入新块\n• Ctrl + Alt + y =在选定的块之后插入新块\n• / =添加新段落后更改块类型\n• Esc =清除选择\n• Ctrl + Shift + z =重做上次撤消操作\n• Ctrl + Shift +,=显示或隐藏设置栏\n• Alt + Shift + o =打开块导航菜单\n• Alt + Shift + n =导航到编辑器的下一部分\n• Alt + Shift + p =导航到编辑器的上一部分\n• Alt + F10 =导航到最近的工具栏\n• Ctrl + Shift + Alt + m =在Visual Editor和代码编辑器之间切换\n\nMac用户的Gutenberg快捷键:\n\n• Enter=添加新块\n• / =添加新段落后更改块类型\n• Command + Shift + d =复制选定的块\n• Control + Option + z =删除选定的块\n• Command + Option + t =在所选块之前插入一个新块\n• Command + Option + y =在选定的块之后插入一个新块\n• Esc =清除选择\n• Command + Shift + z =重做上一次撤消\n• Command + Shift +,=显示或隐藏设置栏\n• Option + Control + o =打开块导航菜单\n• Option + Control + n =导航到编辑器的下一部分\n• Option + Control + p =导航到编辑器的上一部分\n• fn + Option + F10 =导航到最近的工具栏\n• Command + Option + Shift + m =在Visual和Code Editor之间切换\n\n## 经典WordPress编辑器的快捷键\n\n• Ctrl + y =重做\n• Alt + Shift + [数字] =插入标题大小,例如Alt + Shift + 1 = <h1>,Alt + Shift + 2 = <h2>。\n• Alt + Shift + l =左对齐\n• Alt + Shift + j =对齐文本\n• Alt + Shift + c =对齐中心\n• Alt + Shift + d =删除线\n• Alt + Shift + r =右对齐\n• Alt + Shift + u =无序列表\n• Alt + Shift + a =插入链接\n• Alt + Shift + o =数字列表\n• Alt + Shift + s =删除链接\n• Alt + Shift + q =引用\n• Alt + Shift + m =插入图像\n• Alt + Shift + t =插入更多标记\n• Alt + Shift + p =插入分页符标记\n• Alt + Shift + w =​​可视编辑器模式下的全屏分心自由写入模式\n• Alt + Shift + f =纯文本模式下的全屏注意力分散\n\nMac用户的经典编辑器快捷键:\n\n• Command + y =重做\n• Command + Option + [number] =插入标题尺寸,例如Alt + Shift + 1 = h1,Alt + Shift + 2 = h2\n• Command + Option + l =左对齐\n• Command + Option + j = Justify Text\n• Command + Option + c = Align Center\n• Command + Option + d =删除线\n• Command + Option + r =右对齐\n• Command + Option + u =无序列表\n• Command + Option + a =插入链接\n• Command + Option + o =数字列表\n• Command + Option + s =删除链接\n• Command + Option + q = Quote\n• Command + Option + m =插入图像\n• Command + Option + t =插入更多标签\n• Command + Option + p =插入分页符标记\n• Command + Option + w =​​可视编辑器模式下的全屏幕分心自由写入模式\n• Command + Option + f =纯文本模式下的全屏注意力分散\n\n#### 用于评论屏幕的WordPress快捷键", null, "• J =下一条评论(向下移动当前选择)\n• K =上一条评论(向上移动当前选项)\n• A =批准评论\n• U =未批准评论\n• D =删除评论\n• R =回复评论\n• Q =快速编辑评论\n• Z =如果删除评论,则从“废纸篓”或“撤消”中恢复评论\n\n• Shift + A =批准已检查的评论\n• Shift + D =删除选中的评论\n• Shift + U =取消批准所选评论\n• Shift + T =将所选评论移至垃圾箱\n• Shift + Z =从垃圾箱中恢复所选的评论", null, "•", null, "奶爸de笔记 6", null, "你这个采集范围,真的太广了。\n•", null, "里维斯社 Admin" ]
[ null, "https://www.liwei8090.com/wp-content/themes/begin4.6/img/blank.gif", null, "https://www.liwei8090.com/wp-content/themes/begin4.6/img/blank.gif", null, "https://secure.gravatar.com/avatar/fec4fbe652f195da7547c70171afdd0a", null, "https://www.liwei8090.com/wp-content/themes/begin4.6/img/load-avatar.gif", null, "https://www.liwei8090.com/wp-content/themes/begin4.6/img/smilies/icon_exclaim.gif", null, "https://www.liwei8090.com/wp-content/themes/begin4.6/img/load-avatar.gif", null ]
{"ft_lang_label":"__label__zh","ft_lang_prob":0.70346546,"math_prob":0.9993424,"size":3804,"snap":"2020-10-2020-16","text_gpt3_token_len":2381,"char_repetition_ratio":0.2986842,"word_repetition_ratio":0.10105581,"special_character_ratio":0.31466877,"punctuation_ratio":0.0042643924,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9984482,"pos_list":[0,1,2,3,4,5,6,7,8,9,10,11,12],"im_url_duplicate_count":[null,7,null,7,null,null,null,null,null,null,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-03-30T00:03:26Z\",\"WARC-Record-ID\":\"<urn:uuid:6e53f2fe-4b3b-4b08-832a-7c447a3c892e>\",\"Content-Length\":\"73247\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:ad5fbc1e-407d-4f4a-a9cb-9844cb32d969>\",\"WARC-Concurrent-To\":\"<urn:uuid:89d98bcc-2c4a-4f7e-8d22-427428958745>\",\"WARC-IP-Address\":\"118.31.17.61\",\"WARC-Target-URI\":\"https://www.liwei8090.com/16251.html\",\"WARC-Payload-Digest\":\"sha1:JHLNXOC3QGWG765O7MYKMNPSVYRTYQ33\",\"WARC-Block-Digest\":\"sha1:MRMSQTDPL5WZHLCYOMGXZQTR5U36KFTM\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-16/CC-MAIN-2020-16_segments_1585370496330.1_warc_CC-MAIN-20200329232328-20200330022328-00006.warc.gz\"}"}
https://www.nuclear-power.com/nuclear-power/fission/delayed-neutrons/
[ "# Delayed Neutrons\n\nDelayed neutrons are emitted by neutron rich fission fragments that are called the delayed neutron precursors. These precursors usually undergo beta decay but a small fraction of them are excited enough to undergo neutron emission. The presence of delayed neutrons is perhaps most important aspect of the fission process from the viewpoint of reactor control. The term “delayed” in this context means, that the neutron is emitted with half-lifes, ranging from few milliseconds up to 55 s for the longest-lived precursor 87Br.\n\nIt is known the fission neutrons are of importance in any chain-reacting system. Neutrons trigger the nuclear fission of some nuclei (235U, 238U or even 232Th). What is crucial the fission of such nuclei produces 2, 3 or more free neutrons.\n\nBut not all neutrons are released at the same time following fission. Even the nature of creation of these neutrons is different. From this point of view we usually divide the fission neutrons into two following groups:\n\n• Prompt Neutrons. Prompt neutrons are emitted directly from fission and they are emitted within very short time of about 10-14 second.\n• Delayed Neutrons. Delayed neutrons are emitted by neutron rich fission fragments that are called the delayed neutron precursors. These precursors usually undergo beta decay but a small fraction of them are excited enough to undergo neutron emission. The fact the neutron is produced via this type of decay and this happens orders of magnitude later compared to the emission of the prompt neutrons, plays an extremely important role in the control of the reactor.\n\n## Delayed Neutrons\n\nWhile the most of the neutrons produced in fission are prompt neutrons, the delayed neutrons are of importance in the reactor control. In fact the presence of delayed neutrons is perhaps most important aspect of the fission process from the viewpoint of reactor control.", null, "Delayed neutrons are traditionally represented by six delayed neutron groups, whose yields and decay constants (λ) are obtained from nonlinear least-squares fits to experimental measurements.\n\nThe term “delayed” in this context means, that the neutron is emitted with half-lifes, ranging from few milliseconds up to 55 s for the longest-lived precursor 87Br. These neutrons have to be distinguished from the prompt neutrons which are emitted immediately (on the order of 10-14 s) after a fission event from a neutron-rich nucleus. 
Although the amount of delayed neutrons is only on the order of tenths of a percent of the total, their timescale of seconds plays an extremely important role.\n\n## Key Characteristics of Delayed Neutrons\n\n• The presence of delayed neutrons is perhaps the most important aspect of the fission process from the viewpoint of reactor control.\n• Delayed neutrons are emitted by neutron-rich fission fragments that are called the delayed neutron precursors.\n• These precursors usually undergo beta decay, but a small fraction of them are excited enough to undergo neutron emission.\n• The emission of the neutron happens orders of magnitude later compared to the emission of the prompt neutrons.\n• About 240 n-emitters are known between 8He and 210Tl; about 75 of them are in the non-fission region.\n• In order to simplify reactor kinetic calculations, it is suggested to group the precursors together based on their half-lives.\n• Therefore delayed neutrons are traditionally represented by six delayed neutron groups.\n• Neutrons can also be produced in (γ, n) reactions (especially in reactors with a heavy water moderator), and therefore they are usually referred to as photoneutrons. Photoneutrons are usually treated no differently than regular delayed neutrons in kinetic calculations.\n• The total yield of delayed neutrons per fission, vd, depends on:\n• the isotope that is fissioned,\n• the energy of the neutron that induces fission.\n• Variation among individual group yields is much greater than variation among group periods.\n• In reactor kinetic calculations it is convenient to use relative units, usually referred to as the delayed neutron fraction (DNF).\n• At the steady state condition of criticality, with keff = 1, the delayed neutron fraction is equal to the precursor yield fraction β.\n• In LWRs the β decreases with fuel burnup. This is due to isotopic changes in the fuel.\n• Delayed neutrons have an initial energy between 0.3 and 0.9 MeV, with an average energy of 0.4 MeV.\n• Depending on the type of the reactor and its spectrum, the delayed neutrons may be more effective than prompt neutrons (in thermal reactors) or less effective (in fast reactors). In order to include this effect in reactor kinetic calculations, the effective delayed neutron fraction – βeff – must be defined.\n• The effective delayed neutron fraction is the product of the average delayed neutron fraction and the importance factor: βeff = β . I.\n• The weighted delayed generation time is given by τ = ∑ τi . βi / β = 13.05 s, therefore the weighted decay constant is λ = 1 / τ ≈ 0.08 s^-1.\n• The mean generation time with delayed neutrons is about ~0.1 s, rather than the ~10^-5 s found in the section Prompt Neutron Lifetime, where the delayed neutrons were omitted.\n• Their presence completely changes the dynamic time response of a reactor to a reactivity change, making it controllable by control systems such as the control rods.", null, "This chart shows the energy dependence of delayed neutron production. The delayed neutron production remains constant up to 4 MeV; then a linear decrease is observed. Source: JANIS (Java-based Nuclear Data Information Software); ENDF/B-VII.1\n\n## Precursors of Delayed Neutrons\n\nDelayed neutrons originate from the radioactive decay of nuclei produced in fission, and hence they are different for each fissile material. They are emitted by excited neutron-rich fission fragments (the so-called delayed neutron precursors) some appreciable time after the fission. 
How long afterward depends on the half-life of the precursor, since the neutron emission itself occurs in a very short time. The precursors usually undergo beta decay without any neutron emission, but a small fraction of them (highly excited nuclei) can undergo neutron emission instead of gamma emission.\nIn addition, current nuclear physics facilities can produce ever more neutron-rich isotopes that can emit multiple neutrons. Currently about 18 2n-emitters are experimentally confirmed [IAEA – INDC(NDS)-0599], but only two of them are also fission products.", null, "As an example, the isotope 87Br is the major component of the first group of precursor nuclei. This isotope has a half-life of 55.6 seconds. It undergoes negative beta decay through its two main branches with the emission of 2.6 MeV and 8 MeV beta particles. This decay leads to the formation of 87Kr* and 87Kr (ground state), and the 87Kr nucleus subsequently decays via two successive beta decays into the stable isotope 87Sr. But there is one more possible way for the 87Br nucleus to beta decay. The 87Br nucleus can beta decay into an excited state of the 87Kr nucleus at an energy of 5.5 MeV, which is larger than the binding energy of a neutron in the 87Kr nucleus. In this case, the 87Kr nucleus can undergo (with a probability of 2.5%) neutron emission, leading to the formation of the stable 86Kr isotope.\n\nAccording to the JEFF 3.1 database, about 240 n-emitters are known between 8He and 210Tl, about 75 of them in the non-fission region. Furthermore, 18 2n-emitters and only four 3n-emitters (11Li, 14Be, 17B, 31Na) are experimentally confirmed. These numbers are certainly not final. Since a new IAEA Coordinated Research Project (CRP) on beta-delayed neutron emission evaluation was started in 2013, these numbers are expected to change significantly.\n\nAs can be seen, many precursor nuclei have been identified. Not all of them are fission products (about 75 of them lie in the non-fission region A<70), but there are also many precursor nuclei in the fission region between A=70-150. Their half-lives range between tenths of a second (0.12 s) and tens of seconds (55.6 s); therefore their delayed neutrons appear with considerably differing delay times.\n\n## Six Groups of Delayed Neutrons\n\nReactor-kinetic calculations considering such a number of initial conditions would be correct, but they would also be very complicated. Therefore G. R. Keepin and his co-workers suggested grouping the precursors together based on their half-lives.\n\nTherefore delayed neutrons are traditionally represented by six delayed neutron groups, whose yields and decay constants (λ) are obtained from nonlinear least-squares fits to experimental measurements. 
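As an illustration of what such a group fit represents (the equation below is the standard textbook form, not taken from this article): the delayed-neutron emission rate following a fission burst at t = 0 is modeled as a sum of one exponential per group,

$$ n_d(t) \;=\; \nu_d \sum_{i=1}^{6} \frac{\beta_i}{\beta}\,\lambda_i\, e^{-\lambda_i t}, $$

where νd is the total delayed neutron yield per fission, and the group yields βi and decay constants λi are the fitted parameters; integrating each term over all time recovers that group's share of νd. 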
This model has the following disadvantages:\n\n• All constants for each group of precursors are empirical fits to the data.\n• They cannot be matched with the decay constants of specific precursors.\n\nAlthough this six-group parameterization still satisfies the requirements of commercial organizations, a higher accuracy of the delayed neutron yields and a better energy resolution in the delayed neutron spectra are desired.\n\nIt was recognised that the half-lives in the six-group structure do not accurately reproduce the asymptotic die-away time constants associated with the three longest-lived dominant precursors: 87Br, 137I and 88Br.\n\nThis model may be insufficient especially in the case of epithermal reactors, because virtually all delayed neutron activity measurements have been performed for fast or thermal-neutron-induced fission. In the case of fast reactors, in which the nuclear fission of six fissionable isotopes of uranium and plutonium is important, the accuracy and energy resolution may play an important role.\n\n## Eight Groups of Delayed Neutrons\n\nIn order to reduce discrepancies between measured and calculated values of the reactivity scale based on reactor kinetics (this discrepancy results in excessive conservatism in the design, because it must be covered by uncertainties during core design and safety analysis calculations), the NEA (Nuclear Energy Agency) and its NEANSC/WPEC Subgroup 6 recommend a new eight-group representation. It seems reasonable to increase the number of delayed neutron groups, because many studies have shown that most delayed neutrons are produced by approximately twelve precursors that are common to all fissioning isotopes.\n\nThe eight-group representation uses a set of eight group half-lives for all fissioning systems, with the half-lives adopted for the three longest-lived groups corresponding to the three dominant long-lived precursors: 87Br, 137I and 88Br (these precursors were separated into individual groups).\n\nSee also: Delayed Neutron Data for the Major Actinides, NEA/WPEC–6. OECD 2002.", null, "Delayed neutrons are traditionally represented by six delayed neutron groups, but a new eight-group representation is recommended. Source: DELAYED NEUTRON DATA FOR THE MAJOR ACTINIDES, NEA/WPEC–6. Subgroup 6, NEA. https://www.oecd-nea.org/science/wpec/volume6/volume6.pdf\n\n## Photoneutrons\n\nIn nuclear reactors, gamma radiation also plays a significant role in reactor kinetics and in subcriticality control, especially in nuclear reactors with a D2O moderator (CANDU reactors) or with Be reflectors (some experimental reactors). Neutrons can also be produced in (γ, n) reactions, and therefore they are usually referred to as photoneutrons.\n\nA high-energy photon (gamma ray) can under certain conditions eject a neutron from a nucleus. This occurs when its energy exceeds the binding energy of the neutron in the nucleus. Most nuclei have binding energies in excess of 6 MeV, which is above the energy of most gamma rays from fission.\nOn the other hand, there are a few nuclei with a sufficiently low binding energy to be of practical interest. These are: 2D, 9Be, 6Li, 7Li and 13C. 
As can be seen from the table, the lowest thresholds are those of 9Be, with 1.666 MeV, and 2D, with 2.226 MeV.\n\nIn the case of deuterium, neutrons can be produced by the interaction of gamma rays (with a minimum energy of 2.22 MeV) with deuterium:", null, "Because gamma rays can be emitted by fission products with certain delays, and the process is very similar to that through which a “true” delayed neutron is emitted, photoneutrons are usually treated no differently than regular delayed neutrons in kinetic calculations. Photoneutron precursors can also be grouped by their decay constant, similarly to “real” precursors. The table below shows the relative importance of source neutrons in CANDU reactors by showing the makeup of the full power flux.", null, "Although photoneutrons are of importance especially in CANDU reactors, deuterium nuclei are always present (~0.0156%) in the light water of LWRs as well. Moreover, the capture of neutrons in the hydrogen nuclei of the water molecules in the moderator yields small amounts of D2O, which enhances the heavy water concentration. Therefore, also in LWR kinetic calculations, photoneutrons from D2O are treated as additional groups of delayed neutrons having characteristic decay constants λj and effective group fractions.\n\nAfter a nuclear reactor has been operated at full power for some time, there will be a considerable build-up of gamma rays from the fission products. This high gamma flux from short-lived fission products will decrease rapidly after shutdown. In the long term the photoneutron source decreases with the decay of long-lived fission products that produce delayed high-energy gamma rays, and the photoneutron source drops slowly, decreasing a little each day. The longest-lived fission product with a gamma ray energy above the threshold is 140Ba, with a half-life of 12.75 days.\n\nThe amount of fission products present in the fuel elements depends on how long the reactor was operated before shutdown and at which power level it was operated before shutdown. Photoneutrons are usually a major source in a reactor and ensure a sufficient neutron flux on source-range detectors when the reactor is subcritical in a long-term shutdown.\n\nIn comparison with fission neutrons, which make a self-sustaining chain reaction possible, delayed neutrons make reactor control possible, and photoneutrons are of importance at low-power operation.\n\n## Delayed Neutrons Fraction\n\nThe total yield of delayed neutrons per fission, vd, depends on:\n\n• the isotope that is fissioned (see table),\n• the energy of the neutron that induces fission (see chart).\nTable: Six Groups of Precursors", null, "Delayed neutrons are traditionally represented by six delayed neutron groups, whose yields and decay constants (λ) are obtained from nonlinear least-squares fits to experimental measurements.\nChart: Delayed Neutron Production - MT-455", null, "This chart shows the energy dependence of delayed neutron production. The delayed neutron production remains constant up to 4 MeV; then a linear decrease is observed. Source: JANIS (Java-based Nuclear Data Information Software); ENDF/B-VII.1\n\nIn reactor kinetic calculations it is convenient to use relative units, usually referred to as the delayed neutron fraction (DNF). At the steady state condition of criticality, with keff = 1, the delayed neutron fraction is equal to the precursor yield fraction (β):", null, "β = ∑i βi\n\nwhere βi is defined as the fraction of the neutrons which appear as delayed neutrons in the ith group. 
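In display form (a standard relation, not spelled out in this article), the fraction can equivalently be written in terms of the delayed and total neutron yields per fission:

$$ \beta \;=\; \sum_{i=1}^{6}\beta_i \;=\; \frac{\nu_d}{\nu}. $$

For example, taking β ≈ 0.0065 for 235U (the value used in the example below) together with an assumed typical total yield ν ≈ 2.43 neutrons per thermal fission gives νd ≈ 0.016 delayed neutrons per fission; the ν value is supplied here for illustration only. 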
In contrast to prompt neutrons, which are emitted with a continuous energy spectrum, the delayed neutrons in each group appear with a more or less well-defined energy. In general, delayed neutrons are emitted with much less energy than most prompt neutrons.\nThe distinction between these two parameters is obvious: the delayed neutron fraction depends on the actual reactivity of the multiplying system, whereas β does not depend on the reactivity. These two factors, DNF and β, are not the same thing in the case of a rapid change in the number of neutrons in the reactor.\n\nIn LWRs the delayed neutron fraction decreases with fuel burnup. This is due to isotopic changes in the fuel. The reason is simple: fresh uranium fuel contains only 235U as the fissile material, while during fuel burnup the importance of 239Pu fission increases (in some cases up to 50%). Since 239Pu produces significantly fewer delayed neutrons, the resultant core delayed neutron fraction of the multiplying system decreases (it is the weighted average of the constituent delayed neutron fractions). This is also the reason why the neutron spectrum in the core becomes harder with fuel burnup.\n\nβcore = ∑ Pi.βi\n\nwhere Pi is the fraction of power generated by isotope i.\n\nExample:\nLet us say the reactor is at the beginning of the cycle, and approximately 98% of the reactor power is generated by 235U fission and 2% by 238U fission as a result of fast fission. Calculate the core delayed neutron fraction.\n\nβcore = ∑ Pi.βi = 0.98 x β235 + 0.02 x β238\n= 0.98 x 0.0065 + 0.02 x 0.0157\n= 0.0064 + 0.0003\n= 0.0067\n\n## Delayed Neutrons Energy Spectra\n\nAmong the key properties of delayed neutrons, which are very important for nuclear reactor design, are the delayed neutron energy spectra. The energy spectra of the delayed neutrons are the poorest known of all the input data required, because they are very difficult to measure.\n\nDepending on the type of reactor and its spectrum, the delayed neutrons may be more effective than prompt neutrons (in thermal reactors) or less effective (in fast reactors). In order to include this effect in the reactor kinetic calculations, the effective delayed neutron fraction – βeff must be defined.\n\nTable: Six Groups of Precursors", null, "Delayed neutrons are traditionally represented by six delayed neutron groups, whose yields and decay constants (λ) are obtained from nonlinear least-squares fits to experimental measurements.\n\n## Effective Delayed Neutron Fraction – βeff\n\nThe delayed neutron fraction, β, is the fraction of delayed neutrons in the core at creation, that is, at high energies. But in thermal reactors fission is initiated mainly by thermal neutrons, and thermal neutrons are of practical interest in the study of thermal reactor behaviour. The effective delayed neutron fraction, usually referred to as βeff, is the same fraction evaluated at thermal energies.\n\nThe effective delayed neutron fraction reflects the ability of the reactor to thermalize and utilize each neutron produced. β is not the same as βeff because delayed neutrons do not have the same properties as prompt neutrons released directly from fission. In general, delayed neutrons have lower energies than prompt neutrons. Prompt neutrons have initial energies between 1 MeV and 10 MeV, with an average energy of 2 MeV. 
Delayed neutrons have initial energies between 0.3 and 0.9 MeV, with an average energy of 0.4 MeV.\n\nTherefore, in thermal reactors a delayed neutron traverses a smaller energy range to become thermal, and it is also less likely to be lost by leakage or by parasitic absorption than is the 2 MeV prompt neutron. On the other hand, delayed neutrons are also less likely to cause fast fission, because their average energy is less than the minimum required for fast fission to occur.\n\nThese two effects (a lower fast fission factor and a higher fast non-leakage probability for delayed neutrons) tend to counteract each other and form a term called the importance factor (I). The importance factor relates the average delayed neutron fraction to the effective delayed neutron fraction. As a result, the effective delayed neutron fraction is the product of the average delayed neutron fraction and the importance factor.\n\nβeff = β . I\n\nDelayed and prompt neutrons differ in their effectiveness in producing a subsequent fission event. Since the energy distribution of the delayed neutrons also differs from group to group, the different groups of delayed neutrons will also have different effectivenesses. Moreover, a nuclear reactor contains a mixture of fissionable isotopes. Therefore, in some cases, the importance factor is insufficient and an importance function must be defined.\n\nFor example:\n\nIn a small thermal reactor with highly enriched fuel, the increase in the fast non-leakage probability will dominate the decrease in the fast fission factor, and the importance factor will be greater than one.\n\nIn a large thermal reactor with low-enriched fuel, the decrease in the fast fission factor will dominate the increase in the fast non-leakage probability, and the importance factor will be less than one (about 0.97 for a commercial PWR).\n\nIn large fast reactors, the decrease in the fast fission factor will also dominate the increase in the fast non-leakage probability, and βeff is less than β by about 10%.\n\n## Mean Generation Time with Delayed Neutrons\n\nThe mean generation time with delayed neutrons, ld, is the weighted average of the prompt generation time and the delayed neutron generation time. The delayed neutron generation time, τ, is the weighted average of the mean precursor lifetimes of the six (or more) groups of delayed neutron precursors.\n\nIt must be noted that the true lifetime of delayed neutrons (the slowing-down time plus the diffusion time) is very short compared with the mean lifetime of their precursors (ts + td << τi). Therefore τi is also equal to the mean lifetime of a neutron from the ith group, that is, τi = li, and the equation for the mean generation time with delayed neutrons is the following:\n\nld = (1 – β).lp + ∑li . βi => ld = (1 – β).lp + ∑τi . βi\n\nwhere\n\n• (1 – β) is the fraction of all neutrons emitted as prompt neutrons\n• lp is the prompt neutron lifetime\n• τi is the mean precursor lifetime, the inverse of the decay constant: τi = 1/λi\n• the weighted delayed generation time is given by τ = ∑τi . βi / β = 13.05 s\n• therefore the weighted decay constant is λ = 1 / τ ≈ 0.08 s^-1\n\nThe number 0.08 s^-1 is relatively high and has a dominating effect on the reactor time response, although delayed neutrons are only a small fraction of all neutrons in the core. This is best illustrated by calculating the weighted mean generation time with delayed neutrons:\n\nld = (1 – β).lp + ∑τi . βi = (1 – 0.0065). 
2 x 10^-5 + 0.085 = 0.00001987 + 0.085 ≈ 0.085\n\nIn short, the mean generation time with delayed neutrons is about ~0.1 s, rather than ~10^-5 s as in the section Prompt Neutron Lifetime, where the delayed neutrons were omitted.\n\n## Example – Infinite Multiplying System Without Source and with Delayed Neutrons\n\nThe simplest equation governing the neutron kinetics of a system with delayed neutrons is the point kinetics equation. This equation states that the time change of the neutron population is equal to the excess of neutron production (by fission) minus neutron loss by absorption in one mean generation time with delayed neutrons (ld). The role of ld is evident: longer lifetimes simply give slower responses of multiplying systems.\n\nIf there are neutrons in the system at t=0, that is, if n(0) > 0, the solution of this equation gives the simplest point kinetics equation with delayed neutrons (similarly to the case without delayed neutrons):", null, "Let us consider that the mean generation time with delayed neutrons is ~0.085 s and that k (the neutron multiplication factor) is step-increased by only 0.01% (i.e. 10 pcm or ~1.5 cents), that is, k=1.0000 increases to k=1.0001.\n\nIt must be noted that such a reactivity insertion (10 pcm) is very small in the case of LWRs; reactivity insertions of the order of one pcm are practically unrealizable for LWRs. In this case the reactor period will be:\n\nT = ld / (k-1) = 0.085 / (1.0001-1) = 850 s\n\nThis is a very long period. In ~14 minutes the neutron flux (and power) in the reactor would increase by a factor of e = 2.718. This is a completely different dimension of the response to a reactivity insertion in comparison with the case without delayed neutrons, where the reactor period was 1 second.\n\nReactors with such kinetics would be quite easy to control. From this point of view it may seem that reactor control will be a quite boring affair. It will not! The presence of delayed neutrons entails many specific phenomena that will be described in later chapters.\n\n## Interactive chart – Infinite Multiplying System Without Source and Delayed Neutrons\n\nPress the “clear and run” button and try to increase the power of the reactor.\n\nCompare the response of the reactor with the case of the Infinite Multiplying System Without Source and without Delayed Neutrons (or set β = 0).\n\n## Effective Precursor Decay Constant – Lambda-Effective\n\nThe effective delayed neutron precursor decay constant (pronounced lambda effective) is a new term which has to be introduced in the reactor period equation in the case of a single precursor group model. For the purpose of creating a simple kinetic model conducive to understanding reactor behavior, it is useful to further reduce the precursors to a single group. But if we do this, the convention is to employ a constant precursor yield fraction and a variable precursor decay rate, as defined by lambda effective (λeff). In the single precursor group model, lambda effective is not a constant but rather a dynamic property that depends on the mix of precursor atoms resulting from the reactivity.\n\nThe reason a constant decay constant cannot be used is as follows. During power transients, there is a difference in the decay and the creation of short-lived and long-lived precursors.\n\nDuring a power increase (positive reactivity), the short-lived precursors decaying at any given instant were born at a higher power level than the longer-lived precursors decaying at the same instant. 
The short-lived precursors become more significant. As the magnitude of the positive reactivity increases, the value of lambda effective increases closer to that of the short-lived precursors (say, 0.1 s^-1 for +100 pcm).\n\nDuring a power decrease (negative reactivity), the long-lived precursors decaying at a given instant were born at a higher power level than the short-lived precursors decaying at that instant. The long-lived precursors become more significant. As the magnitude of the negative reactivity increases, the value of lambda effective decreases closer to that of the long-lived precursors (say, 0.05 s^-1 for -100 pcm).\n\nIf the reactor is operating at steady state, all the precursor groups reach an equilibrium value and the λeff value is approximately 0.08 s^-1.\n\n## Effect of Delayed Neutrons on Reactor Control\n\nDespite the fact that the number of delayed neutrons per fission neutron is quite small (typically below 1%) and thus does not contribute significantly to the power generation, they play a crucial role in reactor control and are essential from the point of view of reactor kinetics and reactor safety. Their presence completely changes the dynamic time response of a reactor to a reactivity change, making it controllable by control systems such as the control rods.\n\nDelayed neutrons make it possible to operate a reactor in a prompt subcritical, delayed critical condition. All power reactors are designed to operate in delayed critical conditions and are provided with safety systems to prevent them from ever achieving prompt criticality.\n\nFor typical PWRs, prompt criticality occurs after a positive reactivity insertion of βeff (i.e. keff ≈ 1.006 or ρ = +600 pcm). In power reactors such a reactivity insertion is practically impossible to achieve (in normal and abnormal operation), especially when a reactor is in power operation mode and a reactivity insertion causes heating of the reactor core. Due to the presence of reactivity feedbacks, the positive reactivity insertion is counterbalanced by the negative reactivity from the moderator and fuel temperature coefficients. The presence of delayed neutrons is important also from this point of view, because they also give the reactivity feedbacks time to react to an undesirable reactivity insertion." ]
[ null, "https://nuclear-power.com/wp-content/uploads/2015/10/Parameters-of-Delayed-Neutrons-300x164.png", null, "https://nuclear-power.com/wp-content/uploads/2015/10/Delayed-Neutron-Production.png", null, "https://nuclear-power.com/wp-content/uploads/2015/10/Precursors-of-Delayed-Neutrons-189x300.png", null, "https://nuclear-power.com/wp-content/uploads/2015/10/Delayed-Neutrons-Eight-Groups.png", null, "https://nuclear-power.com/wp-content/uploads/2015/11/Photoneutron-deuterium.png", null, "https://nuclear-power.com/wp-content/uploads/2015/11/Photoneutron-balance.png", null, "https://nuclear-power.com/wp-content/uploads/2015/10/Parameters-of-Delayed-Neutrons.png", null, "https://nuclear-power.com/wp-content/uploads/2015/10/Delayed-Neutron-Production.png", null, "https://nuclear-power.com/wp-content/uploads/2015/11/delayed-neutron-fraction-definition.png", null, "https://nuclear-power.com/wp-content/uploads/2015/10/Parameters-of-Delayed-Neutrons.png", null, "https://nuclear-power.com/wp-content/uploads/2015/11/point-kinetics-equation-with-delayed-neutrons.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.9297428,"math_prob":0.8740408,"size":25854,"snap":"2021-43-2021-49","text_gpt3_token_len":5638,"char_repetition_ratio":0.19984525,"word_repetition_ratio":0.16221374,"special_character_ratio":0.20731802,"punctuation_ratio":0.08819101,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9515599,"pos_list":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22],"im_url_duplicate_count":[null,3,null,8,null,3,null,4,null,null,null,9,null,10,null,8,null,5,null,10,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-10-18T16:13:09Z\",\"WARC-Record-ID\":\"<urn:uuid:764daea8-da36-456d-92bd-53d50477b9ed>\",\"Content-Length\":\"119191\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:cc6eb3ff-9766-4944-88ef-471e551d9beb>\",\"WARC-Concurrent-To\":\"<urn:uuid:2f10863b-e1e4-4978-897a-f5de2f11fe5d>\",\"WARC-IP-Address\":\"172.67.147.195\",\"WARC-Target-URI\":\"https://www.nuclear-power.com/nuclear-power/fission/delayed-neutrons/\",\"WARC-Payload-Digest\":\"sha1:XOWVUV6K62PUY63FZS5IE52ZLZ7RKXOS\",\"WARC-Block-Digest\":\"sha1:7AM2BLRCOIONZS6RU7CFCFHIYISTKEKU\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-43/CC-MAIN-2021-43_segments_1634323585204.68_warc_CC-MAIN-20211018155442-20211018185442-00236.warc.gz\"}"}
http://slidegur.com/doc/21098/perpendicular-line-through-a-given-point
[ "### Perpendicular Line Through a Given Point\n\n```• Write an equation of the line that passes through a\ngiven point, parallel to a given line.\n• Write an equation of the line that passes through a\ngiven point, perpendicular to a given line.\n• parallel lines\n• perpendicular lines\nParallel Line Through a Given Point\nWrite the slope-intercept form of an equation for\nthe line that passes through (4, –2) and is parallel to\nthe graph of\nParallel Line Through a Given Point\nPoint-slope form\nReplace m with\ny with –2, and x with 4.\nSimplify.\nDistributive Property\nSubtract 2 from each side\nParallel Line Through a Given Point\nWrite the equation in slopeintercept form.\nWrite the slope-intercept form of an equation for the\nline that passes through (2, 3) and is parallel to the\ngraph of\nA.\nB.\n0%\nD\nA\n0% B\nC\nD\nC\nD.\nA\n0%\nA.\n0%B.\nC.\nD.\nB\nC.\nDetermine Whether Lines are\nPerpendicular\nGEOMETRY The height of a\ntrapezoid is measured on a\nsegment that is perpendicular\nto a base. In trapezoid ARTP,\nRT and AP are bases. Can EZ\nbe used to measure the\nheight of the trapezoid?\nExplain.\nDetermine Whether Lines are\nPerpendicular\nFind the slope of each segment.\nDetermine Whether Lines are\nPerpendicular\nAnswer: The slope of RT and AP is 1 and the slope of\nEZ is –7. –7●1  –1. EZ is not perpendicular to RT and\nAP, so it cannot be used to measure height.\nThe graph shows the\ndiagonals of a rectangle.\nDetermine whether JL is\nperpendicular to KM.\nA. JL is not perpendicular to KM.\n1.\n2.\n3.\nA\n0%\n0%\nB\nC. Cannot be determined\nA\nB\nC\n0%\nC\nB. JL is perpendicular to KM.\nPerpendicular Line Through a Given Point\nWrite the slope-intercept form for an equation of a\nline that passes through (4, –1) and is\nperpendicular to the graph of 7x – 2y = 3.\nStep 1 Find the slope of the given line.\n7x – 2y = 3\n7x – 2y – 7x = 3 – 7x\n–2y = –7 + 3\nOriginal equation\nSubtract 7x from each\nside.\nSimplify.\nPerpendicular Line Through a Given Point\nDivide each side by –2.\nSimplify.\nStep 2\nPerpendicular Line Through a Given Point\nStep 3 Use the point-slope form to find the equation.\nPoint-slope form\nSimplify.\nDistributive Property\nPerpendicular Line Through a Given Point\nSubtract 1 from each side.\nSimplify.\nAnswer: The equation of the line is\nWrite the slope-intercept form for an equation of\na line that passes through (–3, 6) and is\nperpendicular to the graph of 3x + 2y = 6.\nA.\nB.\nC.\nD.\n0%\n1.\n2.\n3.\n4.\nA\nB\nC\nD\nA\nB\nC\nD\nPerpendicular Line Through a Given Point\nWrite the slope-intercept form for an equation of a\nline perpendicular to the graph of 2y + 5x = 2 that\npasses through (0, 6).\nStep 1 Find the slope of 2y + 5x = 2.\n2y + 5x = 2\n2y + 5x – 5x = 2 – 5x\n2y = –5x + 2\nOriginal equation\nSubtract 5x from each\nside.\nSimplify.\nPerpendicular Line Through a Given Point\nDivide each side by 2.\nSimplify.\nStep 2\nPerpendicular Line Through a Given Point\nStep 3 Substitute the slope and the given point into\nthe point-slope form of a linear equation. Then\nwrite the equation in slope-intercept form.\nPoint-slope form\nDistributive Property\nAnswer: The equation of the line is\nWrite the slope-intercept form for an equation of\na line perpendicular to the graph of y = 2x + 1 that\npasses through the x-intercept of that line.\nA.\nB.\nD.\nA\nB0%\nC\nD\nD\n0%\nB\nA\n0%\nA.\nB.0%\nC.\nD.\nC\nC.\n```" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.88633776,"math_prob":0.99517286,"size":3208,"snap":"2019-51-2020-05","text_gpt3_token_len":895,"char_repetition_ratio":0.17634207,"word_repetition_ratio":0.28938907,"special_character_ratio":0.27026185,"punctuation_ratio":0.116883114,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9999217,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-01-28T23:31:12Z\",\"WARC-Record-ID\":\"<urn:uuid:3ed6e2e7-6b5b-40e7-86b6-6a4050d88fbe>\",\"Content-Length\":\"55630\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:7697e8be-1270-439a-8aa2-76588d290490>\",\"WARC-Concurrent-To\":\"<urn:uuid:054c8b89-3d39-41f6-b74e-a59ef415bc34>\",\"WARC-IP-Address\":\"104.27.130.15\",\"WARC-Target-URI\":\"http://slidegur.com/doc/21098/perpendicular-line-through-a-given-point\",\"WARC-Payload-Digest\":\"sha1:RHU4MVIMSB4VQU2ADTYHX5O4C4XFPGGM\",\"WARC-Block-Digest\":\"sha1:I6CEMTMT7F2KCWH6SYRMOWEM7RYYDMVC\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-05/CC-MAIN-2020-05_segments_1579251783342.96_warc_CC-MAIN-20200128215526-20200129005526-00292.warc.gz\"}"}
https://www.prepbytes.com/blog/author/deepanshu/
[ "## List Reduction\n\nCONCEPTS USED: Linked list DIFFICULTY LEVEL: Hard PROBLEM STATEMENT(SIMPLIFIED): Given a linked list of N nodes such that each node have a lower case alphabet (a - z). Your task...\n\n## Arrange the List\n\nCONCEPTS USED: Basic Pointer Manipulation DIFFICULTY LEVEL: Medium PROBLEM STATEMENT(SIMPLIFIED): Given a linked list of N nodes such that the list is sorted in two parts, the first part and...\n\n## Arrange the Salary\n\nCONCEPTS USED: Basic Pointer Manipulation DIFFICULTY LEVEL: Medium PROBLEM STATEMENT(SIMPLIFIED): Given a linked list of N elements and a value X, your task is to arrange the list in such...\n\n## Contest Winner\n\nCONCEPTS USED: Josephus Problem DIFFICULTY LEVEL: Hard PROBLEM STATEMENT(SIMPLIFIED): Given a circular linked list containing N elements from 1 - N arranged in increasing order and an integer M. Your...\n\n## Binary list\n\nCONCEPTS USED: Basic Manipulation DIFFICULTY LEVEL: Easy PROBLEM STATEMENT(SIMPLIFIED): Given a linked list of N nodes, each node containing binary bit either 0 or 1 as a character. Your task...\n\n## Number of islands\n\nConcepts Used Depth First Search, Disjoint Set Difficulty Level Medium Problem Statement : Given a 2D matrix, which contains only two numbers 0 or 1. In the map group of...\n\n## Ragnar Lorthbrok\n\nConcepts Used Breadth First Search Difficulty Level Easy Problem Statement : Given locations of X & Y islands, we need to find the minimum distance between a given pair of...\n\n## Shortest cycle(Minor image correction “ex 2”)\n\nConcepts Used Breadth First Search Difficulty Level Hard Problem Statement : Given a graph we have to find the length of the shortest cycle in the given graph. If no...\n\n## Median Again\n\nConcepts Used Sorting Difficulty Level Medium Problem Statement (Simplified): Find the maximum possible median of provided odd-sized array where you can increase any element by one where each increment is...\n\n## The Last Game\n\nThe Last Game Concepts Used Sorting Difficulty Level Easy Problem Statement (Simplified): Print the last number left in the array after deleting the largest number in the array then the..." ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.6725129,"math_prob":0.66725624,"size":410,"snap":"2021-43-2021-49","text_gpt3_token_len":84,"char_repetition_ratio":0.10591133,"word_repetition_ratio":0.0,"special_character_ratio":0.18780488,"punctuation_ratio":0.15277778,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.97620165,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-12-08T02:54:30Z\",\"WARC-Record-ID\":\"<urn:uuid:63d8f2c0-ef05-4a14-a266-2aa192c7267f>\",\"Content-Length\":\"78365\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:8f6e789f-ae1f-40cd-9133-6757077b5c4b>\",\"WARC-Concurrent-To\":\"<urn:uuid:0627c52f-8235-47d5-b0aa-8bc2cee8e784>\",\"WARC-IP-Address\":\"13.225.63.78\",\"WARC-Target-URI\":\"https://www.prepbytes.com/blog/author/deepanshu/\",\"WARC-Payload-Digest\":\"sha1:KOZXL4QMECK6LGTJU7QZ2ZOMTBHIZI7P\",\"WARC-Block-Digest\":\"sha1:JN4FUFLAZ3BLVYPF3DEXPWA7RXKTXNTW\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-49/CC-MAIN-2021-49_segments_1637964363437.15_warc_CC-MAIN-20211208022710-20211208052710-00388.warc.gz\"}"}
https://math.meta.stackexchange.com/questions/29793/comment-or-answer
[ "What to do when someone ask for a mistake, comment indicating where he made the mistake or answer indicating where he made the mistake?\n\nFor example, someone ask about a limit that he knows the solution but he get another solution doing himself, then he makes a question posting the steps he made for getting his solution and asking where he made a mistake.\n\nWhen someone finds that mistake what should he do, comment or answer?\n\nWhat to do when someone ask for a mistake, comment indicating where he made the mistake or answer indicating where he made the mistake?\n\nIf what they seek is the mistake, and you wish to state what that mistake is, I would post that as an answer - preferably with an analysis of why this is a mistake, and perhaps a hint or nudge at how to correct it if the situation allows.\n\n• What if there is no mistake to be found and everything is perfect. Should one just write an answer like: \"Yes, your method is correct.\" ? Feb 13, 2019 at 1:01\n• I don't see why not. Feb 13, 2019 at 1:45\n• @Zacky Here are some previous discussions related to your question: What's the policy on answering a question where the correct answer is simply “yes”? and How to answer proof-verification questions? Feb 13, 2019 at 6:32\n• But imagine the mistake is just for example a wrong calculation such 2+2=5. Then you should just answer “you made a mistake when doing 2+2”? Feb 13, 2019 at 8:52\n• \"Then you should just answer “you made a mistake when doing 2+2”?\" - I don't see why the triviality of the mistake would have a bearing on the matter. That is to say, yes, that should be your answer IMO. Feb 13, 2019 at 23:44\n\nI think the confusion that there are two questions:\n\n• the maths problem\n• \"Where did I go wrong?\"" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.97423995,"math_prob":0.6520938,"size":426,"snap":"2023-40-2023-50","text_gpt3_token_len":83,"char_repetition_ratio":0.1635071,"word_repetition_ratio":0.027777778,"special_character_ratio":0.19248827,"punctuation_ratio":0.084337346,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9666054,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-11-28T15:39:00Z\",\"WARC-Record-ID\":\"<urn:uuid:9c757cfc-5ea5-46e7-bb10-606d782e191a>\",\"Content-Length\":\"122456\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:5ac8e5a4-1fd0-4cea-ab12-486391b635de>\",\"WARC-Concurrent-To\":\"<urn:uuid:697aeb6f-abd5-4d2d-899d-3034725d7f0a>\",\"WARC-IP-Address\":\"172.64.144.30\",\"WARC-Target-URI\":\"https://math.meta.stackexchange.com/questions/29793/comment-or-answer\",\"WARC-Payload-Digest\":\"sha1:J5HPVWXB3XG4GZFR6FNQ2HLLNCPGF2G7\",\"WARC-Block-Digest\":\"sha1:BHEBFDENTEUE22DEDVYYNSR5DJ7PWL42\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-50/CC-MAIN-2023-50_segments_1700679099892.46_warc_CC-MAIN-20231128151412-20231128181412-00235.warc.gz\"}"}
https://efinite.github.io/utile.tools/reference/index.html
[ "## Calculate\n\ncalc_duration()\nCalculate durations of time\ncalc_chunks()\nCalculate data chunk indices\n\n## Format\n\npaste() paste0()\nConcatenate strings\npaste_freq()\nPaste frequency\npaste_mean()\nPaste mean\npaste_median()\nPaste median\npaste_efs()\nPaste event-free survival\npaste_pval()\nPaste p-value\n\n## Test\n\ntest_hypothesis()\nTest the null hypothesis\n\n## Cummulative Sum\n\ncusum_failure()\nCumulative Sum of Failures\ncusum_loglike()\nCumulative Sum of Log-Likelihood Ratio\ncusum_ome()\nCumulative Sum of Observed Minus Expected Outcome\ncusum_sprt()\nRisk-adjusted Sequential Probability Ratio Test (SPRT)\n\n## Miscellaneous\n\nchunk_data_()\nBreak data into chunks" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.5848163,"math_prob":0.85406107,"size":611,"snap":"2023-14-2023-23","text_gpt3_token_len":155,"char_repetition_ratio":0.16474465,"word_repetition_ratio":0.0,"special_character_ratio":0.26350245,"punctuation_ratio":0.0,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9847833,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-06-10T12:18:26Z\",\"WARC-Record-ID\":\"<urn:uuid:9a4d2abe-65dd-4a62-86f1-ce07e7388673>\",\"Content-Length\":\"9555\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:83b1c45c-1a32-495e-a057-e97d41241e3d>\",\"WARC-Concurrent-To\":\"<urn:uuid:5b31a004-a780-4014-8a9f-37dafa8afc82>\",\"WARC-IP-Address\":\"185.199.108.153\",\"WARC-Target-URI\":\"https://efinite.github.io/utile.tools/reference/index.html\",\"WARC-Payload-Digest\":\"sha1:ETMLQLQFJV53J7CJ3AABJVPSMEMJMTBT\",\"WARC-Block-Digest\":\"sha1:MCPORKOB25E2ADPTOJMFLUDVKOO6UHMZ\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-23/CC-MAIN-2023-23_segments_1685224657169.98_warc_CC-MAIN-20230610095459-20230610125459-00658.warc.gz\"}"}
https://explorer.celo.org/mainnet/tx/0x1da5f1df5fb35f7f1b95d665c1f05b325ff92018548a15328f239654fee49745/internal-transactions
[ "# Transaction Details\n\nTransaction Hash\n0x1da5f1df5fb35f7f1b95d665c1f05b325ff92018548a15328f239654fee49745\nResult\nSuccess\nStatus\nConfirmed\nConfirmed by 21,572,681\nBlock\n20871\nTimestamp\n| Confirmed within <= 5.0 seconds\nFrom\n0xf5720c–3a263c\nTo\n[Contract  a16z - 2 (0xfce077–8c217d)  created]\nValue\n0 CELO ( )\nTransaction Fee\n0.00633202 CELO ()\nGas Price\n0.00000001 CELO\n\nGas Limit\n10,000,000\nGas Used by Transaction\n633,202 | 6.33%\nNoncePosition\n1120\nRaw Input\n``0x60806040526100133361001860201b60201c565b610148565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614156100bb576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260118152602001807f6f776e65722063616e6e6f74206265203000000000000000000000000000000081525060200191505060405180910390fd5b6000600160405180807f656970313936372e70726f78792e61646d696e000000000000000000000000008152506013019050604051809103902060001c0360001b90508181558173ffffffffffffffffffffffffffffffffffffffff167f50146d0e3c60aa1d17a70635b05494f864e86144a2201275021014fbf08bafe260405160405180910390a25050565b610a19806101576000396000f3fe60806040526004361061004a5760003560e01c806303386ba3146101e757806342404e0714610280578063bb913f41146102d7578063d29d44ee14610328578063f7e6af8014610379575b6000600160405180807f656970313936372e70726f78792e696d706c656d656e746174696f6e00000000815250601c019050604051809103902060001c0360001b9050600081549050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415610136576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260158152602001807f4e6f20496d706c656d656e746174696f6e20736574000000000000000000000081525060200191505060405180910390fd5b61013f816103d0565b6101b1576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260188152602001807f496e76616c696420636f6e74726163742061646472657373000000000000000081525060200191505060405180910390fd5b60405136810160405236600082376000803683855af43d604051818101604052816000823e82600081146101e3578282f35b8282fd5b61027e600480360360408110156101fd57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291908035906020019064010000000081111561023a57600080fd5b82018360208201111561024c57600080fd5b8035906020019184600183028401116401000000008311171561026e57600080fd5b909192939192939050505061041b565b005b34801561028c57600080fd5b506102956105c1565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3480156102e357600080fd5b50610326600480360360208110156102fa57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919050505061060d565b005b34801561033457600080fd5b506103776004803603602081101561034b57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506107bd565b005b34801561038557600080fd5b5061038e610871565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60008060007fc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a47060001b9050833f915080821415801561041257506000801b8214155b92505050919050565b610423610871565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146104c3576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260148152602001807f73656e64657220776173206e6f74206
f776e657200000000000000000000000081525060200191505060405180910390fd5b6104cc8361060d565b600060608473ffffffffffffffffffffffffffffffffffffffff168484604051808383808284378083019250505092505050600060405180830381855af49150503d8060008114610539576040519150601f19603f3d011682016040523d82523d6000602084013e61053e565b606091505b508092508193505050816105ba576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601e8152602001807f696e697469616c697a6174696f6e2063616c6c6261636b206661696c6564000081525060200191505060405180910390fd5b5050505050565b600080600160405180807f656970313936372e70726f78792e696d706c656d656e746174696f6e00000000815250601c019050604051809103902060001c0360001b9050805491505090565b610615610871565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146106b5576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260148152602001807f73656e64657220776173206e6f74206f776e657200000000000000000000000081525060200191505060405180910390fd5b6000600160405180807f656970313936372e70726f78792e696d706c656d656e746174696f6e00000000815250601c019050604051809103902060001c0360001b9050610701826103d0565b610773576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260188152602001807f496e76616c696420636f6e74726163742061646472657373000000000000000081525060200191505060405180910390fd5b8181558173ffffffffffffffffffffffffffffffffffffffff167fab64f92ab780ecbf4f3866f57cee465ff36c89450dcce20237ca7a8d81fb7d1360405160405180910390a25050565b6107c5610871565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610865576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260148152602001807f73656e64657220776173206e6f74206f776e657200000000000000000000000081525060200191505060405180910390fd5b61086e816108bd565b50565b600080600160405180807f656970313936372e70726f78792e61646d696e000000000000000000000000008152506013019050604051809103902060001c0360001b9050805491505090565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415610960576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260118152602001807f6f776e65722063616e6e6f74206265203000000000000000000000000000000081525060200191505060405180910390fd5b6000600160405180807f656970313936372e70726f78792e61646d696e000000000000000000000000008152506013019050604051809103902060001c0360001b90508181558173ffffffffffffffffffffffffffffffffffffffff167f50146d0e3c60aa1d17a70635b05494f864e86144a2201275021014fbf08bafe260405160405180910390a2505056fea165627a7a72305820b862497decc842ed6b2cf9c83b03f37ae84f22688fd3d3c38b832781ecde44c60029``\n\n# Internal Transactions\n\nThere are no internal transactions for this transaction." ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.6260769,"math_prob":0.9583933,"size":492,"snap":"2023-40-2023-50","text_gpt3_token_len":204,"char_repetition_ratio":0.13729508,"word_repetition_ratio":0.0,"special_character_ratio":0.5020325,"punctuation_ratio":0.13043478,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9735931,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-09-25T10:05:29Z\",\"WARC-Record-ID\":\"<urn:uuid:d09dce3a-38a1-4095-9d7d-843357aeaecb>\",\"Content-Length\":\"92326\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:e20b46d3-699f-430f-b89f-5ada74646b2c>\",\"WARC-Concurrent-To\":\"<urn:uuid:45b198a6-732f-4c29-9ecf-1708cbc29a99>\",\"WARC-IP-Address\":\"34.83.118.64\",\"WARC-Target-URI\":\"https://explorer.celo.org/mainnet/tx/0x1da5f1df5fb35f7f1b95d665c1f05b325ff92018548a15328f239654fee49745/internal-transactions\",\"WARC-Payload-Digest\":\"sha1:YJDR4CE2JVQR624FKG22RLV2JWQZJCXA\",\"WARC-Block-Digest\":\"sha1:K5ZTJJXPMEKULGPUMCCCR4IO77KACLMQ\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-40/CC-MAIN-2023-40_segments_1695233508959.20_warc_CC-MAIN-20230925083430-20230925113430-00701.warc.gz\"}"}
https://research-explorer.app.ist.ac.at/record/1911
[ "# Tverberg's Theorem and Graph Coloring\n\nA. Engström, P. Noren, Discrete & Computational Geometry 51 (2014) 207–220.\n\nNo fulltext has been uploaded. References only!\n\nJournal Article | Published | English\n\nScopus indexed\nAuthor\nEngström, Alexander; Noren, PatrikIST Austria\nDepartment\nAbstract\nThe topological Tverberg theorem has been generalized in several directions by setting extra restrictions on the Tverberg partitions. Restricted Tverberg partitions, defined by the idea that certain points cannot be in the same part, are encoded with graphs. When two points are adjacent in the graph, they are not in the same part. If the restrictions are too harsh, then the topological Tverberg theorem fails. The colored Tverberg theorem corresponds to graphs constructed as disjoint unions of small complete graphs. Hell studied the case of paths and cycles. In graph theory these partitions are usually viewed as graph colorings. As explored by Aharoni, Haxell, Meshulam and others there are fundamental connections between several notions of graph colorings and topological combinatorics. For ordinary graph colorings it is enough to require that the number of colors q satisfy q&gt;Δ, where Δ is the maximal degree of the graph. It was proven by the first author using equivariant topology that if q&gt;Δ 2 then the topological Tverberg theorem still works. It is conjectured that q&gt;KΔ is also enough for some constant K, and in this paper we prove a fixed-parameter version of that conjecture. The required topological connectivity results are proven with shellability, which also strengthens some previous partial results where the topological connectivity was proven with the nerve lemma.\nPublishing Year\nDate Published\n2014-01-01\nJournal Title\nDiscrete & Computational Geometry\nAcknowledgement\nPatrik Norén gratefully acknowledges support from the Wallenberg foundation\nVolume\n51\nIssue\n1\nPage\n207 - 220\nIST-REx-ID\n\n### Cite this\n\nEngström A, Noren P. Tverberg’s Theorem and Graph Coloring. Discrete & Computational Geometry. 2014;51(1):207-220. doi:10.1007/s00454-013-9556-3\nEngström, A., & Noren, P. (2014). Tverberg’s Theorem and Graph Coloring. Discrete & Computational Geometry, 51(1), 207–220. https://doi.org/10.1007/s00454-013-9556-3\nEngström, Alexander, and Patrik Noren. “Tverberg’s Theorem and Graph Coloring.” Discrete & Computational Geometry 51, no. 1 (2014): 207–20. https://doi.org/10.1007/s00454-013-9556-3.\nA. Engström and P. Noren, “Tverberg’s Theorem and Graph Coloring,” Discrete & Computational Geometry, vol. 51, no. 1, pp. 207–220, 2014.\nEngström A, Noren P. 2014. Tverberg’s Theorem and Graph Coloring. Discrete & Computational Geometry. 51(1), 207–220.\nEngström, Alexander, and Patrik Noren. “Tverberg’s Theorem and Graph Coloring.” Discrete & Computational Geometry, vol. 51, no. 1, Springer, 2014, pp. 207–20, doi:10.1007/s00454-013-9556-3.\n\n### Export\n\nMarked Publications\n\nOpen Data IST Research Explorer" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.8411777,"math_prob":0.48610342,"size":2922,"snap":"2020-45-2020-50","text_gpt3_token_len":777,"char_repetition_ratio":0.13262509,"word_repetition_ratio":0.1037037,"special_character_ratio":0.26146474,"punctuation_ratio":0.1814882,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.966907,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-10-31T14:10:26Z\",\"WARC-Record-ID\":\"<urn:uuid:990ce83a-f0b1-44e1-a726-0812b5b63a5c>\",\"Content-Length\":\"30071\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:25d8042c-b167-40e1-85f8-232460b89659>\",\"WARC-Concurrent-To\":\"<urn:uuid:cd3eba5f-d421-4f16-a746-5190456ae4dc>\",\"WARC-IP-Address\":\"81.223.84.196\",\"WARC-Target-URI\":\"https://research-explorer.app.ist.ac.at/record/1911\",\"WARC-Payload-Digest\":\"sha1:H4KFKGAAVGNZBOA2JBSANYTYC7ISY66B\",\"WARC-Block-Digest\":\"sha1:7KQDQUDL3FL73YBJDJLODNDC62MFTI3B\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-45/CC-MAIN-2020-45_segments_1603107918164.98_warc_CC-MAIN-20201031121940-20201031151940-00216.warc.gz\"}"}
https://numfactory.upc.edu/web/introMatlab/html/ProgMatlab.html
[ "# Programming with Matlab: files, loops, conditionals, etc.\n\nIn this practice we will focus essentially on the aspects of Matlab linked to programming. Matlab uses its own high-level language that is compiled with C ++ (which is properly the development language). The fact that it is a compiled language means that if we do not do things right the speed of calculation can be significantly reduced.\n\n## Matlab File Types: scripts and functions\n\nMatlab files have the extension .m and can be classified into two types, function files and scripts.\n• Script files: Script files are simply files with instruction lines with Matlab that run in the order they are written.\n• Function files: These are files that are designed to be called from script files to make a certain calculation from input variables and return output variables. Function files are very easily recognized because they must start with the keyword function.\nThe syntax of a function file is:\n[out1, out2,...] = functionName (input1,input2,...)\nWhere the input values are the input variables and the output variables.\nNote: Function files must be named as the function name. So, for example, maxFunc.m will be the name of the file that contains the maxFunc function.\nExample: Suppose that, as an example, we want to calculate the maximum between two functions when we evaluate them at an x-point. We will do this for these two functions:", null, "", null, "This forces us to create a script that initializes the value of x and calls the function. We will need to create the function, give it a name, and define the input and output variables. The script file would be: (must be created, named and saved)\nx=0.2;\n[maxF,indFun] = maximFunc(x); %call the function maximFunc\nfprintf('For x= %e, the max= %e value, for the function %d \\n',x,maxF,indFun)\nFor x= 2.000000e-01, the max= 1.960133e-01 value, for the function 2\n%\n% The file for the function must be named maximFunc.m and it can be found at the end of this page\n%\n\n## Loops: for and while\n\nThe two options for looping in a Matlab program are (as in most programming languages) the for and while loops.\nAs a general rule we will use a for loop if we know exactly how many times we will do the loop (such as traversing an array or vector, etc.). If the number of iterations depends on a condition and therefore it is not clear how many iterations will be needed, then we will use a while loop.\nThe for always usually has a counter that updates automatically, while the while needs to do so explicitly.\n%------ for example:\nx=1:10;\nx=x/norm(x); %normalize the array\nsuma=0;\nfor i=1:10\nsuma=suma+x(i)^2;\nend\nnorma=sqrt(suma) % make the sum in a 'manual' way\nnorma = 1\n%------ while example:\nn=1;\nS(1)=1;\nwhile (abs(S(n)-pi^2/6) > 1.e-4)\nn=n+1;\nS(n)=S(n-1)+1/n^2;\nend\nn\nn = 10000\n\n## Exercise 1:\n\nAdd all the components of an mxn array: A = rand (m, n) using two for loops one for rows and the other for columns. Compare the result with the Matlab sum (A (:)) statement.\n\n## Exercise 2:\n\nUse a while to calculate the number of iterations required for the terms of the sequence", null, ", taking", null, "until", null, "## Conditionals: if, else, elseif\n\nWhen programming an algorithm, conditionals are essential. 
For the simplest case (two options), in Matlab the syntax is\n• if (condition)\ndo some computations\n• else\ndo some other computations\n• end\nx=-3; %compute the absolute value\nif (x > 0)\nx = x;\nelse\nx=-x;\nend\nIf there are several consecutive (chained) conditions, then the syntax is:\n• if (condition)\ndo some computations\n• elseif (condition)\ndo some other computations\n...............\n• elseif (condition)\ndo some other computations\n• end\nx=rand(); %random value in [0,1]\nif (x < 0.3)\nval = 1;\nelseif((x >= 0.3) && (x < 0.6)) %two simultaneous conditions\nval = 2;\nelseif((x >= 0.6) && (x <= 1))\nval = 3;\nend\n[x, val]\nans = 1×2\n0.8147 3.0000\n\n## Needed Functions:\n\nfunction [maxF, indFun]=maximFunc(x)\ny1 = x^2*sin(x); %only scalar values can be computed\ny2 = x*cos(x);\n[maxim, ind] = max([y1,y2]); %the max function gives us all the information\nmaxF = maxim;\nindFun = ind;\nend" ]
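A possible solution to Exercise 1 above, ported to Python for illustration (the exercise itself asks for Matlab, where the check would be made against sum(A(:))):

```python
import random

# Build an m x n array of random values, mirroring A = rand(m, n).
m, n = 4, 5
A = [[random.random() for _ in range(n)] for _ in range(m)]

total = 0.0
for i in range(m):          # loop over rows
    for j in range(n):      # loop over columns
        total += A[i][j]

builtin = sum(sum(row) for row in A)     # Python analogue of Matlab's sum(A(:))
print(total, builtin, abs(total - builtin) < 1e-12)
```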
[ null, "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAANgAAAAmCAYAAACmuFWEAAAMw0lEQVR4Xu2cBYwsxxGGP4eZmexE4SjMnDgOMzMzM3Nih5mZWWFWmElJFGaFmZlJn1VltcczO9Ozs3c3d9PS03v3dranu7r+gr+qbx+WsUhgkcDGJLDPxmZeJl4ksEiABWCLEiwS2KAEFoBtULjL1IsEFoAtOrBIYIMSWAC2QeHOcOqjAjcFrgWcD/g88BrgpcDfZrifbV/y3AHm+s8P/AT40QTSPBNwBODLE8w1tykE1+OA4wOvBE4P3BY4A3A/4InAv2e2qbPGmr8+wbpPCZwc+DTwv6HzzRlgAkEF+G1Y2cGbXiGcIwN3Ar4GvLtGkEMFvoOfuxRwdeC+wF9inRqcV8e/rwt8cwevv1yaeu1e9gWePpFhOBxwA+CIwMuHztkHsNMC1wCuBFy0Uri3A57b851TAXeOUCQPcshrBNddgZ9OCK58r3O7pm/sIZCpB3cH3gd8qTiAIwGPic8uAnx8yOH0PKMePTPkq5x/McGcTXBpDPQ4Tx4KhIFrUE7XA44yFGR9AMv3Xhj4WPzwJ8ANNC28h3HGCCeuD1wc+EjHwgXWHYBbA8cDbhRhycB9HrxJD7y0tkO/O+S5EwKPBR4PTBFeDHnndj9zNODvwH8bC3kwcOPIy0rwjV2v8z0ivlx77kPeqa4KXA3w1OD1/YbSBwFvBz7Qt6ChALsY8OGYTGAp8F93TG5y/FTgZi0hxXGAWwG621OEIJymRtCnC8t07w0rv3u+Tazxd32C3KWfpwfbD7glMIUcNI7PA74L3KThMdcVo4bRkND5e5V/jZeZl2qA7wZ8f9U8QwFmrvOcmOjRgFboPx0T+/IHAXdpORDDLy2kf/R2rwNMRIcC7PDAQ4PRMiHvWsMasjvkq+ZjvkOr/cIpJpzhHCcFXga8pDLC6NvqMeLspmYmNQLnjZB26rmbYaghtQ7j4av0cAjAdInGsoLMcVXgrSskeBLgQuFC/7niOYH4WuDsFQA7S1in209s+bqWeWngPmFpf9anNbvsc3XjhsAFQwZJfOzUbaYx0Ci+dwsWeTbg2RHlfLXrfUMAJhPzqgDNFyP/moJNGgMwrcY5gTsC5oKbHicLy23Y8cZNv2yHzS+DeE/ggRvKZabe7lXCc2kUJL82PY4ZZM0XwgG1sthDAKYVf0+sVnqyqdx6OJPWF1TSuLUAO3a8w+KnYepWDNki6z//CPJmlUfeivVs1TtOHIm8e5+S5DE0lJk+c9Qap6o3Zq5oWK9RkKzZinF/4FzBK/yh7YV9APNzLdgj48vmXjIoJVovEzWHe1RW+2sBpkt+PXCvnhA193n0iMeth1wWMAGWeHlbPKAFkijRYBhSyDz9qkVIes0rRA2k7fOtOMgx7zhuRB3XBi4RSqd1/1xMZiivYZR0MMd8QJyfcvO831IQW13v91lJK1ljKXz15a+AyiY7bL6sUbLGJjF25fj/ZiSUwDt3KKxRk+fxvTg3cyvPToOn/r2okfdkpGHqYjrTN+QCLKRbfvJsZbwtRzwsjKkk3NXiZ8Hr+9tKFHrNA4HrRNnhMO/tA1h6DVtnHFcEPhksoJ5LxVVJpbNriYBagOlJbdnxkFJJVglSltLxr1ifB5weWJJFL+ge7ASxWC2j9cOWCRXiE0bS1CUl3XfoXZ+PDctlW38PeIayapeMPbumE4RnlghQrp+KXELSyPO0vPLBwpCqJ9ZDLcCnR9NbqOx6jGtGCO1zl4vcWnCVyn6sqIsK6DISch5LLpePNWj4nhZ5nzl3elHzevP1T4Sx+0EhsFrj6zs0MD+P/P9ZwLfjjPWqGgz3JLgEnz+/o+WANAgabNvLWvO+PoDlwj2srpELq62R1AJMAOglx3QUJAsqMNOS/ji6FPo6QLIGuKqu1yWb7QRYrknmVY9kOKOSWH+UXm56KK26YNm/AS7nOVEA1dpSkh1lbl7KJnMT3yWJlaN83nNsepqmPmQqICP9lWCu9bgfCvKlzLOyjDSmGF7quOAXdBJ5nl0fsZNrflKwrdUeTGW22dNhV4bhUtKfsjaPAjzAMaRDLcDcsIc/JolNS+Oa3Yc1PMOBIb11F4gcVObSHr05Dr2wgNI66xH1UObMaVw8Q0slkjkCpG2kl2oDjHqgx0p5Cgq7QvSMORIEklN6q2bIlbm+69MAuJ70XpkLaxzSu5kX51AnfL+RVq2hL6O0NwWo7L20v7VvmE+KD2WbadShvrPKg5WWzy+1tT6p9H8eGPc2FzsGYHoThfmbvp03Ps8Y3VzE0EcvNkSATpPrNAyeK8BKK21dy/zmj5UybD6edULrnYZrhknZjNA2tcZZS9/mgcpcX4UVPCq7iuswf5bJPqCjpKNOGNqOiW5KPRf8gjRJvT4RZWO0xqIaYMbpxsrG1NaAmrlPFn0VwpCcaDsBVlpAQxMt1FBGcDcATMLDszSHbgvP+hSp6/PzBPFk+GeX+c07WMcMGzVsbfIvvcgvoxPIelZ6xIxAzCklbZp1p3UA5t4yUmvL71bJZi2AlWFVW3uUVkcXaS4zpmo+xoONDRFdqxbO9pa+Vq+mQDMHMz4vc4qxSrkd39PbSNR4U6AZ6q+znixGC16HXsd8t8m2ZiilTrVFQqWHNXeTmTQXypE5dFuZyGfWCRH9vmmAjkLSqy187ZJR6rB60Vo6WhUi1rRHjTmkWoCtQ3KUlraWlEmAjUmgdwLJ4dmYtKucepK2EG3M+eV3pOr1NuZHDsHRZJQzv2qLhBIgr4jvN3O9spOoy/uuQ3Lo3TU6ekbHkFsguffe6KYLYM32qKG9gjUHVQuwWpo+12LR1OZjadRbRG2oZj9aR7v2O2sdKza9EwBmR4bWVe/i39acaqx0c3syyoZGJYFRnmWbl5HBlAhpix7KKzFtnyf7aM9q17prafrcUzKnphDmeZJ1NR6+l2HuAlhJqdZa/KEgqwVYPv+QgYVm16EA7SW049kQQFZKz5wNy9bDvJ9k61fX1QaVQ0GaP0zRTT5UPlM8p3exgGqB3rwlc+q00srH+pj501DSQ4MjyGxyLVnIshRQyqrMv1Lu1rMknrzyURJQlgnM0crSSXq/9LwWsV2zLGV2bCRfIDkxpNCcsjXlkGG1mcLCsl609PCnicL4ZzsOw9zN1MP04Tttz3QBrGyP8nB0+62tIGtoQS3A8qC+1dJNkstwPxY0LZhaw7BAaHytBzJPzLA3LaWFTDtRrK+UtG/Ol+8UoCu7pteQw5RftQNBwkDFs8CuwmpEZO8cCQKttJ8ZOuqNnlFxe1uDo2IaspcN0OmtfUcpqxJAvk8a3nqa+bBGTeP1rmCjVfLPFAIp2UWBIxXvFSIVvmQskwm0e6QsJTVla2Smt1KX1T8BL0B0IukFLUDrKQWMeuEzbVdS8p2WfsxtW2tmbQArK/Qu0HqJArEFZsrhJt4ZE7ZZrrZ32bIiE9Z1N+kcwJvD6ghEOzOsX6V38q6anysUPZolBt/d5b08BKl5Afr+KTe/obmUjTfD
BZdtRnomPXgevt0Yb4hGaY2MRIKA6Suo5nJLSlt63m5ymT6VUj3RkzSZxNKQ2vmh93tKdAQ5b9L3beFhmapI33tOAtA9NhsEbMcSEIb/7r05NLx2tOh1sgfSckUC1VzM3NHWOtlEoxVLM12lh/SaOqDOLqYSYApPSt56RiZ8LtLagK5c1+n9LanSsUPwav20ZL5Di+OwVck7R96a/uiKy5ynBl4cSlNaulyP+ZZhoG7fAzGcLNkoD0wrqPX1QP33ql+WI5BtB9PzzSE8VJm9ju+NA/9WQcpbB+YZ/p9eWzkqK2U/dHjrWYtu/mV3h6GeSu+VFuWpojUZRGVu/mXx2DxQENiB7ijDx7Y+V59RT7yLqJeRTNFANm9d+1wCxDCv7eaD4bBGwXDQcNlSjW1/JVAlWDQClgpsRCjbxZoyEtCu2R7JzkuXfa1SQwW/Vc+5XoXk71tQUG1h3VRr8cC0eM+vKDxO9e5lnnES0HCbFhi1bNIgZpHdFq6yI+Ywq54bwNxAsoLWdbqSz3HHc+hvmcybvxlijanzTbGGZY46CUjqmNvJGGcXSN0Mw562LGBZousGxiGzzBFgLt7EWErV3HATV0iktr2GYH628ncuDDuP5aktlIA5nmFwkhdTv7rKwM8VYHktwot7MmBThormhZIaUtqGAMuYnwTsGJHQsUQx5c1380lzScmjVfnZ7D2YGxBkEhDSq3aBTxHGCS69lo2lnb9nYX76tidX7E1jSYtaIqdLWILLkFByTWax75rTwfPM1YOVQpAiFlxT1Om8pOlctd36e1KDZ7Bpa3zWBqdII6TlJTeG3sLYNQCbwTkvS9yrEtgNHmyvnt2y7xlIYAHYDA5pWeJ8JbAAbL5nt6x8BhL4P062FlTaaP/XAAAAAElFTkSuQmCC", null, "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAANMAAAAkCAYAAAATkA94AAAMjElEQVR4Xu2cBaglyxGGvxcPcSOuxEPc3d3dPcTd3V0JcXd3g7gRd3eIu7sLH1S9dOZ1j5yZc3fPZhoe+3bvTEt1/SV/1dyDWMcqgVUCi0jgoEVmWSdZJbBKgBVMqxKsElhIAiuYFhLkOs0qgRVMqw6sElhIArsOJvd/buCHwPcXkMnpgMMAX1xgrnWKvZXAGYF/AF9dYNkTAycEPg78e+x8uwwmlf5WwK+AV045dI9wDg/cHvgK8PaF5hx7F+tzm0lAHb4qcFLgKQGozWb671uHAq4HHBZ4ydg5h8B0SuBqwBWAC07c4a2BZ3Xecb2TAzcFLgGcB/g68A7gRcDngH+NWEcg3Qn40YJAymWd+w7A11ZAjbiJffuI+nRtQE/ypLFKP3LLzn0d4AhjATUEplz3/MCH4i+/jwN0LffhgNMC9wauC1wY+GCxcZX0+sATgWNWDuS8DxspFA95AeBewB9HCmfKY8cBHgM8bqGwYcra67PjJaBeavg0rD8d/9roJ48IPAJ4K/DeobfGgulCwAdiMkF0Q+AXjcnPBTwZuEl4nXzsYsDzgRcDrwZ+DJws0G+4dhRAQDn3m3o2fqoA3D22rOie+ZZxWb8eEuT68z2XgAbPsO7ZYxR9xu5OE4b1zsB3+uYZCyaV/Zkx0aOABwD/bEzs4vcH7gikEh4DeAbwZuAVnVzEPejJnF9AvTDylprHOTTwIODPwGN79jBDdge/av7kGl8AnrfEhOsci0rg5sA5gbuEPiw6eTGZ+ukaRwce0qdzY8CkqzMeFVCOKwcoWps/HnC+cI1/i4d0xzcA7to4+JGAp4Y3+0gkf9+tLHCGsES3CSXflgBz3ksC9wRuFJ502+ut84+TwPEjwtHYvWvcK7OeOlM4AyOVL7dmGgMmWZKXB0A+H/mSpMGUYUz7JeA9PS+l9+tbQwtxVuB2ERJO2cMmz54AeFmEE6/fZIL1na1I4ErhLczBJaG2PYyYnhYEmY6lSpePAZPW+Z2xW2nCriLruR4KPLeTI009oIJ5KfDu8Ew/70xwtFjjM4Ch5l4MmZwnAH8NYiU97V6sva5Rl4BE16MBw/C7AX/ZI0HdBzgbcAvgt7U1h8Dkz+8XLJvvmyvJbpTIvFTw/K0QbuxZ0zOJfBnBruLqal8L3H0gzMz1DB2Nqa1BXBowYZUUeUs8oLWRxNA4GCrICnUB7KN6w8s1AD72bPviOfNUw+1rAhcJpdNgfTo2YziuEZQZNSe8byUEP2qURi4a+a958pnDqHkXFkm7wxqNhfRrARpAWTbJHIupElO1grjvSFAJDhXVyOfYEc2cN/Quw/6MFsy/1ZWhIYt86ijveI+yzILxwWEkXfsq8XeBai724cqkesOHx7ksmxxiDIEpvcE14s3LAx8F3IAeSSVVIaWQ5yTpaW1EvcpfCwf1kNairlgoRJ8gTxQ//HvsT5YwPau1LL2bZ/DiLfyaF32vIcTHA8pAMmLK0PiosHPGpqG1rOdvQqFlvAREkkcqqh5XIke5fiyYy9/FRlVAQaghfVWUMySEMtxR+Wqsq/UeFU7AaaTMfzW81isllryTGxfMcMrFPNy7eUHkqEYCJ4l3ftnxBlONqnvWcPwk8vanA9+M+xTYkl9XB9RBgebf31a5sLOHIXb/1TxtCEy5cS+mNXJjUxWtnM9CrizfJ0OYXnJ3qOx6P4t0U3O29HpaZZVAQP6gwizWzpg1tm7dbAxA9iWYcn8yoNbvDFNUktsC0ryWH7LcUZ5FAyPpouU2QhB06YHOArwxug30ZGW4rQeRsZX16gKm3EOXrU1DagQgW+Z/Gfl4b+pgGc5lmcY6Y82D9N1Lqc96ZAEmkL2noXqlLHUaFss7hxhDYFJxbdVx2M1Q0pAyKo8EFNQcQsA9SKNrEeyMaPVWeWBDgU2SzrQq7tlzWCNTWWphSldIdmmYM8ogSkbs4jBEETxaYj2dxXRz3G4irUdScQ2DnhP3XSqZobPRg+GfpYwMi+1p1Kvo5TVWyqk7dxoWDZqK/K0QZAmm98X79lo6NGSCpwSt9295ZJNIoYy03hAA0mDken13q3cVC8pR4zQJTKU18cVae5AC+sPI2LW10XPExcr41SxlvudaCldh6vqnjIyzzR26FzY0T1okQ9ldBVNpkbWq5ocZ0pXnV76eUQU3lPvEkHAAczMNlGHha6KE0i1yl7pUC1ttWXtdrNX1ht0teP+mFptEKOU+bBAQkEmuDR31WCEbveFkMBlXG8deJmos3VwlC6giNZPaoQ11f37cCA1MZrvF3O6zc8CUrJwhTovgaO39QACTCu9dmvMaKtcS97Ke2Fc478qpBIK5R62EkLmWXstQ0z9LwJWAVMk13C19mAMm957RVl89s6YLs8BUhka1FiLDM12fuUctxxkCliGD/W8eaghIzjUnzHOvWjPXG2qH6u47cyZDE2PmXRzSyJIodsR3w/U8TxoNyYMW4LpnL4vt7+8Jwcu5teq1TgJDRfXA9WXuBE0tJ5oT5rl/w3YdgMbjshPyrjyDOlAtzfTlTFNaiKYqWALps8HQjcld5hAQhpJ6PwvQUwm
TBNMmCe/+QEB4N8mW6SFaSl/WEy13jOksyDxCw9vn8aWbzdEEiaHVpyoKM7atbA4BUXpAt1BLXTaOUFpg6rYQ2Qq0VL4gkGSCpG1bbfNS735LIkWaYyo1nu8ZSlrfUDluFrWXKefREtqdbt2kWl/osST7A5i0+FpSE27/PHLDImfR3OOMNRzl1wQtb6YCWzax5CFhJXmg8XQfMqTeS9YUS71rGb2p1HheT5IrhvzWHCXNWl66dqWDrG4LTGUL0VRL3uelkna1al1Srt13ZO0UtoW5bhjywJFFW99TgNK8dvvq2l1Tj5v1FutNfqcl1d5q4ZdSVpDdOH+qN94Xz2u4ZOb0yvaUZQ6cFln5WH/yi1LJmezWb3km709F9Ktm2bpkOvV4NQOlfglS13VuZZ9yNmyyBmfoWRbL00OaO7kPa2DlyFxe4mBM0TbfVadkNa2dSa7YbVN66VPEp0E1r+kc5lqmCiUT+T8ba4GpdPleRLOFYoKGGLdrvaxD6ClKr1NaDy/XjUsW+JlGjkxiv1HpwshnPI+0rZV6KV3pdi9cz2Jel6Fr5k02znphXmptP7mmYOztGJ4gh20+qkeX/tVYWayW5tZg+A2ZI+tNWmR/ZvhnYm2T8emDjdOT1fIav0FTBupDsq6l0a2FTHoeC+3m1YZ6ZX0wcxe/aC2L9OkBbCurNRgnI+d++jrGBb5eyI4KgasBFQw6h/RuFnPNm6Tp1QGfqX1mkWtaWhH81ZpUDUwqvS1DCtthrCtt/acZWlDWL8ZM49q1hkIvREbKP2vfGGVRUUELOjsarA+lNfRbK4uOCkVLKa3vWi2v5CUY3grGvibdMWfai2eUjUm8QPp2eBw9c15+Mm9afQ2KRUs9rz8v78jcxs9opKsFqF5LJbK7IbsaPE+Z5/jxXHoe/91WJptDZXr1Bq5VjjTY1m6sM+qd3IPUuHvuY/QuHsqvN/Sc3aFBtetDo5ztS5YD0giUoafnUZcsfbRKM+kNNSTNTp8STKJPGtzqtTWDHAreLw11iX7UZ64zdajQMmla+qGhN2q1DNkpkS0ntRqI+ZGhnO7cSzIkLC9Ra2XMrsVTmfz/vl/EImhtmVJJduEDQcGvAttZ758qiPeXwxDNf9MbK0dlZStVDpVZRTW3tADrfals9jMKrFp9T7DpgbxjQyVzIL2dHkGv5O/TqP0qAu9Az2aRXiNg97deUf0ycrGjpvXLTBIMhmo1Kt5zCFBBbHgrQG2DK+eTxtdp/CwK+NYfW+spE/NfezubHwgOdUAMKf5e/zy7JewBU1C10GypPXlhWjc7AcYW9pZae51nWAKGqIbxgnibhi4/EvUTolrXyME73TUwufFk56ybtJLF4asYfsLE2bjekGOTOtrwCusTcySQ5RU9Yd+vOZizhu9KxZu/t74q2GkwuXmTVKlNc7naZxNzBWi4YX5gPtX73f/chdb3Z0nABmzD1iQWZk1WeXmS4d5Fz+SZ3bf5nQyUTNSS4Z7khYSDdK6ufR37twQsGEusWAIo88O5uza/trveskFfPrXznikBJTkgzelvqVkiFBNIeiM/029+6z/3ltb3F5eAX8BKKHQJlU0XEkiGdZJcMnyjfqvrrnqmUkjWCgRS9VPiidL04zXnmtqVPnGZ9fEtSEAGUWZxibBfKlziYcynGQeEZ9rCfaxTrhLYXAIHgmfa/PTrm6sEFpTACqYFhblO9f8tgf8Abcz9Q24c/fYAAAAASUVORK5CYII=", null, "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOIAAAAnCAYAAAASNZTQAAAJpklEQVR4Xu2cd+xsRRXHPwgoCkpEwEgkikCIIVE6Cij+YQETUImAoPTeFDSg2HtHsCtopCglGhAIoQcUJBBK6KBCKEpVINi75kPOmH377u7evvfHbyZ5eS9v586ce2a+p5+7DHlkDmQOzJ0Dy8ydgkxA5kDmABmI+RJkDgyAAxmIAziETELmQAZivgOZAwPgQAbiAA5h4CSsAmwIXAb8Z+C0DoW8lwPLAbeUJSgDsSynFue8dYHDgM8AjyxOFtR662cBhwK3AxcA/521SgbiLA518/uLgF2BbYHNgMeBy4ETgSuBf3WzbaVV1wPeC3w0g/D/fBNgrwd2Ad4DPDaFo2pEhdidZcCYgVjpbjaeLL/fBBwHeNGLxleBDwF/brxb/QVWA6Tju8DP6i/ztHkyAfD9wGuAC4F3zgCiLy8fvwB8CbhjGjcyEPu9K/oOpwFXhfa7G1gdeAtwJKA/5jgc+FoZk6YD8pcNTejl+/xAtHMHr1l6SYH3VsCz2h94ZQUguslr4zm14xOTds1ALH0ejSd6sfW1fgccU3DBtwZOAl4CXALsDjzUeNfqC6wPfB04CPhl9cefVk+ID03Mf8JTOfePxZ+yGlFmeO5fBG4Gvp+BOP/7oSn6wdB8jxaQoybyoD8SANwOuL5nshMNXj7p+HfP+w99O3nyyYoa0Xd6A3DUNOGaNWJ/R799+AwTpWIc2EVB0lbAL/oj76md1ManAh8HLu5574WwXV0grgH8KCyNM4teNANxWMe/ZURNzT/tFBG3PilUWBglfQdwV58bL5C96gJxhXBH/g58APjH+PsWAfEZwJrAmwEPxstxToRidTZ95tXhyOtPHAj8eIEwcuhkasKoEX8C7As8OULwisCmwNsi8mpEbk/g3Jjz3DB7DwltZnBAf7Ts0Cz9FGDucHzv8TW6pqUszX3PqwtE6TwiMGXaaqlzKQKizuXLgAcBzSOjfB7yG+OADSq4qKjeJvyeb/TNkYb7vSBMBVMJTYbhfXnx1yaLjDzrWl8JIIybsC+OeQYODIfvBpwCCDwrXj4HPBvYPPKSBnvur0CXZ/xN4PeTpPbIWl3TUoHsXqc2AaJK7cvA2yNwswThs0zTZNu+LgD3U+B9kedayJUWQwTi80eiagfM0Gb+/p0I5ghIAzu/DaE5s4pjwtVVw+of/jw0Y9kb3gUtZffue14TICa3Q0UmjysB8ZlhgiqpzwstqCNvKDaPdjlgLlFNt1+JJPrGYZJaoaNWVot5Lk0qcozqngF8O9Ys+3Zd0FJ2777nNQHiq8LtMC1k4KYSEJ2cJJ7/NqH5vTklmvtmep/7vTQS/MeX1GqjlorF2GrFBxoSnICoMFjqokxZuwtaJm1nNcsPG75nk2h0EyBO5e8s09R3Tkj+U5hAfee2GvJ98I8b+DDhqz9XlOgveoEUhTsYOLaET1eGCXWB2AUtGYgFHDBwc3pE7N5VUVqWuQB15+jTqK31iaxYWYjDxLk+t6MsCJ2rALUkzjpGq/vViJqnTUZd07QLWpq8R5fPNtGIyUc0NaQLsMSYpRFTWVa6LEbm5l1xoQbx4ikUfDnzXobdq4whBGsSCNUoasQqkddNIsVhAv7XkyJxVRgSxQZ1gjVd0FKR9N6mtwHEQtN4GhD9zXaPLSKCZzCgLek7zjmDQobP/1iU7BybvHxoQcP0tgwtRCAKQvOvawMfntBpIf8VhH8be/8XRmeElS97x/m0YalUSV
8kkrqipTdkVdyoCRD1b+3eKCzUmAZEOwV80D/rAOcDD49IX01DE/qGYpt2bie1XcWRTs/UAWJF/rc6XRDuEV3v8nZSu5P8t3LfAE5KSfisNYv3AmeHOat5niwVz8FuAYu1q6aXvAu2X9ldMCuhL0O6pKVVhre4WBMgHh0WnNbcUl0Yo0BUK2n2qZU03exHswrfesfRyJjS13yi5qCmzHUtvOhiAaJVS1ZWWEjgocrr8eGcDaIVStCZH7T4WsBqoRg8E8CasiminSwVBaOFFxYmW05VdVjZY9R054LOC+/K83qkpSrtfcyvC8RkbShAP1FUTJ+AKAgFlgevNFWyfnoklJ5aOd4N3AT8Jn6z6saLY8mb0tu/jf4pWe0+Vwq4tmp5WrR1MQAxmfom4j2YWcN+REP1lg/ap/ir4K15qKTt7O5XKJpPVEMa2daf9/dVo89xowC2Z+HZaEFY3WEVje7GKGBd5+TIJY4XJysc3KsvWmbxp+/fVw6eeZevCaF4T0kiDISZElKAXlr0TAKif+8YQYM/xAHJ9FGTU5AZKn8O8NkIFowmkC2psp/O+kcPy6Jhm17NO2rqTOs6WAxAtHbXkrTU/DvrDE3we+BGUy2PMnItiHQP0rCkzdYpea9W9N8KyTRStcxf4uwUjl4I13SM9zx6DyzeMAikUB4Fqf5gn7TM4k9fv5s1kP87hNmf9rUn0SIXm7xvnNEytk9YQVowhc3Bs6KmZV82+Rc6ogYRrggJbae5EtwDv3rKYosBiGV52ea8lJLQGjG9Ya2jglSX4tqQ0OMmrMUFVuto1bThdqT3qUNLm7yY11qWLurnnxCVNYV0tAVE1baaT5NHk1aN6VCDau5YtpWaYatWR0yKCC7UYE2fF0JJrsmqO+EZCD75ZuCtsNQqcpT6on5C0YhuHV+z6B3r0NInr7ray/uuX6+FMTFF1RYQXxGmqmbTXvF9j9Rk6tfJNJmSGev/rzX21kYIvwVYKTL+kR3t8PsKuJSBOP3qpLYmfUN9e2tIPQNNT0FpYnlSzbAmr8UCml2axE1HE1qa7j3P573XKib9dgM1E0dbQExazm85Gowwymfwxi+AaVufNYMb2TRt/7oYrNEnNRKua+BF0Cw1Gm6wyPapaZ8D1Jc1uOAatzYkryktDbefy+OV+NcGEFOHhr2JScomp98LYCjcg7hhimrOQGz/rqSuiB+MWCRaIka6NVcN2qwUH8GdtLuXSWluBLVJjXEbtLTPoe5WTHzTF7+tzDZtADFJO6WrJpAfy00+o02sdpCb/PUCTGrTqQPEpHEXWkK/zLm0MUcrRXNUH9svLDiSn2ZBgS6CqYois390f1NX+vnOq9vr2BYtbfCljzVsnNYfnGZxLEFHG0BM0k6fwk59zVJ9DOsnTV775TIPfFotZRUgalY53/IuUy6G+JM/U7WapI9DmcceyUqxPNHAS8p3yTc1odaJgRg/Cd/1GBItXb9r7fXbAGLtzUce9Dsp+pKaQBYx55E5sKg4MBQgLiqm55fNHBjnQAZivhOZAwPgQAbiAA4hk5A5kIGY70DmwAA48D+/YotGeQJR8QAAAABJRU5ErkJggg==", null, "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGgAAAAnCAYAAADjEg0YAAAFuElEQVRoQ+2aZag1VRSGn09FUbERUVHBFsXAbkVsLOzEbsXCFjswMVFsxO7uHzaI/lBMVMRuxQ4sHu7an/vON2fmnHvPPd9cmQ2HGzOzZ+317rXWu959JtCORntgQqOta42jBajhm6AFqAWo4R5ouHltBLUANdwDDTevjaDhAE0BrA7sDHwLTAvMDNwFPAT8PkI8nXdFYHdgXmB64DfgHuBm4JtO87YA/eeZWYFTgWWBA4CX49LCwOXAx8DhwFc9giQYxwLrAAcBLwH/AIsCFwJzAPsBL5TN2wI05BUj5WzgQGBv4KpwYvLZesAdwA3AkcDPXYI0VYB6FrBlRGL+6BrALcCHwG7Am8V5W4CGPLJZOP8NYHvg/YKjZgGuBrYAdgJu7BKgBMCXwLbA24XnjK5LgF2B84Djimm0BWioHiQnXQEcCvxacKR+0nmmwLuBPYDvakCaMu4/JqJvT+D7kmf2iRT6DrAV8Gp+TwsQLBkOXAg4DLigg+M3Be4FfgQ2BJ6rAcioMyVuDHQC3ilWBZ6NufaNeydO3QIEyfE6ZTvg1g6Or3RkyTOLxFxL1QCUbxA3x9HAH2m+MoCkhPMAGzFkvIbdFwzEsPaZlQEL3+KAqN/eZU5u4m2moDPCsNUqIiN3+CSOrAHISJIZGn3FUQlkGUDTAPMDnwIaLE+fAZDJPA6sGXlalDcICmkO79c4AThllJO90qEoF6d1/SfGx2vdAlSVstI78hT3PLAD8EENQNcHk/ypKoLyOeYKxrJWAGFjZS9gwfxilE7s9PggAZJeGw0W6l4AehTYsarBBKYOkiAtd5TRbP/fcwTljvMlpjKZzYORG08qMo0xAmoQ0+br6wUge6JOrCy3e7kgIPMFwXAj5BvbCBZoU6DjTMAN+le3EeR9iQb6e1kTNwhHjuU78ojtZ4rTZgGw/7kUUKl4ADgHeB1YICQlSYTykmNELG4l4DHAvLhJJoGMpdMGOXe+AbsF6LJI9eppdUOQlHWMOCm3Kc2G9RngGmBpwPk+K/NvNzRbwqAcsXxNF62sYaMlVX0NWCKeMx38WbeKyXi9W5qdNqqEqapf6mUpuUJxXRCEYTJSHUAyutNjt5TmyLAmaU5KIUlTWgy4FrgTOD/PqzUrGCRJKBbpbhtV1/lkL0h0uDdJTF5WQbcRHjaqAPKautQqoeBKDh6Jib4uzLNCSOfnBitSrU3yiPTSqBomYVQsbtAA5VJPVX+T+qUngF0iJY0Go7mDHKwdUpNsrygxVX5pxAg4Kj4LAg8Dn2d60ezRqD4dFFydyh7Jv9NYN+qX1NzFC1wTR1KrPWKQVdkD5mOmULhN4YcAFxXWMmP0jtaWSZxcsmA3her5/h3Y3cRH8giScvqg3e5scVZxcXTWeT+kmms/JCA3Ae/FTvAgahvgrcwgi+NtURS7oaWTC7z8uKFMrU4AGj2SivxMSApt/bBXvDJakqrjCGuY50NKOtLryjOmBFDeVLkL/gZOCxXBXW8tEvGDAbv0j+KaKoPACJTgFiX11IRpcKdOenKBUnyvGSHJV24miY5DAKTJKif2g0U1ICcPRqDp/N2SRenrZYCTo2zoXw8CKyMuAeTPrQOEHwDrjVEiUGmov5mmpgvtKrGzBII6XTE9uLhO4DUFmNwON6I1wXX8Es6bM5pNe5gyZ7oxJVIW+eMjinLW6ua3Ru8VUpJirMB4SFc76lhc7QSZVFEGUEqN0smyA6tu5h/P9xgxrluCZHRZDnpqOfoBUFWUjKcU18iN0A+AEsMRjPFKEhoJjkb1A6D8OLhIsz2TfyoEQPN0U2n2/xogF5dUW/uD1O8InKznCGBz4MXGeqHBhvUjglxeknrWDzbzCWBza39wf3xjpafi2GCfDdS0f
gGk0VJU6aniYxJLPSr3K0oj/UbmQJ3RxJf1E6Amrm/c29QC1HAIW4BagBrugYab10ZQwwH6F4f2XTcb+D71AAAAAElFTkSuQmCC", null, "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAP4AAAAnCAYAAAAvp/W2AAAJMElEQVR4Xu2dCbDF1RzHP5GtxBTakEGmLEOmvUilsYaytSBFpZJ2Em2E0ESLjKaaEUnRSg3FyFKW0aqpGEyppn2ZrCFpPs3v1P/d7n33/t9/vfedM/PmvXnvf//nnO8539/5rectQW4ZgYzAokNgiUU34zzhjEBGgEz8vAkyAosQgXHE3z0w+coixGYWp7wK8DngE8CNszjBPKfJEBhH/IPiNYdN9rr8VM8RWA04BtgD+EPPx5qH1yACmfgNgtvDV2fi93BRuhhSJn4XqHfXZyZ+d9h31fOzgZcAPyoOIBO/q+Xopt9ZIf6TgM2AbYA9gbu7gbMXvT4e+Fj4boYN6DrgXcA1mfi9WK9OBjHtxE+E3x94NXAB8J5FTvznA98G1h2xo/TpKBj+nYnfCed60ek0E1+ibwH8GdgZeEUm/sPheB21GwInAP8d2GX/A64dJhizqt8LPrY2iGklvvt0ydjY/nxIfC32E38l4OvAkcCFZXZRJn4ZtKb/2S6J/zTgfcBSwBEVoTTM/Ol84vNB4M3w8Pd7y2CaiV8Grel/tgviJ8J/BLD/g4GqeSGZ+LB8qPdvBf4C/AY4F/gJcDvw//m2ayb+9JO5zAzaJP4zgO0BCf884Ldx0p8H/KvMoIc8m4kPbwfOHIHjN8IUumEUzlWI/zjgucCbAKWODobvxUKrdvju9YHPAy8FdgG+W3HBZ/XjbWHZBvG1O1U99waWq5nwaf2bIv5TgI2BrYHbwoGob+FA4Nc923wvBp4DvDw8+q8NvNMwrwJ2BX41bNxViG9o5QXALcCrIqSwDPC6SBZ4TSz+f4A3RH54zvkfvnvawrJJ4rdB+KaIr+DdNGLhVwKfBO4ENgJ+BpwB7Ajc1zPyF4ejSfUOwFCn62xT/dev8sfBcVchfvFdKwPfCmlpAcg5wL4BoPZGbpMj0CSWTRDfwp/dgJ0aPOEH0avzxPdEVzvxnd+MmPc/osOkTvv7DwN/m3wZO3tyReCLQXgH4c/OzQP4kVYX8Z8YKr0Anh+dHAr8rrPpT2/HTWJZF/HdNyaOuN7vB9T06rThx61eXcQvkt54d/F0XAE4PjQB8wZOGzeoHv09jf1twC+BbcMBWDvxfeGHgK/FmwXqxHGexR4BNWwoOqfUYl5fYZxuHMlR1pnVFJZViT+K8F8AdNrNyQ6rgNu4j9ZFfM3Rk8P5WNyzCjLn9F7gM8DRE84tjWvc+Ef9Xbt8q5oqJ5O2opai+T3HR1HXie9E1oskgr8DbwEuW+jse/K5LonfFJZVia8vxxTQV8Yaqd3tBfyp5TWrg/jLxoluHru2sHn/N4VDWmeejj5zBQyPPTjh/PpEfCMppwIbhA/ukiZUfd+po091aO2QlJ6WfWjPCm3EuOZn+zCgCcbQFJZVie/QJcTmwEdjrT1RPDXNHhsZPppgzmUeqYP4esHPDjPFDMCrgbWAiyM6dQXwQJlB9ezZdHC5l945aHbXdeLrlZZUOvRsh4dDwVzhrtrSYbOprhlqrCNxpI25NIllHcRPGAwWzLQpAOog/gHhxXfc7hHLVsuaZG3sh4X2kYiv2dKIja/wUE1SpTAEolPvh0G6uxY66hGf0/HlRFysOV7KIc8/IXwMVi0pxaeB+E1jWSfxuxQAVYn/5NBQjEbUaVfXvN0rvc74vmHIHzRVnWcigbFDv1aNjkx+SOqFqrYJPD8vYSuNmrEntyTW1pxjs8wDUfrMNBC/aSybIH6CXg+5zjJNAB2iSQP4MnB9zY7eMsR3XKsDd8SX4y1GTlTx3w38fsgeMjT2whJ7rRJTa/ywc/545B7Iw0sH370QVV/QVKNdWNUJPZ7HBjjFGLTqk/F887J1Mjym8wVMdNaI3zaWTRI/LafJMGp/+wBbNiQAJiW+5ogaqCTwZFflNWxnM7vQyJNtMArlHPQBeNmstex9u59Qh6QRNC/XsOBJJ2tyQMrpTeLvzt1a/cfk7ZclvhtVIicw7MxwR3q5QJswYI2wQOsl9W9+CaYpvGZD+d1bXs2QeiOgveW7vVRhvmjALBG/CpYupPXo3kKzTqRHi/nLwqRRwzJP/vsDwrUN4hcFwBrAfmEK1uUDeDpwXOyV5I1XoxjW9NybfGMFm81U3NPj52KsO43NlPOnFsbr/lR77VszRPylwqDUqr4K3B9j994CheNFozStssT3eaWN5P5rSFNP9WK4Q1I7EMsvvcpZO6PoHd0uvMBuTHO5DQVZaaT0NS3ypHlQniXi14FlSil1gypkFSY3h49FAf0poOhgbZP4aRmdp6q2JoBqpyfUQqrz9E5bE2J82ks5UtMj74lnoorptsX52rdxcQWF+9STXz9UapqhFhGp6ouNVW4/jj2oUJk0jNe2YDCyYuKUeCr49Xs59p9GPYzfU/bh0LGVJX7VCdqfm1Sg9aL+IkoJjQVbUKBQmK8YYpaIXxVLP6+GJJE0pczHVmhaNGV5pgQxXFVsXRC/KADM9jNk9p06Jp/fsXAE2ia+aponu5JKE0GNwKaGYJGH+d46YdKmPqXE1PQpDMsdmCbnXonpznFQeQuLoVSrIhWs4jssA6xL4peZW362YQTaJn4KMWg37RD3p6UMI9UTr1RKZoG/94QoNr3e2jKGYbw9tNi081R3BtusEj85UrVVdVRZfpkE6z9DhdUcK7ZM/IYJNS2vb5v4qqae4npL9UpqjyU7dZhqOorEsxrOK7NvFGjGaK0HMMXUPHltadVoVX5LoAcTqDLxyyA8w8+2SfwUO7U2X++qlXv2r4dS217V9JnA5fNkUGUb/9HNKG4671Tr00WLCk8JL77WTKgBFe+cz8SfYTKXmdo44nsym8Y4n6d90v4ktaEVN6Khp3sKqqnXAht6MvHA0N+oHOmFED9pFNOQwDMplnp19Yv4H1IUmubIJ8Gq8+wowDp5tYFiGqq/s/DE8Omtk3aWn5s9BMYRv84ZrxnkttwxqaFuYEODJlZ4gYd3hc2XL12G+IY4fP4DEYI0PGPfhn2m/XKQ5BexBt6MSdV8/6OKSTPGbxUKxnn7fGNMnXsrv6skAm0Sv+TQhj7+oghTnTXsOqE6OsjvyAgsBgSmjfiLYU3yHDMCjSOQid84xLmDjED/EMjE79+a5BFlBBpHIBO/cYhzBxmB/iHwEGyviEawy9XvAAAAAElFTkSuQmCC", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.8098355,"math_prob":0.98584676,"size":3912,"snap":"2021-04-2021-17","text_gpt3_token_len":1005,"char_repetition_ratio":0.13485159,"word_repetition_ratio":0.023988007,"special_character_ratio":0.2673824,"punctuation_ratio":0.15738499,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9996039,"pos_list":[0,1,2,3,4,5,6,7,8,9,10],"im_url_duplicate_count":[null,null,null,null,null,null,null,null,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-04-18T17:31:02Z\",\"WARC-Record-ID\":\"<urn:uuid:92890b17-8ece-4538-843f-9ab8546d559d>\",\"Content-Length\":\"56937\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:fc1be693-7c94-4b70-b894-ea7d16e299f0>\",\"WARC-Concurrent-To\":\"<urn:uuid:bb3d77c9-3ef4-46b4-b33e-32d671b53a08>\",\"WARC-IP-Address\":\"147.83.2.20\",\"WARC-Target-URI\":\"https://numfactory.upc.edu/web/introMatlab/html/ProgMatlab.html\",\"WARC-Payload-Digest\":\"sha1:2B4G2S43TIVMXRW3GCTXCSRDUWN7N4RQ\",\"WARC-Block-Digest\":\"sha1:BFGBTAPHV5DXAQHN6ZDOIV6V6QMREWC3\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-17/CC-MAIN-2021-17_segments_1618038507477.62_warc_CC-MAIN-20210418163541-20210418193541-00088.warc.gz\"}"}
http://www.prismmodelchecker.org/casestudies/bluetooth.php
[ "# Bluetooth Device Discovery\n\n### Contents\n\nRelated publications: [DKNP04, DKNP06]\n\n### Introduction\n\nBluetooth is a short-range low-power open standard for implementing wireless personal area networks. To avoid potential problems of interference, it uses a frequency hopping scheme, where devices alternate rapidly in a pseudo-random fashion among agreed sets of frequencies. Before communication can take place, devices must undergo an initialisation process, organising themselves into small networks called piconets, comprising one master and up to 7 slave devices in which frequency hopping sequences are synchronised. The first step of this initialisation process is called device discovery or inquiry. During this, an inquiring device (which will become the master of a piconet) broadcasts messages to discover scanning devices in the vicinity (potential slave devices).\n\n### The Protocol\n\nThe following is a short description of the behaviour of inquiring and scanning devices according to the Bluetooth protocol. We work from the current version (1.2) of the Bluetooth Specification.\n\nAll Bluetooth devices use a 28 bit free-running clock which ticks in 312.5μs time-slots. Communicating devices use a previously agreed sequence of 32 frequencies which, for convenience, we refer simply to as 1,2,...,32.\n\n##### The Inquiring Device\n\nOn two consecutive 312.5μs time slots, the inquiring device broadcasts inquiry packets on two sequential frequencies. During the next two time slots, the device scans for a reply on these same two frequencies. It then moves on to the next two frequencies. This process continues until some specified bound on the number of replies received or the total time is exceeded.\n\nThe 32 frequencies used are split into two trains, A and B, of 16 frequencies each. The inquiring device swaps between trains, repeating each one 256 times (i.e. for 2.56 seconds). In addition, every 1.28 seconds, a frequency is swapped between train A and B. The resulting sequence of frequencies is shown below. Each line of the table is repeated 128 times, taking 1.28 seconds. 
Initially, train A (light grey) contains frequencies 1...16 and train B (dark grey) contains 17...32. The 32 rows below (one per 1.28 second period) give the resulting sequence; in each row one frequency has been swapped between the trains:\n\n
```\n
 1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16\n
17  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16\n
 1  2 19 20 21 22 23 24 25 26 27 28 29 30 31 32\n
 1  2  3 20 21 22 23 24 25 26 27 28 29 30 31 32\n
17 18 19 20  5  6  7  8  9 10 11 12 13 14 15 16\n
17 18 19 20 21  6  7  8  9 10 11 12 13 14 15 16\n
 1  2  3  4  5  6 23 24 25 26 27 28 29 30 31 32\n
 1  2  3  4  5  6  7 24 25 26 27 28 29 30 31 32\n
17 18 19 20 21 22 23 24  9 10 11 12 13 14 15 16\n
17 18 19 20 21 22 23 24 25 10 11 12 13 14 15 16\n
 1  2  3  4  5  6  7  8  9 10 27 28 29 30 31 32\n
 1  2  3  4  5  6  7  8  9 10 11 28 29 30 31 32\n
17 18 19 20 21 22 23 24 25 26 27 28 13 14 15 16\n
17 18 19 20 21 22 23 24 25 26 27 28 29 14 15 16\n
 1  2  3  4  5  6  7  8  9 10 11 12 13 14 31 32\n
 1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 32\n
17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32\n
 1 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32\n
17 18  3  4  5  6  7  8  9 10 11 12 13 14 15 16\n
17 18 19  4  5  6  7  8  9 10 11 12 13 14 15 16\n
 1  2  3  4 21 22 23 24 25 26 27 28 29 30 31 32\n
 1  2  3  4  5 22 23 24 25 26 27 28 29 30 31 32\n
17 18 19 20 21 22  7  8  9 10 11 12 13 14 15 16\n
17 18 19 20 21 22 23  8  9 10 11 12 13 14 15 16\n
 1  2  3  4  5  6  7  8 25 26 27 28 29 30 31 32\n
 1  2  3  4  5  6  7  8  9 26 27 28 29 30 31 32\n
17 18 19 20 21 22 23 24 25 26 11 12 13 14 15 16\n
17 18 19 20 21 22 23 24 25 26 27 12 13 14 15 16\n
 1  2  3  4  5  6  7  8  9 10 11 12 29 30 31 32\n
 1  2  3  4  5  6  7  8  9 10 11 12 13 30 31 32\n
17 18 19 20 21 22 23 24 25 26 27 28 29 30 15 16\n
17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 16\n
```\n
##### The Scanning Device\n\n
Scanning devices, i.e. those which wish to be discovered, listen on the same 32 frequencies, but hopping at a much slower speed to ensure eventual synchronisation. The devices alternate between a sleep state and a scan state. If, in the scan state, they successfully hear an inquiry packet, they wait 2 time slots and then send a reply packet. Subsequently, they perform a random back-off, waiting a random number of slots (0...127) before returning to the scan state. This is to prevent repeated collision of messages sent by two or more contending devices. In fact, the official Bluetooth specification permits a range of values for the maximum back-off. Our choice (127) is consistent with the other parameters we have adopted. Below is a summary of the behaviour and the time spent in each state.", null, "The frequency on which a device listens when it enters its scan state, known as its phase, cycles through the 32 frequencies in order according to the value of its clock and changes every 1.28 seconds.\n\n
### The Model\n\n
We consider a single inquiring device and a single scanning device, which we refer to as the sender and receiver, respectively. Since the clocks of each device are digital (resolution 312.5μs) and drift can be assumed to be negligible over the (relatively short) inquiry process, we model time elapsing in a synchronous fashion and our PRISM model is a DTMC.\n\n
A key modelling issue is the initial configuration of the model. We cannot assume that the sender and receiver both start in some fixed state because this is unrealistic. We restrict ourselves to the case where the sender is already transmitting inquiry packets and a receiver begins scanning after some unknown delay, which is a reasonable scenario. We can hence only fix the initial state of the receiver (variables r, freq and y) and also the counter rec. This gives a total of 17,179,869,184 possible initial states. 
For efficiency, we also fix the value of phase, and therefore must perform model checking on 32 separate DTMCs, each with 536,870,912 initial states.\n\nIn our model, the protocol ends when the sender has successfully received mrec replies from the receiver. For mrec=1, the average number of states in the 32 DTMCs was 3,440,187,794 and the PRISM MTBDD representation of it averaged at 13,713 nodes (267.8 KB). For mrec=2, the respective figures are 56,341,202,369 and 1,807,868 (35,310 KB).\n\nThe PRISM source code is given below.\n\n```// bluetooth model for one node in inquiry scan and one in inquiry\n// constants taken from standard\n// mxd/gxn/dxp 22/06/04\n\ndtmc\n\n// removed time spent in sleep (turned into one big time transition)\n// and combined this with scan (only go to sleep if do not hear anything in scan)\n// also numbered frequencies from 1..16 and used extra variable to say what train\n// (i.e. 1..16 train 0 and 17..32 train 1)\n\n// scan window: time to scan a frequency 11.25ms [36 slots]\n// scan interval: time between scans 0.64 seconds (less than 1.28 so we can have a smaller random choice) [2048 slots]\n// phase - time until frequency changes 1.28 seconds (as specified in the standard) [4096 slots]\n\n//----------------------------------------------------------------------------------------------------------------------------\n// CONSTANTS (from the standard)\nconst int phase = 4096; // length of a phase (one frequency for the receiver) [1.28s]\nconst int maxr = 127; // maximum random delay\nconst int mrep = 128; // number of times a train is repeated before switching\n\n//----------------------------------------------------------------------------------------------------------------------------\n// FORMULAE\n\n// we combine together scan and sleep when the receiver scans and hears nothing\n// the following formula is true in a state if and only if the receiver will hear something if it immediately starts scanning\n// (receiver scans for 32 slots so one loop of a train plus 4 more steps)\nformula success =\n// will see on current set of frequencies and there is time to complete a whole cycle\n((rep<mrep & ((t1=((freq<=c)?train:1-train) & f1<=c) | (t1=((freq<=c)?1-train:train) & f1>c)))\n// will see on current set of frequencies and there is not time to complete a whole cycle\n| (rep=mrep & ((send=1 & freq<=f1) | (send>1 & freq<f1)) & ((t1=((freq<=c)?train:1-train) & f1<=c) | (t1=((freq<=c)?1-train:train) & f1>c)))\n// will see on next set of frequencies and at least a whole cycle to to\n| (rep=mrep-1 & c<16 & swap & ((t1=((freq<=c)?train:1-train) & f1<=c+1) | (t1=((freq<=c)?1-train:train) & f1>c+1)) & ((f1=1 & freq>=15) | (f1=2 & freq=16)))\n| (rep=mrep-1 & c<16 & !swap & ((t1=((freq<=c)?1-train:train) & f1<=c+1) | (t1=((freq<=c)?train:1-train) & f1>c+1)) & ((f1=1 & freq>=15) | (f1=2 & freq=16)))\n| (rep=mrep-1 & c=16 & (t1=((f1=1)?1-train:train)) & ((f1=1 & freq>=15) | (f1=2 & freq=16)))\n// will see on next set of frequencies and less than a whole cycle to do\n| (rep=mrep & c<16 & swap & ((t1=((freq<=c)?train:1-train) & f1<=c+1) | (t1=((freq<=c)?1-train:train) & f1>c+1)) & f1<=freq+2)\n| (rep=mrep & c<16 & !swap & ((t1=((freq<=c)?1-train:train) & f1<=c+1) | (t1=((freq<=c)?train:1-train) & f1>c+1)) & f1<=freq+2)\n| (rep=mrep & c=16 & (t1=((f1=1)?1-train:train)) & f1<=freq+2 & c=16));\n\n// receiver swaps trains at end of sequence only when c is even\nformula swap = (((c=2)|(c=4)|(c=6)|(c=8)|(c=10)|(c=12)|(c=14)|(c=16)));\nformula swap2 = 
((((c=2)|(c=4)|(c=6)|(c=8)|(c=10)|(c=12)|(c=14))) & freq=c+1) | (c=16 & freq=1)\n
| ((((c=1)|(c=3)|(c=5)|(c=7)|(c=9)|(c=11)|(c=13)|(c=15))) & freq!=c+1);\n
// state where receiver's next time step corresponds to the whole of scan and sleep (scan interval)\n
formula sleep = (receiver=0 & y1=1);\n
// when the receiver hears something\n
formula hear = (freq1=freq & train1=train & send=1);\n\n
//----------------------------------------------------------------------------------------------------------------------------\n
module receiver\n\n
receiver : [0..3]; // state of the receiver:\n
// 0 - next state scan\n
// 1 listening on frequencies\n
// 2 sending and computing the random delay\n
// 3 wait random delay\n
y1 : [0..2*maxr+1]; // clock of the receiver\n
freq1 : [0..16]; // frequency of the receiver (use 0 for no frequency set)\n
train1 : [0..1]; // train of the receiver\n\n
[time] receiver=0 & y1=1 -> (y1'=y1-1); // time passes (2048 time slots pass)\n
[] receiver=0 & y1=0 & success -> (receiver'=1) & (freq1'=f1) & (train1'=t1); // move to scan\n
[] receiver=0 & y1=0 & !success -> (receiver'=0) & (y1'=1); // will not hear anything so scan then sleep\n
// scanning (will hear something - unless I have done something wrong)\n
[time] receiver=1 & !hear -> (y1'=y1); // hear nothing: stay in scan and let 1 slot pass\n
[] receiver=1 & hear -> (receiver'=2) & (y1'=2) & (freq1'=0) & (train1'=0); // hear something: get ready to send reply and let time pass\n
[time] receiver=2 & y1>0 -> (y1'=y1-1); // let time pass (1 slot)\n
// send the reply and choose the random back-off (0..127 slots) uniformly\n
[reply] receiver=2 & y1=0 -> 1/(maxr+1) : (receiver'=3) & (y1'=2*0)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*1)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*2)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*3)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*4)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*5)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*6)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*7)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*8)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*9)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*10)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*11)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*12)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*13)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*14)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*15)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*16)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*17)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*18)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*19)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*20)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*21)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*22)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*23)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*24)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*25)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*26)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*27)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*28)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*29)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*30)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*31)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*32)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*33)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*34)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*35)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*36)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*37)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*38)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*39)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*40)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*41)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*42)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*43)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*44)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*45)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*46)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*47)\n
+ 1/(maxr+1) : (receiver'=3) & (y1'=2*48)\n
+ 1/(maxr+1) : (receiver'=3) & 
(y1'=2*49)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*50)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*51)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*52)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*53)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*54)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*55)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*56)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*57)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*58)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*59)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*60)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*61)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*62)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*63)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*64)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*65)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*66)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*67)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*68)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*69)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*70)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*71)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*72)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*73)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*74)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*75)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*76)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*77)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*78)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*79)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*80)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*81)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*82)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*83)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*84)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*85)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*86)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*87)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*88)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*89)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*90)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*91)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*92)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*93)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*94)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*95)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*96)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*97)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*98)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*99)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*100)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*101)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*102)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*103)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*104)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*105)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*106)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*107)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*108)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*109)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*110)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*111)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*112)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*113)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*114)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*115)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*116)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*117)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*118)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*119)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*120)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*121)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*122)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*123)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*124)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*125)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*126)\n+ 1/(maxr+1) : (receiver'=3) & (y1'=2*127);\n// waiting random delay\n[time] receiver=3 & y1>0 -> (y1'=y1-1); // let time pass (1 slot)\n// finished waiting random delay (listen again)\n[] receiver=3 & 
y1=0 & success -> (receiver'=1) & (freq1'=f1) & (train1'=t1); // move to scan\n
[] receiver=3 & y1=0 & !success -> (receiver'=0) & (y1'=1); // will not hear anything (combine scan and sleep)\n\n
endmodule\n\n
//----------------------------------------------------------------------------------------------------------------------------\n
module frequency1\n\n
z1 : [1..phase]; // clock for phase\n
f1 : [1..16]; // frequency of receiver\n
t1 : [0..1]; // train of receiver\n\n
// update frequency (1 slot passes)\n
[time] !sleep & z1<phase -> (z1'=z1+1);\n
[time] !sleep & z1=phase & f1<16 -> (z1'=1) & (f1'=f1+1);\n
[time] !sleep & z1=phase & f1=16 -> (z1'=1) & (f1'=1) & (t1'=1-t1);\n
// update frequency (2048 slots pass)\n
[time] sleep & z1<=2048 -> (z1'=z1+2048);\n
[time] sleep & z1>2048 & f1<16 -> (z1'=z1-2048) & (f1'=f1+1);\n
[time] sleep & z1>2048 & f1=16 -> (z1'=z1-2048) & (f1'=1) & (t1'=1-t1);\n
// update frequency: something is sent by the receiver (cannot be sleeping here)\n
[reply] true -> (f1'=(f1<16)?f1+1:1) & (t1'=(f1<16)?t1:1-t1);\n\n
endmodule\n\n
//----------------------------------------------------------------------------------------------------------------------------\n\n
// sender\n
// note to make things easier we do not change the frequency for receiving\n
// will only work if the delays are divisible by 4 otherwise it will cause problems\n
// frequency of sender\n
module sender\n\n
// still should try some different orderings?\n\n
send : [1..3]; // 1 sending and 2,3 receiving\n
freq : [1..16]; // current frequency mod 16 freq+train*16\n
train : [0..1]; // used to work out the frequency (actual frequency equals freq+train*16)\n
c : [1..16]; // used to work out the trains\n
rep : [1..mrep]; // no of repetitions of a train\n\n
// sending\n
[time] !sleep & send=1 & (((freq=1)|(freq=3)|(freq=5)|(freq=7)|(freq=9)|(freq=11)|(freq=13)|(freq=15))) & freq!=c -> (freq'=freq+1);\n
[time] !sleep & send=1 & (((freq=1)|(freq=3)|(freq=5)|(freq=7)|(freq=9)|(freq=11)|(freq=13)|(freq=15))) & freq=c -> (freq'=freq+1) & (train'=1-train);\n
[time] !sleep & send=1 & (((freq=2)|(freq=4)|(freq=6)|(freq=8)|(freq=10)|(freq=12)|(freq=14)|(freq=16))) -> (send'=2);\n
// receiving\n
[time] !sleep & send=2 -> (send'=3);\n
[time] !sleep & send=3 & freq<16 & freq!=c -> (send'=1) & (freq'=freq+1);\n
[time] !sleep & send=3 & freq<16 & freq=c -> (send'=1) & (freq'=freq+1) & (train'=1-train);\n
[time] !sleep & send=3 & freq=16 & rep<mrep & c!=16 -> (send'=1) & (freq'=1) & (train'=1-train) & (rep'=rep+1);\n
[time] !sleep & send=3 & freq=16 & rep<mrep & c=16 -> (send'=1) & (freq'=1) & (rep'=rep+1);\n
[time] !sleep & send=3 & freq=16 & rep=mrep -> (send'=1) & (freq'=1) & (train'=swap?1-train:train) & (c'=c=16?1:c+1) & (rep'=1);\n
// big time step (2048 slots = 64 repetitions)\n
[time] sleep & rep<=64 -> (rep'=rep+64); // sleeping does not change frequency set\n
[time] sleep & rep>64 -> (rep'=rep-64) & (c'=c=16?1:c+1) & (train'=swap2?1-train:train); // sleeping changes current frequency set\n\n
endmodule\n\n
//----------------------------------------------------------------------------------------------------------------------------\n
const int mrec; // after receiving mrec messages the inquiry is stopped\n\n
// counts the number of replies received\n
module replies\n\n
// no of non garbled received messages\n
rec : [0..mrec];\n\n
[reply] rec<mrec -> (rec'=rec+1); // a reply is heard (synchronises with the receiver)\n
[time] rec<mrec -> (rec'=rec);\n
[] rec=mrec -> (rec'=rec);\n\n
endmodule\n\n
//----------------------------------------------------------------------------------------------------------------------------\n\n
// specify initial state (only that receiver starts scanning and nothing sent)\n
// note as changed the sender so that for freq to be odd send must equal 1 we need the extra condition\n
// init rec=0 & y1=0 & receiver=0 & freq1=0 & train1=0 & (send=1 | freq=2,4,6,8,10,12,14,16) endinit\n
const int k; // frequency the sender starts on\n
const int T; // train that the sender starts on\n\n
init\n
receiver=0 & y1=0 & freq1=0 & train1=0 & // initial state of the receiver\n
rec=0 & // nothing received yet\n
f1=k & t1=T & // initial frequency of the receiver (based on its clock)\n
(send=1 |((freq=2)|(freq=4)|(freq=6)|(freq=8)|(freq=10)|(freq=12)|(freq=14)|(freq=16))) // condition required on the sender\n
endinit\n\n
//----------------------------------------------------------------------------------------------------------------------------\n\n
// rewards - to compute expected time\n
rewards \"time\"\n
[time] !(receiver=0 & y1=1) : 1;\n
[time] receiver=0 & y1=1 : 2048;\n
endrewards\n
```\n\n
### Analysis results\n\n
We compute the expected time for completion of the inquiry process, i.e. the time elapsed until the bound mrec on the number of replies heard by the sender has been met. We compute this for all 17,179,869,184 possible initial states.\n\n
For mrec=1, the best- and worst-case expected times are 625μs (2 slots) and 2.5716s (8,229 slots), respectively. Below is a plot of the expected time against the number of initial states which result in this time. The discontinuities (an example is highlighted in the inset) correspond to the cases where the receiver sleeps different numbers of times before successfully hearing an inquiry packet. The 5 peaks correspond to 0,1,2,3 and 4 sleeps.", null, "For mrec=2, the best- and worst-case expected times are 0.0456s (146.0 slots) and 5.177 seconds (16,565 slots), respectively. Again, we plot the expected time against the number of initial states which result in this time. Here, there are 9 peaks (of which 4 are much smaller) corresponding to the receiver sleeping 0,1,...,8 times before hearing a message.", null, "By assuming that there is a uniform distribution on the set of possible initial configurations, we can calculate the cumulative probability distribution function for the time that it takes the sender to hear either one or two replies. This is shown in the graph below. In addition, we compare the function for two replies to a derived cumulative probability distribution function, based on the additional assumption that the times to hear each of the two messages are independent. This is constructed as a convolution of two copies of the distribution plotted for the case mrec=1 together with a distribution representing the random delay made by the receiver between sending the first reply and beginning its next scan. Clearly, we see that any analysis based on such an assumption is flawed and leads to incorrect results. We illustrate that, if the receiver sleeps before sending its first reply, it is less likely to sleep the second time.", null, "Finally, we extract from the three cumulative probability distributions we have presented above, the probability that the receiver sleeps at most K times before sending its first reply to the sender. 
These are shown in the table below.\n\n
| K | mrec=1 | mrec=1 (v.1.1) | mrec=2 (exact) | mrec=2 (derived) |\n
| --- | --- | --- | --- | --- |\n
| 0 | 0.500305 | 0.461240 | 0.455377 | 0.250305 |\n
| 1 | 0.633575 | 0.596265 | 0.591388 | 0.383657 |\n
| 2 | 0.759062 | 0.731585 | 0.729611 | 0.526981 |\n
| 3 | 0.879674 | 0.857913 | 0.855804 | 0.681114 |\n
| 4 | 1 | 0.984295 | 0.984253 | 0.849408 |\n
| 5 | 1 | 0.988269 | 0.988328 | 0.911750 |\n
| 6 | 1 | 0.992398 | 0.992546 | 0.956496 |\n
| 7 | 1 | 0.996294 | 0.996534 | 0.985521 |\n
| 8 | 1 | 1 | 1 | 1 |\n\n
We also include in this table results from an additional verification we have performed, on a model of the previous version (1.1) of the Bluetooth specification (our analysis uses the current version 1.2). The main difference is that, in version 1.1, the receiver only sends a reply to every second message received. We were able to modify our PRISM model to reflect this change and rerun our experiments. Above, we successfully illustrate that, as was intended, version 1.2 indeed results in improved expected times." ]
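The independence comparison discussed above can also be reproduced numerically. The following is a minimal illustrative sketch (not part of the PRISM case study): it builds the derived mrec=2 distribution as the convolution of two copies of a single-reply time distribution with a uniform random back-off. The array `p_one` is a hypothetical stand-in for the mrec=1 distribution that would be extracted from the model results.

```python
import numpy as np

# Hypothetical probability mass function (indexed by slot number) of the
# time for the sender to hear ONE reply; in practice this is read off the
# PRISM results for mrec=1, here filled with placeholder values.
p_one = np.zeros(8230)
p_one[2] = 0.5
p_one[100:] = 0.5 / (8230 - 100)

# Random back-off between the first reply and the next scan: uniform over
# 0..127 slots, with each choice advancing the receiver clock by 2*R.
backoff = np.zeros(2 * 127 + 1)
backoff[0::2] = 1.0 / 128

# Derived distribution for hearing TWO replies under the (flawed)
# independence assumption: first reply, then back-off, then second reply.
p_two_derived = np.convolve(np.convolve(p_one, backoff), p_one)

# Cumulative distribution, comparable to the exact mrec=2 CDF from PRISM.
cdf_two_derived = np.cumsum(p_two_derived)
```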
[ null, "http://www.prismmodelchecker.org/images/cs/bluetooth.scanning.gif", null, "http://www.prismmodelchecker.org/images/graphs/bluetooth.dist1.gif", null, "http://www.prismmodelchecker.org/images/graphs/bluetooth.dist2.gif", null, "http://www.prismmodelchecker.org/images/graphs/bluetooth.cdfs.gif", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.7513328,"math_prob":0.993304,"size":22259,"snap":"2020-24-2020-29","text_gpt3_token_len":7663,"char_repetition_ratio":0.33610424,"word_repetition_ratio":0.116611,"special_character_ratio":0.46075743,"punctuation_ratio":0.12436548,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.99720657,"pos_list":[0,1,2,3,4,5,6,7,8],"im_url_duplicate_count":[null,2,null,2,null,2,null,2,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-07-11T00:33:52Z\",\"WARC-Record-ID\":\"<urn:uuid:c43b91cb-21cc-4c0a-8d61-cbe75fb2885b>\",\"Content-Length\":\"104111\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:009dad7a-6b6c-48d9-9dde-6f966efb580a>\",\"WARC-Concurrent-To\":\"<urn:uuid:634e81e3-9b5f-4b3a-8546-32148a956c93>\",\"WARC-IP-Address\":\"163.1.88.73\",\"WARC-Target-URI\":\"http://www.prismmodelchecker.org/casestudies/bluetooth.php\",\"WARC-Payload-Digest\":\"sha1:U64JXEET75FO5EJAV54C7CALXC3MTMA3\",\"WARC-Block-Digest\":\"sha1:BMLBZZQRDVQR7CW5QPDZRUPSRI4BZOSH\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-29/CC-MAIN-2020-29_segments_1593655919952.68_warc_CC-MAIN-20200711001811-20200711031811-00012.warc.gz\"}"}
https://tools.carboncollective.co/inflation/us/1938/30591/2009/
[ "# $30,591 in 1938 is worth$465,454.00 in 2009\n\n$30,591 in 1938 has the same purchasing power as$465,454.00 in 2009. Over the 71 years this is a change of $434,863.00. The average inflation rate of the dollar between 1938 and 2009 was 3.88% per year. The cumulative price increase of the dollar over this time was 1,421.54%. ## The value of$30,591 from 1938 to 2009\n\nSo what does this data mean? It means that the prices in 2009 are 4,654.54 higher than the average prices since 1938. A dollar in 2009 can buy 6.57% of what it could buy in 1938.\n\nWe can look at the buying power equivalent for $30,591 in 1938 to see how much you would need to adjust for in order to beat inflation. For 1938 to 2009, if you started with$30,591 in 1938, you would need to have $465,454.00 in 1938 to keep up with inflation rates. So if we are saying that$30,591 is equivalent to $465,454.00 over time, you can see the core concept of inflation in action. The \"real value\" of a single dollar decreases over time. It will pay for fewer items at the store than it did previously. In the chart below you can see how the value of the dollar is worth less over 71 years. ## Value of$30,591 Over Time\n\nIn the table below we can see the value of the US Dollar over time. According to the BLS, each of these amounts are equivalent in terms of what that amount could purchase at the time.\n\nYear Dollar Value Inflation Rate\n1938 $30,591.00 -2.08% 1939$30,157.09 -1.42%\n1940 $30,374.04 0.72% 1941$31,892.74 5.00%\n1942 $35,364.06 10.88% 1943$37,533.64 6.13%\n1944 $38,184.51 1.73% 1945$39,052.34 2.27%\n1946 $42,306.70 8.33% 1947$48,381.51 14.36%\n1948 $52,286.74 8.07% 1949$51,635.87 -1.24%\n1950 $52,286.74 1.26% 1951$56,408.94 7.88%\n1952 $57,493.72 1.92% 1953$57,927.64 0.75%\n1954 $58,361.55 0.75% 1955$58,144.60 -0.37%\n1956 $59,012.43 1.49% 1957$60,965.04 3.31%\n1958 $62,700.70 2.85% 1959$63,134.62 0.69%\n1960 $64,219.40 1.72% 1961$64,870.28 1.01%\n1962 $65,521.15 1.00% 1963$66,388.98 1.32%\n1964 $67,256.81 1.31% 1965$68,341.60 1.61%\n1966 $70,294.21 2.86% 1967$72,463.79 3.09%\n1968 $75,501.19 4.19% 1969$79,623.38 5.46%\n1970 $84,179.49 5.72% 1971$87,867.77 4.38%\n1972 $90,688.21 3.21% 1973$96,329.11 6.22%\n1974 $106,960.02 11.04% 1975$116,723.11 9.13%\n1976 $123,448.79 5.76% 1977$131,476.21 6.50%\n1978 $141,456.26 7.59% 1979$157,511.11 11.35%\n1980 $178,772.94 13.50% 1981$197,214.32 10.32%\n1982 $209,363.94 6.16% 1983$216,089.62 3.21%\n1984 $225,418.79 4.32% 1985$233,446.21 3.56%\n1986 $237,785.36 1.86% 1987$246,463.66 3.65%\n1988 $256,660.66 4.14% 1989$269,027.23 4.82%\n1990 $283,563.38 5.40% 1991$295,496.04 4.21%\n1992 $304,391.30 3.01% 1993$313,503.51 2.99%\n1994 $321,530.94 2.56% 1995$330,643.15 2.83%\n1996 $340,406.23 2.95% 1997$348,216.70 2.29%\n1998 $353,640.64 1.56% 1999$361,451.11 2.21%\n2000 $373,600.72 3.36% 2001$384,231.64 2.85%\n2002 $390,306.45 1.58% 2003$399,201.70 2.28%\n2004 $409,832.62 2.66% 2005$423,717.89 3.39%\n2006 $437,386.21 3.23% 2007$449,843.91 2.85%\n2008 $467,115.89 3.84% 2009$465,454.00 -0.36%\n\n## US Dollar Inflation Conversion\n\nIf you're interested to see the effect of inflation on various 1950 amounts, the table below shows how much each amount would be worth today based on the price increase of 1,421.54%.\n\nInitial Value Equivalent Value\n$1.00 in 1938$15.22 in 2009\n$5.00 in 1938$76.08 in 2009\n$10.00 in 1938$152.15 in 2009\n$50.00 in 1938$760.77 in 2009\n$100.00 in 1938$1,521.54 in 2009\n$500.00 in 1938$7,607.70 in 2009\n$1,000.00 in 1938$15,215.39 in 2009\n$5,000.00 in 1938$76,076.95 in 
2009\n$10,000.00 in 1938$152,153.90 in 2009\n$50,000.00 in 1938$760,769.50 in 2009\n$100,000.00 in 1938$1,521,539.01 in 2009\n$500,000.00 in 1938$7,607,695.04 in 2009\n$1,000,000.00 in 1938$15,215,390.07 in 2009\n\n## Calculate Inflation Rate for $30,591 from 1938 to 2009 To calculate the inflation rate of$30,591 from 1938 to 2009, we use the following formula:\n\n$$\\dfrac{ 1938\\; USD\\; value \\times CPI\\; in\\; 2009 }{ CPI\\; in\\; 1938 } = 2009\\; USD\\; value$$\n\nWe then replace the variables with the historical CPI values. The CPI in 1938 was 14.1 and 214.537 in 2009.\n\n$$\\dfrac{ \\30,591 \\times 214.537 }{ 14.1 } = \\text{ \\465,454.00 }$$\n\n$30,591 in 1938 has the same purchasing power as$465,454.00 in 2009.\n\nTo work out the total inflation rate for the 71 years between 1938 and 2009, we can use a different formula:\n\n$$\\dfrac{\\text{CPI in 2009 } - \\text{ CPI in 1938 } }{\\text{CPI in 1938 }} \\times 100 = \\text{Cumulative rate for 71 years}$$\n\nAgain, we can replace those variables with the correct Consumer Price Index values to work out the cumulativate rate:\n\n$$\\dfrac{\\text{ 214.537 } - \\text{ 14.1 } }{\\text{ 14.1 }} \\times 100 = \\text{ 1,421.54\\% }$$\n\n## Inflation Rate Definition\n\nThe inflation rate is the percentage increase in the average level of prices of a basket of selected goods over time. It indicates a decrease in the purchasing power of currency and results in an increased consumer price index (CPI). Put simply, the inflation rate is the rate at which the general prices of consumer goods increases when the currency purchase power is falling.\n\nThe most common cause of inflation is an increase in the money supply, though it can be caused by many different circumstances and events. The value of the floating currency starts to decline when it becomes abundant. What this means is that the currency is not as scarce and, as a result, not as valuable.\n\nBy comparing a list of standard products (the CPI), the change in price over time will be measured by the inflation rate. The prices of products such as milk, bread, and gas will be tracked over time after they are grouped together. Inflation shows that the money used to buy these products is not worth as much as it used to be when there is an increase in these products’ prices over time.\n\nThe inflation rate is basically the rate at which money loses its value when compared to the basket of selected goods – which is a fixed set of consumer products and services that are valued on an annual basis." ]
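The two formulas above can be checked with a short script; this is a small illustrative sketch (not from the source page), using only the CPI values quoted there.

```python
# CPI values quoted above
cpi_1938 = 14.1
cpi_2009 = 214.537
amount_1938 = 30591.0

# Equivalent 2009 value: the 1938 amount scaled by the CPI ratio
amount_2009 = amount_1938 * cpi_2009 / cpi_1938
print(f"${amount_2009:,.2f}")      # -> approximately $465,454

# Cumulative inflation over the 71 years, in percent
cumulative_rate = (cpi_2009 - cpi_1938) / cpi_1938 * 100
print(f"{cumulative_rate:,.2f}%")  # -> 1,421.54%
```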
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.85693264,"math_prob":0.9937988,"size":6726,"snap":"2022-05-2022-21","text_gpt3_token_len":2597,"char_repetition_ratio":0.1502529,"word_repetition_ratio":0.018535681,"special_character_ratio":0.54653585,"punctuation_ratio":0.21095102,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9886381,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2022-05-22T19:39:43Z\",\"WARC-Record-ID\":\"<urn:uuid:972f1f21-1ceb-4ee1-9de5-d0fb524edb49>\",\"Content-Length\":\"43988\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:b5562718-0b42-454d-9db3-488d9cb3bf72>\",\"WARC-Concurrent-To\":\"<urn:uuid:cd39f651-cc52-4c22-af41-27e8d1f8795c>\",\"WARC-IP-Address\":\"138.197.3.89\",\"WARC-Target-URI\":\"https://tools.carboncollective.co/inflation/us/1938/30591/2009/\",\"WARC-Payload-Digest\":\"sha1:2XXU5MCBEJRGNU4UU45V4R4GA3HEDYU2\",\"WARC-Block-Digest\":\"sha1:UKBACTDMEULJT34NIO3RYSXXRBJ6YETY\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2022/CC-MAIN-2022-21/CC-MAIN-2022-21_segments_1652662546071.13_warc_CC-MAIN-20220522190453-20220522220453-00096.warc.gz\"}"}
https://answers.yahoo.com/question/index?qid=20061228091953AABqUXL
[ "# explain the concept of components of a vector?\n\nRelevance\n\nOK!\n\nA vector has two components: size (magnitude) and direction.\n\nThink about velocity. A car traveling down a straight road has a speed (how fast) and a direction. Together, the car's velocity is described by a vector, e.g., 55 mph due west.\n\n•", null, "Login to reply the answers\n• A vector has 2 attributes a importance and a course. The course could be expressed in 2 or 3 dimensional area. each and each vector could be broken down into factors (making use of common trigonometry) alongside the X, Y, and Z axis of area to make the diagnosis of forces performing upon an merchandise much less stressful to visualise and calculate the sum of the the forces. in ballistic action, projectile shuttle in a predictable arc. the preliminary forces are the perspective and tension with which the object bypass away the barrel of the firing mechanism and gravity. making use of trigonometry we are able to in spite of the preliminary perspective wreck the launching tension into X, and Y factors. The Y component will act as we talk opposite gravity and could with the aid of the years decay the vertical component until the object returns to \"earth\". The X component assuming no friction won't decay and could tell how a techniques away the projectile will land.\n\n•", null, "Login to reply the answers\n• A vector has length and direction. For example a vector in a coordinate system could start at the origin (0,0) and extend to the point (3,4). In this case its length would be 5 units. Its horizontal component would be 3 and its vertcal component would be 4.\n\nIn other words, you could get from (0,0) to (3,4) by following the direct route of the vector, or you could get there by first going 3 units directly East and the 4 units directly North. In the latter case, you would travel along the components of the vector.\n\n•", null, "Login to reply the answers\n• In physics and in vector calculus, a spatial vector, or simply vector, is a concept characterized by a magnitude and a direction. A vector has properties that do not depend on the coordinate system used to describe it. However, a vector is often described by a fixed number of components, each of which is dependent upon the particular coordinate system being used, such as Cartesian coordinates, spherical coordinates or polar coordinates.\n\n•", null, "Login to reply the answers\n• Anonymous\n\nA vector is a direction with a length (also called the magnitude). A vector can be in any dimenshion but the second dimenshion is the easiest to imagine. If you think about a 2D graph with cartesian coordinates (just like regular graph paper) and draw a line from (0,0) outward, it can be a vector.\n\nSo for example, if you draw a line from (0,0) to (2,1) your vector has coordinates (2,1). 2 is the x coordinate, 1 is the y coordinate. So if you got a vector like (34, 125) then you know to draw the vector from (0,0) to (34,125). When we mention the components (35,125), we don't need to say from (0,0) because that is implied.\n\n•", null, "Login to reply the answers\n• Basically, an n-dimensional vector has n components - one for each of the dimensions represented by the vector. For example, in a 2-dimensional vector, the two components represent the distance in the directions of the coordinate axes.\n\n•", null, "Login to reply the answers\n• A vector is a magnitude (ex. speed) and a direction (ex. Northwest or 45 degrees north of west). 
If you were driving northwest at 55 mph for one hour, you would end up 55 miles NW from where you started.\n\n
Say you wanted to take a different path but end up at the same place. You could use components to figure out how far north to travel and how far west to travel. In the example given, you would need to travel 38.89 miles north and 38.89 miles west. You can see that would require you to travel 77.78 miles, instead of 55 miles, but you would end up in the same place either way.\n\n
Vector components are very useful in the addition of vectors. (ex. adding forces to find the net force, adding velocities to find the net velocity)" ]
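The decomposition described in the last answer is easy to script; here is a small illustrative sketch (not from the original thread) reproducing the 55 mph northwest example.

```python
import math

speed = 55.0                  # magnitude of the velocity vector, in mph
angle = math.radians(135.0)   # northwest, measured counter-clockwise from due east

# Components along the x (east) and y (north) axes
vx = speed * math.cos(angle)  # ~ -38.89, i.e. 38.89 mph toward the west
vy = speed * math.sin(angle)  # ~ +38.89, i.e. 38.89 mph toward the north

# The magnitude is recovered from the components by the Pythagorean theorem
magnitude = math.hypot(vx, vy)  # ~ 55.0
```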
[ null, "https://ct.yimg.com/cy/1768/39361574426_98028a_128sq.jpg", null, "https://ct.yimg.com/cy/1768/39361574426_98028a_128sq.jpg", null, "https://ct.yimg.com/cy/1768/39361574426_98028a_128sq.jpg", null, "https://ct.yimg.com/cy/1768/39361574426_98028a_128sq.jpg", null, "https://ct.yimg.com/cy/1768/39361574426_98028a_128sq.jpg", null, "https://ct.yimg.com/cy/1768/39361574426_98028a_128sq.jpg", null, "https://ct.yimg.com/cy/1768/39361574426_98028a_128sq.jpg", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.9352338,"math_prob":0.9335452,"size":4272,"snap":"2020-24-2020-29","text_gpt3_token_len":984,"char_repetition_ratio":0.1455014,"word_repetition_ratio":0.037859008,"special_character_ratio":0.2366573,"punctuation_ratio":0.106481485,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.99653345,"pos_list":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],"im_url_duplicate_count":[null,null,null,null,null,null,null,null,null,null,null,null,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-05-26T09:15:54Z\",\"WARC-Record-ID\":\"<urn:uuid:dbc33ebb-eca4-4bc2-8fa0-ab31696add46>\",\"Content-Length\":\"130097\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:58762480-98ad-48a6-b541-4757ea02088d>\",\"WARC-Concurrent-To\":\"<urn:uuid:bf3d8575-345c-4f21-8886-dd6203642737>\",\"WARC-IP-Address\":\"69.147.92.11\",\"WARC-Target-URI\":\"https://answers.yahoo.com/question/index?qid=20061228091953AABqUXL\",\"WARC-Payload-Digest\":\"sha1:WEY3PTHWQ53OARJOPTOR5ISYXW46KJTB\",\"WARC-Block-Digest\":\"sha1:4S4PPDV4UHLBAXPDMVZB26FWDEZCNBKC\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-24/CC-MAIN-2020-24_segments_1590347390755.1_warc_CC-MAIN-20200526081547-20200526111547-00566.warc.gz\"}"}
https://physics.stackexchange.com/questions/357124/eddington-finkelstein-coordinate-system
[ "# Eddington Finkelstein coordinate system\n\nDo particles in Eddington Finkelstein coordinate system take a finite amount of time to reach the horizon? Or do they take infinite time? How is the time coordinate used in the Eddington Finkelstein coordinate system be matched with the time measured by an observer?\n\nWhat will an observer who is sitting on the particle would observe when he is using Eddington Finkelstein coordinates? How much time will he take in his reference frame?\n\nA coordinate system is just a scheme for labelling events in spacetime, and it does not necessarily relate directly to what is experienced by an observer. So it doesn't make sense to ask about particles using the EF coordinate system or indeed any other coordinate system.\n\nThe Schwarzschild coordinates correspond to the experience of an observer an infinite distance from the black hole. We can see this because in the limit of $r \\to\\infty$ the Schwarzschild metric:\n\n$$ds^2 = -\\left(1-\\frac{r_s}{r}\\right)dt^2 + \\frac{dr^2}{1-\\frac{r_s}{r}}+r^2(d\\theta^2 + \\sin^2\\theta d\\phi^2)$$\n\nsimplifies to the flat space Minkowski metric (in polar coordinates):\n\n$$ds^2 = -dt^2 + dr^2 + r^2(d\\theta^2 + \\sin^2\\theta d\\phi^2)$$\n\nSo for the observer at $r=\\infty$ the time $dt$ is just the time shown by that observer's clock. In this sense the Schwarzschild coordinates are intuitively simple.\n\nThe coordinates that correspond to the experience of an observer freely falling into the black holes are the Fermi normal coordinates. Locally these look like flat spacetime so the falling observer considers the spacetime immediately around them to be flat.\n\nBut the Eddington-Finkelstein coordinates do not correspond to anything directly experienced by an observer. So for example the EF timelike coordinate $v$ is related to the Schwarzschild coordinates by:\n\n$$v = t \\pm r + 2GM\\log\\left( \\frac{r}{2GM} - 1\\right)$$\n\nThis \"time\" $v$ is not a quantity that would be measured by any observer's clock.\n\nThe time experienced by the falling observer has a nice simple geometric interpretation. It is just the length of the observer's world line calculated using the metric. If you're interested I go into this in more detail in What is time dilation really?.\n\nThe length of the trajectory is called the proper time and it is an invariant, meaning that we can use any coordinates to calculate it and whatever coordinates we choose we will get the same result.\n\n• But we do calculate null geodesics in Eddington Finkelstein coordinate system, and we interpret them as the path of light. These travel at an angle of 45 with respect to r. How do we understand this if t does not represent physical time? Sep 14, 2017 at 7:11\n• You can calculate a geodesic using any coordinates. The EF coordinates happen to be well suited to dealing with null geodesics. The geodesic will look different in different coordinate systems but the length of the geodesic (i.e. $\\int ds$) measured between any two points will be the same in all coordinate systems. This length is the proper time i.e. the elapsed time as shown by a clock carried by the falling observer. For a null geodesic the elapsed time is always zero. Sep 14, 2017 at 8:00\n• Can I say that when I move from Schwarzchild coordinate system to Kruskal coordinates, then my point of observation is changing from an observer at infinite distance to an observer who is infalling into the black hole. Sep 15, 2017 at 13:41\n• @Aniket: no. 
The Kruskal-Szekeres coordinates do not correspond to anything an observer could experience. They are entirely abstract. The nearest to the experience of a freely falling observer is probably the Gullstrand-Painlevé coordinates, though while they accurately reflect the time experienced by the falling observer they do not correspond to distance measurements made by the observer. Sep 15, 2017 at 14:53\n
• Sorry, this might be a stupid question. But, looking at a metric can I say anything about the observer, whether the observer is at infinity or infalling into the black hole? Sep 17, 2017 at 4:37" ]
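The coordinate relation quoted in the answer can be evaluated directly. This is an illustrative sketch (not from the original post); it assumes geometric units G = c = 1 and takes the '+' sign branch, which is only defined outside the horizon (r > 2M).

```python
import math

def ef_v(t, r, M):
    """Eddington-Finkelstein coordinate v from Schwarzschild (t, r),
    using the '+' branch of v = t +/- r + 2GM*log(r/2GM - 1) with
    G = c = 1; valid only for r > 2M (outside the horizon)."""
    return t + r + 2.0 * M * math.log(r / (2.0 * M) - 1.0)

# As r approaches the horizon r = 2M from outside at fixed t, the
# logarithm diverges, showing how the Schwarzschild-style label
# degenerates there even though the EF chart itself remains regular.
for r in (10.0, 3.0, 2.1, 2.001):
    print(r, ef_v(t=0.0, r=r, M=1.0))
```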
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.8351125,"math_prob":0.970313,"size":1940,"snap":"2023-40-2023-50","text_gpt3_token_len":480,"char_repetition_ratio":0.1446281,"word_repetition_ratio":0.026058631,"special_character_ratio":0.235567,"punctuation_ratio":0.053370785,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.99000365,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-10-02T12:45:15Z\",\"WARC-Record-ID\":\"<urn:uuid:8fa707b5-fca9-4e52-96b8-3b78f6e9799d>\",\"Content-Length\":\"168181\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:519cf76a-922b-42b8-9bf1-24267836e9b2>\",\"WARC-Concurrent-To\":\"<urn:uuid:4c2df34e-209b-4bc0-83cd-ac9e26cf74c9>\",\"WARC-IP-Address\":\"104.18.11.86\",\"WARC-Target-URI\":\"https://physics.stackexchange.com/questions/357124/eddington-finkelstein-coordinate-system\",\"WARC-Payload-Digest\":\"sha1:ZGVGYIH5PXJRMPHSQBJ5NHK3OVRO6EZX\",\"WARC-Block-Digest\":\"sha1:5K5WXXHTHIFISSHZPBQ5KGGN7O5MXEQR\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-40/CC-MAIN-2023-40_segments_1695233510994.61_warc_CC-MAIN-20231002100910-20231002130910-00603.warc.gz\"}"}
https://www.hindawi.com/journals/jmath/2021/5593705/
[ "#### Abstract\n\nArtificial intelligence (AI) based business process optimization has a significant impact on a country’s economic development. We argue that the use of artificial neural networks in business processes will help optimize these processes ensuring the necessary level in the functioning and compliance with the foundations of sustainable development. In this paper, we proposed a mathematical model using AI to detect outliers in the daily return of Saudi stock market (Tadawul). An outlier is defined as a data point that deviates too much from the rest of the observations in a data sample. Based on the Engle and Granger Causality test, we selected inflation rate, repo rate, and oil prices as input variables. In order to build the mathematical model, we first used the Tukey method to detect outliers in the stock return data from Tadawul that are collected during the period from October 2011 to December 2019. In this way, we categorized the stock return data into two classes, namely, outliers and nonoutliers. These data are further used to train artificial neural network in conjunction with particle swarm optimization algorithm. In order to assess the performance of the proposed model, we employed the mean squared error function. Our proposed model is signified by the mean squared error value of 0.05. The proposed model is capable of detecting outlier values directly from the inflation rate, repo rate, and oil prices. The proposed model can be helpful in developing and applying intelligent optimization techniques to solve problems in business processes.\n\n#### 1. Introduction\n\nThe Kingdom of Saudi Arabia is an active member of the Organization of Petroleum Exporting Countries that plays a significant role in the oil markets. Tadawul was launched as an informal organization during the 1970s with only 14 companies listed. In 1984, the government created a committee to develop and regulate the market. In 2003, the government established the Capital Market Authority to regulate the market where only Saudi investors were allowed to invest in the market. In 2007, Tadawul started functioning as a formal organization with nearly 200 companies listed. In addition, the investors from the Gulf Cooperation Council countries were permitted to invest in Tadawul. In 2008, the Capital Market Authority approved a new regulation that allowed non-Arab foreign investors to participate in stock trading. In 2015, the financial regulators opened Tadawul to qualified foreign investment firms. In 2018, the Saudi Financial Supervision Authority took additional steps by allowing foreign investors to own up to 49% in listed securities. These measures helped Tadawul to attract foreign investors in order to become one of the most dynamic capital markets in the region.\n\nThe Saudi stock market (Tadawul) is considered the largest financial market in developing countries . It should be noted that the economy of the Kingdom of Saudi Arabia tends to rely on oil as a major source of revenue, and the stock market volatility depends on the fluctuation of oil prices. It should be further noticed that Saudi Arabia is a part of Group of twenty (G20): an international economic cooperation forum that involves representatives from 19 countries plus the European Union. More than 66% of the worldwide population, 75% of the international trade, and 85% of the global economy belong to the members of G20 . Moreover, Saudi Arabia is currently implementing an economic reform package and opening up its venues to the world. 
These factors exposed the Kingdom to external crises, due to which Tadawul has been suffering from heavy volatility during different periods.\n\n
Stock price variations are estimated by volatility, which reflects the behavior of the stock market. It explains whether a stock price is changing rapidly over time, which indicates high volatility, or slowly over time, which shows low volatility. Volatility is used to measure the standard deviation of stock prices. Stock market volatility (data containing outliers) measures risky stocks and plays an important role in supporting both market practitioners and policymakers, especially in emerging markets. The stock market volatility creates a wide variety of responses from market players, providing an opportunity to some participants who see volatility as a chance to make money, whereas some others see it as a threat. Therefore, a practitioner is always worried about the behavior of stock markets. In order to guarantee financial and macroeconomic stability, economists try to reduce excessive volatility. Indeed, an effective quantitative approach is needed to model the volatility of a stock market in order to protect against its negative effects.\n\n
Outliers in time series data are defined as a type of data anomaly where observed values deviate from their expected values, and they naturally correspond to critical events [3, 4]. The problem of outlier detection has been considered in several application areas such as customized marketing, credit card fraud detection, sensor event detection, fault diagnosis in industry, weather prediction, and loan approval-related applications. The occurrence of outliers in data may be due to several reasons such as poor data quality or inaccurate measurements. On the other hand, outliers can also indicate interesting and meaningful information that can be represented by periods of high or low volatility, particularly in financial time series data. Detecting outlier values is beneficial since these values reflect important information in many application domains. Similarly, in financial data and stock markets, outliers are defined as extreme points that deviate a lot from the other data points. Index and asset prices may demonstrate such behavior. Prior to modeling any financial time series data, it is required to identify the unlikely data points, provided that a certain fitted model is assumed to have generated the data. Financial time series data are frequently contaminated with outliers due to the influence of unusual and nonrepetitive events. Forecast accuracy in such situations is decreased dramatically due to a carry-over effect of the outliers on the point forecast and a bias in the model parameter estimation.\n\n
Metaheuristic optimization algorithms are used to estimate the optimal solutions to a set of parameters related to optimization problems and computational models [9, 10]. Note that metaheuristic optimization algorithms are characterized by their ability to quickly reach the global optimum values of a problem, and they are easy to implement and control according to different problem models. Particle swarm optimization (PSO), genetic algorithm (GA), and prey-predator algorithm (PPA) are some of the widely used optimization algorithms in many fields. Machine learning, data mining, artificial intelligence (AI), and engineering applications are some of the real-world optimization problems where metaheuristic algorithms can be used effectively [9, 14]. 
ANNs are widely used for forecasting and classification problem-solving; they can be fed with raw data, and the desired feature representation can be constructed automatically. Several factors add to the importance of ANNs, the most important of which are accuracy, speed, and convergence.\n\nThere are many outlier detection methods available in the literature. In earlier work, a model named RBFNDDA was proposed, which combines the radial basis function network with dynamic decay adjustment to learn information from a dataset and group it in terms of prototypes. Then, a neighborhood procedure based on rough sets is applied to detect prototype outliers. Interested readers may refer to the cited literature for a more in-depth understanding of outlier detection. Moreover, ANNs have been applied to solve real-world problems in various application domains such as medical diagnosis, pattern recognition, and other related applications. An ANN learns the nonlinearity of the input-output data mapping through a topological structure that is also self-adaptive, with a universal functional approximation capability. An ANN generates a good knowledge base that represents different patterns of data samples [21, 22]. It is evident from the literature review that the radial basis function neural network (RBFNN) has not been applied to detect and classify outliers in Tadawul from the inflation rate and repo rate, which are collected from the Saudi Authority for Statistics, and oil prices, which are collected from the Saudi Central Bank. We have used RBFNN in conjunction with PSO for the classification of Saudi Arabia stock prices. The obtained results confirmed the effectiveness of our proposed technique for the mentioned task. It is worth mentioning that some of the most representative computational intelligence algorithms can also be used for training RBFNN, such as the Earthworm Optimization Algorithm (EWA), Moth Search (MS) algorithm, Slime Mould Algorithm (SMA), and Harris Hawks Optimization (HHO).\n\nThis paper is structured as follows. In Section 2, we first present a description of the data, followed by an overview of RBFNN, PSO, and the Tukey method. In Section 3, we discuss the variable selection, correlation, causality test, and the proposed model along with its performance. In Section 4, we draw the conclusion.\n\n#### 2. Materials and Methods\n\n##### 2.1. Data Description\n\nThe sample data of closing prices are collected from the stock market (Tadawul) in Saudi Arabia. The day-to-day closing prices were collected during the period from October 2011 to December 2019. The sample size is 2026 [28, 29]. Table 1 shows the descriptive statistics of the dataset. Note that the natural logarithm of the standard deviation of the closing stock price is indicated by LSCS. In addition, the symbols Repo and Loil are used to represent the repo rate and the logarithm of the oil price, respectively. The mean and standard deviation of LSCS are 6.75 and 0.6923, respectively. Furthermore, the minimum and maximum values of LSCS are 3.83 and 7.22, respectively. The mean and standard deviation of Repo are 0.70 and 0.28, respectively. The minimum and maximum values of Repo are, respectively, 0.13 and 4.55. The mean and standard deviation of Loil are 4.30 and 0.35. The minimum and maximum values of Loil are 3.33 and 4.84, respectively.\n\n##### 2.2. Radial Basis Function Neural Network\n\nRBFNN is a special three-layered network, which consists of an input layer, a hidden layer, and an output layer, as shown in Figure 1.
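As a concrete illustration of this three-layer structure, the following sketch shows a forward pass with Gaussian hidden units, written in one common distance-based formulation (all parameter values are placeholders of our own; the exact parameterization used in this study is given below, and the actual parameters are fitted by PSO as described in Section 2.3):\n\n```python\nimport numpy as np\n\ndef rbf_forward(x, centers, widths, w_out, b_out=0.0):\n    # Forward pass of a Gaussian RBF network (illustrative sketch).\n    # x: (n_inputs,) input vector; centers: (n_hidden, n_inputs);\n    # widths: (n_hidden,) Gaussian widths; w_out: (n_hidden,) output weights.\n    d2 = np.sum((centers - x) ** 2, axis=1)   # squared distances to the centers\n    phi = np.exp(-d2 / (2.0 * widths ** 2))   # Gaussian hidden-layer activations\n    return float(phi @ w_out + b_out)         # linear output layer\n\n# Toy example: 3 inputs (Inf., Repo, Loil), 5 hidden neurons, 1 output.\nrng = np.random.default_rng(1)\ny = rbf_forward(np.array([2.1, 0.7, 4.3]),\n                centers=rng.normal(size=(5, 3)),\n                widths=np.full(5, 1.0),\n                w_out=rng.normal(size=5))\n```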
Note that each layer consists of a set of neurons. The input values of the RBFNN are passed through the input layer to the hidden layer via the “input weights”. After that, the output values of the hidden layer are forwarded to the output layer via the “output weights”. In this work, all hidden neurons have the same activation function, i.e., the Gaussian function:\n\n$\varphi_j = \exp\left( -\frac{\left( \sum_i w_{ij} x_i - c_j \right)^2}{2 \sigma_j^2} \right) \quad (1)$\n\nwhere $\varphi_j$ is the Gaussian function in the hidden neuron $j$, $w_{ij}$ is the input weight between the input neuron $i$ and the hidden neuron $j$, $c_j$ is the center, and $\sigma_j$ is the width. In this study, we have used three input neurons, five hidden neurons, and one output neuron (see Figure 1).\n\n##### 2.3. Particle Swarm Optimization Algorithm\n\nPSO, first proposed by Kennedy and Eberhart, is a popular swarm intelligence-based metaheuristic technique, inspired by the behavior of birds flying in flocks or fish swimming in schools, that can solve complex mathematical problems. Similar to evolutionary algorithms in nature, PSO starts the optimization process with a population of randomly generated solutions that are optimized with each passing generation. However, PSO does not incorporate any evolutionary operators like mutation and crossover. Particles (parameter vectors) in PSO determine their new location by following the current optimum particle in the problem space. It has been found to be effective when applied to various optimization problems including artificial neural networks, mechanical engineering design optimization problems, and chaotic systems. Moreover, it easily achieves high accuracy with fast converging speed [38, 39]. It has been widely used in many real-life optimization problems of different domains [31, 33, 34]. In this study, we have used the PSO algorithm to determine the optimal parameter values of our RBFNN model in order to find the minimum value of the mean squared error (MSE).\n\nThe algorithm searches the space for the optimal solution according to equations (2) and (3), and it keeps updating the parameter values until an appropriate solution is obtained:\n\n$v_i^d(t+1) = w \, v_i^d(t) + c_1 r_1 \left( p_i^d(t) - x_i^d(t) \right) + c_2 r_2 \left( g^d(t) - x_i^d(t) \right) \quad (2)$\n\n$x_i^d(t+1) = x_i^d(t) + v_i^d(t+1) \quad (3)$\n\nwhere $x_i^d(t)$ indicates the position of particle $i$ at time $t$ and $v_i^d(t)$ represents the velocity of particle $i$ at time $t$; here $p_i^d(t)$ is the personal best position of particle $i$, $g^d(t)$ is the best position found by the whole swarm, $w$ is the inertia weight, $c_1$ and $c_2$ are acceleration coefficients, and $r_1$ and $r_2$ are random numbers in $[0, 1]$. Similarly, $d$ indicates the dimension along which the vectors $x_i$ and $v_i$ are updated at time $t$. Note that $i = 1, \ldots, N$ and $d = 1, \ldots, D$, where $N$ represents the number of swarm particles and $D$ indicates the dimension of the particles. In this study, we have 15 input weight parameters, 10 hidden neuron parameters, and 5 output weight parameters. Figure 2 presents the procedure of the PSO algorithm in this work.\n\nThe PSO procedure steps are summarized as follows (see Figure 2). The PSO starts with $N$ initial solutions ($N$ particles), which are always generated randomly. In this work, the initial values represent the parameters of the RBFNNs, which include the input weights, the output weights, and the parameters of the activation function. In each iteration, the solutions develop over time to create a new generation by using equations (2) and (3). Finally, the algorithm memorizes the optimal solution based on the MSE.\n\n##### 2.4. Tukey Method\n\nTukey’s boxplot is a well-known method for detecting outliers. It reveals the spread, skewness, and location of the data. This method works well for the detection of outlier values when the data are symmetric. It depends on the upper fence $Q_3 + 1.5 \, \mathrm{IQR}$ and the lower fence $Q_1 - 1.5 \, \mathrm{IQR}$, where $\mathrm{IQR}$ is the interquartile range, the difference between the third quartile $Q_3$ and the first quartile $Q_1$, i.e., $\mathrm{IQR} = Q_3 - Q_1$.
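In code, these fences can be computed directly from the sample quartiles; the following minimal sketch (with synthetic data standing in for the Tadawul return series) labels each observation as an outlier or a nonoutlier:\n\n```python\nimport numpy as np\n\ndef tukey_labels(returns, k=1.5):\n    # Label each observation: 1 = outside the fences (outlier), 0 = inside (nonoutlier).\n    q1, q3 = np.percentile(returns, [25, 75])\n    iqr = q3 - q1                              # interquartile range\n    lower, upper = q1 - k * iqr, q3 + k * iqr  # the fences defined above (k = 1.5)\n    labels = ((returns < lower) | (returns > upper)).astype(int)\n    return labels, (lower, upper)\n\n# Synthetic stand-in for the 2026 daily returns used in the study.\nr = np.random.default_rng(2).normal(0.0, 0.008, size=2026)\nlabels, fences = tukey_labels(r)\n```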
In this study, outliers are defined as the observations that lie outside the interval $[Q_1 - 1.5 \, \mathrm{IQR}, \ Q_3 + 1.5 \, \mathrm{IQR}]$; that is, observations that fall more than 1.5 times the interquartile range from the first and third quartiles are regarded as suspected outliers. The constants of the fences, which are both fixed as 1.5, are considered too liberal for detecting outliers in random normally distributed data [5, 41]. In this study, the Tukey method is used to identify outliers and nonoutliers in the stock return data. After this categorization, RBFNN in conjunction with PSO is used to develop the mathematical model for learning the features of the two categories. Once the training is complete, the proposed model is used to predict outliers.\n\n#### 3. Results and Discussion\n\nThe experiments are performed using MATLAB 2019 and R software running on a 64-bit OS (Windows 8 platform) equipped with a 2.2 GHz Intel Core i7 processor and 8 GB RAM.\n\nThe proposed model is constructed across two stages (see Figure 3). In the first stage, we used the Tukey method to determine the outlier values of the stock return dataset using R software. We found that the outlier values are those that are located outside the interval (−0.01783, 0.01908), as discussed in Section 3.2. After that, we constructed the RBFNN model to detect outliers that are outside the interval (−0.01783, 0.01908) in the daily return of the Saudi stock market (Tadawul). Based on the Engle and Granger Causality test, we selected inflation rate, repo rate, and oil prices as input values to the RBFNN model over the same period as the stock returns. The optimal values of the RBFNN parameters are obtained using the PSO algorithm.\n\n##### 3.1. Selecting Variables\n\nWe have selected three independent variables (inflation rate, repo rate, and oil prices) based on correlation and causality tests. These macroeconomic variables have a strong effect on stock returns.\n\n###### 3.1.1. Correlation\n\nIn this section, we carefully selected the independent variables from among a number of other variables, which were eliminated based on certain tests. First, as seen in Table 2, we removed variables because of multicollinearity among the independent variables. Perfect multicollinearity is an exact (nonstochastic) linear relationship between two or more independent variables; its absence is generally referred to as no multicollinearity. We removed some of the independent variables on account of their strong relation with other independent variables. Table 2 gives the correlations between the independent and the dependent variables.\n\n###### 3.1.2. Engle and Granger Causality Test\n\nThe test of Engle and Granger represents causal relationships through cointegration. It generates residuals (errors) that depend on a static regression. An Augmented Dickey–Fuller test or another similar test can then be performed on the residuals to check whether unit roots are present. If the time series are cointegrated, the residuals will be almost stationary. The associated error correction model takes the form\n\n$\Delta y_t = \alpha + \beta \, \Delta x_t + \gamma \, \mathrm{ECT}_{t-1} + \varepsilon_t$\n\nwhere $y_t$ is the dependent variable, $x_t$ is the independent variable set, and $\mathrm{ECT}$ indicates the error correction term. The parameters are $\alpha$, $\beta$, and $\gamma$.\n\nThe null hypothesis of the Engle–Granger test ($H_0$: there is no cointegration) is rejected if the test statistic is negative and exceeds 1.96 in absolute value. Rejecting the null hypothesis suggests that the dependent variable is caused by the independent variables. In this study, the Engle and Granger test for the dependent and independent variables is reported in Table 3.
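A two-step Engle–Granger check of this kind can be reproduced with standard tools; the sketch below uses the coint function from statsmodels, with synthetic placeholder series standing in for the actual LSCS and Loil data:\n\n```python\nimport numpy as np\nfrom statsmodels.tsa.stattools import coint\n\nrng = np.random.default_rng(3)\nloil = np.cumsum(rng.normal(size=500))     # nonstationary I(1) regressor (placeholder)\nlscs = 0.8 * loil + rng.normal(size=500)   # series cointegrated with loil (placeholder)\n\n# Null hypothesis: no cointegration; reject at the 5% level if p_value < 0.05.\nt_stat, p_value, crit_values = coint(lscs, loil)\nprint(t_stat, p_value)\n```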
The findings indicate that, at the 5 percent significance level, the independent variables are cointegrated with the dependent variable. This outcome suggests that the dependent variable is caused by the independent variables. Therefore, we have significant evidence to include the independent variables (inflation rate, repo rate, and oil price) in our study.\n\n##### 3.2. Outlier Detection\n\nReferring to Figure 3, this study has two stages. In the first stage, the outlier values of the return data are obtained using the Tukey method, based on the evaluation of the upper and lower fences. As a result, we found that the upper fence and the lower fence values are 0.01908 and −0.01783, respectively. Therefore, any value outside this interval is an outlier value. In this study, we have labeled the two classes as outlier = 1 and nonoutlier = 0. Note that the procedures of this study are illustrated in Figure 4, where the return data have been sorted from smallest to largest. That is why the outliers appear in the tails: these values lie outside the two red bounding lines.\n\nThe current study examines the closing price data of Tadawul. It was chosen for several reasons: emerging markets have an interesting historical experience of stock market volatility. The Saudi market is an example of significant volatility due to information imbalance, random trading, and unprofessional financial analysis. In addition, investors from countries other than the Gulf Cooperation Council (GCC) countries were long not allowed to invest in Saudi stocks. Tadawul is the largest stock market in the Middle East. From 2011 to 2019, Tadawul experienced numerous fluctuations. For example, the general index decreased to 6417.7 points in 2011, whereas it increased to 8535 points in 2013. The trading mechanism has been changed from SAXESS to X-Stream INET by market management, and an interactive multiuser system (IFSAH) has been developed to enhance the market’s efficiency and effectiveness. One of the problems faced by different economies in the world is the fluctuation of stock prices. Domestic and foreign economies influence the Saudi stock market, and external financial crises are transferred to the domestic markets. One such incident happened when the global financial crisis hit the Saudi stock market in 2008.\n\n##### 3.3. RBFNN Model\n\nIn the second stage (see Figure 3), we defined the RBFNN architecture, whose classification performance depends on the data generated by the Tukey method. For the stock return dataset used in this study, we have three input neurons, five hidden neurons, and one output neuron. Note that the input neurons accept the three variables (Inf., Repo, and oil prices), whereas the output neuron takes one of two values, i.e., either 0 or 1. By using the PSO algorithm, we have successfully designed an RBFNN model that finds the outlier values of the return data based on the input values (macroeconomic variables). The RBFNN model is trained with the PSO algorithm for 20 trials of 1500 iterations each, where each trial uses a population size of 80. The computed MSE value of the best model is less than 0.05, as evident from Figure 5. Therefore, we can successfully use this model to predict future unknown data; it is an AI model trained with the PSO-RBFNN algorithm for the classification of the return data of the Tadawul market.\n\n#### 4. Conclusions\n\nThe proposed model is sufficiently powerful to help optimize business processes for the economic development of a country.
The main purpose of this study was to develop an ANN model to detect and classify outlier values in the daily return of the Saudi stock market (Tadawul). We selected the monthly inflation rate, repo rate, and oil prices as the independent variables and the daily return data as the dependent variable, covering October 2011 to December 2019. We confirmed that these parameters are strongly correlated, as demonstrated by the Engle and Granger causality test. The outliers and nonoutliers in the stock return data were first categorized using the Tukey method. We observed that the values outside the range [−0.01783, 0.01908] are outliers. We labeled the data as 1 (outlier) and 0 (nonoutlier). As a result, the proposed model is capable of detecting the outlier values of the stock return data based on the inflation rate, repo rate, and oil prices.\n\nWe trained an RBFNN classifier that effectively learned the patterns for the detection and classification of outlier values based on the independent variables, without referring to the stock return data (the dependent variable). Note that we have used the PSO algorithm successfully to construct the optimal RBFNN model, evaluated with the MSE measure, which is used to test the effectiveness of the model. The proposed model can likewise be used to detect and classify outlier values for any other stock return data.\n\n#### Data Availability\n\nThe data used to support the study can be made available upon request from the first author.\n\n#### Conflicts of Interest\n\nThe authors declare that they have no conflicts of interest." ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.8849682,"math_prob":0.89068633,"size":32035,"snap":"2023-40-2023-50","text_gpt3_token_len":7290,"char_repetition_ratio":0.13942742,"word_repetition_ratio":0.052070767,"special_character_ratio":0.22778212,"punctuation_ratio":0.1797013,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9566229,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-10-03T03:44:20Z\",\"WARC-Record-ID\":\"<urn:uuid:2101f396-1c65-431e-b424-ef0b1ba9058c>\",\"Content-Length\":\"362955\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:e1028b0a-705e-41bf-9490-417b16baab90>\",\"WARC-Concurrent-To\":\"<urn:uuid:92607748-1993-4b69-a9aa-f3d1c1aae07a>\",\"WARC-IP-Address\":\"104.18.40.243\",\"WARC-Target-URI\":\"https://www.hindawi.com/journals/jmath/2021/5593705/\",\"WARC-Payload-Digest\":\"sha1:NGYJIDO4HD4BCLGYAYSIWC4HVHVN762W\",\"WARC-Block-Digest\":\"sha1:YNQRP4OXWVXM3GTKIVQXPPWJNC5W2WOH\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-40/CC-MAIN-2023-40_segments_1695233511053.67_warc_CC-MAIN-20231003024646-20231003054646-00602.warc.gz\"}"}
https://codedump.io/share/z3xpmTOnx4Xt/1/efficient-way-to-find-index-of-interval
[ "", null, "uqtredd1 - 2 years ago 105\nPython Question\n\n# Efficient way to find index of interval\n\nI'm writing a spline class in Python. The method to calculate the the spline interpolated value requires the index of the closest x data points. Currently a simplified version looks like this:\n\n``````def evaluate(x):\nfor ii in range(N): # N = len(x_data)\nif x_data[ii] <= x <= x_data[ii+1]:\nreturn calc(x,ii)\n``````\n\nSo it iterates through the list of\n`x_data`\npoints until it finds the lower index\n`ii`\nof interval in which\n`x`\nlies and uses that in the function\n`calc`\n, which performs the spline interpolation. While functional, it seems like this would be inefficient for large\n`x_data`\narrays if\n`x`\nis close to the end of the data set. Is there a more efficient or elegant way to perform the same functionality, which does not require every interval to be checked iteratively?\n\nNote:\n`x_data`\nmay be assumed to be sorted so\n`x_data[ii] < x_data[ii+1]`\n, but is not necessarily equally spaced.", null, "Joran Beasley\n\nthis is exactly what bisect is for https://docs.python.org/2/library/bisect.html\n\n``````from bisect import bisect\nindex = bisect(x_data,x)\n#I dont think you actually need the value of the 2 closest but if you do here it is\npoint_less = x_data[index-1] # note this will break if its index 0 so you probably want a special case for that\npoint_more = x_data[index]\n\nclosest_value = min([point_less,point_more],key=lambda y:abs(x-y))\n``````\n\nalternatively you should use binary search(in fact im pretty sure thats what bisect uses under the hood) .... it should be worst case `O(log n)` (assuming your input array is already sorted)\n\nRecommended from our users: Dynamic Network Monitoring from WhatsUp Gold from IPSwitch. Free Download" ]
[ null, "https://www.gravatar.com/avatar/fe51d1d1f00e201fe4761774eb53b369", null, "https://www.gravatar.com/avatar/a3a82559ae1ee7ec304fdbae095f063c", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.7257839,"math_prob":0.92719346,"size":971,"snap":"2019-13-2019-22","text_gpt3_token_len":254,"char_repetition_ratio":0.12926577,"word_repetition_ratio":0.0,"special_character_ratio":0.25952625,"punctuation_ratio":0.09793814,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.99721,"pos_list":[0,1,2,3,4],"im_url_duplicate_count":[null,null,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2019-03-26T00:25:34Z\",\"WARC-Record-ID\":\"<urn:uuid:597e1538-26ab-42a3-959b-72e982f18636>\",\"Content-Length\":\"46101\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:73e5a8e9-1893-4792-97e6-8abeb521dc99>\",\"WARC-Concurrent-To\":\"<urn:uuid:e171de9d-f87f-40e8-80d9-273cb323b481>\",\"WARC-IP-Address\":\"104.18.61.77\",\"WARC-Target-URI\":\"https://codedump.io/share/z3xpmTOnx4Xt/1/efficient-way-to-find-index-of-interval\",\"WARC-Payload-Digest\":\"sha1:7A3NH5TDCCRR6YHSSKIREJVRBSGHVB6S\",\"WARC-Block-Digest\":\"sha1:OFNHPETOYNSDZIVIBFX5MNQWVYDZ2M63\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2019/CC-MAIN-2019-13/CC-MAIN-2019-13_segments_1552912204736.6_warc_CC-MAIN-20190325234449-20190326020449-00370.warc.gz\"}"}
https://mathoverflow.net/questions/664/can-one-make-erd%C5%91ss-ramsey-lower-bound-explicit
[ "# Can one make Erdős's Ramsey lower bound explicit?\n\nErdős's 1947 probabilistic trick provided a lower exponential bound for the Ramsey number $R(k)$. Is it possible to explicitly construct 2-colourings on exponentially sized graphs without large monochromatic subgraphs?\n\nThat is, can we explicitly construct (edge) 2-colourings on graphs of size $c^k$, for some $c>0$, with no monochromatic complete subgraph of size $k$?\n\n• you mean c>1... – Gil Kalai Nov 7 '09 at 16:17\n• I somehow feel that Erdõs's idea is so fundamental and so generally applicable that the word \"trick\" doesn't do it justice. – gowers Nov 10 '15 at 21:41\n\nI believe the answer is \"no\"; the best known constructions only give no clique or independent set of size about $2^\\sqrt{n}$ in a graph with $2^n$ vertices. Bill Gasarch has a page on the subject here, although I don't know how frequently it updates.\n\nFinding explicit constructions for Ramsey graphs is a central problem in extremal combinatorics. Indeed, computational complexity gives a way to formalize this problem. Asking for a graph which can be constructed in polynomial time is a fairly good definition although sometimes the definition is taken as having a log-space construction.\n\nUntil quite recently the best method for explicit construction was based on extremal combinatorics. The vertices of the graphs were certain sets (say k-subset of an n element sets) and the edges represented pairs of sets with presecibed intersection. The best result was by Frankl and Wilson and it gives a graph with n vertices whose edges are colored by 2 colors with no monochromatic clique of size $\\exp (\\sqrt{(\\log n))}$. (I think this translates to $k^{\\log k}$ in the way the question was formulated here.) Using sum-products theorems Barak Rao Shaltiel and Wigderson improved the bound to $\\exp (\\log n^{o(1)})$.\n\nPayley graphs are conjectured be explicit examples for the correct behavior. But proving it is much beyond reach.\n\nUpdate(Nov 11, 2015): Gil Cohen found an explicit construction with no monochromatic cliques of size $2^{(\\log \\log n)^L}$. An independent construction which applies also to the bipartite case was achieved by Eshan Chattopadhyay and David Zuckerman\n\nI also believe the answer is \"no\". Another reference is this paper, which treats off-diagonal Ramsey numbers (e.g. graphs with no clique of size k and no anti-clique of size l).\n\nAs was mentioned in the previous answers, the answer is no. Or more accurately I'd say that the answer is currently no, but possibly yes.\n\nAlso, consider the related question of constructing a bipartite graph with parts of size $2^n$, which contains no $K_{k,k}$ and whose complement contains no $K_{k,k}$ where $k = O(n)$. Such an explicit construction will have as far as I can tell huge impact on derandomization of randomized algorithms, among other topics in theoretical computer science. See e.g. this paper, where such an explicit construction is given for $k = 2^{n^{o(1)}}$.\n\nYou might also be interested in the following accompanying paper (seems like I cannot post it, being a new user; you can google it though, its title is \"Pseudorandomness and Combinatorial Constructions\") to Luca Trvisan's talk at ICM '06. 
This may contain more connections between explicit constructions of combinatorial objects and applications in theoretical computer science.\n\nA question I have here is: what do you mean by \"explicit\"?\n\nPersonally, I like the definition that a construction is explicit if it can be constructed in polynomial time (due to Alon? Wigderson??). Given that we are talking about exponentials in n here, this gets (slightly) complicated, but we'll say the controlling parameter here is $N=2^n$, the rough order of the number of vertices in a possible Ramsey graph.\n\nOne conjecture I have is that the set of Paley graphs on p vertices, where p ranges over all primes $\equiv 1 \pmod 4$ between $2^{(n/2)}$ and $2^{(n-1)}$, gives a lower bound on $R(n)$. This is NOT an explicit set, by my definition above. ::::grin:::::\n\nIf memory serves me, I think the best result known for your original question is in a paper of Noga Alon from a few years back. You may want to check his web page as well as Gasarch's survey page mentioned before.\n\n• Gil, thanks for your comment. I like the log space condition on 'explicit constructions' as stronger(?) than P time. – Mike Nov 9 '09 at 7:08" ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.945572,"math_prob":0.94235307,"size":870,"snap":"2020-45-2020-50","text_gpt3_token_len":222,"char_repetition_ratio":0.085450344,"word_repetition_ratio":0.0,"special_character_ratio":0.25632185,"punctuation_ratio":0.13756613,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.990173,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-10-29T23:50:34Z\",\"WARC-Record-ID\":\"<urn:uuid:ba496276-124c-4240-bff9-a8701cf6c530>\",\"Content-Length\":\"165107\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:8a949927-8229-4ecf-861d-c955eb893a99>\",\"WARC-Concurrent-To\":\"<urn:uuid:ff30777e-b0c6-45dc-a101-a6e43013bc29>\",\"WARC-IP-Address\":\"151.101.1.69\",\"WARC-Target-URI\":\"https://mathoverflow.net/questions/664/can-one-make-erd%C5%91ss-ramsey-lower-bound-explicit\",\"WARC-Payload-Digest\":\"sha1:RKLYIWE5V3B5CHSZC5HB23HLJCEPAIMD\",\"WARC-Block-Digest\":\"sha1:TQQH45NA6DNGICPZI5LPO3BL3W3BMU76\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-45/CC-MAIN-2020-45_segments_1603107905965.68_warc_CC-MAIN-20201029214439-20201030004439-00059.warc.gz\"}"}
https://feet-to-cm.appspot.com/3390-feet-to-cm.html
[ "Feet To Cm\n\n# 3390 ft to cm3390 Feet to Centimeters\n\nft\n=\ncm\n\n## How to convert 3390 feet to centimeters?\n\n 3390 ft * 30.48 cm = 103327.2 cm 1 ft\nA common question is How many foot in 3390 centimeter? And the answer is 111.220472441 ft in 3390 cm. Likewise the question how many centimeter in 3390 foot has the answer of 103327.2 cm in 3390 ft.\n\n## How much are 3390 feet in centimeters?\n\n3390 feet equal 103327.2 centimeters (3390ft = 103327.2cm). Converting 3390 ft to cm is easy. Simply use our calculator above, or apply the formula to change the length 3390 ft to cm.\n\n## Convert 3390 ft to common lengths\n\nUnitLengths\nNanometer1.033272e+12 nm\nMicrometer1033272000.0 µm\nMillimeter1033272.0 mm\nCentimeter103327.2 cm\nInch40680.0 in\nFoot3390.0 ft\nYard1130.0 yd\nMeter1033.272 m\nKilometer1.033272 km\nMile0.6420454545 mi\nNautical mile0.5579222462 nmi\n\n## What is 3390 feet in cm?\n\nTo convert 3390 ft to cm multiply the length in feet by 30.48. The 3390 ft in cm formula is [cm] = 3390 * 30.48. Thus, for 3390 feet in centimeter we get 103327.2 cm.\n\n## 3390 Foot Conversion Table", null, "## Alternative spelling\n\n3390 ft to cm, 3390 ft in cm, 3390 Feet to Centimeters, 3390 Feet in Centimeters, 3390 Feet to cm, 3390 Feet in cm, 3390 Foot to Centimeter, 3390 Foot in Centimeter, 3390 ft to Centimeter, 3390 ft in Centimeter, 3390 ft to Centimeters, 3390 ft in Centimeters, 3390 Feet to Centimeter, 3390 Feet in Centimeter" ]
[ null, "https://feet-to-cm.appspot.com/image/3390.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.7843073,"math_prob":0.9805235,"size":834,"snap":"2023-40-2023-50","text_gpt3_token_len":268,"char_repetition_ratio":0.28915662,"word_repetition_ratio":0.027210884,"special_character_ratio":0.39808154,"punctuation_ratio":0.15384616,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.95149934,"pos_list":[0,1,2],"im_url_duplicate_count":[null,1,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2023-09-22T13:15:29Z\",\"WARC-Record-ID\":\"<urn:uuid:3755f6aa-0405-447e-944e-205a5a36cb2f>\",\"Content-Length\":\"28226\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:be11c933-bf43-4d76-a48b-8b05c48c066a>\",\"WARC-Concurrent-To\":\"<urn:uuid:6b4be132-e29d-4f0e-a574-4b7166dacd4e>\",\"WARC-IP-Address\":\"142.251.16.153\",\"WARC-Target-URI\":\"https://feet-to-cm.appspot.com/3390-feet-to-cm.html\",\"WARC-Payload-Digest\":\"sha1:FYKJSR3RKTXV3I77WH4SNGKR7UKMWURO\",\"WARC-Block-Digest\":\"sha1:WLTLBYAUJLCEENWXSC7JDGEKHOVTNLRQ\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2023/CC-MAIN-2023-40/CC-MAIN-2023-40_segments_1695233506399.24_warc_CC-MAIN-20230922102329-20230922132329-00216.warc.gz\"}"}
https://www.yzlfxy.com/jiaocheng/php/50250.html
[ "# 使用array mutisort 实现按某字段对数据排序_PHP\n\narray_multisort 的用法\n\n\\$arr1 = array(1,9,5);\n\\$arr2 = array(6,2,4);\narray_multisort(\\$arr1,\\$arr2);\nprint_r(\\$arr1); // 得到的顺序是1,5,9\nprint_r(\\$arr2); // 得到的顺序是6,4,2\n\n\\$arr1 = array(1,9,5);\n\\$arr2 = array(6,2,4);\n\\$arr3 = array(3,7,8);\narray_multisort(\\$arr1,\\$arr2,\\$arr3);\n\narray_multisort会先按第一个数组(想像成列)排序,如果第一个数组(列)的值相同,则按第二个数组(列)排序。\n\n\\$arr1 = array(1,9,5,9);\n\\$arr2 = array(6,2,4,1);\n\\$arr3 = array(3,7,8,0);\narray_multisort(\\$arr1,\\$arr2,\\$arr3);\n\narray_multisort(\\$arr1, \\$arr2, SORT_DESC, SORT_STRING, \\$arr3);\n\nSORT_ASC - 按照上升顺序排序(默认)\nSORT_DESC - 按照下降顺序排序\n\nSORT_REGULAR - 将项目按照通常方法比较(默认)\nSORT_NUMERIC - 将项目按照数值比较\nSORT_STRING - 将项目按照字符串比较\n\n<?php\n\\$a=array(array('name'=>'张三','score'=>60),\narray('name'=>'李四','score'=>90),\narray('name'=>'王二','score'=>80)\n);\n\\$score=array();\nforeach(\\$a as \\$k => \\$v){\n\\$score[\\$k]=\\$v['score'];\n}\narray_multisort(\\$score,\\$a);\nvar_dump(\\$score);\nvar_dump(\\$a);\n?>\n\n 留言与评论(共有 0 条评论)\n 昵称: 匿名发表\n\n 验证码:", null, "" ]
[ null, "https://www.yzlfxy.com/e/ShowKey/", null ]
{"ft_lang_label":"__label__zh","ft_lang_prob":0.7402875,"math_prob":0.9975488,"size":1479,"snap":"2021-04-2021-17","text_gpt3_token_len":938,"char_repetition_ratio":0.17830509,"word_repetition_ratio":0.046511628,"special_character_ratio":0.33130494,"punctuation_ratio":0.2751938,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9857974,"pos_list":[0,1,2],"im_url_duplicate_count":[null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-04-14T04:17:46Z\",\"WARC-Record-ID\":\"<urn:uuid:73e07daf-8900-4874-a506-02461b0ab8fc>\",\"Content-Length\":\"35240\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:08874f72-4c32-45fe-9ba1-d3e9d11aad1d>\",\"WARC-Concurrent-To\":\"<urn:uuid:93a66293-02ce-4b6f-8b2f-ecaa42b047d6>\",\"WARC-IP-Address\":\"101.32.73.227\",\"WARC-Target-URI\":\"https://www.yzlfxy.com/jiaocheng/php/50250.html\",\"WARC-Payload-Digest\":\"sha1:IESOLUED7HWWLAOIT3722GMAXDUXR62G\",\"WARC-Block-Digest\":\"sha1:HLU4CRS6RBXOXZPX6TZEOK7TA4EC7QOX\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-17/CC-MAIN-2021-17_segments_1618038076819.36_warc_CC-MAIN-20210414034544-20210414064544-00282.warc.gz\"}"}
https://answers.everydaycalculation.com/simplify-fraction/2819-5400
[ "Solutions by everydaycalculation.com\n\n## Reduce 2819/5400 to lowest terms\n\n2819/5400 is already in the simplest form. It can be written as 0.522037 in decimal form (rounded to 6 decimal places).\n\n#### Steps to simplifying fractions\n\n1. Find the GCD (or HCF) of numerator and denominator\nGCD of 2819 and 5400 is 1\n2. Divide both the numerator and denominator by the GCD\n2819 ÷ 1/5400 ÷ 1\n3. Reduced fraction: 2819/5400\nTherefore, 2819/5400 simplified is 2819/5400\n\nMathStep (Works offline)", null, "Download our mobile app and learn to work with fractions in your own time:" ]
[ null, "https://answers.everydaycalculation.com/mathstep-app-icon.png", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.8187338,"math_prob":0.8190351,"size":403,"snap":"2020-34-2020-40","text_gpt3_token_len":112,"char_repetition_ratio":0.12531328,"word_repetition_ratio":0.0,"special_character_ratio":0.34491315,"punctuation_ratio":0.11111111,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9535792,"pos_list":[0,1,2],"im_url_duplicate_count":[null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-08-13T09:27:19Z\",\"WARC-Record-ID\":\"<urn:uuid:18b07f4e-9c12-4e79-80ca-4f66e22c58d1>\",\"Content-Length\":\"5974\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:a44410ef-7a0b-4a12-81b5-fa55d4470e6f>\",\"WARC-Concurrent-To\":\"<urn:uuid:514c47e0-a32e-453e-8885-5ab77afce5b8>\",\"WARC-IP-Address\":\"96.126.107.130\",\"WARC-Target-URI\":\"https://answers.everydaycalculation.com/simplify-fraction/2819-5400\",\"WARC-Payload-Digest\":\"sha1:6D2ICWYRXNCDFH6PDNIDHFZR6YIBUV4I\",\"WARC-Block-Digest\":\"sha1:PK2HPUF5N5IQNOYLJ2MBVZMT7US4JB6V\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-34/CC-MAIN-2020-34_segments_1596439738964.20_warc_CC-MAIN-20200813073451-20200813103451-00584.warc.gz\"}"}
https://www.sparrho.com/item/origin-of-the-inverse-spin-switch-effect-in-superconducting-spin-valves/8816a4/
[ "", null, "# Origin of the Inverse Spin Switch Effect in Superconducting Spin Valves\n\nResearch paper by J. Zhu, X. Cheng, C. Boone, I. N. Krivorotov\n\nIndexed on: 28 Feb '09Published on: 28 Feb '09Published in: Physics - Superconductivity\n\n#### Abstract\n\nThe resistance of a ferromagnet/superconductor/ferromagnet (F/S/F) spin valve near its superconducting transition temperature, $T_c$, depends on the state of magnetization of the F layers. This phenomenon, known as spin switch effect (SSE), manifests itself as a resistance difference between parallel ($R_P$) and antiparallel ($R_{AP}$) configurations of the F layers. Both standard ($R_{P}>R_{AP}$) and inverse ($R_{P}<R_{AP}$) SSE have been observed in different superconducting spin valve systems, but the origin of the inverse SSE was not understood. Here we report observation of a coexistence of the standard and inverse SSE in Ni$_{81}$Fe$_{19}$/Nb/Ni$_{81}$Fe$_{19}$/Ir$_{25}$Mn$_{75}$ spin valves. Our measurements reveal that the inverse SSE arises from a dissipative flow of vortices induced by stray magnetic fields from magnetostatically coupled N\\'eel domain wall pairs in the F layers.", null, "" ]
[ null, "https://pixel.quantserve.com/pixel/p-S5j449sRLqmpu.gif", null, "https://s3-eu-west-1.amazonaws.com/sparrho-seshat/generated-images/oai:arXiv.org:0903.0044.jpeg", null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.85919267,"math_prob":0.8887487,"size":989,"snap":"2020-45-2020-50","text_gpt3_token_len":250,"char_repetition_ratio":0.1106599,"word_repetition_ratio":0.0,"special_character_ratio":0.2527806,"punctuation_ratio":0.073446326,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.96658117,"pos_list":[0,1,2,3,4],"im_url_duplicate_count":[null,null,null,1,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2020-11-30T08:28:39Z\",\"WARC-Record-ID\":\"<urn:uuid:924e9f2f-43ce-4bd1-8101-41e96014aace>\",\"Content-Length\":\"85560\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:a6f046e1-3b8a-4fe8-b633-f927845d163b>\",\"WARC-Concurrent-To\":\"<urn:uuid:d2500a1c-10ff-4cac-98e5-e082ce0ea848>\",\"WARC-IP-Address\":\"54.86.205.222\",\"WARC-Target-URI\":\"https://www.sparrho.com/item/origin-of-the-inverse-spin-switch-effect-in-superconducting-spin-valves/8816a4/\",\"WARC-Payload-Digest\":\"sha1:GG5NDPW6UPPSP24K4SSXBXDC62LGRXHQ\",\"WARC-Block-Digest\":\"sha1:4OKSJYZ7RJRSZW3DG53UTGLPLB2LKXCJ\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2020/CC-MAIN-2020-50/CC-MAIN-2020-50_segments_1606141211510.56_warc_CC-MAIN-20201130065516-20201130095516-00332.warc.gz\"}"}
https://math.stackexchange.com/questions/2192449/euclidean-domain-mathbbz-sqrtd?noredirect=1
[ "# Euclidean domain $\\mathbb{Z}[\\sqrt{d}]$ [duplicate]\n\nI am trying to generalized, for which integral values of $d$,\n\n$\\mathbb{Z}[\\sqrt{d}] = \\{ a + b\\sqrt{d} \\vert a,b\\in\\mathbb{Z}\\}$ is an Euclidean domain?\n\nI am interested specially in positive integral values of $d$.\n\n• See also here for $d<0$. For norm-Euclidean see here. – Dietrich Burde Mar 18 '17 at 21:30\n• Sorry, I was unaware of the earlier question which is essentially the same as this one at the time I posted my \"answer,\" which really just elaborates one detail of another answer. – Robert Soupe Mar 19 '17 at 0:42\n\nAs it turns out, that's actually a highly non-trivial question. I presume you're aware that every Euclidean domain is a UFD. It is also useful, however, to recall the definition of an integrally closed domain. That is, an integral domain $R$ with field of fractions $K$ is considered integrally closed if for any monic polynomial $p(x) = x^n + a_{n-1}x^{n-1} + \\ldots + a_0\\in R[x]$, if $p$ has a root $\\alpha\\in K$, then $\\alpha\\in R$. It can be shown that any UFD is an integrally closed domain, and that $\\mathbb{Z}[\\sqrt{d}]$ will never be integrally closed for $d$ not square-free.\n\nSecondly, if $d$ is square-free, then since not being integrally closed is an obstruction to being a UFD (which is a necessary for being an ED), we will often extend $\\mathbb{Z}[\\sqrt{d}]$ into the subring of its fraction field $\\mathbb{Q}(\\sqrt{d})$ which contains precisely the solutions to monic polynomials in $\\mathbb{Z}[\\sqrt{d}]$, which is in fact a ring, and we will call this ring $\\mathcal{O}_{\\mathbb{Q}(\\sqrt{d})}$. It is a theorem in algebraic number theory that for square-free nonzero integers $d$, $$\\mathcal{O}_{\\mathbb{Q}(\\sqrt{d})} = \\begin{cases} \\mathbb{Z}[\\sqrt{d}] & \\mathrm{if\\ } d\\equiv 2,3\\mod 4 \\\\ \\mathbb{Z}\\left[\\frac{1+\\sqrt{d}}{2}\\right] & \\mathrm{if\\ } d\\equiv 1\\mod 4 \\end{cases}$$ which tells us that for $\\mathbb{Z}[\\sqrt{d}]$ to be a Euclidean domain, we must have that $d\\equiv 2,3\\mod 4$.\n\nHere is where we arrive at our next complication: algebraic number theory provides us with a natural norm $N(a+b\\sqrt{d}) = a^2 - db^2$ which is multiplicative and takes elements of $\\mathcal{O}_{\\mathbb{Q}(\\sqrt{d})}$ to integers, as can be checked. A ring which is Euclidean under this norm is said to be norm-Euclidean. There do exist rings which are Euclidean but not norm-Euclidean, such as $$\\mathbb{Z}\\left[\\frac{1+\\sqrt{69}}{2}\\right]$$ but to my knowledge, these types of rings are not fully understood. We do, however, fully understand which quadratic rings are norm-Euclidean. In fact $\\mathcal{O}_{\\mathbb{Q}(\\sqrt{d})}$ is norm-Euclidean if and only if $$d = -11, -7, -3, -2, -1, 2, 3, 5, 6, 7, 11, 13, 17, 19, 21, 29, 33, 37, 41, 57, \\mathrm{\\ or\\ }73$$ and so, $\\mathbb{Z}[\\sqrt{d}]$ is norm-Euclidean if and only if $$d = -2, -1, 2, 3, 6, 7, 11, \\mathrm{\\ or\\ }19.$$ I actually don't know if there are any Euclidean domains that are not norm-Euclidean of the form $\\mathbb{Z}[\\sqrt{d}]$. My suspicion is that there are not, though it is really way beyond my abilities to prove this.\n\n• This question math.stackexchange.com/questions/1148364/… delves into the case $d = 14$. In short, it's Euclidean because it has universal side divisors, e.g., $4 + \\sqrt{14}$, but it's not norm-Euclidean because the norm-Euclidean algorithm can fail to yield a result for $\\gcd(a, b)$ if neither $a$ nor $b$ is a unit nor a universal side divisor. 
– Robert Soupe Mar 18 '17 at 19:16\n• O my - every posting to MSE containing the word universal side divisor is nonsense. This includes the one concerning d=14. – franz lemmermeyer Mar 19 '17 at 8:22\n• If one of the universal side divisorists would write down a proof that a ring is Euclidean if it has an element of norm 2 (that's what you seem to claim), it would become clear pretty quickly what I mean. – franz lemmermeyer Mar 19 '17 at 20:13\n• Your claim above, that the ring with d = 14 is Euclidean because $4 + \sqrt{14}$ is a universal side divisor, is false. Please prove me wrong by supplying the details of your proof. Or at least state the Theorem 3 you mention. Is there anything in Wei's article that isn't in math.buffalo.edu/~dhemmer/619F11/WilsonPaper.pdf or in Motzkin's original article? And please refrain from making assumptions concerning my behaviour towards students or professors - I find that somewhat rude. – franz lemmermeyer Mar 20 '17 at 17:46\n• > Theorem 3. The [ring] $R$ defined above [$\mathcal O_{\mathbb Q(\sqrt{-19})}$] has no universal side divisors, hence is not a Euclidean Domain. – Robert Soupe Mar 20 '17 at 18:23\n\nI just want to fill in a detail that was hinted at in Monstrous Moonshine's answer, which is too long for a comment.\n\nIf $d \equiv 1 \pmod 4$, then $\mathbb Z[\sqrt d]$ is certainly not a Euclidean domain. It suffices to try $\gcd(2, 1 + \sqrt d)$. Clearly both numbers are of even norm, and the latter has a norm with absolute value larger than the former, which suggests the former ought to be a divisor of the latter.\n\nBut $$\frac{1 + \sqrt d}{2} \not\in \mathbb Z[\sqrt d].$$ Worse, $1 + \sqrt d$ is probably irreducible, which would mean this domain does not have unique factorization.\n\nHowever, $$N\left(\frac{1 + \sqrt d}{2}\right) = \left(\frac{1}{2}\right)^2 - \left(\frac{\sqrt d}{2}\right)^2 = \frac{1}{4} - \frac{d}{4} = \frac{1 - d}{4},$$ which is an integer because $d \equiv 1 \pmod 4$, so this number that does not look like an algebraic integer is in fact an algebraic integer.\n\nSo $\mathbb Z[\sqrt d]$ is not a \"complete\" domain of algebraic integers. \"Lacks integral closure\" is the technical term, I believe. If we broaden our view to this \"larger\" domain, which we can notate $\mathcal O_{\mathbb Q(\sqrt d)}$, then to solve $\gcd(2, 1 + \sqrt d)$ with $1 + \sqrt d = 2q + r$ so that $-4 < N(r) < 4$, we simply set $$q = \frac{1 + \sqrt d}{2}$$ and $r = 0$. Of course this does not guarantee that every pair of numbers in $\mathcal O_{\mathbb Q(\sqrt d)}$ can have its GCD resolved by the Euclidean algorithm with some Euclidean function, let alone the norm function specifically.\n\nA concrete example: $\mathbb Z[\sqrt{21}]$. Then 2 has a norm of 4, and $1 + \sqrt{21}$ has a norm of $-20$, which in absolute value is greater than 4. We see that $$\frac{1 + \sqrt{21}}{2}$$ is an algebraic integer having a minimal polynomial of $x^2 - x - 5$ and a norm of $-5$, and that's clearly a divisor of $-20$." ]
[ null ]
{"ft_lang_label":"__label__en","ft_lang_prob":0.87147075,"math_prob":0.9993142,"size":4762,"snap":"2019-51-2020-05","text_gpt3_token_len":1531,"char_repetition_ratio":0.15699874,"word_repetition_ratio":0.08796895,"special_character_ratio":0.32864344,"punctuation_ratio":0.109126985,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9999174,"pos_list":[0],"im_url_duplicate_count":[null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2019-12-06T03:19:28Z\",\"WARC-Record-ID\":\"<urn:uuid:a0ca1de1-6137-41d0-b5c2-303d7d850a29>\",\"Content-Length\":\"139852\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:c3f9f508-da1f-4be6-80fc-52c59f5dbe4a>\",\"WARC-Concurrent-To\":\"<urn:uuid:1bfd828d-dca2-41b6-bacc-ecc174290aea>\",\"WARC-IP-Address\":\"151.101.193.69\",\"WARC-Target-URI\":\"https://math.stackexchange.com/questions/2192449/euclidean-domain-mathbbz-sqrtd?noredirect=1\",\"WARC-Payload-Digest\":\"sha1:CGVHFKDRMBP4QCJQ3IOIXAJO6OVSF276\",\"WARC-Block-Digest\":\"sha1:HAJKT665OYQB7A7DPMGOIMQPAKNZ6ZPH\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2019/CC-MAIN-2019-51/CC-MAIN-2019-51_segments_1575540484477.5_warc_CC-MAIN-20191206023204-20191206051204-00074.warc.gz\"}"}
https://cn.comsol.com/blogs/coupling-heat-transfer-subsurface-porous-media-flow/
[ "", null, "", null, "# 传热与地下多孔介质流的耦合仿真\n\n2014年 4月 24日\n\n### 地下水换热系统中的方程\n\n(1)\n\n(\\rho C_p)_{eq} \\frac{\\partial T}{\\partial t} + \\rho C_p {\\bf u } \\cdot \\nabla T = \\nabla \\cdot (k_{eq} \\nabla T ) + Q + Q_{geo}\n\n(2)\n\n(\\rho C_p )_{eq} = \\sum_{i} ( \\theta_{pi}\\rho_{pi}C_{p,pi})+(1-\\sum_{i}\\theta_{pi})\\rho C_p\n\n(3)\n\nk_{eq}=\\sum_{i} \\theta_{pi} k_{pi} + ( 1-\\sum_{i} \\theta_{pi} ) \\rho C_p\n\n(4)\n\n{\\mathbf u} = -\\frac{\\kappa}{\\mu} \\nabla p\n\n(5)\n\n\\frac{\\partial}{\\partial t} (\\rho \\epsilon_p) + \\nabla \\cdot ( \\rho {\\bf u} ) = Q_m\n\n### 地下水换热应用的 COMSOL 模型:地热对井回灌", null, "", null, "", null, "", null, "", null, "", null, "### 延伸阅读\n\n#### 评论 (4)\n\n##### 留言", null, "##### chunhu zhao\n2017-12-28", null, "##### Phillip Oberdorfer\n2017-12-28\n\nhttp://cn.comsol.com/model/geothermal-doublet-29751", null, "##### 春伟 周\n2019-06-17", null, "2021-02-23" ]
[ null, "https://cdn.comsol.com/company/logo/comsol-logo-130x20.png", null, "https://cdn.comsol.com/wordpress/sites/2/2018/11/COMSOL_Blog_Header_Fluid.png", null, "https://cdn.comsol.com/wordpress/2014/04/Hydrothermal-doublet-system.png", null, "https://cdn.comsol.com/wordpress/2014/04/Hydrothermal-doublet-system-mesh.jpg", null, "https://cdn.comsol.com/wordpress/2014/04/Hydrothermal-doublet-system-after-heat-production.png", null, "https://cdn.comsol.com/wordpress/2014/04/Well-production-temperature.gif", null, "https://cdn.comsol.com/wordpress/2014/04/Single-borehole-approach-results.png", null, "https://cdn.comsol.com/wordpress/2014/04/Single-borehole-approach-results-after-heat-production-and-groundwater-flow.png", null, "https://www.gravatar.com/avatar/485b5d110c013717ac72015b2d2819be", null, "https://www.gravatar.com/avatar/2ebc9663d3371aa98d51651f3a98dcf9", null, "https://www.gravatar.com/avatar/40f6979bb18063171c41c7a1eea0eb94", null, "https://www.gravatar.com/avatar/1b178f61df9abaf2d0d6b531be0352e2", null ]
{"ft_lang_label":"__label__zh","ft_lang_prob":0.9739835,"math_prob":0.99325705,"size":3206,"snap":"2021-31-2021-39","text_gpt3_token_len":3056,"char_repetition_ratio":0.066208616,"word_repetition_ratio":0.0,"special_character_ratio":0.18402994,"punctuation_ratio":0.022038568,"nsfw_num_words":0,"has_unicode_error":false,"math_prob_llama3":0.9993625,"pos_list":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24],"im_url_duplicate_count":[null,null,null,3,null,3,null,3,null,3,null,3,null,3,null,3,null,null,null,null,null,null,null,null,null],"WARC_HEADER":"{\"WARC-Type\":\"response\",\"WARC-Date\":\"2021-09-24T11:11:45Z\",\"WARC-Record-ID\":\"<urn:uuid:7ec52f03-94ed-4462-bd03-0cc89c46d123>\",\"Content-Length\":\"113502\",\"Content-Type\":\"application/http; msgtype=response\",\"WARC-Warcinfo-ID\":\"<urn:uuid:bebcc40c-9db1-4459-bd0b-098d26f4bb92>\",\"WARC-Concurrent-To\":\"<urn:uuid:dba643b1-76b1-4b12-94e9-43f82ab34717>\",\"WARC-IP-Address\":\"4.31.158.132\",\"WARC-Target-URI\":\"https://cn.comsol.com/blogs/coupling-heat-transfer-subsurface-porous-media-flow/\",\"WARC-Payload-Digest\":\"sha1:VB56HY33D2DVUHKJA4WYKLUCBHEKS3R7\",\"WARC-Block-Digest\":\"sha1:VYVR6TW3MT7LI3WWCRSNRXINRUZK5HQY\",\"WARC-Identified-Payload-Type\":\"text/html\",\"warc_filename\":\"/cc_download/warc_2021/CC-MAIN-2021-39/CC-MAIN-2021-39_segments_1631780057524.58_warc_CC-MAIN-20210924110455-20210924140455-00616.warc.gz\"}"}