/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */
public:
    /** Construct an empty list (head is NULL). */
    LinkedList() : head(NULL) {}
    ~LinkedList();

    /** Append element to the end of the list. */
    void append(int number);

    /** Insert element "num" at 1-based position "pos".
     *  If pos == 1, "num" becomes the first element.
     *  Returns false if pos < 1 or pos > listSize, true otherwise. */
    bool insert(int pos, int num);

    /** The number of elements in the list. */
    int getSize();

    /** Search for "num"; returns its 1-based position, or 0 if absent. */
    int search(int num);

    /** Remove the element at 1-based position "pos"; returns false on failure. */
    bool remove(int pos);

    /** Print out all elements in the linked list. */
    void display();
};
boolLinkedList::insert(int pos, int num){ int listSize = 1; int currPos = 1; Node* currNode = head; while (currNode) { currNode = currNode->getNext(); listSize++; } //cout << listSize << endl; if (pos<1 || pos>listSize - 1) { returnfalse; } else { Node* prevNode = NULL; Node* currNode = head; int currPos = 1; while (currNode && currPos != pos) { prevNode = currNode; currNode = currNode->getNext(); currPos++; } Node* newNode = new Node; newNode->setData(num); if (pos == 1) { newNode->setNext(head); head = newNode; } else { prevNode->setNext(newNode); newNode->setNext(currNode); } returntrue; } }
intLinkedList::getSize(){ int listSize = 1; int currPos = 1; Node* currNode = head; while (currNode) { currNode = currNode->getNext(); listSize++;//final result is larger than the real size } return listSize--; } /*Search element "num" from list. If exits, return the position; else return 0;*/ intLinkedList::search(int num){ Node* prevNode = NULL; Node* currNode = head; int currIndex = 1; while (currNode && currNode->getData() != num) {//check whether the num in the linkedlist or not prevNode = currNode; currNode = currNode->getNext(); currIndex++; } if (currNode != NULL) { if (currNode->getData() == num) { return currIndex; } else { return0; } } else { return0; } } /*Remove an element at position "pos". If fail to remove, return false;*/ boolLinkedList::remove(int pos){ if (pos < 0) returnfalse; else { int currIndex = 1; Node* prevNode = NULL; Node* currNode = head; while (currNode && pos != currIndex) { prevNode = currNode; currNode = currNode->getNext(); currIndex++; } if (currNode) {//check whether currNode is "null" or not,if it is "null",that is prove the postion is out of range. if (prevNode) { prevNode->setNext(currNode->getNext()); delete currNode; } else { head = currNode->getNext(); delete currNode; } returntrue; } else { returnfalse; } } } /*Print out all elements in the linked list*/ voidLinkedList::display(){ //int listSize = 1; //int currPos = 1; Node* currNode = head; while (currNode) { cout << currNode->getData() << "->"; currNode = currNode->getNext(); //listSize++; } cout << endl; }
// Fragment of the test driver: searches for several values, reports their
// 1-based positions (0 means "not found"), then frees the heap-allocated list.
int pos1 = list->search(2); cout << "The position of value 2 is:" << pos1 << endl; int pos2 = list->search(4); cout << "The position of value 4 is:" << pos2 << endl; int pos3 = list->search(1000); cout << "The position of value 1000 is:" << pos3 << endl; delete list; return0; }
// LinkedList::append — TRUNCATED FRAGMENT: the else-branch (the walk to the
// tail node and the final setNext calls) is cut off after "int currIndex = 1;".
// Empty-list case: the new node becomes head, with its next set to NULL.
voidLinkedList::append(int number){ if (head == NULL) { Node* newNode = new Node; newNode->setData(number); //head->setNext(newNode);//why the next step cannot be this step? head = newNode; newNode->setNext(NULL); } else { Node* prevNode = NULL; Node* currNode = head; int currIndex = 1;
First of all, we check whether ==head== points anywhere; if it does not, the linked list is empty. In that case we create a ==newNode== to store the new element, make ==head== point to it, and call `setNext(NULL)` on it so the tail stays well-defined for later appends.
Secondly, since the list is not empty , our function move to the “else” part. There are many ways to do the “else” part, here I choose the way that create two node:”prevNode”, “currNode”. In the “while” loop, the condition that run out of the loop is check whether the “currNode” is empty or not. In the “while” loop, after every looping, we will update the “currNode”, then the “currNode” will point to its next position, so for the out-loop condition, when the “currNode” is NULL, the “while” loop will stop. But if there only have “currNode” then the later function will not easily implement what we want to do(because the “currNode” points to NULL), so that we need “prevNode” to store the “last” node in the list. So after the “while” loop, our “currNode” comes to the NULL, but the “prevNode” comes to the last node, which is point to the NULL(“currNode”). And then we can do append operation to let the “prevNode” point to the new element, and the new element points to the “currNode”——the NULL.
boolLinkedList::insert(int pos, int num){ int listSize = 1; int currPos = 1; Node* currNode = head; while (currNode) { currNode = currNode->getNext(); listSize++; } //cout << listSize << endl; if (pos<1 || pos>listSize - 1) { returnfalse; } else { Node* prevNode = NULL; Node* currNode = head; int currPos = 1; while (currNode && currPos != pos) { prevNode = currNode; currNode = currNode->getNext(); currPos++; } Node* newNode = new Node; newNode->setData(num); if (pos == 1) { newNode->setNext(head); head = newNode; } else { prevNode->setNext(newNode); newNode->setNext(currNode); } returntrue; } }
First, the “listSize” stores the size of the list, the size of the list is “listSize” -1, and the “listSize” will be updated in the first while loop.
Then the “if” function is to check whether the “pos” delivery into the insert() function is in in the range of the listSize. If not return false.
After check the whether the pos is available, the code goto the else part. It is the same idea of append().
3. The operation of getSize():
1 2 3 4 5 6 7 8 9 10
intLinkedList::getSize(){ int listSize = 1; int currPos = 1; Node* currNode = head; while (currNode) { currNode = currNode->getNext(); listSize++;//final result is larger than the real size } return listSize--; }
This code is a very simple function to return the size of the Linked List, it has the same principle with the first part of the insert( ) function.
boolLinkedList::remove(int pos){ if (pos < 0) returnfalse; else { int currIndex = 1; Node* prevNode = NULL; Node* currNode = head; while (currNode && pos != currIndex) { prevNode = currNode; currNode = currNode->getNext(); currIndex++; } if (currNode) {//check whether currNode is "null" or not,if it is "null",that is prove the postion is out of range. if (prevNode) { prevNode->setNext(currNode->getNext()); delete currNode; } else { head = currNode->getNext(); delete currNode; } returntrue; } else { returnfalse; } } }
The ability of this function is remove the element in the given position in the list.
First, the initial “if” condition checks whether the position passed in is valid.
Moving to the “else” part, the while loop stops when either condition fails: 1. the currNode becomes NULL (the passed position is beyond the end of the linked list), ==or== 2. the current index reaches the passed position.
After the while loop break, there is going to check what the condition make the it stop. So in the “if condition” we are going to check whether the currNode is NULL or not, if it is “null”,that proves the position is out of range, then it will return “false”. Oppositely, it will go to the “if”. For the “if” condition: if(prevNode), it is check the position is “1” or not, if it is “1”, then the while loop will not work, and the prevNode is NULL. Remove operation is easy to understand, let the prevNode points to the next node of currNode, and delete the currNode.
5. The display function
The display function is easy to understand, so I will not analysis here.
3. Stacks and Queues
3.1 Stacks
3.1.1 Stacks ADT
A ==stack== is a ==list== in which insertion and deletion take place at the same end
The end where insertions and deletions occur is called the ==”top“==.
The other end is called ==”bottom“==.
Stacks are known as ==”LIFO“== (Last In, First Out) lists.
The last element inserted will be the first to be retrieved.
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */
#ifndef ARRAYSTACK_H #define ARRAYSTACK_H
/** Fixed-capacity stack of doubles backed by a heap-allocated array. */
class ArrayStack {
private:
    int top;        // index of the current top element; -1 when empty
    int maxSize;    // capacity of the backing array
    double* values; // backing storage, allocated by the constructor
public:
    ArrayStack(int size);
    // BUGFIX: the storage is an array, so it must be released with delete[]
    // (plain "delete values" on an array new is undefined behavior).
    ~ArrayStack() { delete[] values; }
    bool IsEmpty() { return top == -1; }
    // BUGFIX: the last valid slot is maxSize - 1; the original compared
    // against maxSize, letting Push write one element past the end.
    // (Assumes the constructor allocates exactly maxSize slots — confirm.)
    bool IsFull() { return top == maxSize - 1; }
    double Top();
    void Push(const double x);
    double Pop();
    void DisplayStack();
};
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */
voidArrayStack::Push(constdouble x){ //Put your code here if (IsFull()) // if stack is full, print error { //cout << "1" << endl; cout << "Error: the stack is full." << endl; } else { //cout << "2" << endl; values[++top] = x;//First update the top, in array satck, when the top pushs in the array, it will add into the behind of the last member. so the lst member is the top /*cout << "here" << endl;*/ cout << top << endl; } }
doubleArrayStack::Pop(){ //Put your code here if (IsEmpty()) { //if stack is empty, print error cout << "Error: the stack is empty." << endl; return-1; } else { return values[top--];//From the test, the fuction will return the value "values[top]" firstly, and then top will decrease 1 position, then the original top will be pop out the stack!! } }
doubleArrayStack::Top(){ //Put your code here if (IsEmpty()) { cout << "Error: the stack is empty." << endl; return0; } else return values[top]; }
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */
boolCircularArrayQueue::IsEmpty(){ //Put your code here if (counter) returnfalse; elsereturntrue; }
boolCircularArrayQueue::IsFull(){ //Put your code here if (counter < maxSize) returnfalse; elsereturntrue; }
boolCircularArrayQueue::Enqueue(double x){ //Put your code here if (IsFull()) { cout << "Error: the queue is full." << endl; returnfalse; } else { // calculate the new rear position (circular) rear = (rear + 1) % maxSize; // insert new item values[rear] = x; //cout << "here" << endl; //cout << rear << endl; // update counter counter++; returntrue; } }
doubleCircularArrayQueue::Dequeue(){ //Put your code here if (IsEmpty()) { cout << "Error: the queue is empty." << endl; returnfalse; } else { // move front int old = front; front = (front + 1) % maxSize; // update counter counter--; return values[old]; } }
boolQueue::Dequeue(double & x){ if (IsEmpty()) { cout << "Error: the queue is empty." << endl; returnfalse; } else { x = front->data; Node* nextNode = front->next; delete front; front = nextNode; counter--; } }
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
voidQueue::DisplayQueue(){ cout << "front -->"; Node* currNode = front; for (int i = 0; i < counter; i++) { if (i == 0) cout << "\t"; else cout << "\t\t"; cout << currNode->data; if (i != counter - 1) cout << endl; else cout << "\t<-- rear" << endl; currNode = currNode->next; } }
4. Analysis of Algorithms
4.1. Introduction
What is Algorithm?
A clearly specified ==set of simple instructions== to be followed to solve a problem.
Takes a set of values as input.
Produces a value, or set of values, as output.
May be specified
In English.
As a computer program.
As a pseudo-code(伪代码).
Data structures
Methods of organizing data.
Program = algorithms + data structures
4.2. Algorithm Analysis
We only analyze correct algorithm.
An algorithm is correct.
If, for every input instance, it halts with the correct output.
Incorrect algorithms
Might not halt at all on some input instances.
Might halt with other than the desired answer.
Analyzing an algorithm
==Predicting== the resources that the algorithm requires.
Resources include
Memory
Communication bandwidth
Computational time(usually most important)
Factors affecting the running time
computer
compiler(编译器)
algorithm used
input to the algorithm
The content of the input affects the running time.
Typically, the ==input size== (number of items in the input) is the main consideration.
-E.g. sorting problem the number of items to be sorted.
-E.g. multiply two matrices together the total number of elements in the two matrices.
Machine model assumed
Instructions are executed one after another, with no concurrent operations Not parallel computers.
4.3. Time Complexity
Worst case running time
The longest running time for any ==input of size n==.(问题的规模)
An upper bound on the running time for any input guarantee that the algorithm will never take longer.(You can think like the Big-Oh notation)
Example: Sort a set of numbers in increasing order; and the data is in decreasing order.
==The worst case can occur fairly often.==
Best case of an running time
Example: Sort a set of numbers in increasing order; and the data is already in increasing order.
Average case running time
Difficult to define.
4.3.1 Big-Oh Notation
Definition
$T(N) = O(f(N))$ if there are positive constants $c$ and $n_0$ such that $T(N) \le c \cdot f(N)$ when $N \ge n_0$.
(Never mind the N and the n in the formula. They all represent the size of the problem.)
(的增长率小于或等于的增长率。for large N)
(,表示随问题规模N的增大,算法执行时间的增长率和的增长率相同)
(实际上我看相关的教材上一般会习惯把这里面的写作可能是更好地表达时间复杂度的意思。)
The growth rate of $T(N)$ is less than or equal to the growth rate of $f(N)$.
====
Example:
Let Then the probable answer is:
(
)
There are also many other answer, but the third answer in here is the best answer.
Some rules
When considering the growth rate of a function using Big-Oh, ignore the lower order terms and the coefficients of the highest-order term.
Like: then .
No need to specify the base of logarithm.
(不需要指定对数的底数。)
Changing the base from one constant to another changes the value of the logarithm by only a constant factor.
Like: .
If $T_1(N) = O(f(N))$ and $T_2(N) = O(g(N))$, then:
$T_1(N) + T_2(N) = \max(O(f(N)), O(g(N)))$, and $T_1(N) \cdot T_2(N) = O(f(N) \cdot g(N))$.
4.3.2 Big-Omega Notation
Definition
$T(N) = \Omega(g(N))$ if there are positive constants $c$ and $n_0$ such that $T(N) \ge c \cdot g(N)$ when $N \ge n_0$.
$T(N)$ grows no slower than $g(N)$ for “large” $N$.
The growth rate of $T(N)$ is greater than or equal to the growth rate of $g(N)$.
4.3.3 Big-Theta Notation
Definition
$T(N) = \Theta(h(N))$ if and only if $T(N) = O(h(N))$ and $T(N) = \Omega(h(N))$.
The growth rate of $T(N)$ equals the growth rate of $h(N)$.
Big-Theta means the bound is the tightest possible.
Example:
Rules:
If is a polynomial of degree k, then
For logarithmic functions,
4.3.4 General Rules
==Using L’Hopital’s Rule==
==Determine the relative growth rates (using L’Hopital’s rule if necessary)==
Compute
if 0: and is not ().
if constant , but not equal to 0: .
if: ().
limit oscillates: no relation.
Stirling’s approximation
-
Example:
;
Running time calculation
Rule 1- For loop:
The running time of a for loop is at most the running time of the statements inside the for loop (including tests) times the number of iterations.
Rule 2—Nested loops(嵌套循环)
Analyze these inside out. The total running time of a statement inside a group of nested loops is the running time of the statement multiplied by the product of the sizes of all the loops.
1 2 3
// Nested-loops example (Rule 2): the inner statement ++k runs n * n times,
// so the fragment is O(n^2).
for( i = 0; i < n; ++i ) for( j = 0; j < n; ++j ) ++k;
Rule 3—Consecutive Statements
$T_1(N) + T_2(N) = \max(O(f(N)), O(g(N)))$ — consecutive statements add, and the larger term dominates.
1 2 3 4 5
// Consecutive-statements example (Rule 3): an O(n) initialization loop
// followed by an O(n^2) double loop — the larger term dominates, so O(n^2).
for( i = 0; i < n; ++i ) a[ i ] = 0; for( i = 0; i < n; ++i ) for( j = 0; j < n; ++j ) a[ i ] += a[ j ] + i + j;
Rule 4—If/Else
The running time of an if/else statement is never more than the running time of the test plus the larger of the running times of and .
Other rules are obvious, but a basic strategy of analyzing from the inside (or deepest part) out works. If there are function calls, these must be analyzed first. If there are recursive functions, there are several options. If the recursion is really just a thinly veiled for loop, the analysis is usually trivial. For instance, the following function is really just a simple loop and is O(N):
1 2 3 4 5 6 7
/** Recursive factorial of n (returns 1 for n <= 1).
 *  Really a thinly veiled loop, so it runs in O(N). */
long factorial( int n )
{
    if( n <= 1 )
        return 1;
    else
        return n * factorial( n - 1 );
}
This example is really a poor use of recursion. When recursion is properly used, it is difficult to convert the recursion into a simple loop structure. In this case, the analysis will involve a recurrence relation that needs to be solved. To see what might happen, consider the following program, which turns out to be a terrible use of recursion:
1 2 3 4 5 6 7
/** Naive doubly recursive Fibonacci (fib(0) = fib(1) = 1).
 *  Each call spawns two more — exponential running time. */
long fib( int n )
{
    if( n <= 1 )                        // line 1 in the text's analysis
        return 1;                       // line 2: constant-time base case
    else
        return fib( n - 1 ) + fib( n - 2 );  // line 3: two recursive calls
}
At first glance, this seems like a very clever use of recursion. However, if the program is coded up and run for values of N around 40, it becomes apparent that this program is terribly inefficient. The analysis is fairly simple. Let T(N) be the running time for the function call fib(n). If or , then the running time is some constant value, which is the time to do the test at line 1 and return. We can say that because constants do not matter. The running time for other values of N is then measured relative to the running time of the base case. For , the time to execute the function is the constant work at line 1 plus the work at line 3. Line 3 consists of an addition and two function calls. Since the function calls are not simple operations, they must be analyzed by themselves. The first function call is and hence, by the definition of , requires units of time. A similar argument shows that the second function call requires units of time. The total time required is then , where the 2 accounts for the work at line 1 plus the addition at line 3. Thus, for , we have the following formula for the running time of :
Since , it is easy to show by induction that T(N) ≥ fib(n). In Section 1.2.5, we showed that fib(N) < (5/3)N. A similar calculation shows that (for N > 4) , and so the running time of this program grows exponentially. This is about as bad as possible. By keeping a simple array and using a for loop, the running time can be reduced substantially.
4.4 Cases study: The Maximum Subsequence Sum Problem.
4.4.1 The maximum subsequence sum problem
Given (possibly negative) integers $A_1, A_2, \ldots, A_N$, find the maximum value of $\sum_{k=i}^{j} A_k$.
For convenience, the maximum subsequence sum is 0 if all the integers are negative.
4.4.2 Algorithms of the cases and analysis
Case 1: Brute Force(暴力穷举)
1.1 Code
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
/** Brute-force maximum subsequence sum: try every pair (i, j) and sum
 *  arr[i..j] from scratch — O(N^3).
 *  max_start / max_end return the bounds of the best subsequence found.
 *  Note: for an all-negative array this returns the largest single element
 *  (not 0), matching the divide-and-conquer version in this file. */
int MaxSubSeqSum_BruteForce(int* arr, int size, int& max_start, int& max_end) {
    int sum_max = INT_MIN;
    for (int i = 0; i < size; i++) {
        for (int j = i; j < size; j++) {
            // Sum of arr[i..j], recomputed from scratch each time.
            int sum = 0;
            for (int k = i; k <= j; k++) {
                sum = sum + arr[k];
            }
            if (sum > sum_max) {
                sum_max = sum;
                max_start = i;
                max_end = j;
            }
        }
    }
    return sum_max;
}
1.2 Algorithm analysis
The Time Complexity
The most important part of the code is the third (innermost) for loop; if we can calculate the time it spends, then we can derive the time complexity of the whole function.
So there may be a roughly sum of the time that the code go through:====, where N represent the size of the problem.
-First :
-Second:
-Third:
Then the final result is the third result, then the time complexity: ====;
/** Divide-and-conquer maximum subsequence sum of arr[s..e] (INCLUSIVE bounds).
 *  start1 / end1 return the bounds of the best subsequence. O(N log N). */
int Max_Divden(int* arr, int s, int e, int& start1, int& end1) {
    int left_start, left_end;
    int right_start, right_end;
    int lr_start = s, lr_end = e;
    // Base case: a single element is its own best subsequence.
    if (s == e) {
        start1 = s;
        end1 = e;
        return arr[s];
    }
    int midpoint = (s + e) / 2;
    // Best sums entirely inside each half.
    int Max_left_sum = Max_Divden(arr, s, midpoint, left_start, left_end);
    int Max_right_sum = Max_Divden(arr, midpoint + 1, e, right_start, right_end);
    // Best sum crossing the midpoint = (best suffix of the left half ending
    // at midpoint) + (best prefix of the right half starting at midpoint + 1).
    int left_sum = INT_MIN;
    int right_sum = INT_MIN;
    int left_wait = 0;   // running suffix sum, grows leftward from midpoint
    int right_wait = 0;  // running prefix sum, grows rightward from midpoint+1
    for (int i = midpoint; i >= s; i--) {
        left_wait += arr[i];
        if (left_wait > left_sum) {
            left_sum = left_wait;
            lr_start = i;
        }
    }
    for (int j = midpoint + 1; j <= e; j++) {
        right_wait += arr[j];
        if (right_wait > right_sum) {
            right_sum = right_wait;
            lr_end = j;
        }
    }
    int lr_sum = left_sum + right_sum;
    // BUGFIX: the original used strict '>' comparisons in both branches, so a
    // tie between the crossing sum and a half's sum fell through to the
    // (possibly smaller) right half — e.g. {2,-1,1,-5} returned 1 instead of 2.
    int MAX;
    if (lr_sum >= Max_left_sum && lr_sum >= Max_right_sum) {
        start1 = lr_start;
        end1 = lr_end;
        MAX = lr_sum;
    } else if (Max_left_sum >= Max_right_sum) {
        start1 = left_start;
        end1 = left_end;
        MAX = Max_left_sum;
    } else {
        start1 = right_start;
        end1 = right_end;
        MAX = Max_right_sum;
    }
    return MAX;
}
intMaxSubSeqSum_Divide_Conquer(int* arr, int size, int& start, int& end){ /*Implement the function body below. max_start and max_end are used to return the start and end index of the max subsequence found by the algorithm*/ returnMax_Divden(arr, 0, size, start, end); }
voidSorting::BubbleSort(int* NumList){ //Write your code here. for (int i = 0; i < num-1 ; i++) { for (int j = 0; j < num-1 - i; j++) { if (NumList[j] > NumList[j + 1]) { int temp = NumList[j]; NumList[j] = NumList[j + 1]; NumList[j + 1] = temp; } } } }
2. The process
According to the code and the picture, you may understand the process of the “Bubble sort”. First of all, from the picture we can easily know that the whole function will at least do through n times then it may be sorted. So that, for the first for loop it may go n times. And in the second for loop, it will go to N-1 times to compare the num[j] and num[j+1] (前一个和后一个两两比较), then if the former is larger than the latter, the swap them. So that from the picture you can know that every time, the “largest” in the surplus sequence will be put the suitable position(其实就是说剩余序列里面最大那个数据最终会排在剩下的序列里面的最后的位置,最后,整个序列就会以一个升序的序列排好。).
The worst case (similar to bubble sort): the inner loop is executed p times, for each p = 1, 2, …, N−1;
再加上外层循环: ;
The best case:
The input is already sorted in increasing order When inserting into the sorted , only need to compare with and there is no data movement.
- For each iteration of the outer for-loop, the inner for-loop terminates after checking the loop condition once, giving $O(N)$ time overall.
- If input is ==nearly sorted==, insertion sort runs fast.
/*Merge two sorted array. Used in MergeSort*/ voidMerge(int* NumList, int start, int mid, int end){ //Write your code here. int left = mid - start + 1, right = end - mid; // Create sub arrays to store the old elements. int* sub_left = newint[left]; int* sub_right = newint[right]; //Put the original data from NumList(the same position) into the temp arrays for (int i = 0; i < left; i++) { sub_left[i] = NumList[start + i]; } for (int j = 0; j < right; j++){sub_right[j] = NumList[mid + 1 + j];} int new_left = 0, new_right = 0; // Initial index of sub_left and sub_right int Merge_position = start; // Initial index of merged array //Actually every call of the Merge function is not start from the "0", the "start" values are different in different calls; // Merge the temp arrays back into array[left..right] while (new_left < left && new_right < right) { if (sub_left[new_left] <= sub_right[new_right]) { NumList[Merge_position++] = sub_left[new_left++];//don't froget update the index of the sub array } else {NumList[Merge_position++] = sub_right[new_right++];} } //When the first while loop finished, the left and right may have some value in the sub array not merge into the NumList //And since the index of subarray is update in the first loop, so it can be a condition to check whether the value in the sub array //And if the value is not all take back from the new, we will go into the two while loop below while (new_left < left) { NumList[Merge_position++] = sub_left[new_left++]; } while (new_right < right) { NumList[Merge_position++] = sub_right[new_right++];} } voidSorting::MergeSort(int* NumList, int start, int end){ //Write your code here. if (start >= end) return; int midpoint = (start + end) / 2; MergeSort(NumList, start, midpoint); MergeSort(NumList, midpoint + 1, end); Merge(NumList, start, midpoint, end); }
In the “MergeSort()” we can see that it is very similar to the “Divide and Conquer”, in the code we can see that:
For the line 1, actually every time the if will run, but only when the condition in it is true the “return” will run. And when the condition is true, which is , you may say that , because of the if and return, both of them are run, but from the book: ==”we choose ”==, since the constant is not affect the whole time the code will spend;
And for the line 2——the divide step, it’s time(意思是走常数次);
For the “Conquer step”: After we divide the original array, then the sub array should continue divide until catch the “if” condition. And to complete this part, we should do the divide step two times, just the operation of line 3 and line 4. And since we divide the original from the midpoint, the line 3&4 will spend the same time which equal to
Finally for the “Combine step”, here I will show the pseudo-code of the “Merge()” function:
/** Exchange NumList[i] and NumList[j]. */
void Swap(int* NumList, int i, int j) {
    int temp = NumList[i];
    NumList[i] = NumList[j];
    NumList[j] = temp;
}
intMedianOfThree(int* NumList, int begin, int tail){ //Write your code here. int center = (begin + tail) / 2; if (NumList[center] < NumList[begin]) { Swap(NumList, begin, center); } if (NumList[tail] < NumList[begin]) { Swap(NumList, begin, tail); } if (NumList[tail] < NumList[center]) { Swap(NumList, center, tail); } Swap(NumList, center, tail - 1);//After compare the begin, center, tail and the sawp, the center is the pivot we choose, then swap it with the end-1 element. return NumList[tail - 1];//return the pivot back }
intPartition(int* NumList, int begin, int tail){ //Write your code here. //cout << "here" << endl; int pivot = MedianOfThree(NumList, begin, tail); int i = begin - 1; int j = tail - 1; for (;;) {//the update and the stop conditions are in the for loop function // cout << "here1" << endl; while (NumList[++i] < pivot) { } while (pivot < NumList[--j]) { } if (i < j) { Swap(NumList, i, j); }//this step is begin after the two while loop, if the "i" still smaller than the "j", which mean that the element between the position "i" and "j" haven not is not <=pivot or >=pivot,so we go into the if step. else { break; } } Swap(NumList, i, tail - 1); return i; }
voidSorting::QuickSort(int* NumList, int begin, int tail){ //Write your code here. if (begin < tail) { int partition = Partition(NumList, begin, tail); QuickSort(NumList, begin, partition - 1); QuickSort(NumList, partition + 1, tail); } }
3. Idea
3.1 Partitioning
This is a ==key step== of the quick sort algorithm.
==Goal==: given the picked pivot, partition the remaining elements into two smaller sets.
Many ways to implement how to partition:
Even the slightest deviations may cause surprisingly bad results.
3.2 Partitioning Strategy
3.3 Picking the Pivot
Use the first element as pivot
if the input is random, OK
if the input is presorted (or in reverse order)
all the elements go into (or )
this happens consistently throughout the recursive calls.
Results in behavior (Analyze this case later)
Choose the pivot randomly.
Generally safe.
Random number generation can be expensive.
Use the median of the array
Partitioning always cuts the array into roughly half.
An ==optimal== quick sort () .
However, hard to find the exact median
Sort an array to pick the value in the middle.
Median of three
Why do we only partition the interior elements? The first and last elements have already been compared against the pivot during median-of-three selection, so each is already on its correct side; after parking the pivot at position tail−1, we only need to partition the elements strictly between begin and tail−1.
4. Time complexity
Like mergesort, quicksort is recursive; therefore, its analysis requires solving a recurrence formula. We will do the analysis for a quicksort, assuming a random pivot (no medianof-three partitioning) and no cutoff for small arrays. We will take , as in mergesort. The running time of quicksort is equal to the running time of the two recursive calls plus the linear time spent in the partition (the pivot selection takes only constant time). This gives the basic quicksort relation :
where is the number of elements in . We will look at three cases.
Worst-Case Analysis
The pivot is the smallest element, all the time. Then i = 0, and if we ignore T(0) = 1, which is insignificant, the recurrence is
;
Adding up all these equations yields :
;
as claimed earlier. To see that this is the worst possible case, note that the total cost of all the partitions in recursive calls at depth d must be at most N. Since the recursion depth is at most N, this gives an == worst-case== bound for quicksort.
Best-Case Analysis
In the best case, the pivot is in the middle. To simplify the math, we assume that the two subarrays are each exactly half the size of the original, and although this gives a slight overestimate, this is acceptable because we are only interested in a Big-Oh answer :
(1)
Divide both sides of Equation (1) by
(2)
We will telescope using this equation:
;
Add them up
;
Notice that this is the exact same analysis as mergesort; hence, we get the same answer.
Average-Case Analysis
Small Arrays
For very small arrays , quicksort does not perform as well as insertion sort. Furthermore, because quicksort is recursive, these cases will occur frequently. A common solution is not to use quicksort recursively for small arrays, but instead use a sorting algorithm that is efficient for small arrays, such as insertion sort. Using this strategy can actually save about 15 percent in the running time (over doing no cutoff at all). A good cutoff range is , although any cutoff between 5 and 20 is likely to produce similar results. This also saves nasty degenerate cases, such as taking the median of three elements when there are only one or two.
5.5 Heap Sort()
1. Background: Binary Trees
Has a ==root== at the topmost level.
Each ==node== has ==zero, one or two children==.
A node that has ==no child== is called a ==leaf==.
For a node x, we denote the left child, right child and the parent of x as left(x), right(x) and parent(x), respectively.
Height (Depth) of a Binary Tree :The number of ==edges== on the ==longest== path from the root to a leaf.
2. Background: Complete Binary Trees
A ==complete binary tree== is the tree.
Where a node can have 0 (for the leaves) or 2 children.
==All leaves are at the same depth==.
No. of nodes and height
A complete binary tree with ==N nodes has height $O(\log N)$==.
A complete binary tree with ==height d has $2^{d+1} - 1$ nodes== (all leaves at depth d, every internal node with two children).
The property that allows operations to be performed quickly is the heap-order property. Since we want to be able to find the minimum quickly, it makes sense that the smallest element should be at the root. If we consider that any subtree should also be a heap, then any node should be smaller than all of its descendants. Applying this logic, we arrive at the heap-order property. In a heap, for every node X, the key in the parent of X is smaller than (or equal to) the key in X, with the exception of the root (which has no parent).2 In Figure 6.5 the tree on the left is a heap, but the tree on the right is not (the dashed line shows the violation of heap order). By the heap-order property, the minimum element can always be found at the root. Thus, we get the extra operation, findMin, in constant time.
// Percolate the element at index `currentSize` up toward the root until the
// max-heap order (every parent >= its children) is restored.
// The heap is 1-indexed: live elements occupy heap[1..currentSize] and the
// parent of index i is i/2, so left/right children need no special casing.
// (The original wrote heap[0] = 0 for a size-1 heap; slot 0 is unused, so
// that write was spurious and has been removed.)
void percolateUp(int* heap, int currentSize){
    int child = currentSize;
    int parent = child / 2;
    // Stop at the root (parent == 0) or once the parent already dominates.
    while (parent > 0 && heap[child] > heap[parent]) {
        int tmp = heap[child];
        heap[child] = heap[parent];
        heap[parent] = tmp;
        child = parent;
        parent = child / 2;
    }
}
/** Append an element to the end of heap, and adjust heap to maintain the
 *  max-heap order.  `currentSize` is incremented to account for `ele`. */
void InsertHeap(int* heap, int& currentSize, const int ele){
    heap[++currentSize] = ele;        // place at the next free leaf slot
    percolateUp(heap, currentSize);   // restore the heap-order property
}
/* Construct a 1-indexed max heap (parent larger than its children) from the
 * first `num` entries of NumList.  Returns a newly allocated array of num+1
 * ints (slot 0 unused); the caller owns and must delete[] the memory. */
int* BuildMaxHeap(int* NumList, int num){
    int* heap = new int[num + 1];
    int currentSize = 0;
    // Insert the elements one at a time; each insertion re-establishes
    // the max-heap order via percolate-up.
    for (int idx = 0; idx < num; idx++)
        InsertHeap(heap, currentSize, NumList[idx]);
    return heap;
}
/**Adjust heap to maintain the heap order*/ voidpercolateDown(int* MaxHeap, int currentSize){ //the opposite usage to the percolateUP if (currentSize <= 2) { if (MaxHeap[1] > MaxHeap[2]) { Swap(MaxHeap, 1, 2); }//To help the for loop. Due to some reason from the for loop } //for some cases, the MaxHeap[1] > MaxHeap[2], so I add one step here to complete. for (int i = 1, l = 2 * i, r = 2 * i + 1; (i < currentSize && l < currentSize && r < currentSize) && (MaxHeap[i] < MaxHeap[l] || MaxHeap[i] < MaxHeap[r]); ) {//some problrm inthe part of the i,j,h update comditions. if (MaxHeap[i] < MaxHeap[l] && MaxHeap[i] < MaxHeap[r]) { //MaxHeap[l] > MaxHeap[r] ? Swap(MaxHeap, i, l) : Swap(MaxHeap, i, r); if (MaxHeap[l] > MaxHeap[r]) { Swap(MaxHeap, i, l); i = l; l = 2 * i; r = l + 1; }//There is interesting, remember that due to the left and the right children, the update function is different in once loop. else { Swap(MaxHeap, i, r); i = r; l = 2 * i; r = l + 1; } } elseif (MaxHeap[i] < MaxHeap[l]) { Swap(MaxHeap, i, l); i = l; l = 2 * i; r = l + 1; } else { Swap(MaxHeap, i, r); i = r; l = 2 * i; r = l + 1; } } } /* 1. Save the max (top of the heap) element M 2. Move MaxHeap[currentSize] to the top 3. Call percolateDown() to maintain the max-heap order 4. Save "M" to MaxHeap[currentSize] 5. currentSize--*/ voidDeleteMin(int* MaxHeap, int& currentSize){ Swap(MaxHeap, 1, currentSize);//Actually I directly use the Swap function in the Quick sort, same usage like the reference above. Every time i change the first element and the end--. percolateDown(MaxHeap, currentSize); currentSize--; } int* Sorting::HeapSort(int* NumList){ int* MaxHeap = BuildMaxHeap(NumList, num); //Construct the max-heap; int currentSize = num; while(currentSize>0){ DeleteMin(MaxHeap, currentSize); //for (int i = 0; i < num; i++) { cout << MaxHeap[i] << " "; }cout << endl; } //After the while loop, the original MaxHeap array becomes a ascending-sorted array. return MaxHeap; }
/** Append `ele` after the last heap element, then restore the max-heap
 *  order by percolating the new element up.  `currentSize` grows by one. */
void InsertHeap(int* heap, int& currentSize, const int ele){
    currentSize++;
    heap[currentSize] = ele;
    percolateUp(heap, currentSize);
}
/* Construct a max heap (parent larger than its children).  The heap is
 * 1-indexed over num+1 slots (index 0 unused); caller owns the array. */
int* BuildMaxHeap(int* NumList, int num){
    int* heap = new int[num + 1];
    int currentSize = 0;
    for (int k = 0; k != num; ++k) {
        // Each insertion percolates the new key up to keep the heap order.
        InsertHeap(heap, currentSize, NumList[k]);
    }
    return heap;
}
/** Percolate the root element down to restore the max-heap order.
 *  The live heap occupies MaxHeap[1..currentSize-1]: DeleteMin has just
 *  vacated slot `currentSize`, which is why the loop bound is strict.
 *  Fix: the original compared MaxHeap[child + 1] without checking that
 *  child + 1 is still inside the live heap, reading the stale vacated slot
 *  (harmless only because DeleteMin leaves tmp's value there; unsafe for
 *  any other caller). */
void percolateDown(int* MaxHeap, int currentSize){
    int hole = 1;
    int tmp = MaxHeap[hole];           // the element being sifted down
    while (hole * 2 < currentSize) {
        int child = hole * 2;
        // Step to the right sibling only if it exists inside the live
        // heap and is the larger of the two children.
        if (child + 1 < currentSize && MaxHeap[child] < MaxHeap[child + 1]) {
            child = child + 1;
        }
        if (MaxHeap[child] > tmp) {
            MaxHeap[hole] = MaxHeap[child];  // move the larger child up
            hole = child;
        }
        else break;
    }
    MaxHeap[hole] = tmp;  // place the sifted element into its final hole
}
/* 1. Save the max (top of the heap) element M 2. Move MaxHeap[currentSize] to the top 3. Call percolateDown() to maintain the max-heap order 4. Save "M" to MaxHeap[currentSize] 5. currentSize--*/ voidDeleteMin(int* MaxHeap, int& currentSize){ int max = MaxHeap[1]; //Save the top(Max) element. MaxHeap[1] = MaxHeap[currentSize]; //Move the end element of MaxHeap to the top; percolateDown(MaxHeap, currentSize); //Adjust MaxHeap to maintain the heap structure. MaxHeap[currentSize] = max; //Append the previous top element to the end of heap; currentSize--; }
int* Sorting::HeapSort(int* NumList){ int* MaxHeap = BuildMaxHeap(NumList, num); //Construct the max-heap; int currentSize = num; while(currentSize>0){ DeleteMin(MaxHeap, currentSize); } //After the while loop, the original MaxHeap array becomes a ascending-sorted array. return MaxHeap; }
6. Trees
Preliminaries
A ==tree== is a collection of nodes
The collection can be empty;
(Recursive Definition) If not empty, a tree consists of a distinguished node (the ==root==), and ==zero or more== nonempty ==subtrees== , each of whose roots are connected by a directed ==edge== from r;
==Child== and ==Parent==
Every node except the root has exactly one parent;
A node can have zero or more children
==Leaves==
Leaves are nodes with no children
==Sibling==
nodes with same parent
==Path==
A path from $n_1$ to $n_k$ is defined as a sequence of nodes $n_1, n_2, \ldots, n_k$ such that $n_i$ is the parent of $n_{i+1}$ for $1 \le i < k$.
==Length== of a path
The length of this path (from $n_1$ to $n_k$) is the number of edges on the path, namely $k-1$.
There is a path of length zero from every node to itself;
Notice that in a tree there is exactly one path from the root to each node.
==Depth of a node==
The depth of is the length of the unique path from the root to
The root depth is 0
==Height of a node==
The height of is the length of the longest path from to a leaf.
All leaves are at height 0;
==The height of a tree is equal to the height of the root.==
==Ancestor== and ==Descendant==
If there is a path from $u$ to $v$, then $u$ is an ancestor of $v$, and $v$ is a descendant of $u$. If $u \ne v$, then $u$ is a ==proper ancestor==(真祖先) of $v$ and $v$ is a ==proper descendant==(真后裔) of $u$.
A binary tree is a tree in which ==no node can have more than two children==.
A property of a binary tree that is sometimes important is that the depth of an average binary tree is considerably smaller than N. An analysis shows that the average depth is $O(\sqrt{N})$, and that for a special type of binary tree, namely the ==binary search tree==, ==the average value of the depth is $O(\log N)$==. Unfortunately, the depth can be as large as $N-1$, as the example in Figure 4.12 shows.
Binary Search Tree
Property: For every node , all the keys in its ==left subtree are smaller== than the key value in X, and ==all the keys in the right subtree are larger== than the key value in X.
The ==average depth== of an $N$-node binary search tree turns out to be $O(\log N)$; and the maximum depth of a node is $O(N)$.
Implementation of Binary Search Tree
1. Construct BST and the implementation of insertion
Construct: 利用insertion来构造BST。
Insertion:
Proceed down the tree as you would with a find;
If X is found, do nothing (or update something);
Otherwise, insert X at the last spot on the path traversed(将X插入到所遍历的路径的最后一点上)
/* Insert `val` into the BST rooted at `root`, keeping the BST property,
 * and return the (possibly new) root of this subtree.
 * Duplicate keys are left untouched. */
TreeNode* InsertBSTNode(TreeNode* root, int val){
    // Empty subtree: this is exactly where the new key belongs.
    if (root == NULL) {
        root = new TreeNode(val);
        root->left = root->right = NULL;
        return root;
    }
    if (val < root->data) {
        // Smaller keys live in the left subtree; recurse until an empty
        // spot is found, re-linking the child pointer on the way back up.
        root->left = InsertBSTNode(root->left, val);
    }
    else if (val > root->data) {
        // Larger keys go into the right subtree, symmetrically.
        root->right = InsertBSTNode(root->right, val);
    }
    // val == root->data: the key is already present; do nothing.
    return root;
}
// Insert each node into BST one by one. TreeNode* ConstructBST(int* array, int arrayLength){ // Input your code here. TreeNode* root1=NULL; for (int i = 0; i < arrayLength; i++) { root1=InsertBSTNode(root1, array[i]); } return root1; }
// Construct the tree: build a binary search tree from the given array
// and store its root in the `root` member.
Tree::Tree(int* array, int arrayLength){
    root = ConstructBST(array, arrayLength);
}
The time complexity of the insertion function is $O(h)$, where $h$ means the height of the tree. Following the result we discussed above, we know that the average value of $h$ is $O(\log N)$, and the worst case is $O(N)$.
Then the time complexity of constructing the tree is $O(N \log N)$ on average (and $O(N^2)$ in the worst case).
2. GetMinNode and GetMaxNode
Goal: Return the node containing the smallest(largest) key in the tree;
Algorithm: Start at the root and go left(right) as long as there is a left(right) child. The stopping point is the smallest (largest) element.
1 2 3 4 5 6
/* Return the node holding the smallest key in the subtree rooted at `root`:
 * start at the root and keep following left children until none remains.
 * Returns NULL for an empty tree. */
TreeNode* Tree::getMinNode(TreeNode* root){
    if (root == NULL) {
        return NULL;
    }
    TreeNode* cur = root;
    while (cur->left != NULL)
        cur = cur->left;   // the minimum is the leftmost node
    return cur;
}
3. Treeheight
Given a binary tree, find its maximum depth;
The maximum depth is ==the number of nodes== along the longest path from the root node down to the farthest leaf node.(这里的定义和前面的有点不一样)
Determine if a BST is height-balanced. If balance return true, else return false.
A height-balanced binary tree is defined as a binary tree in which the depth of the two subtrees of every node never differ by more than 1.
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
boolTree::IsBalanced(TreeNode* root){ // Input your code here. int leftHight; int rightHight; /* If tree is empty then return true */ if (root == NULL) {return1;} leftHight = TreeHight(root->left); rightHight = TreeHight(root->right); /* Get the height of left and right sub trees */ if (abs(leftHight - rightHight) <= 1 && IsBalanced(root->left) && IsBalanced(root->right)) {return1;} /* If we reach here then tree is not height-balanced */ //如果左子树和右子树的高度差的绝对值≤1,并且左右子树也都是平衡树,那么这棵树就是平衡树。 return0; }
5. Deletion
Deletion is more complex than the other implement, because we should consider how we take care of the children of the deleted node, we should consider following cases:
The node is a leaf, then we just delete it immediately;
The node has one child: Adjust a pointer form the parent to bypass that node;
The node has 2 children
Replace the key of the node with the minimum element at the right subtree or the maximum element at the left subtree;
Delete the minimum element
Has either no child or only right child because if it has a left child, that left child would be smaller and would have been chosen. So invoke case 1 or 2.
/*Do a binary search. If input value "val" is found in BST, return true, else return false.*/ boolTree::Search(TreeNode* root, int val){ // Input your code here. if (root == NULL) { returnNULL; } elseif (val < root->data) { returnSearch(root->left, val); } elseif (val > root->data) { returnSearch(root->right, val); } else { return root; } } TreeNode* Tree::deleteNode(TreeNode* root, int key){ // Input your code here. if (Search(root, key)) { // Base case if (root == NULL) return root; if (root->data > key) { root->left = deleteNode(root->left, key); return root; } elseif (root->data < key) { root->right = deleteNode(root->right, key); return root; } if (root->left == NULL) { TreeNode* temp = root->right; delete root; return temp; } elseif (root->right == NULL) { TreeNode* temp = root->left; delete root; return temp; } // If both children exist else { TreeNode* PrevNode = root; TreeNode* CurrNode = root->right; while (CurrNode->left != NULL) { PrevNode = CurrNode; CurrNode = CurrNode->left; } if (PrevNode != root) { PrevNode->left = CurrNode->right; } else { PrevNode->right = CurrNode->right; } root->data = CurrNode->data;// Copy CurrNode Data to root delete CurrNode; return root;// Delete CurrNode and return root } } return root; }
6.1.1 Balance Binary Search Tree
6.2 AVL Tree
An AVL (Adelson-Velskii and Landis) tree is a binary search tree with a balance condition. The balance condition must be easy to maintain, and it ensures that the depth of the tree is $O(\log N)$.
An tree is identical to a binary search tree, except that for every node in the tree, the height of the left and right subtrees can differ by ==at most 1==. (The height of an empty tree is defined to be −1.) (一棵AVL树是其每个节点的左子树和右子树的高度最多差1的二叉查找树)(空树的高度定义为-1);
Denote the ==minimum number of nodes== in an AVL tree of ==height h== as $S(h)$:
Base: $S(0)=1$, $S(1)=2$
Recursive relation: $S(h)=S(h-1)+S(h-2)+1$
2. Insertion in AVL tree
When we do an insertion, we need to update all the balancing information for the nodes on the path back to the root, but the reason that insertion is potentially difficult is that inserting a node could violate the tree property. (For instance, inserting 6 into the tree in Figure 4.32 would destroy the balance condition at the node with key 8.) If this is the case, then the property has to be restored before the insertion step is considered over. It turns out that this can always be done with a simple modification to the tree, known as a ==rotation==.
After an insertion, only nodes that are on the path from the insertion point to the root might have their balance altered because only those nodes have their subtrees altered. As we follow the path up to the root and update the balancing information, we may find a node whose new balance violates the condition. We will show how to ==rebalance== the tree at the first (i.e., deepest) such node, and we will prove that this rebalancing guarantees that the entire tree satisfies the property.(在一次插入后,只有那些插入点到根节点的路径上的节点的平衡可能被改变,因为只有这些节点的子树可能发生变化。当沿着这条路径上行到根并更新平衡信息时,我们可以发现一个节点,它的新平衡破坏了 条件。我们将指出如何在第一个这样的节点(即最深的节点)重新平衡这棵树,并证明,这一重新平衡保证整个树满足 性质。)
Different Cases for Rebalance
Denote the ==node== that must be rebalanced :
Case 1: An insertion into the left subtree of the left child of ; (对的左儿子的左子树进行一次插值)
Case 2: An insertion into the right subtree of the left child of ; (对的左儿子的右子树进行一次插值)
Case 3: An insertion into the left subtree of the right child of ;(对的右儿子的左子树进行一次插值)
Case 4: An insertion into the right subtree of the right child ;(对的右儿子的右子树进行一次插值)
情形1(2)和4(3)是关于点的镜像对称(mirror image symmetry)。
So that there are actually two kinds of questions:
Insertion occurs on the “outside” (i.e., left-left or right-right) is fixed by ==single rotation== of the tree;
Insertion occurs on the “inside” (i.e., left-right or right-left) is fixed by ==double rotation== of the tree;
Insertion Algorithm:
Firstly, insert the new key as a new leaf just as in ordinary binary search tree;
Then trace the path ==from the new leaf towards the root==. For each node x encountered, check if heights of left(x) and right(x) differ by at most 1.
If yes, proceed to parent(x);
If not, restructure by doing ==either a single rotation or a double rotation==.
Note: once we perform a rotation at a node x, we won’t need to perform any rotation at any ancestor of x.
1. Single rotation
Case study: Right rotation
1 2 3 4 5 6 7 8
/* Standard AVL single right rotation around `curRoot`:
 *
 *        curRoot                pivot
 *         /    \               /     \
 *      pivot    C    ==>      A    curRoot
 *      /   \                        /    \
 *     A     B                      B      C
 *
 * Returns the new subtree root (`pivot`); the caller re-links it. */
TreeNode* AVLTree::rightRotate(TreeNode* curRoot){
    // Put your code below
    TreeNode* pivot = curRoot->left;   // left child becomes the new root
    TreeNode* orphan = pivot->right;   // subtree B must be re-homed
    pivot->right = curRoot;            // old root hangs on pivot's right
    curRoot->left = orphan;            // B becomes the old root's left child
    return pivot;
}
It’s case 1, so we need to do right rotation to rebalance the tree.
Here to fit the code, the “curRoot” is and the “Left” is and the “temp” is .
And here let me show the process of the rotation:
In the later code, some functions will call the rightRotate function to do the right rotation;
The in the rightRotate function, we get the curRoot is , we have to let the replace the location of ;
So what have to do is let the right subtree of be , so that in the code;
And if there is an original right subtree of we cannot forget it, we should change it to the left subtree of .
After that we return left the “Left” node to the location where it call;
Here you may confused about what if also has parents node?
Actually that’s the question what I have before. In fact, we return the node back where the function call, there is must a node to accept the return one, actually that node is “”, we change the whole “” tree outside the whole tree, then we let the “” node change the value by (after rotation), then we finish the real replacement part.
Actually when we compile the code, we have not write another two functions of Double rotation. The process of implementation is call the single rotation function.
From that we can know that the double rotation have separate into two single rotations:
;
(Double Left rotation, firstly single left rotation then single right rotation)
.
(Double right rotation, firstly single right rotation then single left rotation)
A B+ tree of order $M$ is an M-ary tree with the following properties:
The data items are stored at leaves;(数据项存储在树叶上)
The nonleaf nodes store up to $M-1$ keys to guide the searching; key $i$ represents the smallest key in subtree $i+1$;(非叶子节点存储至多$M-1$个关键字以指示搜索的方向;关键字$i$代表子树$i+1$中的最小的关键字)
The root is either a leaf or has between two and $M$ children;(树的根或者是一片树叶,或者其儿子数在2和$M$之间)
All nonleaf nodes (except the root) have between $\lceil M/2 \rceil$ and $M$ ==children==;(除根外,所有非叶子节点的==儿子数==在$\lceil M/2 \rceil$和$M$之间)
All leaves are at the same depth and have between $\lceil L/2 \rceil$ and $L$ ==data items==, for some $L$ (the determination of $L$ is described shortly).(所有的树叶都在相同的深度上,并且每片树叶拥有的数据项个数在$\lceil L/2 \rceil$和$L$之间)
Since ==L===5, each leaf has between 3 and 5 ==data items==
Since ==M===5, each nonleaf nodes has between 3 to 5 ==children==;
Requiring nodes to be half full guarantees that the B+ tree does not degenerate into a simple binary tree.(要求节点为半满保证了B+树不会退化为简单的二叉树。)
2. Algorithm
1. Searching Algorithm
Let x be the input search key;
Start the searching at the root;
If we encounter an internal node v, search (linear search or binary search) for x among the keys stored at v;
If at v, follow the left child pointer of ;
If for two consecutive keys and at , follow the left child pointer of ;
If at , follow the right child pointer of
If we encounter a leaf v, we search (linear search or binary search) for x among the keys stored at v. If found, we return the entire record; otherwise, report not found.
2. Insertion Algorithm
Supposed that we want to insert a key K and its associated record. First we should do is determine the location where it will be inserted. By search algorithm, which will bring us to a leaf x, then if the leaf is not full, we can directly insert the K, but what if it is already full?
Here is another crucial property of B+ tree: ==Splitting==, both insertion and deletion you will meet it.
Keeping going:
If leaf x contains < L keys, then insert K into x (at the correct position in node x);
If x is already full (i.e. containing L keys). Split x:
Cut x off from its parent;
Insert K into x, pretending x has space for K. Now x has L+1 keys.
After inserting K, split x into 2 new leaves , with containing the smallest keys, and containing the remaining keys. Let J be the minimum key in ;
Make a copy of to be the parent of and , and insert the copy together with its child pointers into the old parent of ;
But here just the situation of leaf node split if we keep doing insertion, actually the internal node will finally full. Next I will show you how to split the internal node:
Cut x off from its parent;
Insert K and its left and right child pointers into x, pretending there is space. Now x has M keys;
Split x into 2 new internal nodes and , with containing the ( ) smallest keys, and containing the largest keys. Note that the ()th key J is not placed in or ;
Make the parent of and , and insert J together with its child pointers into the old parent of x.
3. Deletion Algorithm
Same like the insertion, there are also two situations you have to take consideration.
(1) target is a key in some internal node (needs to be replaced, according to our convention)
(2) After deleting target from leaf x, x contains less than keys (needs to merge nodes)
(这里面的target应该指的是internal node 里面的key值)
Target can appear in at most one ancestor y of x as a key, Node y is seen when we searched down the tree. After deleting from node x, we can access y directly and replace target by the new smallest key in x.
Suppose we delete the record with key target from a leaf. Let u be the leaf that has keys (too few); Let v be a sibling of u with at least keys; Let k be the key in the parent of u and v that separates the pointers to u and v;There are two cases:
Case 1: v contains or more keys and v is the right sibling of u;
Move the leftmost record from v to u
Case 2: v contain or more keys and v is the left sibling of u;
Move the rightmost record from v to u
Then set the key in parent of u that separates u and v to be the new smallest key in u;
classGraph{ // Create pointer point to an array of list list<int> *l; int V;
public:
    // Build a graph on `size` vertices, each with an empty adjacency list.
    Graph(int size){
        V = size;
        // One std::list per vertex.  NOTE(review): no destructor is visible
        // in this snippet, so this array appears never to be freed.
        l = new list<int> [V];
    }
// Add the edge i -> j; for an undirected graph (the default) also add j -> i.
void addEdge(int i, int j, bool undir=true){
    l[i].push_back(j);
    if (undir) {
        l[j].push_back(i);
    }
}
voidprintAdjList(){ // Iterate over all the rows for(int i = 0; i < V; i++){ cout << i << "->"; // every element of ith linked list for(auto node:l[i]){ cout << node << ","; } cout << endl; } }
Example: Given a graph representation and a vertex s in the graph, find all paths from s to other vertices.
Two common graph traversal algorithms
Breadth First Search (BFS)
Find all paths from s to other vertices;
Depth First Search (DFS)
Topological sort;
Find strongly connected components.
1. Algorithm 广度优先算法
1. Pseudocode
1 2 3 4 5 6 7 8 9 10 11 12 13 14
Algorithm BFS Input: s is the source vertex Output: Mark all vertices that can be visited from s. 1.for each vertex v 2.do flag[v]:=false; //flag[]:visited table 3. Q=empty queue;//use queue: FIFO 4. flag[s]:=true; 5.enqueue(Q,s); 6.while Q is not empty 7.do v:=dequeue(Q); 8.for each w adjacent to v 9.doif flag[w]=false 10. then flag[w]:=true; 11.enqueue(Q,w)
The line 6 of pseudocode: Each vertex will enter Q at most once;
The line 8 of pseudocode: Each iteration takes time proportional to deg(v) + 1 (the number 1 is to account for the case where deg(v) = 0 —- the work required is 1, not 0).(在这里我是没看懂这个+1的解释的,我更倾向于把这个+1理解为代表dequeue)
Then running time of adjacent list: This is summing over all the iterations in the while loop
But for the adjacent matrix it will spend .
So, with an adjacency matrix, the running time is $O(n^2)$, independent of the number of edges m. With adjacency lists, the running time is $O(n+m)$; if $m = O(n^2)$ like in a dense graph, $O(n+m) = O(n^2)$.
2. Shortest Path Recording
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
Algorothm BFS Input: s is the source vertex Output: Mark all vertices that can be visited from s. 1.for wach vertex v 2.do flag[v]:=false; //flag[]:visited table 3. pred[v]:=-1;//initializa all pred[v]to -1(this array is to help you record the previous vertex) 4. Q=empty queue;//use queue: FIFO 5. flag[s]:=true; 6.enqueue(Q,s); 7.while Q is not empty 8.do v:=dequeue(Q); 9.for each w adjancent v 10.doif flag[w]=false 11. then flag[w]:=true; 12. pred[w]:=v;//record where you came from 13.enqueue(Q,w)
Path report:
1 2 3 4 5
Algorithm Path(w) 1. if pred[w]!=-1 2. then 3.Path(pred[w]); 4. output w
The path returned is the shortest from s to v(minimum number of edges).
Record the Shortest distance
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
Algorothm BFS Input: s is the source vertex Output: Mark all vertices that can be visited from s. 1.for wach vertex v 2.do flag[v]:=false; //flag[]:visited table 3. pred[v]:=-1;d(v)=MAX_INT;//initializa all pred[v]to -1(this array is to help you record the previous vertex) 4. Q=empty queue;//use queue: FIFO 5. flag[s]:=true;d(s)=0; 6.enqueue(Q,s); 7.while Q is not empty 8.do v:=dequeue(Q); 9.for each w adjancent v 10.doif flag[w]=false 11. then flag[w]:=true; 12.d(w)=d(v)+1;pred[w]:=v;//record where you came from 13.enqueue(Q,w)
classGraph{ // Create pointer point to an array of list list<int> *l; int V;
public:
    // Initialise a graph with `size` vertices; allocate one (empty)
    // adjacency list per vertex.
    Graph(int size){
        V = size;
        l = new list<int> [V];  // array of V lists (no dtor visible to free it)
    }
// Record edge i -> j in the adjacency list; when `undir` is true
// (the default) the reverse edge j -> i is stored as well.
void addEdge(int i, int j, bool undir=true){
    l[i].push_back(j);
    if (undir) l[j].push_back(i);
}
voidprintAdjList(){ // Iterate over all the rows for(int i = 0; i < V; i++){ cout << i << "->"; // every element of ith linked list for(auto node:l[i]){ cout << node << ","; } cout << endl; } }
while(!q.empty()){ // Do some work for every node int f = q.front(); cout << f << endl; q.pop();
// Push the nbrs of current node inside q if they are not already visited for(auto nbr : l[f]){ if(!visited[nbr]){ q.push(nbr); visited[nbr] = true; } } } }
Algorithm DFS(s) 1. for each vertex v 2. do flag[v]:=false;//Flag all vertices as not visited 3. RDFS;
Algorithm RDFS(v) 1. flag[v]:=true;//Flag yourself as visited 2.for each neighbor w of v//For unvisited neighbors call RDFS(w) recursively 3.doif flag[w]=false 4. then RDFS(w);
classGraph{ // Create pointer point to an array of list list<int> *l; int V;
public:
    // Create a graph on `size` vertices backed by an array of V lists.
    Graph(int size){
        V = size;
        l = new list<int> [V];  // one adjacency list per vertex
    }
// Insert edge i -> j; by default the graph is undirected, so j -> i too.
void addEdge(int i, int j, bool undir=true){
    l[i].push_back(j);
    if (undir) {
        l[j].push_back(i);
    }
}
voidprintAdjList(){ // Iterate over all the rows for(int i = 0; i < V; i++){ cout << i << "->"; // every element of ith linked list for(auto node:l[i]){ cout << node << ","; } cout << endl; } }