https://paper.dropbox.com/doc/Interview-Notes-Dropbox-wl7npLjRBjrAKA5AOBsaM
http://www.1point3acres.com/bbs/thread-177627-1-1.html
/* 2-D array of "sharpness" values. Find the path from left to right which has the highest minimum sharpness. The path goes from left to right: pick a starting cell in the first column, then at each step move one column to the right, going up-right, straight right, or down-right. In other words, the path length is the number of columns n; if the path is (i1,1),(i2,2),...,(in,n), then every consecutive pair must satisfy |i_k - i_{k-1}| <= 1.
.5 .7 .2
.7 .5 .8
.9 .1 .5
In this example, the ideal path is .7 -> .7 -> .8, because that path's minimum value, .7, is the largest among all paths. Only this value needs to be returned, not the path itself. This is a DP problem. */
Follow-up:
What if the image is 1 million x 1 million and cannot be read into memory? I answered that the DP can be reduced to one dimension, reading one column at a time. The interviewer said that reading the first element of every column is very slow, because the read pointer has to keep seeking. I then said we could transpose the matrix into another file and run the function on that new file, so it can be read row by row. He asked how I would transpose it into the new file; I said we could read several columns at a time, depending on available memory. He said this can still be optimized: there is a balance here. If you read rows and output columns, writing the file is expensive; if you read columns and output rows, reading the file is expensive (the main cost is the seek whenever the read or write pointer jumps to the next row). I should have caught on at that point, but I kept thinking in circles. The final answer: read a roughly square block at a time, sized to fit in memory, write it to the new file, then read the next block. That minimizes the number of read/write pointer seeks.
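A minimal sketch of that blockwise transpose, assuming the matrix is stored row-major in a binary file of 4-byte floats; the BLOCK size, class name, and float encoding are illustrative assumptions, not from the original post.
import java.io.IOException;
import java.io.RandomAccessFile;

public class BlockTranspose {
    static final int BLOCK = 1024; // tile side; pick so a BLOCK x BLOCK tile of floats fits in memory

    // Transposes an n x m row-major matrix of 4-byte floats from 'inPath' into an m x n matrix
    // in 'outPath', one roughly square tile at a time, so each file is touched in long sequential runs.
    public static void transpose(String inPath, String outPath, int n, int m) throws IOException {
        try (RandomAccessFile in = new RandomAccessFile(inPath, "r");
             RandomAccessFile out = new RandomAccessFile(outPath, "rw")) {
            float[][] tile = new float[BLOCK][BLOCK];
            for (int r0 = 0; r0 < n; r0 += BLOCK) {
                for (int c0 = 0; c0 < m; c0 += BLOCK) {
                    int rows = Math.min(BLOCK, n - r0), cols = Math.min(BLOCK, m - c0);
                    for (int r = 0; r < rows; r++) {           // read 'rows' contiguous row segments
                        in.seek(4L * ((long) (r0 + r) * m + c0));
                        for (int c = 0; c < cols; c++) tile[r][c] = in.readFloat();
                    }
                    for (int c = 0; c < cols; c++) {           // write the transposed tile, one output row at a time
                        out.seek(4L * ((long) (c0 + c) * n + r0));
                        for (int r = 0; r < rows; r++) out.writeFloat(tile[r][c]);
                    }
                }
            }
        }
    }
}
Each tile costs only rows + cols seeks instead of one seek per element, which is the balance the interviewer was hinting at.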
http://www.1point3acres.com/bbs/thread-180491-1-1.html
The coding question was their duplicate-file problem. At first I tried to talk while writing code, but he didn't respond; my spoken English is weak, so I couldn't keep it up either, and the second half turned into heads-down coding.
I used BFS when reading the files; there was a small issue that I fixed during the walk-through, which may have cost me points.
He asked about the complexity, and pointed out that matching MD5 hashes cannot prove two files are identical; I later said MD5 can be used as a filter, and only a byte-array comparison can confirm a match.
He asked what to do if the files are very large. Since I was already reading files in 1 KB chunks, I answered that all files sharing the same MD5 should then be pulled out and compared one against another.
He asked whether that 1 KB should be read from the head or the tail of the file. I said from the head, but couldn't explain why; at first I said reading from the head is faster, and he said reading is equally fast from either end. He then explained the reason, but I didn't follow his English.
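A small sketch of the "MD5 as a filter, then compare bytes" step mentioned above; the method name sameContent and the streaming comparison are my own choices, not the interviewer's.
import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;

public class ByteCompare {
    // Returns true only if the two files have identical contents.
    // Intended to run after size and MD5 filtering, so it is rarely called on non-duplicates.
    public static boolean sameContent(String pathA, String pathB) throws IOException {
        try (BufferedInputStream a = new BufferedInputStream(new FileInputStream(pathA));
             BufferedInputStream b = new BufferedInputStream(new FileInputStream(pathB))) {
            int byteA, byteB;
            do {
                byteA = a.read();
                byteB = b.read();
                if (byteA != byteB) return false; // first mismatching byte, or one file ended early
            } while (byteA != -1);
            return true; // both streams ended together with no mismatch
        }
    }
}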
http://www.1point3acres.com/bbs/thread-170171-1-1.html
Design two functions, log_hit() and get_hit_number_in_last_5_mins(): record each time a web page is visited and return the number of visits in the last 5 minutes. I had seen this question in interview reports before.
http://www.1point3acres.com/bbs/thread-180309-1-1.html
// Fragment: only scattered lines of this allocator solution survived, e.g.
//   allocator.release(2);
//   index = index*2+1;   index = index*2+2;   updateTree(index);     (segment-tree navigation)
//   Chunk chunk = chunks.get(i);   //keep merging if there are continous chunks
//   TreeSet<Interval> set;
//   Interval floor = set.floor(t);   t.start = floor.start;   set.remove(floor);
//   Interval ceil = set.higher(t);   t.end = ceil.end;        set.remove(ceil);
//   set.add(t);
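Based on those surviving lines, the free list was apparently a TreeSet of intervals merged via floor/higher on release. A minimal sketch along those lines; the class name, allocate(), and the id range are my own assumptions.
import java.util.Comparator;
import java.util.TreeSet;

public class IntervalAllocator {
    static class Interval {
        int start, end; // inclusive range of free ids
        Interval(int start, int end) { this.start = start; this.end = end; }
    }

    // Free intervals, ordered by start, kept non-overlapping and non-adjacent.
    private final TreeSet<Interval> set = new TreeSet<>(Comparator.comparingInt((Interval iv) -> iv.start));

    public IntervalAllocator(int maxId) {
        set.add(new Interval(0, maxId));
    }

    // Hands out the smallest free id, or -1 if none are left.
    public int allocate() {
        if (set.isEmpty()) return -1;
        Interval first = set.pollFirst();
        int id = first.start;
        if (first.start < first.end) set.add(new Interval(first.start + 1, first.end));
        return id;
    }

    // Returns an id to the pool, merging with neighbouring free intervals when they touch.
    public void release(int id) {
        Interval t = new Interval(id, id);
        Interval floor = set.floor(t);
        if (floor != null && floor.end + 1 == t.start) { // merge with the interval just below
            t.start = floor.start;
            set.remove(floor);
        }
        Interval ceil = set.higher(t);
        if (ceil != null && t.end + 1 == ceil.start) {   // merge with the interval just above
            t.end = ceil.end;
            set.remove(ceil);
        }
        set.add(t);
    }
}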
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.*;
public class FindDuplicateFiles {
public List<List<String>> findDuplicates(String path) throws Exception{
List<List<String>> result = new ArrayList<>();
if(path==null || path.length()==0) return result;
List<String> filesPath = getAllFiles(path);
Map<String, List<String>> dupFilesMap = new HashMap<>();
for(String filePath: filesPath) {
File file = new File(filePath);
String hashCode = hashFile(file, "MD5");
if(!dupFilesMap.containsKey(hashCode)) {
dupFilesMap.put(hashCode, new ArrayList<>());
}
dupFilesMap.get(hashCode).add(filePath);
}
for(List<String> dupFiles: dupFilesMap.values()) {
if(dupFiles.size()>1)
result.add(dupFiles);
}
return result;
}
private List<String> getAllFiles(String path) {
List<String> filesPath = new ArrayList<>();
Stack<String> s = new Stack<>();
s.push(path); //DFS with Stack
while(!s.isEmpty()) {
String currPath = s.pop();
File file = new File(currPath);
if(file.isFile()) {
filesPath.add(currPath);
} else if(file.isDirectory()) {
String[] subDir = file.list();
for(String dir: subDir) {
s.push(currPath+"/"+dir);
}
}
}
return filesPath;
}
public List<List<String>> findDuplicatesOpt(String path) throws Exception{
List<List<String>> result = new ArrayList<>();
if(path==null || path.length()==0) return result;
Map<String, List<String>> dupFilesMap = new HashMap<>();
Map<Long, List<String>> fileSizeMap = getAllFilesBySize(path); //group by file size first
for(List<String> filePaths: fileSizeMap.values()) {
if(filePaths.size()<2) continue; //unique size, cannot have duplicates
for(String filePath: filePaths) {
File file = new File(filePath);
String hashCode = hashFile(file, "MD5");
if (!dupFilesMap.containsKey(hashCode)) {
dupFilesMap.put(hashCode, new ArrayList<>());
}
dupFilesMap.get(hashCode).add(filePath);
}
}
for(List<String> dupFiles: dupFilesMap.values()) {
if(dupFiles.size()>1)
result.add(dupFiles);
}
return result;
}
private Map<Long, List<String>> getAllFilesBySize(String path) {
Map<Long, List<String>> fileSizeMap = new HashMap<>();
Stack<String> s = new Stack<>();
s.push(path);
while(!s.isEmpty()) { //DFS by stack
String currPath = s.pop();
File file = new File(currPath);
if(file.isFile()) {
long size = file.length();
if(!fileSizeMap.containsKey(size))
fileSizeMap.put(size, new ArrayList<>());
fileSizeMap.get(size).add(currPath);
} else if(file.isDirectory()) {
String[] subDir = file.list();
for(String dir:subDir) {
s.push(currPath+"/"+dir);
}
}
}
return fileSizeMap;
}
private static String hashFile(File file, String algorithm)
throws Exception {
try (FileInputStream inputStream = new FileInputStream(file)) {
MessageDigest digest = MessageDigest.getInstance(algorithm);
byte[] bytesBuffer = new byte[1024];
int bytesRead = -1;
while ((bytesRead = inputStream.read(bytesBuffer)) != -1) {
digest.update(bytesBuffer, 0, bytesRead);
}
byte[] hashedBytes = digest.digest();
return convertByteArrayToHexString(hashedBytes);
} catch (NoSuchAlgorithmException | IOException ex) {
throw new Exception(
"Could not generate hash from file", ex);
}
}
private static String convertByteArrayToHexString(byte[] arrayBytes) {
StringBuffer stringBuffer = new StringBuffer();
for (int i = 0; i < arrayBytes.length; i++) {
stringBuffer.append(Integer.toString((arrayBytes[i] & 0xff) + 0x100, 16)
.substring(1));
}
return stringBuffer.toString();
}
public static void main(String[] args) throws Exception{
List<List<String>> dupFiles = new FindDuplicateFiles().findDuplicatesOpt("./Resources/Dropbox");
for(List<String> dup: dupFiles) {
System.out.println(dup.toString());
}
//new FindDuplicateFiles().read1KBEachTime("./Resources/Dropbox/board.txt");
}
public void read1KBEachTime(String path) throws IOException {
File file = new File(path);
FileInputStream fis = new FileInputStream(file);
byte[] buffer = new byte[1024];
int count;
while((count=fis.read(buffer))!=-1) {
// hash(buffer); //process each 1KB chunk here
System.out.println(new String(buffer, 0, count)); //print only the bytes actually read
}
fis.close();
}
}
- //Helper functions (need to be defined by you)
- public List<String> getFiles(String directoryPath); //Returns all files directly under this directory
- public List<String> getDirectories(String directoryPath); //Returns all directories directly under this directory
- public String getFileContent(String filePath); //Returns content of a file
- //Write this function (Interviewer just cared about this)
- public List<String> findDuplicates(String rootDirectory) {
- ...
- ...
["root/a 1.txt(abcd) 2.txt(efgh)", "root/c 3.txt(abcd)", "root/c/d 4.txt(efgh)", "root 4.txt(efgh)"]
[["root/a/2.txt","root/c/d/4.txt","root/4.txt"],["root/a/1.txt","root/c/3.txt"]]
class Solution {
public List<List<String>> findDuplicate(String[] paths) {
Map<String, List<String>> dupFilesMap = new HashMap<>();
for(String dir: paths) {
String[] fileAndContent = dir.split(" ");
String dirPath = fileAndContent[0];
for(int i=1; i<fileAndContent.length; i++) {
String fileName = fileAndContent[i].substring(0, fileAndContent[i].indexOf("("));
String fileContent = fileAndContent[i].substring(fileAndContent[i].indexOf("(")+1, fileAndContent[i].indexOf(")"));
if(!dupFilesMap.containsKey(fileContent))
dupFilesMap.put(fileContent, new ArrayList<>());
dupFilesMap.get(fileContent).add(dirPath+"/"+fileName);
}
}
List<List<String>> result = new ArrayList<>();
for(List<String> dupFiles: dupFilesMap.values()) {
if(dupFiles.size()>1)
result.add(dupFiles);
}
return result;
}
}
// Fragment: only scattered lines of this chunk/rolling-hash comparison code survived, e.g.
//   j++;
//   hashFun.recompute(text[i], text[i+m]);
//   window.offer(initialBytes[i]);
//   fis.close();
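The recompute/window lines above point at a rolling hash slid over a byte window (rsync-style chunk matching). A small self-contained sketch of such a hash; the base/modulus constants and class name are my own.
import java.util.ArrayDeque;
import java.util.Deque;

public class RollingHash {
    private static final long BASE = 257, MOD = 1_000_000_007L;
    private final int windowSize;
    private final Deque<Byte> window = new ArrayDeque<>();
    private long hash = 0;
    private long pow = 1; // BASE^(windowSize-1) % MOD, the weight of the oldest byte

    public RollingHash(int windowSize) {
        this.windowSize = windowSize;
        for (int i = 1; i < windowSize; i++) pow = pow * BASE % MOD;
    }

    // Feed one byte; once the window is full, the oldest byte is removed from the hash
    // in O(1) instead of rehashing the whole window.
    public long roll(byte in) {
        if (window.size() == windowSize) {
            long oldest = window.poll() & 0xff;
            hash = (hash - oldest * pow % MOD + MOD) % MOD;
        }
        window.offer(in);
        hash = (hash * BASE + (in & 0xff)) % MOD;
        return hash;
    }
}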
// Fragment: only scattered lines of this "top k most viewed ids" code survived (one version
// bucket-sorts ids by frequency, another keeps a size-k heap), e.g.
//   freqMap.put(id, freq);
//   freqBuckets[freqEntry.getValue()].add(freqEntry.getKey());
//   result.addAll(ids);   k -= freqBuckets[i].size();   result.add(ids.get(j));
//   topKView.add(view);   topKView.poll();   topKView.offer(view);
// Test calls:
//   solver.view(1); solver.view(2); solver.view(1); solver.view(3); solver.view(2);
//   solver.view(12); solver.view(31); solver.view(101); solver.view(11); solver.view(3);
//   solver.view(31); solver.view(101); solver.view(31); solver.view(101);
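A compact version of the bucket idea hinted at above, reusing the view(...) calls from the test fragment; the class name TopViewed and the method topK are my own.
import java.util.*;

public class TopViewed {
    private final Map<Integer, Integer> freqMap = new HashMap<>(); // id -> view count

    public void view(int id) {
        freqMap.merge(id, 1, Integer::sum);
    }

    // Returns up to k ids with the highest view counts using frequency buckets:
    // bucket f holds every id viewed exactly f times, so one pass from the highest
    // bucket down collects the answer in O(n) after counting.
    public List<Integer> topK(int k) {
        int maxFreq = 0;
        for (int f : freqMap.values()) maxFreq = Math.max(maxFreq, f);
        List<List<Integer>> buckets = new ArrayList<>();
        for (int i = 0; i <= maxFreq; i++) buckets.add(new ArrayList<>());
        for (Map.Entry<Integer, Integer> e : freqMap.entrySet()) {
            buckets.get(e.getValue()).add(e.getKey());
        }
        List<Integer> result = new ArrayList<>();
        for (int f = maxFreq; f >= 1 && result.size() < k; f--) {
            for (int id : buckets.get(f)) {
                if (result.size() == k) break;
                result.add(id);
            }
        }
        return result;
    }

    public static void main(String[] args) {
        TopViewed solver = new TopViewed();
        int[] views = {1, 2, 1, 3, 2, 12, 31, 101, 11, 3, 31, 101, 31, 101};
        for (int v : views) solver.view(v);
        System.out.println(solver.topK(3)); // e.g. [101, 31, ...] depending on tie order
    }
}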
// Fragment: only scattered lines of this phone-keypad letter-combination backtracking survived, e.g.
//   String comb = sb.toString();   combinations.add(comb);
//   sb.append(keyLetters.charAt(k));
//   findCombinations(combinations, sb, digits, i+1);
//   sb.setLength(sb.length()-1);
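A complete version of that backtracking, keeping the surviving names findCombinations, keyLetters, and sb; the KEYS table is the standard phone keypad mapping.
import java.util.ArrayList;
import java.util.List;

public class LetterCombinations {
    private static final String[] KEYS = {
        "", "", "abc", "def", "ghi", "jkl", "mno", "pqrs", "tuv", "wxyz"
    };

    public List<String> letterCombinations(String digits) {
        List<String> combinations = new ArrayList<>();
        if (digits == null || digits.isEmpty()) return combinations;
        findCombinations(combinations, new StringBuilder(), digits, 0);
        return combinations;
    }

    // Appends one letter for digits[i], recurses on i+1, then undoes the append (backtracking).
    private void findCombinations(List<String> combinations, StringBuilder sb, String digits, int i) {
        if (i == digits.length()) {
            combinations.add(sb.toString());
            return;
        }
        String keyLetters = KEYS[digits.charAt(i) - '0'];
        for (int k = 0; k < keyLetters.length(); k++) {
            sb.append(keyLetters.charAt(k));
            findCombinations(combinations, sb, digits, i + 1);
            sb.setLength(sb.length() - 1);
        }
    }
}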
class Trie {
private class Node {
Map<Character, Node> children;
boolean isWord;
public Node() {
children = new HashMap<Character, Node>();
isWord = false;
}
}
private Node root;
/** Initialize your data structure here. */
public Trie() {
root = new Node();
}
/** Inserts a word into the trie. */
public void insert(String word) {
Node curr = root;
for(int i=0; i<word.length(); i++) {
Node temp = curr.children.get(word.charAt(i));
if(temp==null) {
temp = new Node();
curr.children.put(word.charAt(i), temp);
}
curr = temp;
}
curr.isWord = true;
}
/** Returns if the word is in the trie. */
public boolean search(String word) {
Node curr = root;
for(int i=0; i<word.length(); i++) {
curr = curr.children.get(word.charAt(i));
if(curr==null) return false;
}
return curr.isWord;
}
public Node search(Node trieNode, Character c) {
return trieNode.children.get(c);
}
/** Returns if there is any word in the trie that starts with the given prefix. */
public boolean startsWith(String prefix) {
Node curr = root;
for(int i=0; i<prefix.length(); i++) {
curr = curr.children.get(prefix.charAt(i));
if(curr==null) return false;
}
return true;
}
}
top //check memory and cpu usage per process, but also reports total memory usage
free -m
// Fragment: only scattered lines of the max-min sharpness DP survived, e.g.
//   sharpness[i][0] = matrix[i][0];
//   prev = sharpness[i];
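A minimal sketch of the max-min sharpness DP from the first problem above, assuming the whole matrix fits in memory (for the 1M x 1M follow-up, keep only the previous column, as discussed in the notes at the top).
public class SharpnessPath {
    // Returns the largest possible value of the minimum sharpness along any left-to-right
    // path that moves one column at a time to the upper-right, right, or lower-right cell.
    public double maxMinSharpness(double[][] matrix) {
        int rows = matrix.length, cols = matrix[0].length;
        double[] prev = new double[rows];
        for (int i = 0; i < rows; i++) prev[i] = matrix[i][0]; // best min for paths ending at column 0
        for (int j = 1; j < cols; j++) {
            double[] curr = new double[rows];
            for (int i = 0; i < rows; i++) {
                double best = prev[i];
                if (i > 0) best = Math.max(best, prev[i - 1]);
                if (i < rows - 1) best = Math.max(best, prev[i + 1]);
                curr[i] = Math.min(best, matrix[i][j]); // extend the best incoming path through (i, j)
            }
            prev = curr;
        }
        double answer = prev[0];
        for (double v : prev) answer = Math.max(answer, v);
        return answer;
    }

    public static void main(String[] args) {
        double[][] matrix = {{.5, .7, .2}, {.7, .5, .8}, {.9, .1, .5}};
        System.out.println(new SharpnessPath().maxMinSharpness(matrix)); // 0.7
    }
}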
- All numbers (including target) will be positive integers.
- The solution set must not contain duplicate combinations.
For example, given candidate set [2, 3, 6, 7] and target 7,
A solution set is:
[
[7],
[2, 2, 3]
]
class Solution {
public List<List<Integer>> combinationSum(int[] candidates, int target) {
List<List<Integer>> result = new ArrayList<>();
Arrays.sort(candidates);
combinationSum(result, new ArrayList(), candidates, target, 0);
return result;
}
public void combinationSum(List<List<Integer>> result, List<Integer> list, int[] candidates, int target, int index) {
if(target<0) return;
if(target==0) {
result.add(new ArrayList<Integer>(list)); return;
}
for(int i=index; i<candidates.length && candidates[i]<=target; i++) {
list.add(candidates[i]);
combinationSum(result, list, candidates, target-candidates[i], i); //pass i (not i+1): each candidate may be reused
list.remove(list.size()-1);
}
}
}
class Solution {
public List<List<Integer>> combinationSum(int[] candidates, int target) {
Arrays.sort(candidates);
List<List<List<Integer>>> dp = new ArrayList<>();//be careful, 0 based index: dp.get(t-1) holds all combinations summing to t
for(int t=1; t<=target; t++) {
ArrayList<List<Integer>> newList = new ArrayList<>();
for(int i=0; i<candidates.length && candidates[i]<=t; i++) {
if(t==candidates[i]) {
newList.add(Arrays.asList(candidates[i]));
} else {
for(List<Integer> list: dp.get(t-candidates[i]-1)) {
if(candidates[i] <= list.get(0)) { //keep combinations non-decreasing to avoid duplicates
List<Integer> cumList = new ArrayList<>();
cumList.add(candidates[i]); cumList.addAll(list);
newList.add(cumList);
}
}
}
}
dp.add(newList);
}
return dp.get(target-1);
}
}
class Solution {
public boolean wordPattern(String pattern, String str) {
String[] patternStrMap = new String[258];
Set<String> mapped = new HashSet<>();
String[] words = str.split(" ");
if(pattern.length()!=words.length)
return false;
for(int i=0; i<pattern.length(); i++) {
char c = pattern.charAt(i);
if(patternStrMap[c]==null) {
if(mapped.contains(words[i]))
return false;
patternStrMap[c] = words[i];
mapped.add(words[i]);
} else {
if(!patternStrMap[c].equals(words[i])) //existing mapping must match the current word
return false;
}
}
return true;
}
}
// Fragment: only scattered lines of the Word Pattern II backtracking survived, e.g.
//   String mappedStr = charStrMap.get(c);
//   charStrMap.put(c, word);   usedStr.remove(word);   charStrMap.remove(c);
For example, given s = "leetcode" and dict = ["leet", "code"]:
Return true because "leetcode" can be segmented as "leet code".
class Solution {
public boolean wordBreak(String s, List<String> wordDict) {
boolean[] dp = new boolean[s.length()+1];
dp[0]=true; //empty prefix is breakable
for(int i=1; i<=s.length(); i++) {
for(int j=0; j<i; j++) {
if(dp[j] && wordDict.contains(s.substring(j, i))) {
dp[i] = true; break;
}
}
}
return dp[s.length()];
}
}
class Solution {
public List<String> wordBreak(String s, List<String> wordDict) {
Map<String, List<String>> lookup = new HashMap<>();
return wordBreak(s, wordDict, lookup);
}
public List<String> wordBreak(String s, List<String> wordDict,
Map<String, List<String>> lookup) {
if(lookup.get(s)!=null) return lookup.get(s); //memoization
List<String> result = new ArrayList<>();
if(s.length()==0) {
result.add("");
return result;
}
for(String word: wordDict) {
if(s.startsWith(word)) {
List<String> partialResult = wordBreak(s.substring(word.length()), wordDict, lookup);
for(String str: partialResult) {
result.add(word + (str.equals("")? "":" "+ str));
}
}
}
lookup.put(s, result);
return result;
}
}
11010
11000
00000
class Solution {
public static final char ISLAND_MARK = '1';
public static final char WATER_MARK = '0';
public int numIslands(char[][] grid) {
int counter = 0;
if(grid==null || grid.length<1) return counter;
for(int i=0; i<grid.length; i++) {
for(int j=0; j<grid[i].length; j++) {
if(grid[i][j]==ISLAND_MARK) {
counter++;
mark(grid, i,j);
}
}
}
return counter;
}
public void mark(char[][] grid, int i, int j) {
if(i>=0 && i<grid.length && j>=0 && j<grid[i].length && grid[i][j] == ISLAND_MARK) {
grid[i][j] = WATER_MARK;
mark(grid, i-1, j);
mark(grid, i+1, j);
mark(grid, i, j-1);
mark(grid, i, j+1);
}
}
}
Example (m = 3, n = 3), initially all water:
0 0 0
0 0 0
0 0 0
After the first position (0, 0) is turned into land:
1 0 0
0 0 0
0 0 0    Number of islands = 1
class Solution {
private static final int[][] directions = {{0, 1}, {1, 0}, {-1, 0}, {0, -1}};
public List<Integer> numIslands2(int m, int n, int[][] positions) {
List<Integer> result = new ArrayList<>();
if(m==0 || n==0 || positions==null || positions.length<1) return result;
int[] root = new int[m*n]; //id = n*i+j
int[] rank = new int[m*n];
Arrays.fill(root, -1);
int numIslands = 0;
for(int[] position: positions) {
int id = position[0]*n + position[1];
root[id] = id; rank[id] = 1;
numIslands++;
for(int k=0; k<directions.length; k++) {
int i = position[0] + directions[k][0];
int j = position[1] + directions[k][1];
if(i>=0 && i<m && j>=0 && j<n && root[i*n+j]!=-1) {
numIslands = union(root, rank, id, i*n+j, numIslands);
}
}
result.add(numIslands);
}
return result;
}
public int union(int[] root, int[] rank, int x, int y, int numIslands) {
int parent1 = find(root, x);
int parent2 = find(root, y);
if(parent1!=parent2) { //union
numIslands--;
if(rank[parent1]< rank[parent2]) {
root[parent1] = parent2;
rank[parent2] += rank[parent1];
} else {
root[parent2] = parent1;
rank[parent1] += rank[parent2];
}
}
return numIslands;
}
public int find(int[] root, int id) {
while(root[id]!=id) {
root[id] = root[root[id]];
id = root[id];
}
return root[id];
}
}
ln -s path-to-actual-folder name-of-link
ls -ld name-of-link
class FolderAccess { //wrapper reconstructed from the method below; the original setup code (it contained "String currFolder = folderName;") did not survive
Set<String> access; //folders this user was directly granted access to
Map<String, String> foldersParent; //folder -> its parent folder (null for the root)
public Set<String> simplifyAccess() {
Set<String> simplifiedAccess = new HashSet<>();
for(String folder: access) {
String currFolder = foldersParent.get(folder);
boolean shouldDelete = false;
while(currFolder!=null && !shouldDelete) {
if(access.contains(currFolder)) {
shouldDelete = true;
} else {
currFolder = foldersParent.get(currFolder);
}
}
if(!shouldDelete)
simplifiedAccess.add(folder);
}
return simplifiedAccess;
}
}
class Solution {
public String countAndSay(int n) {
String curr = "1";
StringBuilder sb = new StringBuilder();
for(int i=2; i<=n; i++) {
sb.setLength(0);
int count = 1;
for(int j=0; j<curr.length(); j++) {
if(j==curr.length()-1 || curr.charAt(j)!=curr.charAt(j+1)) {
sb.append(count).append(curr.charAt(j));
count=1;
} else
count++;
}
curr = sb.toString();
}
return curr;
}
}
// Fragment: only scattered lines of the single-threaded crawler (BFS and DFS versions) survived, e.g.
//   q.add(url); result.add(url);
//   String currUrl = q.poll();
//   List<String> childrenUrls = getUrls(currUrl);
//   q.offer(childUrl);   result.add(childUrl);
//   crawlDFSHelper(result, url);
//   crawlDFSHelper(result, childUrl);
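A self-contained BFS version assembled from those lines; crawlBFS is my name, and getUrls here is just a placeholder for the Jsoup-based version further down.
import java.util.*;

public class Crawler {
    // Breadth-first crawl starting from 'url'; 'result' doubles as the visited set,
    // so each page is fetched at most once.
    public List<String> crawlBFS(String url) {
        Set<String> result = new LinkedHashSet<>();
        Queue<String> q = new LinkedList<>();
        q.add(url);
        result.add(url);
        while (!q.isEmpty()) {
            String currUrl = q.poll();
            for (String childUrl : getUrls(currUrl)) {
                if (result.add(childUrl)) { // add() returns false if the URL was already seen
                    q.offer(childUrl);
                }
            }
        }
        return new ArrayList<>(result);
    }

    // Placeholder; see the Jsoup-based getUrls later in these notes.
    public List<String> getUrls(String urlStr) {
        return Collections.emptyList();
    }
}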
// Fragment: only scattered lines of the multi-threaded crawler (one Future per fetched page) survived, e.g.
//   submitNewUrl(startUrl);
//   Future<List<String>> future = iterator.next();
//   newUrls.addAll(future.get());
//   submitNewUrl(url);
public List<String> getUrls(String urlStr) throws IOException {
List<String> urlList = new ArrayList<>();
URL url = new URL(urlStr);
Document doc = Jsoup.parse(url, TIMEOUT); //fetch and parse the page with Jsoup
Elements links = doc.select("a[href]");
for(Element link: links) {
String href = link.attr("href");
if(StringUtils.isBlank(href) || href.startsWith("#")) {
continue;
}
try {
URL nextUrl = new URL(url, href);
urlList.add(nextUrl.toString());
} catch (MalformedURLException e) {
//skip malformed links
}
}
return urlList;
}
public class ReadWriteLock{
private int readers = 0;
private int writers = 0;
private int writeRequests = 0;
public synchronized void lockRead() throws InterruptedException{
while(writers > 0 || writeRequests > 0){
wait();
}
readers++;
}
public synchronized void unlockRead(){
readers--;
notifyAll(); //wake up waiting writers
}
public synchronized void lockWrite() throws InterruptedException{
writeRequests++; //block new readers while a writer is waiting
while(readers > 0 || writers > 0){
wait();
}
writeRequests--;
writers++;
}
public synchronized void unlockWrite() throws InterruptedException{
writers--;
notifyAll(); //wake up waiting readers and writers
}
}
public class ReadWriteLock{
private Map<Thread, Integer> readingThreads =
new HashMap<Thread, Integer>();
private int writeAccesses = 0;
private int writeRequests = 0;
private Thread writingThread = null;
public synchronized void lockRead() throws InterruptedException{
Thread callingThread = Thread.currentThread();
while(! canGrantReadAccess(callingThread)){
wait();
}
readingThreads.put(callingThread,
(getReadAccessCount(callingThread) + 1));
}
private boolean canGrantReadAccess(Thread callingThread){
if( isWriter(callingThread) ) return true;
if( hasWriter() ) return false;
if( isReader(callingThread) ) return true;
if( hasWriteRequests() ) return false;
return true;
}
public synchronized void unlockRead(){
Thread callingThread = Thread.currentThread();
if(!isReader(callingThread)){
throw new IllegalMonitorStateException("Calling Thread does not" +
" hold a read lock on this ReadWriteLock");
}
int accessCount = getReadAccessCount(callingThread);
if(accessCount == 1){ readingThreads.remove(callingThread); }
else { readingThreads.put(callingThread, (accessCount -1)); }
notifyAll();
}
public synchronized void lockWrite() throws InterruptedException{
writeRequests++;
Thread callingThread = Thread.currentThread();
while(! canGrantWriteAccess(callingThread)){
wait();
}
writeRequests--;
writeAccesses++;
writingThread = callingThread;
}
public synchronized void unlockWrite() throws InterruptedException{
if(!isWriter(Thread.currentThread())){
throw new IllegalMonitorStateException("Calling Thread does not" +
" hold the write lock on this ReadWriteLock");
}
writeAccesses--;
if(writeAccesses == 0){
writingThread = null;
}
notifyAll();
}
private boolean canGrantWriteAccess(Thread callingThread){
if(isOnlyReader(callingThread)) return true;
if(hasReaders()) return false;
if(writingThread == null) return true;
if(!isWriter(callingThread)) return false;
return true;
}
private int getReadAccessCount(Thread callingThread){
Integer accessCount = readingThreads.get(callingThread);
if(accessCount == null) return 0;
return accessCount.intValue();
}
private boolean hasReaders(){
return readingThreads.size() > 0;
}
private boolean isReader(Thread callingThread){
return readingThreads.get(callingThread) != null;
}
private boolean isOnlyReader(Thread callingThread){
return readingThreads.size() == 1 &&
readingThreads.get(callingThread) != null;
}
private boolean hasWriter(){
return writingThread != null;
}
private boolean isWriter(Thread callingThread){
return writingThread == callingThread;
}
private boolean hasWriteRequests(){
return this.writeRequests > 0;
}
}
// Fragment: only scattered lines of the token-bucket rate limiter demo survived, e.g.
//   tokenAcquired++;
//   Runnable filler = () -> { ... bucket.fill(); ... e.printStackTrace(); ... };
//   Runnable consumer = () -> { ... e.printStackTrace(); ... };
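A minimal token-bucket sketch along the lines the fragment hints at; the capacity, refill rate, and the method names fill/acquire are my own assumptions.
import java.util.concurrent.TimeUnit;

public class TokenBucket {
    private final int capacity;
    private int tokens;

    public TokenBucket(int capacity) {
        this.capacity = capacity;
        this.tokens = capacity;
    }

    // Called periodically by a filler thread; tops the bucket up by one token.
    public synchronized void fill() {
        if (tokens < capacity) {
            tokens++;
            notifyAll(); // wake consumers blocked in acquire()
        }
    }

    // Blocks until a token is available, then takes it.
    public synchronized void acquire() throws InterruptedException {
        while (tokens == 0) {
            wait();
        }
        tokens--;
    }

    public static void main(String[] args) {
        TokenBucket bucket = new TokenBucket(5);
        Runnable filler = () -> {
            try {
                while (true) {
                    TimeUnit.MILLISECONDS.sleep(200); // refill rate: 5 tokens per second
                    bucket.fill();
                }
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        };
        Runnable consumer = () -> {
            try {
                while (true) {
                    bucket.acquire();
                    System.out.println(Thread.currentThread().getName() + " acquired a token");
                }
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        };
        new Thread(filler).start();
        new Thread(consumer).start();
        new Thread(consumer).start();
    }
}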
//circular array
//|--------------------|--------------------|
public class HitCounter {
private final int WINDOW_LENGTH; // in sec
private Hit[] hitRecords;
public HitCounter(int windowLength) {
this.WINDOW_LENGTH = windowLength;
hitRecords = new Hit[windowLength];
}
public void hit() {
int currTime = getCurrTimeSec();
int index = currTime % WINDOW_LENGTH;
if(hitRecords[index]!=null && hitRecords[index].timeStamp==currTime) {
hitRecords[index].count++;
} else {
hitRecords[index] = new Hit(currTime, 1);
}
}
public int getHits() {
int currTime = getCurrTimeSec();
int count=0;
for(Hit hit: hitRecords) {
if (hit!=null && hit.timeStamp>currTime-WINDOW_LENGTH)
count += hit.count;
}
return count;
}
public int getCurrTimeSec() {
return (int)(System.currentTimeMillis()/1000); //divide before narrowing to int
}
private class Hit {
int timeStamp;
int count;
public Hit(int timeStamp, int count) {
this.timeStamp = timeStamp;
this.count = count;
}
}
}
import java.io.*;
import java.util.*;
/**
* NASA selects Dropbox as its official partner, and we’re tasked with managing
* a panorama for the universe. The Hubble telescope (or some other voyager we
* have out there) will occasionally snap a photo of a sector of the universe,
* and transmit it to us. You are to help write a data structure to manage this.
* For the purpose of this problem, assume that the observable universe has been
* divided into 2D sectors. Sectors are indexed by x- and y-coordinates.
*/
public class File {
public File(String path) {}
public Boolean exists() {}
public byte[] read() {}
public void write(byte[] bytes) {}
}
public class Image {
public Image(byte[] bytes) {}
byte[] getBytes() {} // no more than 1MB in size
}
public class Sector {
private final int x, y;
public Sector(int x, int y) { this.x = x; this.y = y; }
int getX() {}
int getY() {}
@Override
public boolean equals(Object o) {
if(o==this) return true;
if(!(o instanceof Sector)){
return false;
}
Sector that = (Sector) o;
return this.x == that.getX() && this.y == that.getY();
}
@Override
public int hashCode() {
int prime = 31;
int result = 1;
result = prime*result + this.x;
result = prime*result + this.y;
return result;
}
}
/**
* row-major indexing to be consistent.
*/
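One plausible way to answer the panorama question using the File/Image/Sector stubs above: keep one file per sector and read/write it on demand; an LRU cache like the one below could sit in front to keep hot sectors in memory. SectorStore and the path scheme are my own assumptions.
public class SectorStore {
    private final String rootDir;

    public SectorStore(String rootDir) {
        this.rootDir = rootDir;
    }

    // One file per sector; row-major-style naming keeps the layout consistent.
    private String pathFor(Sector sector) {
        return rootDir + "/" + sector.getY() + "_" + sector.getX() + ".img";
    }

    // Stores the latest snapshot for a sector, overwriting any previous one.
    public void setSector(Sector sector, Image image) {
        File file = new File(pathFor(sector));
        file.write(image.getBytes());
    }

    // Returns the stored image for a sector, or null if we never received one.
    public Image getSector(Sector sector) {
        File file = new File(pathFor(sector));
        if (!file.exists()) return null;
        return new Image(file.read());
    }
}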
class LRUCache {
private int capacity;
private LinkedHashMap<Integer, Integer> leastRecentUsedList;
public LRUCache(int capacity) {
this.capacity = capacity;
leastRecentUsedList = new LinkedHashMap<>();
}
public int get(int key) {
if(leastRecentUsedList.containsKey(key)) {
int value = leastRecentUsedList.get(key);
leastRecentUsedList.remove(key);
leastRecentUsedList.put(key, value);
return value;
}
return -1;
}
public void put(int key, int value) {
if(leastRecentUsedList.containsKey(key)) {
leastRecentUsedList.remove(key);
} else if(leastRecentUsedList.size()==capacity) {
leastRecentUsedList.remove(leastRecentUsedList.keySet().iterator().next());
}
leastRecentUsedList.put(key, value);
}
}
/*class LRUCache {
class DLinkedList {
int key, val;
DLinkedList prev, next;
public DLinkedList(int _key, int _val) {
key = _key; val = _val;
}
}
private int capacity, count = 0;
private DLinkedList head, tail;
private Map<Integer, DLinkedList> locMap = new HashMap<Integer, DLinkedList>();
public LRUCache(int capacity) {
this.capacity = capacity;
head = new DLinkedList(-1, -1);//dummy for easier pointer manipulation
tail = new DLinkedList(-1, -1);
head.next = tail;
tail.prev = head;
}
public int get(int key) {
DLinkedList e = getNode(key);
return e == null? -1: e.val;
}
public DLinkedList getNode(int key) {
if(locMap.get(key)!=null) {
DLinkedList e = locMap.get(key);
e.next.prev = e.prev;
e.prev.next = e.next;
moveToTail(e);
return e;
} else {
return null;
}
}
public void put(int key, int value) {
DLinkedList e = getNode(key);
if(e!=null) {
e.val = value;
} else {
if(count==capacity) { // remove LRU
removeHead();
}
addTail(key, value);
count++;
}
}
public void removeHead() {
locMap.put(head.next.key, null);
head.next = head.next.next;
head.next.prev = head;
count--;
}
public void addTail(int key, int value) {
DLinkedList newEle = new DLinkedList(key, value);
moveToTail(newEle);
locMap.put(key, newEle);
}
public void moveToTail(DLinkedList e) {
e.prev = tail.prev;
tail.prev.next = e;
tail.prev = e;
e.next = tail;
}
}*/
/**
* Your LRUCache object will be instantiated and called as such:
* LRUCache obj = new LRUCache(capacity);
* int param_1 = obj.get(key);
* obj.put(key,value);
*/